// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
* Apple iPhone X (GSM), D221, iPhone10,6 (A1901)
* Copyright (c) 2022, Konrad Dybcio <[email protected]>
*/
/dts-v1/;
#include "t8015-x.dtsi"
/ {
compatible = "apple,d221", "apple,t8015", "apple,arm-platform";
model = "Apple iPhone X (GSM)";
};
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018 MediaTek Inc.
*
*/
#ifndef __MTK_CMDQ_H__
#define __MTK_CMDQ_H__
#include <linux/mailbox_client.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/timer.h>
#define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0)))
#define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1))
/*
* Every cmdq thread has its own SPRs (Specific Purpose Registers),
* so there are 4 * N (threads) SPRs in GCE that share the same indexes below.
*/
#define CMDQ_THR_SPR_IDX0 (0)
#define CMDQ_THR_SPR_IDX1 (1)
#define CMDQ_THR_SPR_IDX2 (2)
#define CMDQ_THR_SPR_IDX3 (3)
struct cmdq_pkt;
enum cmdq_logic_op {
CMDQ_LOGIC_ASSIGN = 0,
CMDQ_LOGIC_ADD = 1,
CMDQ_LOGIC_SUBTRACT = 2,
CMDQ_LOGIC_MULTIPLY = 3,
CMDQ_LOGIC_XOR = 8,
CMDQ_LOGIC_NOT = 9,
CMDQ_LOGIC_OR = 10,
CMDQ_LOGIC_AND = 11,
CMDQ_LOGIC_LEFT_SHIFT = 12,
CMDQ_LOGIC_RIGHT_SHIFT = 13,
CMDQ_LOGIC_MAX,
};
struct cmdq_operand {
/* register type */
bool reg;
union {
/* index */
u16 idx;
/* value */
u16 value;
};
};
struct cmdq_client_reg {
u8 subsys;
u16 offset;
u16 size;
};
struct cmdq_client {
struct mbox_client client;
struct mbox_chan *chan;
};
#if IS_ENABLED(CONFIG_MTK_CMDQ)
/**
* cmdq_dev_get_client_reg() - parse cmdq client reg from the device
* node of CMDQ client
* @dev: device of CMDQ mailbox client
* @client_reg: CMDQ client reg pointer
* @idx: the index of desired reg
*
* Return: 0 for success; else the error code is returned
*
* Helps the CMDQ client parse the cmdq client reg
* from the device node of the CMDQ client.
*/
int cmdq_dev_get_client_reg(struct device *dev,
struct cmdq_client_reg *client_reg, int idx);
/**
* cmdq_mbox_create() - create CMDQ mailbox client and channel
* @dev: device of CMDQ mailbox client
* @index: index of CMDQ mailbox channel
*
* Return: CMDQ mailbox client pointer
*/
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index);
/**
* cmdq_mbox_destroy() - destroy CMDQ mailbox client and channel
* @client: the CMDQ mailbox client
*/
void cmdq_mbox_destroy(struct cmdq_client *client);
/**
* cmdq_pkt_create() - create a CMDQ packet
* @client: the CMDQ mailbox client
* @pkt: the CMDQ packet
* @size: required CMDQ buffer size
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size);
/**
* cmdq_pkt_destroy() - destroy the CMDQ packet
* @client: the CMDQ mailbox client
* @pkt: the CMDQ packet
*/
void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt);
/**
* cmdq_pkt_write() - append write command to the CMDQ packet
* @pkt: the CMDQ packet
* @subsys: the CMDQ sub system code
* @offset: register offset from CMDQ sub system
* @value: the specified target register value
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value);
/**
* cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet
* @pkt: the CMDQ packet
* @subsys: the CMDQ sub system code
* @offset: register offset from CMDQ sub system
* @value: the specified target register value
* @mask: the specified target register mask
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
/**
* cmdq_pkt_read_s() - append read_s command to the CMDQ packet
* @pkt: the CMDQ packet
* @high_addr_reg_idx: internal register ID which contains high address of pa
* @addr_low: low address of pa
* @reg_idx: the CMDQ internal register ID to cache read data
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
u16 reg_idx);
/**
* cmdq_pkt_write_s() - append write_s command to the CMDQ packet
* @pkt: the CMDQ packet
* @high_addr_reg_idx: internal register ID which contains high address of pa
* @addr_low: low address of pa
* @src_reg_idx: the CMDQ internal register ID which caches the source value
*
* Return: 0 for success; else the error code is returned
*
* Supports writing a value to a physical address without a subsys code. Use
* CMDQ_ADDR_HIGH() to get the high address and call cmdq_pkt_assign() to
* assign it into an internal register. Also use CMDQ_ADDR_LOW() to get the low
* address for the addr_low parameter when calling this function.
*/
int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 src_reg_idx);
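/*
 * Illustrative sketch of the sequence described above (not part of the
 * original header; `pkt' and the 64-bit physical address `pa' are assumed):
 *
 *	cmdq_pkt_assign(pkt, CMDQ_THR_SPR_IDX1, value);       - value to write
 *	cmdq_pkt_assign(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_HIGH(pa));
 *	cmdq_pkt_write_s(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_LOW(pa),
 *			 CMDQ_THR_SPR_IDX1);
 */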
/**
* cmdq_pkt_write_s_mask() - append write_s with mask command to the CMDQ packet
* @pkt: the CMDQ packet
* @high_addr_reg_idx: internal register ID which contains high address of pa
* @addr_low: low address of pa
* @src_reg_idx: the CMDQ internal register ID which caches the source value
* @mask: the specified target address mask, use U32_MAX if no need
*
* Return: 0 for success; else the error code is returned
*
* Supports writing a value to a physical address without a subsys code. Use
* CMDQ_ADDR_HIGH() to get the high address and call cmdq_pkt_assign() to
* assign it into an internal register. Also use CMDQ_ADDR_LOW() to get the low
* address for the addr_low parameter when calling this function.
*/
int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 src_reg_idx, u32 mask);
/**
* cmdq_pkt_write_s_value() - append write_s command to the CMDQ packet which
* writes a value to a physical address
* @pkt: the CMDQ packet
* @high_addr_reg_idx: internal register ID which contains high address of pa
* @addr_low: low address of pa
* @value: the specified target value
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value);
/**
* cmdq_pkt_write_s_mask_value() - append write_s command with mask to the CMDQ
* packet which writes a value to a physical
* address
* @pkt: the CMDQ packet
* @high_addr_reg_idx: internal register ID which contains high address of pa
* @addr_low: low address of pa
* @value: the specified target value
* @mask: the specified target mask
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value, u32 mask);
/**
* cmdq_pkt_mem_move() - append memory move command to the CMDQ packet
* @pkt: the CMDQ packet
* @src_addr: source address
* @dst_addr: destination address
*
* Appends a CMDQ command to copy the value found in `src_addr` to `dst_addr`.
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr);
/**
* cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event type to wait for
* @clear: whether to clear the event after it arrives
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
/**
* cmdq_pkt_acquire_event() - append acquire event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event to be acquired
*
* A user can use cmdq_pkt_acquire_event() as `mutex_lock` and cmdq_pkt_clear_event()
* as `mutex_unlock` to protect the `critical section` instructions between them.
* cmdq_pkt_acquire_event() waits for the event to be cleared.
* After the event is cleared by cmdq_pkt_clear_event() in another GCE thread,
* cmdq_pkt_acquire_event() sets the event and keeps executing the next instruction.
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event);
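/*
 * Illustrative sketch of the mutex-style usage described above (not part of
 * the original header):
 *
 *	cmdq_pkt_acquire_event(pkt, event);	- "lock"
 *	... critical section instructions ...
 *	cmdq_pkt_clear_event(pkt, event);	- "unlock"
 */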
/**
* cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event to be cleared
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
/**
* cmdq_pkt_set_event() - append set event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event to be set
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event);
/**
* cmdq_pkt_poll() - Append polling command to the CMDQ packet, asking the GCE
*                   to execute an instruction that waits for a specified
*                   hardware register to reach the given value, without a
*                   mask. All GCE hardware threads will be blocked by this
*                   instruction.
* @pkt: the CMDQ packet
* @subsys: the CMDQ sub system code
* @offset: register offset from CMDQ sub system
* @value: the specified target register value
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value);
/**
* cmdq_pkt_poll_mask() - Append polling command to the CMDQ packet, asking the
*                        GCE to execute an instruction that waits for a
*                        specified hardware register to reach the given value,
*                        with a mask. All GCE hardware threads will be blocked
*                        by this instruction.
* @pkt: the CMDQ packet
* @subsys: the CMDQ sub system code
* @offset: register offset from CMDQ sub system
* @value: the specified target register value
* @mask: the specified target register mask
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
/**
* cmdq_pkt_logic_command() - Append logic command to the CMDQ packet, asking the
*                            GCE to execute an instruction that stores the result
*                            of the logic operation on the left and right operands
*                            into result_reg_idx.
* @pkt: the CMDQ packet
* @result_reg_idx: SPR index that stores the operation result of left_operand and right_operand
* @left_operand: left operand
* @s_op: the logic operator enum
* @right_operand: right operand
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
struct cmdq_operand *left_operand,
enum cmdq_logic_op s_op,
struct cmdq_operand *right_operand);
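/*
 * Illustrative sketch (not part of the original header): compute
 * SPR1 = SPR0 + 16 on the GCE, assuming `pkt' was created earlier:
 *
 *	struct cmdq_operand lhs = { .reg = true, .idx = CMDQ_THR_SPR_IDX0 };
 *	struct cmdq_operand rhs = { .reg = false, .value = 16 };
 *
 *	cmdq_pkt_logic_command(pkt, CMDQ_THR_SPR_IDX1, &lhs,
 *			       CMDQ_LOGIC_ADD, &rhs);
 */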
/**
* cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, asking
*                     the GCE to execute an instruction that sets a constant
*                     value into an internal register, for use as a value, mask
*                     or address in a read/write instruction.
* @pkt: the CMDQ packet
* @reg_idx: the CMDQ internal register ID
* @value: the specified value
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value);
/**
* cmdq_pkt_poll_addr() - Append blocking POLL command to CMDQ packet
* @pkt: the CMDQ packet
* @addr: the hardware register address
* @value: the specified target register value
* @mask: the specified target register mask
*
* Appends a polling (POLL) command to the CMDQ packet and asks the GCE
* to execute an instruction that checks for the specified `value` (with
* or without `mask`) to appear in the specified hardware register `addr`.
* All GCE threads will be blocked by this instruction.
*
* Return: 0 for success or negative error code
*/
int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask);
/**
* cmdq_pkt_jump_abs() - Append jump command to the CMDQ packet, asking the GCE
*                       to execute an instruction that changes the current
*                       thread's PC to an absolute physical address, which
*                       should contain more instructions.
* @pkt: the CMDQ packet
* @addr: absolute physical address of target instruction buffer
* @shift_pa: shift bits of the physical address in the CMDQ instruction. This
*            value is obtained from cmdq_get_shift_pa().
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa);
/* This wrapper has to be removed after all users migrated to jump_abs */
static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
{
return cmdq_pkt_jump_abs(pkt, addr, shift_pa);
}
/**
* cmdq_pkt_jump_rel() - Append jump command to the CMDQ packet, asking the GCE
*                       to execute an instruction that changes the current
*                       thread's PC to a physical address at a relative offset.
*                       The target address should contain more instructions.
* @pkt: the CMDQ packet
* @offset: relative offset of the target instruction buffer from the current PC
* @shift_pa: shift bits of the physical address in the CMDQ instruction. This
*            value is obtained from cmdq_get_shift_pa().
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa);
/**
* cmdq_pkt_eoc() - Append EOC and ask GCE to generate an IRQ at end of execution
* @pkt: The CMDQ packet
*
* Appends an End Of Code (EOC) command to the CMDQ packet and asks the GCE
* to generate an interrupt at the end of the execution of all commands in
* the pipeline.
* The EOC command is usually appended to the end of the pipeline to notify
* that all commands are done.
*
* Return: 0 for success or negative error number
*/
int cmdq_pkt_eoc(struct cmdq_pkt *pkt);
/**
* cmdq_pkt_finalize() - Append EOC and jump command to pkt.
* @pkt: the CMDQ packet
*
* Return: 0 for success; else the error code is returned
*/
int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
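/*
 * Typical packet lifecycle (illustrative sketch; the mbox_send_message() step
 * is an assumption based on the generic mailbox client API, since this header
 * does not define a submit helper):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0);
 *	struct cmdq_pkt pkt;
 *
 *	cmdq_pkt_create(cl, &pkt, PAGE_SIZE);
 *	cmdq_pkt_write(&pkt, subsys, offset, value);
 *	cmdq_pkt_finalize(&pkt);		- appends EOC + jump
 *	mbox_send_message(cl->chan, &pkt);	- hand off to a GCE thread
 */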
#else /* IS_ENABLED(CONFIG_MTK_CMDQ) */
static inline int cmdq_dev_get_client_reg(struct device *dev,
struct cmdq_client_reg *client_reg, int idx)
{
return -ENODEV;
}
static inline struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
return ERR_PTR(-EINVAL);
}
static inline void cmdq_mbox_destroy(struct cmdq_client *client) { }
static inline int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size)
{
return -EINVAL;
}
static inline void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt) { }
static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
return -ENOENT;
}
static inline int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
return -ENOENT;
}
static inline int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 reg_idx)
{
return -ENOENT;
}
static inline int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 src_reg_idx)
{
return -ENOENT;
}
static inline int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
u16 addr_low, u16 src_reg_idx, u32 mask)
{
return -ENOENT;
}
static inline int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value)
{
return -ENOENT;
}
static inline int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value, u32 mask)
{
return -ENOENT;
}
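/* Stub for cmdq_pkt_mem_move(), mirroring the other fallbacks here (assumed,
 * as the excerpt above declares it without a !CONFIG_MTK_CMDQ stub).
 */
static inline int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr,
				    dma_addr_t dst_addr)
{
	return -EINVAL;
}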
static inline int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
return -EINVAL;
}
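/* Stub for cmdq_pkt_acquire_event(), mirroring the other fallbacks (assumed) */
static inline int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event)
{
	return -EINVAL;
}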
static inline int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
return -EINVAL;
}
static inline int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
return -EINVAL;
}
static inline int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value)
{
return -EINVAL;
}
static inline int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
return -EINVAL;
}
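/* Stub for cmdq_pkt_logic_command(), mirroring the other fallbacks (assumed) */
static inline int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
					 struct cmdq_operand *left_operand,
					 enum cmdq_logic_op s_op,
					 struct cmdq_operand *right_operand)
{
	return -EINVAL;
}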
static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
return -EINVAL;
}
static inline int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask)
{
return -EINVAL;
}
static inline int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
{
return -EINVAL;
}
static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
{
return -EINVAL;
}
static inline int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa)
{
return -EINVAL;
}
static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
{
return -EINVAL;
}
static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
return -EINVAL;
}
#endif /* IS_ENABLED(CONFIG_MTK_CMDQ) */
#endif /* __MTK_CMDQ_H__ */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* AHCI SATA platform driver
*
* Copyright 2004-2005 Red Hat, Inc.
* Jeff Garzik <[email protected]>
* Copyright 2010 MontaVista Software, LLC.
* Anton Vorontsov <[email protected]>
*/
#ifndef _AHCI_PLATFORM_H
#define _AHCI_PLATFORM_H
#include <linux/compiler.h>
struct clk;
struct device;
struct ata_port_info;
struct ahci_host_priv;
struct platform_device;
struct scsi_host_template;
int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv,
const char *con_id);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv);
int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
struct ahci_host_priv *ahci_platform_get_resources(
struct platform_device *pdev, unsigned int flags);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
const struct scsi_host_template *sht);
void ahci_platform_shutdown(struct platform_device *pdev);
int ahci_platform_suspend_host(struct device *dev);
int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
int ahci_platform_resume(struct device *dev);
#define AHCI_PLATFORM_GET_RESETS BIT(0)
#define AHCI_PLATFORM_RST_TRIGGER BIT(1)
#endif /* _AHCI_PLATFORM_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright SUSE Linux Products GmbH 2009
*
* Authors: Alexander Graf <[email protected]>
*/
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>
#define OP_19_XOP_RFID 18
#define OP_19_XOP_RFI 50
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_MTMSR 146
#define OP_31_XOP_MTMSRD 178
#define OP_31_XOP_MTSR 210
#define OP_31_XOP_MTSRIN 242
#define OP_31_XOP_TLBIEL 274
#define OP_31_XOP_TLBIE 306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1 308
#define OP_31_XOP_SLBMTE 402
#define OP_31_XOP_SLBIE 434
#define OP_31_XOP_SLBIA 498
#define OP_31_XOP_MFSR 595
#define OP_31_XOP_MFSRIN 659
#define OP_31_XOP_DCBA 758
#define OP_31_XOP_SLBMFEV 851
#define OP_31_XOP_EIOIO 854
#define OP_31_XOP_SLBMFEE 915
#define OP_31_XOP_SLBFEE 979
#define OP_31_XOP_TBEGIN 654
#define OP_31_XOP_TABORT 910
#define OP_31_XOP_TRECLAIM 942
#define OP_31_XOP_TRCHKPT 1006
/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ 1010
#define OP_LFS 48
#define OP_LFD 50
#define OP_STFS 52
#define OP_STFD 54
#define SPRN_GQR0 912
#define SPRN_GQR1 913
#define SPRN_GQR2 914
#define SPRN_GQR3 915
#define SPRN_GQR4 916
#define SPRN_GQR5 917
#define SPRN_GQR6 918
#define SPRN_GQR7 919
enum priv_level {
PRIV_PROBLEM = 0,
PRIV_SUPER = 1,
PRIV_HYPER = 2,
};
static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
/* PAPR VMs only access supervisor SPRs */
if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
return false;
/* Limit user space to its own small SPR set */
if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
return false;
return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
sizeof(vcpu->arch.gpr_tm));
memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
sizeof(struct thread_fp_state));
memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
sizeof(struct thread_vr_state));
vcpu->arch.ppr_tm = vcpu->arch.ppr;
vcpu->arch.dscr_tm = vcpu->arch.dscr;
vcpu->arch.amr_tm = vcpu->arch.amr;
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
vcpu->arch.tar_tm = vcpu->arch.tar;
vcpu->arch.lr_tm = vcpu->arch.regs.link;
vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
sizeof(vcpu->arch.regs.gpr));
memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
sizeof(struct thread_fp_state));
memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
sizeof(struct thread_vr_state));
vcpu->arch.ppr = vcpu->arch.ppr_tm;
vcpu->arch.dscr = vcpu->arch.dscr_tm;
vcpu->arch.amr = vcpu->arch.amr_tm;
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
vcpu->arch.tar = vcpu->arch.tar_tm;
vcpu->arch.regs.link = vcpu->arch.lr_tm;
vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
unsigned long guest_msr = kvmppc_get_msr(vcpu);
int fc_val = ra_val ? ra_val : 1;
uint64_t texasr;
/* CR0 = 0 | MSR[TS] | 0 */
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
<< CR0_SHIFT);
preempt_disable();
tm_enable();
texasr = mfspr(SPRN_TEXASR);
kvmppc_save_tm_pr(vcpu);
kvmppc_copyfrom_vcpu_tm(vcpu);
/* failure recording depends on Failure Summary bit */
if (!(texasr & TEXASR_FS)) {
texasr &= ~TEXASR_FC;
texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;
texasr &= ~(TEXASR_PR | TEXASR_HV);
if (kvmppc_get_msr(vcpu) & MSR_PR)
texasr |= TEXASR_PR;
if (kvmppc_get_msr(vcpu) & MSR_HV)
texasr |= TEXASR_HV;
vcpu->arch.texasr = texasr;
vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
mtspr(SPRN_TEXASR, texasr);
mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
}
tm_disable();
/*
* treclaim needs to quit to non-transactional state.
*/
guest_msr &= ~(MSR_TS_MASK);
kvmppc_set_msr(vcpu, guest_msr);
preempt_enable();
if (vcpu->arch.shadow_fscr & FSCR_TAR)
mtspr(SPRN_TAR, vcpu->arch.tar);
}
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
unsigned long guest_msr = kvmppc_get_msr(vcpu);
preempt_disable();
/*
* Need to flush FP/VEC/VSX to the vcpu save area before
* copying.
*/
kvmppc_giveup_ext(vcpu, MSR_VSX);
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
kvmppc_copyto_vcpu_tm(vcpu);
kvmppc_save_tm_sprs(vcpu);
/*
* As a result of trecheckpoint, set TS to suspended.
*/
guest_msr &= ~(MSR_TS_MASK);
guest_msr |= MSR_TS_S;
kvmppc_set_msr(vcpu, guest_msr);
kvmppc_restore_tm_pr(vcpu);
preempt_enable();
}
/* emulate tabort at guest privilege state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
/* Currently we only emulate tabort, with no emulation of other
* tabort variants, since there is no kernel usage of them at
* present.
*/
unsigned long guest_msr = kvmppc_get_msr(vcpu);
uint64_t org_texasr;
preempt_disable();
tm_enable();
org_texasr = mfspr(SPRN_TEXASR);
tm_abort(ra_val);
/* CR0 = 0 | MSR[TS] | 0 */
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
<< CR0_SHIFT);
vcpu->arch.texasr = mfspr(SPRN_TEXASR);
/* failure recording depends on Failure Summary bit,
* and tabort is treated as a nop in non-transactional
* state.
*/
if (!(org_texasr & TEXASR_FS) &&
MSR_TM_ACTIVE(guest_msr)) {
vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
if (guest_msr & MSR_PR)
vcpu->arch.texasr |= TEXASR_PR;
if (guest_msr & MSR_HV)
vcpu->arch.texasr |= TEXASR_HV;
vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
}
tm_disable();
preempt_enable();
}
#endif
int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int rt = get_rt(inst);
int rs = get_rs(inst);
int ra = get_ra(inst);
int rb = get_rb(inst);
u32 inst_sc = 0x44000002;
switch (get_op(inst)) {
case 0:
emulated = EMULATE_FAIL;
if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
(inst == swab32(inst_sc))) {
/*
* This is the byte reversed syscall instruction of our
* hypercall handler. Early versions of LE Linux didn't
* swap the instructions correctly and ended up in
* illegal instructions.
* Just always fail hypercalls on these broken systems.
*/
kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
emulated = EMULATE_DONE;
}
break;
case 19:
switch (get_xop(inst)) {
case OP_19_XOP_RFID:
case OP_19_XOP_RFI: {
unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long cur_msr = kvmppc_get_msr(vcpu);
/*
* Follow the ISA rules for TM state transitions with TM
* disabled: if TM is off in both the current MSR and SRR1,
* the current state is suspended and the target state would
* be TM inactive (00), the transition must be suppressed,
* so keep TS suspended.
*/
if (((cur_msr & MSR_TM) == 0) &&
((srr1 & MSR_TM) == 0) &&
MSR_TM_SUSPENDED(cur_msr) &&
!MSR_TM_ACTIVE(srr1))
srr1 |= MSR_TS_S;
#endif
kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
kvmppc_set_msr(vcpu, srr1);
*advance = 0;
break;
}
default:
emulated = EMULATE_FAIL;
break;
}
break;
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
break;
case OP_31_XOP_MTMSRD:
{
ulong rs_val = kvmppc_get_gpr(vcpu, rs);
if (inst & 0x10000) {
ulong new_msr = kvmppc_get_msr(vcpu);
new_msr &= ~(MSR_RI | MSR_EE);
new_msr |= rs_val & (MSR_RI | MSR_EE);
kvmppc_set_msr_fast(vcpu, new_msr);
} else
kvmppc_set_msr(vcpu, rs_val);
break;
}
case OP_31_XOP_MTMSR:
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_MFSR:
{
int srnum;
srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
kvmppc_set_gpr(vcpu, rt, sr);
}
break;
}
case OP_31_XOP_MFSRIN:
{
int srnum;
srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
kvmppc_set_gpr(vcpu, rt, sr);
}
break;
}
case OP_31_XOP_MTSR:
vcpu->arch.mmu.mtsrin(vcpu,
(inst >> 16) & 0xf,
kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_TLBIE:
case OP_31_XOP_TLBIEL:
{
bool large = (inst & 0x00200000) ? true : false;
ulong addr = kvmppc_get_gpr(vcpu, rb);
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
#ifdef CONFIG_PPC_BOOK3S_64
case OP_31_XOP_FAKE_SC1:
{
/* SC 1 papr hypercalls */
ulong cmd = kvmppc_get_gpr(vcpu, 3);
int i;
if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
!vcpu->arch.papr_enabled) {
emulated = EMULATE_FAIL;
break;
}
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
break;
vcpu->run->papr_hcall.nr = cmd;
for (i = 0; i < 9; ++i) {
ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
vcpu->run->papr_hcall.args[i] = gpr;
}
vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
vcpu->arch.hcall_needed = 1;
emulated = EMULATE_EXIT_USER;
break;
}
#endif
case OP_31_XOP_EIOIO:
break;
case OP_31_XOP_SLBMTE:
if (!vcpu->arch.mmu.slbmte)
return EMULATE_FAIL;
vcpu->arch.mmu.slbmte(vcpu,
kvmppc_get_gpr(vcpu, rs),
kvmppc_get_gpr(vcpu, rb));
break;
case OP_31_XOP_SLBIE:
if (!vcpu->arch.mmu.slbie)
return EMULATE_FAIL;
vcpu->arch.mmu.slbie(vcpu,
kvmppc_get_gpr(vcpu, rb));
break;
case OP_31_XOP_SLBIA:
if (!vcpu->arch.mmu.slbia)
return EMULATE_FAIL;
vcpu->arch.mmu.slbia(vcpu);
break;
case OP_31_XOP_SLBFEE:
if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
return EMULATE_FAIL;
} else {
ulong b, t;
ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;
b = kvmppc_get_gpr(vcpu, rb);
if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
cr |= 2 << CR0_SHIFT;
kvmppc_set_gpr(vcpu, rt, t);
/* copy XER[SO] bit to CR0[SO] */
cr |= (vcpu->arch.regs.xer & 0x80000000) >>
(31 - CR0_SHIFT);
kvmppc_set_cr(vcpu, cr);
}
break;
case OP_31_XOP_SLBMFEE:
if (!vcpu->arch.mmu.slbmfee) {
emulated = EMULATE_FAIL;
} else {
ulong t, rb_val;
rb_val = kvmppc_get_gpr(vcpu, rb);
t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
kvmppc_set_gpr(vcpu, rt, t);
}
break;
case OP_31_XOP_SLBMFEV:
if (!vcpu->arch.mmu.slbmfev) {
emulated = EMULATE_FAIL;
} else {
ulong t, rb_val;
rb_val = kvmppc_get_gpr(vcpu, rb);
t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
kvmppc_set_gpr(vcpu, rt, t);
}
break;
case OP_31_XOP_DCBA:
/* Gets treated as NOP */
break;
case OP_31_XOP_DCBZ:
{
ulong rb_val = kvmppc_get_gpr(vcpu, rb);
ulong ra_val = 0;
ulong addr, vaddr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
u32 dsisr;
int r;
if (ra)
ra_val = kvmppc_get_gpr(vcpu, ra);
addr = (ra_val + rb_val) & ~31ULL;
if (!(kvmppc_get_msr(vcpu) & MSR_SF))
addr &= 0xffffffff;
vaddr = addr;
r = kvmppc_st(vcpu, &addr, 32, zeros, true);
if ((r == -ENOENT) || (r == -EPERM)) {
*advance = 0;
kvmppc_set_dar(vcpu, vaddr);
vcpu->arch.fault_dar = vaddr;
dsisr = DSISR_ISSTORE;
if (r == -ENOENT)
dsisr |= DSISR_NOHPTE;
else if (r == -EPERM)
dsisr |= DSISR_PROTFAULT;
kvmppc_set_dsisr(vcpu, dsisr);
vcpu->arch.fault_dsisr = dsisr;
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_STORAGE);
}
break;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case OP_31_XOP_TBEGIN:
{
if (!cpu_has_feature(CPU_FTR_TM))
break;
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
emulated = EMULATE_AGAIN;
break;
}
if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
preempt_disable();
vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
(vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
<< TEXASR_FC_LG));
if ((inst >> 21) & 0x1)
vcpu->arch.texasr |= TEXASR_ROT;
if (kvmppc_get_msr(vcpu) & MSR_HV)
vcpu->arch.texasr |= TEXASR_HV;
vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
kvmppc_restore_tm_sprs(vcpu);
preempt_enable();
} else
emulated = EMULATE_FAIL;
break;
}
case OP_31_XOP_TABORT:
{
ulong guest_msr = kvmppc_get_msr(vcpu);
unsigned long ra_val = 0;
if (!cpu_has_feature(CPU_FTR_TM))
break;
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
emulated = EMULATE_AGAIN;
break;
}
/* Only emulate for a privileged guest, since a problem state
* guest can run with TM enabled and we don't expect to
* trap here in that case.
*/
WARN_ON(guest_msr & MSR_PR);
if (ra)
ra_val = kvmppc_get_gpr(vcpu, ra);
kvmppc_emulate_tabort(vcpu, ra_val);
break;
}
case OP_31_XOP_TRECLAIM:
{
ulong guest_msr = kvmppc_get_msr(vcpu);
unsigned long ra_val = 0;
if (!cpu_has_feature(CPU_FTR_TM))
break;
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
emulated = EMULATE_AGAIN;
break;
}
/* generate interrupts based on priorities */
if (guest_msr & MSR_PR) {
/* Privileged Instruction type Program Interrupt */
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
emulated = EMULATE_AGAIN;
break;
}
if (!MSR_TM_ACTIVE(guest_msr)) {
/* TM bad thing interrupt */
kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
emulated = EMULATE_AGAIN;
break;
}
if (ra)
ra_val = kvmppc_get_gpr(vcpu, ra);
kvmppc_emulate_treclaim(vcpu, ra_val);
break;
}
case OP_31_XOP_TRCHKPT:
{
ulong guest_msr = kvmppc_get_msr(vcpu);
unsigned long texasr;
if (!cpu_has_feature(CPU_FTR_TM))
break;
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
emulated = EMULATE_AGAIN;
break;
}
/* generate interrupt based on priorities */
if (guest_msr & MSR_PR) {
/* Privileged Instruction type Program Interrupt */
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
emulated = EMULATE_AGAIN;
break;
}
tm_enable();
texasr = mfspr(SPRN_TEXASR);
tm_disable();
if (MSR_TM_ACTIVE(guest_msr) ||
!(texasr & (TEXASR_FS))) {
/* TM bad thing interrupt */
kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
emulated = EMULATE_AGAIN;
break;
}
kvmppc_emulate_trchkpt(vcpu);
break;
}
#endif
default:
emulated = EMULATE_FAIL;
}
break;
default:
emulated = EMULATE_FAIL;
}
if (emulated == EMULATE_FAIL)
emulated = kvmppc_emulate_paired_single(vcpu);
return emulated;
}
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
u32 val)
{
if (upper) {
/* Upper BAT */
u32 bl = (val >> 2) & 0x7ff;
bat->bepi_mask = (~bl << 17);
bat->bepi = val & 0xfffe0000;
bat->vs = (val & 2) ? 1 : 0;
bat->vp = (val & 1) ? 1 : 0;
bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
} else {
/* Lower BAT */
bat->brpn = val & 0xfffe0000;
bat->wimg = (val >> 3) & 0xf;
bat->pp = val & 3;
bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
}
}
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_bat *bat;
switch (sprn) {
case SPRN_IBAT0U ... SPRN_IBAT3L:
bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
break;
case SPRN_IBAT4U ... SPRN_IBAT7L:
bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
break;
case SPRN_DBAT0U ... SPRN_DBAT3L:
bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
break;
case SPRN_DBAT4U ... SPRN_DBAT7L:
bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
break;
default:
BUG();
}
return bat;
}
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_SDR1:
if (!spr_allowed(vcpu, PRIV_HYPER))
goto unprivileged;
to_book3s(vcpu)->sdr1 = spr_val;
break;
case SPRN_DSISR:
kvmppc_set_dsisr(vcpu, spr_val);
break;
case SPRN_DAR:
kvmppc_set_dar(vcpu, spr_val);
break;
case SPRN_HIOR:
to_book3s(vcpu)->hior = spr_val;
break;
case SPRN_IBAT0U ... SPRN_IBAT3L:
case SPRN_IBAT4U ... SPRN_IBAT7L:
case SPRN_DBAT0U ... SPRN_DBAT3L:
case SPRN_DBAT4U ... SPRN_DBAT7L:
{
struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
/* BAT writes happen so rarely that we're ok to flush
* everything here */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
kvmppc_mmu_flush_segments(vcpu);
break;
}
case SPRN_HID0:
to_book3s(vcpu)->hid[0] = spr_val;
break;
case SPRN_HID1:
to_book3s(vcpu)->hid[1] = spr_val;
break;
case SPRN_HID2_750FX:
to_book3s(vcpu)->hid[2] = spr_val;
break;
case SPRN_HID2_GEKKO:
to_book3s(vcpu)->hid[2] = spr_val;
/* HID2.PSE controls paired single on gekko */
switch (vcpu->arch.pvr) {
case 0x00080200: /* lonestar 2.0 */
case 0x00088202: /* lonestar 2.2 */
case 0x70000100: /* gekko 1.0 */
case 0x00080100: /* gekko 2.0 */
case 0x00083203: /* gekko 2.3a */
case 0x00083213: /* gekko 2.3b */
case 0x00083204: /* gekko 2.4 */
case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
case 0x00087200: /* broadway */
if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
/* Native paired singles */
} else if (spr_val & (1 << 29)) { /* HID2.PSE */
vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
kvmppc_giveup_ext(vcpu, MSR_FP);
} else {
vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
}
break;
}
break;
case SPRN_HID4:
case SPRN_HID4_GEKKO:
to_book3s(vcpu)->hid[4] = spr_val;
break;
case SPRN_HID5:
to_book3s(vcpu)->hid[5] = spr_val;
/* guest HID5 set can change is_dcbz32 */
if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
(mfmsr() & MSR_HV))
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
break;
case SPRN_GQR0:
case SPRN_GQR1:
case SPRN_GQR2:
case SPRN_GQR3:
case SPRN_GQR4:
case SPRN_GQR5:
case SPRN_GQR6:
case SPRN_GQR7:
to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
break;
#ifdef CONFIG_PPC_BOOK3S_64
case SPRN_FSCR:
kvmppc_set_fscr(vcpu, spr_val);
break;
case SPRN_BESCR:
vcpu->arch.bescr = spr_val;
break;
case SPRN_EBBHR:
vcpu->arch.ebbhr = spr_val;
break;
case SPRN_EBBRR:
vcpu->arch.ebbrr = spr_val;
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case SPRN_TFHAR:
case SPRN_TEXASR:
case SPRN_TFIAR:
if (!cpu_has_feature(CPU_FTR_TM))
break;
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
emulated = EMULATE_AGAIN;
break;
}
if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
(sprn == SPRN_TFHAR))) {
/* It is illegal to mtspr() TM regs in any state other
* than non-transactional, with the exception of TFHAR
* in suspended state.
*/
kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
emulated = EMULATE_AGAIN;
break;
}
tm_enable();
if (sprn == SPRN_TFHAR)
mtspr(SPRN_TFHAR, spr_val);
else if (sprn == SPRN_TEXASR)
mtspr(SPRN_TEXASR, spr_val);
else
mtspr(SPRN_TFIAR, spr_val);
tm_disable();
break;
#endif
#endif
case SPRN_ICTC:
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
case SPRN_CTRLF:
case SPRN_CTRLT:
case SPRN_L2CR:
case SPRN_DSCR:
case SPRN_MMCR0_GEKKO:
case SPRN_MMCR1_GEKKO:
case SPRN_PMC1_GEKKO:
case SPRN_PMC2_GEKKO:
case SPRN_PMC3_GEKKO:
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
case SPRN_MSSSR0:
case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
case SPRN_MMCRS:
case SPRN_MMCRA:
case SPRN_MMCR0:
case SPRN_MMCR1:
case SPRN_MMCR2:
case SPRN_UMMCR2:
case SPRN_UAMOR:
case SPRN_IAMR:
case SPRN_AMR:
#endif
break;
unprivileged:
default:
pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
if (sprn & 0x10) {
if (kvmppc_get_msr(vcpu) & MSR_PR) {
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
emulated = EMULATE_AGAIN;
}
} else {
if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
emulated = EMULATE_AGAIN;
}
}
break;
}
return emulated;
}
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_IBAT0U ... SPRN_IBAT3L:
case SPRN_IBAT4U ... SPRN_IBAT7L:
case SPRN_DBAT0U ... SPRN_DBAT3L:
case SPRN_DBAT4U ... SPRN_DBAT7L:
{
struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
if (sprn % 2)
*spr_val = bat->raw >> 32;
else
*spr_val = bat->raw;
break;
}
case SPRN_SDR1:
if (!spr_allowed(vcpu, PRIV_HYPER))
goto unprivileged;
*spr_val = to_book3s(vcpu)->sdr1;
break;
case SPRN_DSISR:
*spr_val = kvmppc_get_dsisr(vcpu);
break;
case SPRN_DAR:
*spr_val = kvmppc_get_dar(vcpu);
break;
case SPRN_HIOR:
*spr_val = to_book3s(vcpu)->hior;
break;
case SPRN_HID0:
*spr_val = to_book3s(vcpu)->hid[0];
break;
case SPRN_HID1:
*spr_val = to_book3s(vcpu)->hid[1];
break;
case SPRN_HID2_750FX:
case SPRN_HID2_GEKKO:
*spr_val = to_book3s(vcpu)->hid[2];
break;
case SPRN_HID4:
case SPRN_HID4_GEKKO:
*spr_val = to_book3s(vcpu)->hid[4];
break;
case SPRN_HID5:
*spr_val = to_book3s(vcpu)->hid[5];
break;
case SPRN_CFAR:
case SPRN_DSCR:
*spr_val = 0;
break;
case SPRN_PURR:
/*
* On exit we would have updated purr
*/
*spr_val = vcpu->arch.purr;
break;
case SPRN_SPURR:
/*
* On exit we would have updated spurr
*/
*spr_val = vcpu->arch.spurr;
break;
case SPRN_VTB:
*spr_val = to_book3s(vcpu)->vtb;
break;
case SPRN_IC:
*spr_val = vcpu->arch.ic;
break;
case SPRN_GQR0:
case SPRN_GQR1:
case SPRN_GQR2:
case SPRN_GQR3:
case SPRN_GQR4:
case SPRN_GQR5:
case SPRN_GQR6:
case SPRN_GQR7:
*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
break;
#ifdef CONFIG_PPC_BOOK3S_64
case SPRN_FSCR:
*spr_val = vcpu->arch.fscr;
break;
case SPRN_BESCR:
*spr_val = vcpu->arch.bescr;
break;
case SPRN_EBBHR:
*spr_val = vcpu->arch.ebbhr;
break;
case SPRN_EBBRR:
*spr_val = vcpu->arch.ebbrr;
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case SPRN_TFHAR:
case SPRN_TEXASR:
case SPRN_TFIAR:
if (!cpu_has_feature(CPU_FTR_TM))
break;
if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
emulated = EMULATE_AGAIN;
break;
}
tm_enable();
if (sprn == SPRN_TFHAR)
*spr_val = mfspr(SPRN_TFHAR);
else if (sprn == SPRN_TEXASR)
*spr_val = mfspr(SPRN_TEXASR);
else if (sprn == SPRN_TFIAR)
*spr_val = mfspr(SPRN_TFIAR);
tm_disable();
break;
#endif
#endif
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
case SPRN_CTRLF:
case SPRN_CTRLT:
case SPRN_L2CR:
case SPRN_MMCR0_GEKKO:
case SPRN_MMCR1_GEKKO:
case SPRN_PMC1_GEKKO:
case SPRN_PMC2_GEKKO:
case SPRN_PMC3_GEKKO:
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
case SPRN_MSSSR0:
case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
case SPRN_MMCRS:
case SPRN_MMCRA:
case SPRN_MMCR0:
case SPRN_MMCR1:
case SPRN_MMCR2:
case SPRN_UMMCR2:
case SPRN_TIR:
case SPRN_UAMOR:
case SPRN_IAMR:
case SPRN_AMR:
#endif
*spr_val = 0;
break;
default:
unprivileged:
pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
if (sprn & 0x10) {
if (kvmppc_get_msr(vcpu) & MSR_PR) {
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
emulated = EMULATE_AGAIN;
}
} else {
if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
sprn == 4 || sprn == 5 || sprn == 6) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
emulated = EMULATE_AGAIN;
}
}
break;
}
return emulated;
}
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
return make_dsisr(inst);
}
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
/*
* Linux's fix_alignment() assumes that DAR is valid, so can we
*/
return vcpu->arch.fault_dar;
#else
ulong dar = 0;
ulong ra = get_ra(inst);
ulong rb = get_rb(inst);
switch (get_op(inst)) {
case OP_LFS:
case OP_LFD:
case OP_STFD:
case OP_STFS:
if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
dar += (s32)((s16)inst);
break;
case 31:
if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
dar += kvmppc_get_gpr(vcpu, rb);
break;
default:
printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
break;
}
return dar;
#endif
}
// SPDX-License-Identifier: GPL-2.0
/*
* UEFI Common Platform Error Record (CPER) support
*
* Copyright (C) 2010, Intel Corp.
* Author: Huang Ying <[email protected]>
*
* CPER is the format used to describe platform hardware errors in
* various tables, such as ERST, BERT and HEST.
*
* For more information about CPER, please refer to Appendix N of UEFI
* Specification version 2.4.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/cper.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/printk.h>
#include <linux/bcd.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
#include "cper_cxl.h"
/*
* CPER record IDs need to be unique even after reboot, because the record
* ID is used as an index for ERST storage, while CPER records from
* multiple boots may co-exist in ERST.
*/
u64 cper_next_record_id(void)
{
static atomic64_t seq;
if (!atomic64_read(&seq)) {
time64_t time = ktime_get_real_seconds();
/*
* This code is unlikely to still be needed in year 2106,
* but just in case, let's use a few more bits for timestamps
* after y2038 to be sure they keep increasing monotonically
* for the next few hundred years...
*/
if (time < 0x80000000)
atomic64_set(&seq, (ktime_get_real_seconds()) << 32);
else
atomic64_set(&seq, 0x8000000000000000ull |
ktime_get_real_seconds() << 24);
}
return atomic64_inc_return(&seq);
}
EXPORT_SYMBOL_GPL(cper_next_record_id);
static const char * const severity_strs[] = {
"recoverable",
"fatal",
"corrected",
"info",
};
const char *cper_severity_str(unsigned int severity)
{
return severity < ARRAY_SIZE(severity_strs) ?
severity_strs[severity] : "unknown";
}
EXPORT_SYMBOL_GPL(cper_severity_str);
/*
* cper_print_bits - print strings for set bits
* @pfx: prefix for each line, including log level and prefix string
* @bits: bit mask
* @strs: string array, indexed by bit position
* @strs_size: size of the string array: @strs
*
* For each set bit in @bits, print the corresponding string in @strs.
* If the output length is longer than 80, multiple lines will be
* printed, with @pfx printed at the beginning of each line.
*/
void cper_print_bits(const char *pfx, unsigned int bits,
const char * const strs[], unsigned int strs_size)
{
int i, len = 0;
const char *str;
char buf[84];
for (i = 0; i < strs_size; i++) {
if (!(bits & (1U << i)))
continue;
str = strs[i];
if (!str)
continue;
if (len && len + strlen(str) + 2 > 80) {
printk("%s\n", buf);
len = 0;
}
if (!len)
len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
else
len += scnprintf(buf+len, sizeof(buf)-len, ", %s", str);
}
if (len)
printk("%s\n", buf);
}
static const char * const proc_type_strs[] = {
"IA32/X64",
"IA64",
"ARM",
};
static const char * const proc_isa_strs[] = {
"IA32",
"IA64",
"X64",
"ARM A32/T32",
"ARM A64",
};
const char * const cper_proc_error_type_strs[] = {
"cache error",
"TLB error",
"bus error",
"micro-architectural error",
};
static const char * const proc_op_strs[] = {
"unknown or generic",
"data read",
"data write",
"instruction execution",
};
static const char * const proc_flag_strs[] = {
"restartable",
"precise IP",
"overflow",
"corrected",
};
static void cper_print_proc_generic(const char *pfx,
const struct cper_sec_proc_generic *proc)
{
if (proc->validation_bits & CPER_PROC_VALID_TYPE)
printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
proc->proc_type < ARRAY_SIZE(proc_type_strs) ?
proc_type_strs[proc->proc_type] : "unknown");
if (proc->validation_bits & CPER_PROC_VALID_ISA)
printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
proc->proc_isa < ARRAY_SIZE(proc_isa_strs) ?
proc_isa_strs[proc->proc_isa] : "unknown");
if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
cper_print_bits(pfx, proc->proc_error_type,
cper_proc_error_type_strs,
ARRAY_SIZE(cper_proc_error_type_strs));
}
if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
printk("%s""operation: %d, %s\n", pfx, proc->operation,
proc->operation < ARRAY_SIZE(proc_op_strs) ?
proc_op_strs[proc->operation] : "unknown");
if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
printk("%s""flags: 0x%02x\n", pfx, proc->flags);
cper_print_bits(pfx, proc->flags, proc_flag_strs,
ARRAY_SIZE(proc_flag_strs));
}
if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
printk("%s""level: %d\n", pfx, proc->level);
if (proc->validation_bits & CPER_PROC_VALID_VERSION)
printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
if (proc->validation_bits & CPER_PROC_VALID_ID)
printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
printk("%s""target_address: 0x%016llx\n",
pfx, proc->target_addr);
if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
printk("%s""requestor_id: 0x%016llx\n",
pfx, proc->requestor_id);
if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
printk("%s""responder_id: 0x%016llx\n",
pfx, proc->responder_id);
if (proc->validation_bits & CPER_PROC_VALID_IP)
printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
}
static const char * const mem_err_type_strs[] = {
"unknown",
"no error",
"single-bit ECC",
"multi-bit ECC",
"single-symbol chipkill ECC",
"multi-symbol chipkill ECC",
"master abort",
"target abort",
"parity error",
"watchdog timeout",
"invalid address",
"mirror Broken",
"memory sparing",
"scrub corrected error",
"scrub uncorrected error",
"physical memory map-out event",
};
const char *cper_mem_err_type_str(unsigned int etype)
{
return etype < ARRAY_SIZE(mem_err_type_strs) ?
mem_err_type_strs[etype] : "unknown";
}
EXPORT_SYMBOL_GPL(cper_mem_err_type_str);
const char *cper_mem_err_status_str(u64 status)
{
switch ((status >> 8) & 0xff) {
case 1: return "Error detected internal to the component";
case 4: return "Storage error in DRAM memory";
case 5: return "Storage error in TLB";
case 6: return "Storage error in cache";
case 7: return "Error in one or more functional units";
case 8: return "Component failed self test";
case 9: return "Overflow or undervalue of internal queue";
case 16: return "Error detected in the bus";
case 17: return "Virtual address not found on IO-TLB or IO-PDIR";
case 18: return "Improper access error";
case 19: return "Access to a memory address which is not mapped to any component";
case 20: return "Loss of Lockstep";
case 21: return "Response not associated with a request";
case 22: return "Bus parity error - must also set the A, C, or D Bits";
case 23: return "Detection of a protocol error";
case 24: return "Detection of a PATH_ERROR";
case 25: return "Bus operation timeout";
case 26: return "A read was issued to data that has been poisoned";
default: return "Reserved";
}
}
EXPORT_SYMBOL_GPL(cper_mem_err_status_str);
int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
{
u32 len, n;
if (!msg)
return 0;
n = 0;
len = CPER_REC_LEN;
if (mem->validation_bits & CPER_MEM_VALID_NODE)
n += scnprintf(msg + n, len - n, "node:%d ", mem->node);
if (mem->validation_bits & CPER_MEM_VALID_CARD)
n += scnprintf(msg + n, len - n, "card:%d ", mem->card);
if (mem->validation_bits & CPER_MEM_VALID_MODULE)
n += scnprintf(msg + n, len - n, "module:%d ", mem->module);
if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
n += scnprintf(msg + n, len - n, "rank:%d ", mem->rank);
if (mem->validation_bits & CPER_MEM_VALID_BANK)
n += scnprintf(msg + n, len - n, "bank:%d ", mem->bank);
if (mem->validation_bits & CPER_MEM_VALID_BANK_GROUP)
n += scnprintf(msg + n, len - n, "bank_group:%d ",
mem->bank >> CPER_MEM_BANK_GROUP_SHIFT);
if (mem->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
n += scnprintf(msg + n, len - n, "bank_address:%d ",
mem->bank & CPER_MEM_BANK_ADDRESS_MASK);
if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
n += scnprintf(msg + n, len - n, "device:%d ", mem->device);
if (mem->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
u32 row = mem->row;
row |= cper_get_mem_extension(mem->validation_bits, mem->extended);
n += scnprintf(msg + n, len - n, "row:%d ", row);
}
if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
n += scnprintf(msg + n, len - n, "column:%d ", mem->column);
if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
n += scnprintf(msg + n, len - n, "bit_position:%d ",
mem->bit_pos);
if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
n += scnprintf(msg + n, len - n, "requestor_id:0x%016llx ",
mem->requestor_id);
if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
n += scnprintf(msg + n, len - n, "responder_id:0x%016llx ",
mem->responder_id);
if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
n += scnprintf(msg + n, len - n, "target_id:0x%016llx ",
mem->target_id);
if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID)
n += scnprintf(msg + n, len - n, "chip_id:%d ",
mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
return n;
}
EXPORT_SYMBOL_GPL(cper_mem_err_location);
int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
{
u32 len, n;
const char *bank = NULL, *device = NULL;
if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
return 0;
len = CPER_REC_LEN;
dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
if (bank && device)
n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
else
n = snprintf(msg, len,
"DIMM location: not present. DMI handle: 0x%.4x ",
mem->mem_dev_handle);
return n;
}
EXPORT_SYMBOL_GPL(cper_dimm_err_location);
void cper_mem_err_pack(const struct cper_sec_mem_err *mem,
struct cper_mem_err_compact *cmem)
{
cmem->validation_bits = mem->validation_bits;
cmem->node = mem->node;
cmem->card = mem->card;
cmem->module = mem->module;
cmem->bank = mem->bank;
cmem->device = mem->device;
cmem->row = mem->row;
cmem->column = mem->column;
cmem->bit_pos = mem->bit_pos;
cmem->requestor_id = mem->requestor_id;
cmem->responder_id = mem->responder_id;
cmem->target_id = mem->target_id;
cmem->extended = mem->extended;
cmem->rank = mem->rank;
cmem->mem_array_handle = mem->mem_array_handle;
cmem->mem_dev_handle = mem->mem_dev_handle;
}
EXPORT_SYMBOL_GPL(cper_mem_err_pack);
const char *cper_mem_err_unpack(struct trace_seq *p,
struct cper_mem_err_compact *cmem)
{
const char *ret = trace_seq_buffer_ptr(p);
char rcd_decode_str[CPER_REC_LEN];
if (cper_mem_err_location(cmem, rcd_decode_str))
trace_seq_printf(p, "%s", rcd_decode_str);
if (cper_dimm_err_location(cmem, rcd_decode_str))
trace_seq_printf(p, "%s", rcd_decode_str);
trace_seq_putc(p, '\0');
return ret;
}
static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
int len)
{
struct cper_mem_err_compact cmem;
char rcd_decode_str[CPER_REC_LEN];
/* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
if (len == sizeof(struct cper_sec_mem_err_old) &&
(mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
pr_err(FW_WARN "valid bits set for fields beyond structure\n");
return;
}
if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
printk("%s error_status: %s (0x%016llx)\n",
pfx, cper_mem_err_status_str(mem->error_status),
mem->error_status);
if (mem->validation_bits & CPER_MEM_VALID_PA)
printk("%s""physical_address: 0x%016llx\n",
pfx, mem->physical_addr);
if (mem->validation_bits & CPER_MEM_VALID_PA_MASK)
printk("%s""physical_address_mask: 0x%016llx\n",
pfx, mem->physical_addr_mask);
cper_mem_err_pack(mem, &cmem);
if (cper_mem_err_location(&cmem, rcd_decode_str))
printk("%s%s\n", pfx, rcd_decode_str);
if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
u8 etype = mem->error_type;
printk("%s""error_type: %d, %s\n", pfx, etype,
cper_mem_err_type_str(etype));
}
if (cper_dimm_err_location(&cmem, rcd_decode_str))
printk("%s%s\n", pfx, rcd_decode_str);
}
static const char * const pcie_port_type_strs[] = {
"PCIe end point",
"legacy PCI end point",
"unknown",
"unknown",
"root port",
"upstream switch port",
"downstream switch port",
"PCIe to PCI/PCI-X bridge",
"PCI/PCI-X to PCIe bridge",
"root complex integrated endpoint device",
"root complex event collector",
};
static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
const struct acpi_hest_generic_data *gdata)
{
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
pcie->port_type < ARRAY_SIZE(pcie_port_type_strs) ?
pcie_port_type_strs[pcie->port_type] : "unknown");
if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
printk("%s""version: %d.%d\n", pfx,
pcie->version.major, pcie->version.minor);
if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
pcie->command, pcie->status);
if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
const __u8 *p;
printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
pcie->device_id.segment, pcie->device_id.bus,
pcie->device_id.device, pcie->device_id.function);
printk("%s""slot: %d\n", pfx,
pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
printk("%s""secondary_bus: 0x%02x\n", pfx,
pcie->device_id.secondary_bus);
printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
pcie->device_id.vendor_id, pcie->device_id.device_id);
p = pcie->device_id.class_code;
printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
}
if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
pcie->serial_number.lower, pcie->serial_number.upper);
if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
printk(
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
/*
* Print all valid AER info. Record may be from BERT (boot-time) or GHES (run-time).
*
* Fatal errors call __ghes_panic() before AER handler prints this.
*/
if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
struct aer_capability_regs *aer;
aer = (struct aer_capability_regs *)pcie->aer_info;
printk("%saer_cor_status: 0x%08x, aer_cor_mask: 0x%08x\n",
pfx, aer->cor_status, aer->cor_mask);
printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
pfx, aer->uncor_status, aer->uncor_mask);
printk("%saer_uncor_severity: 0x%08x\n",
pfx, aer->uncor_severity);
printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
aer->header_log.dw[0], aer->header_log.dw[1],
aer->header_log.dw[2], aer->header_log.dw[3]);
}
}
static const char * const fw_err_rec_type_strs[] = {
"IPF SAL Error Record",
"SOC Firmware Error Record Type1 (Legacy CrashLog Support)",
"SOC Firmware Error Record Type2",
};
static void cper_print_fw_err(const char *pfx,
struct acpi_hest_generic_data *gdata,
const struct cper_sec_fw_err_rec_ref *fw_err)
{
void *buf = acpi_hest_get_payload(gdata);
u32 offset, length = gdata->error_data_length;
printk("%s""Firmware Error Record Type: %s\n", pfx,
fw_err->record_type < ARRAY_SIZE(fw_err_rec_type_strs) ?
fw_err_rec_type_strs[fw_err->record_type] : "unknown");
printk("%s""Revision: %d\n", pfx, fw_err->revision);
/* Record Type based on UEFI 2.7 */
if (fw_err->revision == 0) {
printk("%s""Record Identifier: %08llx\n", pfx,
fw_err->record_identifier);
} else if (fw_err->revision == 2) {
printk("%s""Record Identifier: %pUl\n", pfx,
&fw_err->record_identifier_guid);
}
/*
* The FW error record may contain trailing data beyond the
* structure defined by the specification. As the fields
* defined (and hence the offset of any trailing data) vary
* with the revision, set the offset to account for this
* variation.
*/
if (fw_err->revision == 0) {
/* record_identifier_guid not defined */
offset = offsetof(struct cper_sec_fw_err_rec_ref,
record_identifier_guid);
} else if (fw_err->revision == 1) {
/* record_identifier not defined */
offset = offsetof(struct cper_sec_fw_err_rec_ref,
record_identifier);
} else {
offset = sizeof(*fw_err);
}
buf += offset;
length -= offset;
print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, buf, length, true);
}
static void cper_print_tstamp(const char *pfx,
struct acpi_hest_generic_data_v300 *gdata)
{
__u8 hour, min, sec, day, mon, year, century, *timestamp;
if (gdata->validation_bits & ACPI_HEST_GEN_VALID_TIMESTAMP) {
timestamp = (__u8 *)&(gdata->time_stamp);
sec = bcd2bin(timestamp[0]);
min = bcd2bin(timestamp[1]);
hour = bcd2bin(timestamp[2]);
day = bcd2bin(timestamp[4]);
mon = bcd2bin(timestamp[5]);
year = bcd2bin(timestamp[6]);
century = bcd2bin(timestamp[7]);
printk("%s%ststamp: %02d%02d-%02d-%02d %02d:%02d:%02d\n", pfx,
(timestamp[3] & 0x1 ? "precise " : "imprecise "),
century, year, mon, day, hour, min, sec);
}
}
struct ignore_section {
guid_t guid;
const char *name;
};
static const struct ignore_section ignore_sections[] = {
{ .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" },
{ .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" },
{ .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" },
};
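/*
 * Decode one generic error data entry: print the common fields, then
 * dispatch on the section-type GUID to the matching decoder. Section
 * types listed in ignore_sections[] only get their name printed;
 * unrecognized GUIDs are hex dumped.
 */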
static void
cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
int sec_no)
{
guid_t *sec_type = (guid_t *)gdata->section_type;
__u16 severity;
char newpfx[64];
if (acpi_hest_get_version(gdata) >= 3)
cper_print_tstamp(pfx, (struct acpi_hest_generic_data_v300 *)gdata);
severity = gdata->error_severity;
printk("%s""Error %d, type: %s\n", pfx, sec_no,
cper_severity_str(severity));
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
printk("%s""fru_id: %pUl\n", pfx, gdata->fru_id);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) {
if (guid_equal(sec_type, &ignore_sections[i].guid)) {
printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name);
return;
}
}
if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
printk("%s""section_type: general processor error\n", newpfx);
if (gdata->error_data_length >= sizeof(*proc_err))
cper_print_proc_generic(newpfx, proc_err);
else
goto err_section_too_small;
} else if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
printk("%s""section_type: memory error\n", newpfx);
if (gdata->error_data_length >=
sizeof(struct cper_sec_mem_err_old))
cper_print_mem(newpfx, mem_err,
gdata->error_data_length);
else
goto err_section_too_small;
} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
struct cper_sec_pcie *pcie = acpi_hest_get_payload(gdata);
printk("%s""section_type: PCIe error\n", newpfx);
if (gdata->error_data_length >= sizeof(*pcie))
cper_print_pcie(newpfx, pcie, gdata);
else
goto err_section_too_small;
#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
} else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: ARM processor error\n", newpfx);
if (gdata->error_data_length >= sizeof(*arm_err))
cper_print_proc_arm(newpfx, arm_err);
else
goto err_section_too_small;
#endif
#if defined(CONFIG_UEFI_CPER_X86)
} else if (guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
struct cper_sec_proc_ia *ia_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: IA32/X64 processor error\n", newpfx);
if (gdata->error_data_length >= sizeof(*ia_err))
cper_print_proc_ia(newpfx, ia_err);
else
goto err_section_too_small;
#endif
} else if (guid_equal(sec_type, &CPER_SEC_FW_ERR_REC_REF)) {
struct cper_sec_fw_err_rec_ref *fw_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: Firmware Error Record Reference\n",
newpfx);
/* The minimal FW Error Record contains 16 bytes */
if (gdata->error_data_length >= SZ_16)
cper_print_fw_err(newpfx, gdata, fw_err);
else
goto err_section_too_small;
} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
struct cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: CXL Protocol Error\n", newpfx);
if (gdata->error_data_length >= sizeof(*prot_err))
cper_print_prot_err(newpfx, prot_err);
else
goto err_section_too_small;
} else {
const void *err = acpi_hest_get_payload(gdata);
printk("%ssection type: unknown, %pUl\n", newpfx, sec_type);
printk("%ssection length: %#x\n", newpfx,
gdata->error_data_length);
print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, err,
gdata->error_data_length, true);
}
return;
err_section_too_small:
pr_err(FW_WARN "error section length is too small\n");
}
void cper_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus)
{
struct acpi_hest_generic_data *gdata;
int sec_no = 0;
char newpfx[64];
__u16 severity;
severity = estatus->error_severity;
if (severity == CPER_SEV_CORRECTED)
printk("%s%s\n", pfx,
"It has been corrected by h/w "
"and requires no further action");
printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
apei_estatus_for_each_section(estatus, gdata) {
cper_estatus_print_section(newpfx, gdata, sec_no);
sec_no++;
}
}
EXPORT_SYMBOL_GPL(cper_estatus_print);
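/*
 * Sanity check the estatus header: a non-zero data_length must hold at
 * least one generic data entry, and any raw data region must start
 * after the structured data.
 */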
int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus)
{
if (estatus->data_length &&
estatus->data_length < sizeof(struct acpi_hest_generic_data))
return -EINVAL;
if (estatus->raw_data_length &&
estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(cper_estatus_check_header);
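/*
 * Validate the whole estatus block: walk every section and make sure
 * the per-section record sizes fit within, and exactly consume,
 * data_length.
 */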
int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
{
struct acpi_hest_generic_data *gdata;
unsigned int data_len, record_size;
int rc;
rc = cper_estatus_check_header(estatus);
if (rc)
return rc;
data_len = estatus->data_length;
apei_estatus_for_each_section(estatus, gdata) {
if (acpi_hest_get_size(gdata) > data_len)
return -EINVAL;
record_size = acpi_hest_get_record_size(gdata);
if (record_size > data_len)
return -EINVAL;
data_len -= record_size;
}
if (data_len)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(cper_estatus_check);
|
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2013 John Crispin <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <asm/mach-ralink/ralink_regs.h>
#define REG_ILL_ACC_ADDR 0x10
#define REG_ILL_ACC_TYPE 0x14
#define ILL_INT_STATUS BIT(31)
#define ILL_ACC_WRITE BIT(30)
#define ILL_ACC_LEN_M 0xff
#define ILL_ACC_OFF_M 0xf
#define ILL_ACC_OFF_S 16
#define ILL_ACC_ID_M 0x7
#define ILL_ACC_ID_S 8
#define DRV_NAME "ill_acc"
static const char * const ill_acc_ids[] = {
"cpu", "dma", "ppe", "pdma rx", "pdma tx", "pci/e", "wmac", "usb",
};
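/*
 * REG_ILL_ACC_TYPE encodes the faulting master (bits 10:8, indexing
 * ill_acc_ids[]), the access offset (bits 19:16), the transfer length
 * (bits 7:0), a read/write flag (bit 30) and the interrupt status
 * (bit 31). Writing ILL_INT_STATUS back acknowledges the interrupt.
 */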
static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
{
struct device *dev = (struct device *) _priv;
u32 addr = rt_memc_r32(REG_ILL_ACC_ADDR);
u32 type = rt_memc_r32(REG_ILL_ACC_TYPE);
dev_err(dev, "illegal %s access from %s - addr:0x%08x offset:%d len:%d\n",
(type & ILL_ACC_WRITE) ? ("write") : ("read"),
ill_acc_ids[(type >> ILL_ACC_ID_S) & ILL_ACC_ID_M],
addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
type & ILL_ACC_LEN_M);
rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
return IRQ_HANDLED;
}
static int __init ill_acc_of_setup(void)
{
struct platform_device *pdev;
struct device_node *np;
int irq;
/* somehow this driver breaks on RT5350 */
if (of_machine_is_compatible("ralink,rt5350-soc"))
return -EINVAL;
np = of_find_compatible_node(NULL, NULL, "ralink,rt3050-memc");
if (!np)
return -EINVAL;
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("%pOFn: failed to lookup pdev\n", np);
of_node_put(np);
return -EINVAL;
}
irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
if (!irq) {
dev_err(&pdev->dev, "failed to get irq\n");
put_device(&pdev->dev);
return -EINVAL;
}
if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) {
dev_err(&pdev->dev, "failed to request irq\n");
put_device(&pdev->dev);
return -EINVAL;
}
rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
dev_info(&pdev->dev, "irq registered\n");
return 0;
}
arch_initcall(ill_acc_of_setup);
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "test_global_func1.skel.h"
#include "test_global_func2.skel.h"
#include "test_global_func3.skel.h"
#include "test_global_func4.skel.h"
#include "test_global_func5.skel.h"
#include "test_global_func6.skel.h"
#include "test_global_func7.skel.h"
#include "test_global_func8.skel.h"
#include "test_global_func9.skel.h"
#include "test_global_func10.skel.h"
#include "test_global_func11.skel.h"
#include "test_global_func12.skel.h"
#include "test_global_func13.skel.h"
#include "test_global_func14.skel.h"
#include "test_global_func15.skel.h"
#include "test_global_func16.skel.h"
#include "test_global_func17.skel.h"
#include "test_global_func_ctx_args.skel.h"
#include "bpf/libbpf_internal.h"
#include "btf_helpers.h"
static void check_ctx_arg_type(const struct btf *btf, const struct btf_param *p)
{
const struct btf_type *t;
const char *s;
t = btf__type_by_id(btf, p->type);
if (!ASSERT_EQ(btf_kind(t), BTF_KIND_PTR, "ptr_t"))
return;
s = btf_type_raw_dump(btf, t->type);
if (!ASSERT_HAS_SUBSTR(s, "STRUCT 'bpf_perf_event_data' size=0 vlen=0",
"ctx_struct_t"))
return;
}
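/*
 * Verify that libbpf rewrites global subprog arguments tagged with
 * __arg_ctx: load a program that uses the tag, read its func_info back
 * from the kernel, and check that each tagged void * parameter was
 * rewritten to a pointer to the expected context struct
 * (bpf_perf_event_data). Skipped when the kernel supports arg:ctx tags
 * natively.
 */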
static void subtest_ctx_arg_rewrite(void)
{
struct test_global_func_ctx_args *skel = NULL;
struct bpf_prog_info info;
char func_info_buf[1024] __attribute__((aligned(8)));
struct bpf_func_info_min *rec;
struct btf *btf = NULL;
__u32 info_len = sizeof(info);
int err, fd, i;
struct btf *kern_btf = NULL;
kern_btf = btf__load_vmlinux_btf();
if (!ASSERT_OK_PTR(kern_btf, "kern_btf_load"))
return;
/* simple detection of kernel native arg:ctx tag support */
if (btf__find_by_name_kind(kern_btf, "bpf_subprog_arg_info", BTF_KIND_STRUCT) > 0) {
test__skip();
btf__free(kern_btf);
return;
}
btf__free(kern_btf);
skel = test_global_func_ctx_args__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
bpf_program__set_autoload(skel->progs.arg_tag_ctx_perf, true);
err = test_global_func_ctx_args__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
memset(&info, 0, sizeof(info));
info.func_info = ptr_to_u64(&func_info_buf);
info.nr_func_info = 3;
info.func_info_rec_size = sizeof(struct bpf_func_info_min);
fd = bpf_program__fd(skel->progs.arg_tag_ctx_perf);
err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
if (!ASSERT_OK(err, "prog_info"))
goto out;
if (!ASSERT_EQ(info.nr_func_info, 3, "nr_func_info"))
goto out;
btf = btf__load_from_kernel_by_id(info.btf_id);
if (!ASSERT_OK_PTR(btf, "obj_kern_btf"))
goto out;
rec = (struct bpf_func_info_min *)func_info_buf;
for (i = 0; i < info.nr_func_info; i++, rec = (void *)rec + info.func_info_rec_size) {
const struct btf_type *fn_t, *proto_t;
const char *name;
if (rec->insn_off == 0)
continue; /* main prog, skip */
fn_t = btf__type_by_id(btf, rec->type_id);
if (!ASSERT_OK_PTR(fn_t, "fn_type"))
goto out;
if (!ASSERT_EQ(btf_kind(fn_t), BTF_KIND_FUNC, "fn_type_kind"))
goto out;
proto_t = btf__type_by_id(btf, fn_t->type);
if (!ASSERT_OK_PTR(proto_t, "proto_type"))
goto out;
name = btf__name_by_offset(btf, fn_t->name_off);
if (strcmp(name, "subprog_ctx_tag") == 0) {
/* int subprog_ctx_tag(void *ctx __arg_ctx) */
if (!ASSERT_EQ(btf_vlen(proto_t), 1, "arg_cnt"))
goto out;
/* arg 0 is PTR -> STRUCT bpf_perf_event_data */
check_ctx_arg_type(btf, &btf_params(proto_t)[0]);
} else if (strcmp(name, "subprog_multi_ctx_tags") == 0) {
/* int subprog_multi_ctx_tags(void *ctx1 __arg_ctx,
* struct my_struct *mem,
* void *ctx2 __arg_ctx)
*/
if (!ASSERT_EQ(btf_vlen(proto_t), 3, "arg_cnt"))
goto out;
/* arg 0 is PTR -> STRUCT bpf_perf_event_data */
check_ctx_arg_type(btf, &btf_params(proto_t)[0]);
/* arg 2 is PTR -> STRUCT bpf_perf_event_data */
check_ctx_arg_type(btf, &btf_params(proto_t)[2]);
} else {
ASSERT_FAIL("unexpected subprog %s", name);
goto out;
}
}
out:
btf__free(btf);
test_global_func_ctx_args__destroy(skel);
}
void test_test_global_funcs(void)
{
RUN_TESTS(test_global_func1);
RUN_TESTS(test_global_func2);
RUN_TESTS(test_global_func3);
RUN_TESTS(test_global_func4);
RUN_TESTS(test_global_func5);
RUN_TESTS(test_global_func6);
RUN_TESTS(test_global_func7);
RUN_TESTS(test_global_func8);
RUN_TESTS(test_global_func9);
RUN_TESTS(test_global_func10);
RUN_TESTS(test_global_func11);
RUN_TESTS(test_global_func12);
RUN_TESTS(test_global_func13);
RUN_TESTS(test_global_func14);
RUN_TESTS(test_global_func15);
RUN_TESTS(test_global_func16);
RUN_TESTS(test_global_func17);
RUN_TESTS(test_global_func_ctx_args);
if (test__start_subtest("ctx_arg_rewrite"))
subtest_ctx_arg_rewrite();
}
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/aperture.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "loongson_module.h"
#include "lsdc_drv.h"
#include "lsdc_gem.h"
#include "lsdc_ttm.h"
#define DRIVER_AUTHOR "Sui Jingfeng <[email protected]>"
#define DRIVER_NAME "loongson"
#define DRIVER_DESC "drm driver for loongson graphics"
#define DRIVER_DATE "20220701"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
DEFINE_DRM_GEM_FOPS(lsdc_gem_fops);
static const struct drm_driver lsdc_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_RENDER | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &lsdc_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
.debugfs_init = lsdc_debugfs_init,
.dumb_create = lsdc_dumb_create,
.dumb_map_offset = lsdc_dumb_map_offset,
.gem_prime_import_sg_table = lsdc_prime_import_sg_table,
DRM_FBDEV_TTM_DRIVER_OPS,
};
static const struct drm_mode_config_funcs lsdc_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
/* Display related */
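/*
 * Modeset init is done in three passes over the display pipes: first
 * create the i2c adapters (the outputs need them for DDC), then the
 * outputs themselves, and finally the primary/cursor planes and the
 * CRTC that binds them together.
 */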
static int lsdc_modeset_init(struct lsdc_device *ldev,
unsigned int num_crtc,
const struct lsdc_kms_funcs *funcs,
bool has_vblank)
{
struct drm_device *ddev = &ldev->base;
struct lsdc_display_pipe *dispipe;
unsigned int i;
int ret;
for (i = 0; i < num_crtc; i++) {
dispipe = &ldev->dispipe[i];
/* We need an index before crtc is initialized */
dispipe->index = i;
ret = funcs->create_i2c(ddev, dispipe, i);
if (ret)
return ret;
}
for (i = 0; i < num_crtc; i++) {
struct i2c_adapter *ddc = NULL;
dispipe = &ldev->dispipe[i];
if (dispipe->li2c)
ddc = &dispipe->li2c->adapter;
ret = funcs->output_init(ddev, dispipe, ddc, i);
if (ret)
return ret;
ldev->num_output++;
}
for (i = 0; i < num_crtc; i++) {
dispipe = &ldev->dispipe[i];
ret = funcs->primary_plane_init(ddev, &dispipe->primary.base, i);
if (ret)
return ret;
ret = funcs->cursor_plane_init(ddev, &dispipe->cursor.base, i);
if (ret)
return ret;
ret = funcs->crtc_init(ddev, &dispipe->crtc.base,
&dispipe->primary.base,
&dispipe->cursor.base,
i, has_vblank);
if (ret)
return ret;
}
drm_info(ddev, "Total %u outputs\n", ldev->num_output);
return 0;
}
static const struct drm_mode_config_helper_funcs lsdc_mode_config_helper_funcs = {
.atomic_commit_tail = drm_atomic_helper_commit_tail,
};
static int lsdc_mode_config_init(struct drm_device *ddev,
const struct lsdc_desc *descp)
{
int ret;
ret = drmm_mode_config_init(ddev);
if (ret)
return ret;
ddev->mode_config.funcs = &lsdc_mode_config_funcs;
ddev->mode_config.min_width = 1;
ddev->mode_config.min_height = 1;
ddev->mode_config.max_width = descp->max_width * LSDC_NUM_CRTC;
ddev->mode_config.max_height = descp->max_height * LSDC_NUM_CRTC;
ddev->mode_config.preferred_depth = 24;
ddev->mode_config.prefer_shadow = 1;
ddev->mode_config.cursor_width = descp->hw_cursor_h;
ddev->mode_config.cursor_height = descp->hw_cursor_h;
ddev->mode_config.helper_private = &lsdc_mode_config_helper_funcs;
if (descp->has_vblank_counter)
ddev->max_vblank_count = 0xffffffff;
return ret;
}
/*
 * The GPU and display controller in the LS7A1000/LS7A2000/LS2K2000 are
 * separate PCIe devices, not one device. BAR 2 of the GPU device
 * contains the base address and size of the VRAM; both the GPU and
 * the DC can access the on-board VRAM.
 */
static int lsdc_get_dedicated_vram(struct lsdc_device *ldev,
struct pci_dev *pdev_dc,
const struct lsdc_desc *descp)
{
struct drm_device *ddev = &ldev->base;
struct pci_dev *pdev_gpu;
resource_size_t base, size;
/*
* The GPU has 00:06.0 as its BDF, while the DC has 00:06.1
* This is true for the LS7A1000, LS7A2000 and LS2K2000.
*/
pdev_gpu = pci_get_domain_bus_and_slot(pci_domain_nr(pdev_dc->bus),
pdev_dc->bus->number,
PCI_DEVFN(6, 0));
if (!pdev_gpu) {
drm_err(ddev, "No GPU device, then no VRAM\n");
return -ENODEV;
}
base = pci_resource_start(pdev_gpu, 2);
size = pci_resource_len(pdev_gpu, 2);
ldev->vram_base = base;
ldev->vram_size = size;
ldev->gpu = pdev_gpu;
drm_info(ddev, "Dedicated vram start: 0x%llx, size: %uMiB\n",
(u64)base, (u32)(size >> 20));
return (size > SZ_1M) ? 0 : -ENODEV;
}
static struct lsdc_device *
lsdc_create_device(struct pci_dev *pdev,
const struct lsdc_desc *descp,
const struct drm_driver *driver)
{
struct lsdc_device *ldev;
struct drm_device *ddev;
int ret;
ldev = devm_drm_dev_alloc(&pdev->dev, driver, struct lsdc_device, base);
if (IS_ERR(ldev))
return ldev;
ldev->dc = pdev;
ldev->descp = descp;
ddev = &ldev->base;
loongson_gfxpll_create(ddev, &ldev->gfxpll);
ret = lsdc_get_dedicated_vram(ldev, pdev, descp);
if (ret) {
drm_err(ddev, "Init VRAM failed: %d\n", ret);
return ERR_PTR(ret);
}
ret = aperture_remove_conflicting_devices(ldev->vram_base,
ldev->vram_size,
driver->name);
if (ret) {
drm_err(ddev, "Remove firmware framebuffers failed: %d\n", ret);
return ERR_PTR(ret);
}
ret = lsdc_ttm_init(ldev);
if (ret) {
drm_err(ddev, "Memory manager init failed: %d\n", ret);
return ERR_PTR(ret);
}
lsdc_gem_init(ddev);
/* Bar 0 of the DC device contains the MMIO register's base address */
ldev->reg_base = pcim_iomap(pdev, 0, 0);
if (!ldev->reg_base)
return ERR_PTR(-ENODEV);
spin_lock_init(&ldev->reglock);
ret = lsdc_mode_config_init(ddev, descp);
if (ret)
return ERR_PTR(ret);
ret = lsdc_modeset_init(ldev, descp->num_of_crtc, descp->funcs,
loongson_vblank);
if (ret)
return ERR_PTR(ret);
drm_mode_config_reset(ddev);
return ldev;
}
/* Allow multiple GPU driver instances to coexist in the system */
static unsigned int lsdc_vga_set_decode(struct pci_dev *pdev, bool state)
{
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct lsdc_desc *descp;
struct drm_device *ddev;
struct lsdc_device *ldev;
int ret;
descp = lsdc_device_probe(pdev, ent->driver_data);
if (IS_ERR_OR_NULL(descp))
return -ENODEV;
pci_set_master(pdev);
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret)
return ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
	dev_info(&pdev->dev, "Found %s, revision: %u\n",
		 to_loongson_gfx(descp)->model, pdev->revision);
ldev = lsdc_create_device(pdev, descp, &lsdc_drm_driver);
if (IS_ERR(ldev))
return PTR_ERR(ldev);
ddev = &ldev->base;
pci_set_drvdata(pdev, ddev);
vga_client_register(pdev, lsdc_vga_set_decode);
drm_kms_helper_poll_init(ddev);
if (loongson_vblank) {
ret = drm_vblank_init(ddev, descp->num_of_crtc);
if (ret)
return ret;
ret = devm_request_irq(&pdev->dev, pdev->irq,
descp->funcs->irq_handler,
IRQF_SHARED,
dev_name(&pdev->dev), ddev);
if (ret) {
drm_err(ddev, "Failed to register interrupt: %d\n", ret);
return ret;
}
drm_info(ddev, "registered irq: %u\n", pdev->irq);
}
ret = drm_dev_register(ddev, 0);
if (ret)
return ret;
drm_client_setup(ddev, NULL);
return 0;
}
static void lsdc_pci_remove(struct pci_dev *pdev)
{
struct drm_device *ddev = pci_get_drvdata(pdev);
drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
}
static void lsdc_pci_shutdown(struct pci_dev *pdev)
{
drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
}
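/*
 * Freeze: drop every pin reference held on VRAM buffer objects, evict
 * VRAM so its contents are migrated out before power loss, then
 * suspend the mode configuration.
 */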
static int lsdc_drm_freeze(struct drm_device *ddev)
{
struct lsdc_device *ldev = to_lsdc(ddev);
struct lsdc_bo *lbo;
int ret;
	/* unpin all buffers pinned in VRAM */
mutex_lock(&ldev->gem.mutex);
list_for_each_entry(lbo, &ldev->gem.objects, list) {
struct ttm_buffer_object *tbo = &lbo->tbo;
struct ttm_resource *resource = tbo->resource;
unsigned int pin_count = tbo->pin_count;
drm_dbg(ddev, "bo[%p], size: %zuKiB, type: %s, pin count: %u\n",
lbo, lsdc_bo_size(lbo) >> 10,
lsdc_mem_type_to_str(resource->mem_type), pin_count);
if (!pin_count)
continue;
if (resource->mem_type == TTM_PL_VRAM) {
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret)) {
drm_err(ddev, "bo reserve failed: %d\n", ret);
continue;
}
do {
lsdc_bo_unpin(lbo);
--pin_count;
} while (pin_count);
lsdc_bo_unreserve(lbo);
}
}
mutex_unlock(&ldev->gem.mutex);
lsdc_bo_evict_vram(ddev);
ret = drm_mode_config_helper_suspend(ddev);
if (unlikely(ret)) {
drm_err(ddev, "Freeze error: %d", ret);
return ret;
}
return 0;
}
static int lsdc_drm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return drm_mode_config_helper_resume(ddev);
}
static int lsdc_pm_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *ddev = pci_get_drvdata(pdev);
return lsdc_drm_freeze(ddev);
}
static int lsdc_pm_thaw(struct device *dev)
{
return lsdc_drm_resume(dev);
}
static int lsdc_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int error;
error = lsdc_pm_freeze(dev);
if (error)
return error;
pci_save_state(pdev);
/* Shut down the device */
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int lsdc_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (pcim_enable_device(pdev))
return -EIO;
return lsdc_pm_thaw(dev);
}
static const struct dev_pm_ops lsdc_pm_ops = {
.suspend = lsdc_pm_suspend,
.resume = lsdc_pm_resume,
.freeze = lsdc_pm_freeze,
.thaw = lsdc_pm_thaw,
.poweroff = lsdc_pm_freeze,
.restore = lsdc_pm_resume,
};
static const struct pci_device_id lsdc_pciid_list[] = {
{PCI_VDEVICE(LOONGSON, 0x7a06), CHIP_LS7A1000},
{PCI_VDEVICE(LOONGSON, 0x7a36), CHIP_LS7A2000},
{ }
};
struct pci_driver lsdc_pci_driver = {
.name = DRIVER_NAME,
.id_table = lsdc_pciid_list,
.probe = lsdc_pci_probe,
.remove = lsdc_pci_remove,
.shutdown = lsdc_pci_shutdown,
.driver.pm = &lsdc_pm_ops,
};
MODULE_DEVICE_TABLE(pci, lsdc_pciid_list);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
|
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2007 Nokia Corporation. All rights reserved.
* Copyright © 2004-2010 David Woodhouse <[email protected]>
*
* Created by Richard Purdie <[email protected]>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/lzo.h>
#include "compr.h"
static void *lzo_mem;
static void *lzo_compress_buf;
static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */
static void free_workspace(void)
{
vfree(lzo_mem);
vfree(lzo_compress_buf);
}
static int __init alloc_workspace(void)
{
lzo_mem = vmalloc(LZO1X_MEM_COMPRESS);
lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
if (!lzo_mem || !lzo_compress_buf) {
free_workspace();
return -ENOMEM;
}
return 0;
}
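/*
 * Compress one page with LZO1X-1 into the shared bounce buffer, then
 * copy the result out if it fits in *dstlen. deflate_mutex serializes
 * access to lzo_mem and lzo_compress_buf. Returns 0 and updates
 * *dstlen on success, -1 on failure or if the output would not fit.
 */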
static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
size_t compress_size;
int ret;
mutex_lock(&deflate_mutex);
ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem);
if (ret != LZO_E_OK)
goto fail;
if (compress_size > *dstlen)
goto fail;
memcpy(cpage_out, lzo_compress_buf, compress_size);
mutex_unlock(&deflate_mutex);
*dstlen = compress_size;
return 0;
fail:
mutex_unlock(&deflate_mutex);
return -1;
}
static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
uint32_t srclen, uint32_t destlen)
{
size_t dl = destlen;
int ret;
ret = lzo1x_decompress_safe(data_in, srclen, cpage_out, &dl);
if (ret != LZO_E_OK || dl != destlen)
return -1;
return 0;
}
static struct jffs2_compressor jffs2_lzo_comp = {
.priority = JFFS2_LZO_PRIORITY,
.name = "lzo",
.compr = JFFS2_COMPR_LZO,
.compress = &jffs2_lzo_compress,
.decompress = &jffs2_lzo_decompress,
.disabled = 0,
};
int __init jffs2_lzo_init(void)
{
int ret;
ret = alloc_workspace();
if (ret < 0)
return ret;
ret = jffs2_register_compressor(&jffs2_lzo_comp);
if (ret)
free_workspace();
return ret;
}
void jffs2_lzo_exit(void)
{
jffs2_unregister_compressor(&jffs2_lzo_comp);
free_workspace();
}
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015 Cumulus Networks, Inc.
*/
#ifndef _NET_MPLS_IPTUNNEL_H
#define _NET_MPLS_IPTUNNEL_H 1
#include <linux/types.h>
#include <net/lwtunnel.h>
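/*
 * Per-route MPLS encap state: a fixed header followed by `labels'
 * 32-bit label stack entries. A minimal sketch of walking the stack,
 * assuming the caller already holds the route's lwtunnel_state
 * (inspect() is a hypothetical consumer):
 *
 *	struct mpls_iptunnel_encap *enc = mpls_lwtunnel_encap(lwtstate);
 *	unsigned int i;
 *
 *	for (i = 0; i < enc->labels; i++)
 *		inspect(enc->label[i]);
 */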
struct mpls_iptunnel_encap {
u8 labels;
u8 ttl_propagate;
u8 default_ttl;
u8 reserved1;
u32 label[];
};
static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
{
return (struct mpls_iptunnel_encap *)lwtstate->data;
}
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DTS file for SPEAr320s SoC
*
* Copyright 2021 Herve Codina <[email protected]>
*/
/include/ "spear320.dtsi"
/ {
ahb {
apb {
gpiopinctrl: gpio@b3000000 {
/*
* The "RM0321 SPEAr320s address and map
* registers" document mentions interrupt 6
* (NPGIO_INTR) for the PL_GPIO interrupt.
*/
interrupts = <6>;
interrupt-parent = <&shirq>;
};
};
};
};
|
/*
* Copyright (C) 2013-2015 ARM Limited
* Author: Liviu Dudau <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* ARM HDLCD Driver
*/
#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "hdlcd_drv.h"
#include "hdlcd_regs.h"
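/*
 * Single interrupt handler for the controller: read the raw status,
 * bump the debugfs counters when enabled, forward vsync to the DRM
 * vblank machinery, and acknowledge everything that was pending.
 */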
static irqreturn_t hdlcd_irq(int irq, void *arg)
{
struct hdlcd_drm_private *hdlcd = arg;
unsigned long irq_status;
irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS);
#ifdef CONFIG_DEBUG_FS
if (irq_status & HDLCD_INTERRUPT_UNDERRUN)
atomic_inc(&hdlcd->buffer_underrun_count);
if (irq_status & HDLCD_INTERRUPT_DMA_END)
atomic_inc(&hdlcd->dma_end_count);
if (irq_status & HDLCD_INTERRUPT_BUS_ERROR)
atomic_inc(&hdlcd->bus_error_count);
if (irq_status & HDLCD_INTERRUPT_VSYNC)
atomic_inc(&hdlcd->vsync_count);
#endif
if (irq_status & HDLCD_INTERRUPT_VSYNC)
drm_crtc_handle_vblank(&hdlcd->crtc);
/* acknowledge interrupt(s) */
hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
return IRQ_HANDLED;
}
static int hdlcd_irq_install(struct hdlcd_drm_private *hdlcd)
{
int ret;
/* Ensure interrupts are disabled */
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0);
ret = request_irq(hdlcd->irq, hdlcd_irq, 0, "hdlcd", hdlcd);
if (ret)
return ret;
#ifdef CONFIG_DEBUG_FS
/* enable debug interrupts */
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, HDLCD_DEBUG_INT_MASK);
#endif
return 0;
}
static void hdlcd_irq_uninstall(struct hdlcd_drm_private *hdlcd)
{
/* disable all the interrupts that we might have enabled */
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
free_irq(hdlcd->irq, hdlcd);
}
static int hdlcd_load(struct drm_device *drm, unsigned long flags)
{
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct platform_device *pdev = to_platform_device(drm->dev);
u32 version;
int ret;
hdlcd->clk = devm_clk_get(drm->dev, "pxlclk");
if (IS_ERR(hdlcd->clk))
return PTR_ERR(hdlcd->clk);
#ifdef CONFIG_DEBUG_FS
atomic_set(&hdlcd->buffer_underrun_count, 0);
atomic_set(&hdlcd->bus_error_count, 0);
atomic_set(&hdlcd->vsync_count, 0);
atomic_set(&hdlcd->dma_end_count, 0);
#endif
hdlcd->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdlcd->mmio)) {
DRM_ERROR("failed to map control registers area\n");
ret = PTR_ERR(hdlcd->mmio);
hdlcd->mmio = NULL;
return ret;
}
version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
DRM_ERROR("unknown product id: 0x%x\n", version);
return -EINVAL;
}
DRM_INFO("found ARM HDLCD version r%dp%d\n",
(version & HDLCD_VERSION_MAJOR_MASK) >> 8,
version & HDLCD_VERSION_MINOR_MASK);
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(drm->dev);
if (ret && ret != -ENODEV)
return ret;
ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
if (ret)
goto setup_fail;
ret = hdlcd_setup_crtc(drm);
if (ret < 0) {
DRM_ERROR("failed to create crtc\n");
goto setup_fail;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto irq_fail;
hdlcd->irq = ret;
ret = hdlcd_irq_install(hdlcd);
if (ret < 0) {
DRM_ERROR("failed to install IRQ handler\n");
goto irq_fail;
}
return 0;
irq_fail:
drm_crtc_cleanup(&hdlcd->crtc);
setup_fail:
of_reserved_mem_device_release(drm->dev);
return ret;
}
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int hdlcd_setup_mode_config(struct drm_device *drm)
{
int ret;
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = HDLCD_MAX_XRES;
drm->mode_config.max_height = HDLCD_MAX_YRES;
drm->mode_config.funcs = &hdlcd_mode_config_funcs;
return 0;
}
#ifdef CONFIG_DEBUG_FS
static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count));
seq_printf(m, "bus_error: %d\n", atomic_read(&hdlcd->bus_error_count));
seq_printf(m, "vsync : %d\n", atomic_read(&hdlcd->vsync_count));
return 0;
}
static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
unsigned long clkrate = clk_get_rate(hdlcd->clk);
unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
seq_printf(m, "hw : %lu\n", clkrate);
seq_printf(m, "mode: %lu\n", mode_clock);
return 0;
}
static struct drm_debugfs_info hdlcd_debugfs_list[] = {
{ "interrupt_count", hdlcd_show_underrun_count, 0 },
{ "clocks", hdlcd_show_pxlclock, 0 },
};
#endif
DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &fops,
.name = "hdlcd",
.desc = "ARM HDLCD Controller DRM",
.date = "20151021",
.major = 1,
.minor = 0,
};
static int hdlcd_drm_bind(struct device *dev)
{
struct drm_device *drm;
struct hdlcd_drm_private *hdlcd;
int ret;
hdlcd = devm_drm_dev_alloc(dev, &hdlcd_driver, typeof(*hdlcd), base);
if (IS_ERR(hdlcd))
return PTR_ERR(hdlcd);
drm = &hdlcd->base;
dev_set_drvdata(dev, drm);
ret = hdlcd_setup_mode_config(drm);
if (ret)
goto err_free;
ret = hdlcd_load(drm, 0);
if (ret)
goto err_free;
/* Set the CRTC's port so that the encoder component can find it */
hdlcd->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
goto err_unload;
}
ret = pm_runtime_set_active(dev);
if (ret)
goto err_pm_active;
pm_runtime_enable(dev);
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto err_vblank;
}
/*
* If EFI left us running, take over from simple framebuffer
* drivers. Read HDLCD_REG_COMMAND to see if we are enabled.
*/
if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) {
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
aperture_remove_all_conflicting_devices(hdlcd_driver.name);
}
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
#ifdef CONFIG_DEBUG_FS
drm_debugfs_add_files(drm, hdlcd_debugfs_list, ARRAY_SIZE(hdlcd_debugfs_list));
#endif
ret = drm_dev_register(drm, 0);
if (ret)
goto err_register;
drm_client_setup(drm, NULL);
return 0;
err_register:
drm_kms_helper_poll_fini(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
err_unload:
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
hdlcd_irq_uninstall(hdlcd);
of_reserved_mem_device_release(drm->dev);
err_free:
dev_set_drvdata(dev, NULL);
return ret;
}
static void hdlcd_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(dev, drm);
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
pm_runtime_get_sync(dev);
drm_atomic_helper_shutdown(drm);
hdlcd_irq_uninstall(hdlcd);
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
of_reserved_mem_device_release(dev);
dev_set_drvdata(dev, NULL);
}
static const struct component_master_ops hdlcd_master_ops = {
.bind = hdlcd_drm_bind,
.unbind = hdlcd_drm_unbind,
};
static int compare_dev(struct device *dev, void *data)
{
return dev->of_node == data;
}
static int hdlcd_probe(struct platform_device *pdev)
{
struct device_node *port;
struct component_match *match = NULL;
/* there is only one output port inside each device, find it */
port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
if (!port)
return -ENODEV;
drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
of_node_put(port);
return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
match);
}
static void hdlcd_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &hdlcd_master_ops);
}
static void hdlcd_shutdown(struct platform_device *pdev)
{
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}
static const struct of_device_id hdlcd_of_match[] = {
{ .compatible = "arm,hdlcd" },
{},
};
MODULE_DEVICE_TABLE(of, hdlcd_of_match);
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm);
}
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
drm_mode_config_helper_resume(drm);
return 0;
}
static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
static struct platform_driver hdlcd_platform_driver = {
.probe = hdlcd_probe,
.remove = hdlcd_remove,
.shutdown = hdlcd_shutdown,
.driver = {
.name = "hdlcd",
.pm = &hdlcd_pm_ops,
.of_match_table = hdlcd_of_match,
},
};
drm_module_platform_driver(hdlcd_platform_driver);
MODULE_AUTHOR("Liviu Dudau");
MODULE_DESCRIPTION("ARM HDLCD DRM driver");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
/*
* Tests for sockmap/sockhash holding kTLS sockets.
*/
#include <netinet/tcp.h>
#include "test_progs.h"
#define MAX_TEST_NAME 80
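/*
 * TCP_ULP may be missing from older libc copies of netinet/tcp.h, so
 * define it locally; 31 matches the kernel UAPI value.
 */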
#define TCP_ULP 31
static int tcp_server(int family)
{
int err, s;
s = socket(family, SOCK_STREAM, 0);
if (!ASSERT_GE(s, 0, "socket"))
return -1;
err = listen(s, SOMAXCONN);
if (!ASSERT_OK(err, "listen"))
return -1;
return s;
}
static int disconnect(int fd)
{
struct sockaddr unspec = { AF_UNSPEC };
return connect(fd, &unspec, sizeof(unspec));
}
/* Disconnect (unhash) a kTLS socket after removing it from sockmap. */
static void test_sockmap_ktls_disconnect_after_delete(int family, int map)
{
struct sockaddr_storage addr = {0};
socklen_t len = sizeof(addr);
int err, cli, srv, zero = 0;
srv = tcp_server(family);
if (srv == -1)
return;
err = getsockname(srv, (struct sockaddr *)&addr, &len);
if (!ASSERT_OK(err, "getsockopt"))
goto close_srv;
cli = socket(family, SOCK_STREAM, 0);
if (!ASSERT_GE(cli, 0, "socket"))
goto close_srv;
err = connect(cli, (struct sockaddr *)&addr, len);
if (!ASSERT_OK(err, "connect"))
goto close_cli;
err = bpf_map_update_elem(map, &zero, &cli, 0);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto close_cli;
err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
goto close_cli;
err = bpf_map_delete_elem(map, &zero);
if (!ASSERT_OK(err, "bpf_map_delete_elem"))
goto close_cli;
err = disconnect(cli);
ASSERT_OK(err, "disconnect");
close_cli:
close(cli);
close_srv:
close(srv);
}
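/*
 * Installing the kTLS ULP swaps in a TLS sk_prot. A sockmap update on
 * such a socket must be rejected, and socket calls afterwards must
 * still dispatch through the saved (TLS) protocol ops.
 */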
static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map)
{
struct sockaddr_storage addr = {};
socklen_t len = sizeof(addr);
struct sockaddr_in6 *v6;
struct sockaddr_in *v4;
int err, s, zero = 0;
switch (family) {
case AF_INET:
v4 = (struct sockaddr_in *)&addr;
v4->sin_family = AF_INET;
break;
case AF_INET6:
v6 = (struct sockaddr_in6 *)&addr;
v6->sin6_family = AF_INET6;
break;
default:
PRINT_FAIL("unsupported socket family %d", family);
return;
}
s = socket(family, SOCK_STREAM, 0);
if (!ASSERT_GE(s, 0, "socket"))
return;
err = bind(s, (struct sockaddr *)&addr, len);
if (!ASSERT_OK(err, "bind"))
goto close;
err = getsockname(s, (struct sockaddr *)&addr, &len);
if (!ASSERT_OK(err, "getsockname"))
goto close;
err = connect(s, (struct sockaddr *)&addr, len);
if (!ASSERT_OK(err, "connect"))
goto close;
/* save sk->sk_prot and set it to tls_prots */
err = setsockopt(s, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
goto close;
/* sockmap update should not affect saved sk_prot */
err = bpf_map_update_elem(map, &zero, &s, BPF_ANY);
if (!ASSERT_ERR(err, "sockmap update elem"))
goto close;
/* call sk->sk_prot->setsockopt to dispatch to saved sk_prot */
err = setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero));
ASSERT_OK(err, "setsockopt(TCP_NODELAY)");
close:
close(s);
}
static const char *fmt_test_name(const char *subtest_name, int family,
enum bpf_map_type map_type)
{
	const char *map_type_str = map_type == BPF_MAP_TYPE_SOCKMAP ?
		"SOCKMAP" : "SOCKHASH";
	const char *family_str = family == AF_INET ? "IPv4" : "IPv6";
static char test_name[MAX_TEST_NAME];
snprintf(test_name, MAX_TEST_NAME,
"sockmap_ktls %s %s %s",
subtest_name, family_str, map_type_str);
return test_name;
}
static void run_tests(int family, enum bpf_map_type map_type)
{
int map;
map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
if (!ASSERT_GE(map, 0, "bpf_map_create"))
return;
if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type)))
test_sockmap_ktls_disconnect_after_delete(family, map);
if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type)))
test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map);
close(map);
}
void test_sockmap_ktls(void)
{
run_tests(AF_INET, BPF_MAP_TYPE_SOCKMAP);
run_tests(AF_INET, BPF_MAP_TYPE_SOCKHASH);
run_tests(AF_INET6, BPF_MAP_TYPE_SOCKMAP);
run_tests(AF_INET6, BPF_MAP_TYPE_SOCKHASH);
}
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2019 BayLibre SAS. All rights reserved.
*/
/dts-v1/;
#include "meson-sm1.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/meson-g12a-gpio.h>
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
/ {
compatible = "seirobotics,sei610", "amlogic,sm1";
model = "SEI Robotics SEI610";
aliases {
serial0 = &uart_AO;
ethernet0 = ðmac;
};
mono_dac: audio-codec-0 {
compatible = "maxim,max98357a";
#sound-dai-cells = <0>;
sound-name-prefix = "U16";
sdmode-gpios = <&gpio GPIOX_8 GPIO_ACTIVE_HIGH>;
};
dmics: audio-codec-1 {
#sound-dai-cells = <0>;
compatible = "dmic-codec";
num-channels = <2>;
wakeup-delay-ms = <50>;
sound-name-prefix = "MIC";
};
chosen {
stdout-path = "serial0:115200n8";
};
emmc_pwrseq: emmc-pwrseq {
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
};
gpio-keys {
compatible = "gpio-keys";
key-1 {
label = "A";
linux,code = <BTN_0>;
gpios = <&gpio GPIOH_6 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpio_intc>;
interrupts = <IRQID_GPIOH_6 IRQ_TYPE_EDGE_BOTH>;
};
key-2 {
label = "B";
linux,code = <BTN_1>;
gpios = <&gpio GPIOH_7 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpio_intc>;
interrupts = <IRQID_GPIOH_7 IRQ_TYPE_EDGE_BOTH>;
};
key-3 {
label = "C";
linux,code = <BTN_2>;
gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpio_intc>;
interrupts = <IRQID_GPIOAO_2 IRQ_TYPE_EDGE_BOTH>;
};
key-mic-mute {
label = "MicMute";
linux,code = <SW_MUTE_DEVICE>;
linux,input-type = <EV_SW>;
gpios = <&gpio_ao GPIOE_2 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpio_intc>;
interrupts = <IRQID_GPIOE_2 IRQ_TYPE_EDGE_BOTH>;
};
key-power {
label = "PowerKey";
linux,code = <KEY_POWER>;
gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpio_intc>;
interrupts = <IRQID_GPIOAO_3 IRQ_TYPE_EDGE_BOTH>;
};
};
hdmi-connector {
compatible = "hdmi-connector";
type = "a";
port {
hdmi_connector_in: endpoint {
remote-endpoint = <&hdmi_tx_tmds_out>;
};
};
};
led-controller-1 {
compatible = "gpio-leds";
led-1 {
label = "sei610:blue:bt";
gpios = <&gpio GPIOC_7 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
default-state = "off";
};
};
led-controller-2 {
compatible = "pwm-leds";
led-2 {
label = "sei610:red:power";
pwms = <&pwm_AO_ab 0 30518 0>;
max-brightness = <255>;
linux,default-trigger = "default-on";
active-low;
};
};
memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
ao_5v: regulator-ao-5v {
compatible = "regulator-fixed";
regulator-name = "AO_5V";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
vin-supply = <&dc_in>;
regulator-always-on;
};
dc_in: regulator-dc-in {
compatible = "regulator-fixed";
regulator-name = "DC_IN";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
};
emmc_1v8: regulator-emmc-1v8 {
compatible = "regulator-fixed";
regulator-name = "EMMC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
vin-supply = <&vddao_3v3>;
regulator-always-on;
};
vddao_3v3: regulator-vddao-3v3 {
compatible = "regulator-fixed";
regulator-name = "VDDAO_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&dc_in>;
regulator-always-on;
};
/* Used by the tuner, RGB LED & IR emitter LED array */
vddao_3v3_t: regulator-vddao-3v3-t {
compatible = "regulator-fixed";
regulator-name = "VDDAO_3V3_T";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&vddao_3v3>;
gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
regulator-always-on;
};
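	/*
	 * VDDCPU is driven by an inverted PWM: per the pwm-regulator
	 * binding, pwm-dutycycle-range = <100 0> maps 100% duty to
	 * regulator-min-microvolt and 0% duty to regulator-max-microvolt.
	 */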
vddcpu: regulator-vddcpu {
/*
* SY8120B1ABC DC/DC Regulator.
*/
compatible = "pwm-regulator";
regulator-name = "VDDCPU";
regulator-min-microvolt = <690000>;
regulator-max-microvolt = <1050000>;
pwm-supply = <&dc_in>;
pwms = <&pwm_AO_cd 1 1500 0>;
pwm-dutycycle-range = <100 0>;
regulator-boot-on;
regulator-always-on;
};
vddio_ao1v8: regulator-vddio-ao1v8 {
compatible = "regulator-fixed";
regulator-name = "VDDIO_AO1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
vin-supply = <&vddao_3v3>;
regulator-always-on;
};
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
sound {
compatible = "amlogic,axg-sound-card";
model = "SEI610";
audio-aux-devs = <&tdmout_a>, <&tdmout_b>,
<&tdmin_a>, <&tdmin_b>;
audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
"TDMOUT_A IN 1", "FRDDR_B OUT 0",
"TDMOUT_A IN 2", "FRDDR_C OUT 0",
"TDM_A Playback", "TDMOUT_A OUT",
"TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
"TDM_B Playback", "TDMOUT_B OUT",
"TODDR_A IN 4", "PDM Capture",
"TODDR_B IN 4", "PDM Capture",
"TODDR_C IN 4", "PDM Capture",
"TDMIN_A IN 0", "TDM_A Capture",
"TDMIN_A IN 3", "TDM_A Loopback",
"TDMIN_B IN 0", "TDM_A Capture",
"TDMIN_B IN 3", "TDM_A Loopback",
"TDMIN_A IN 1", "TDM_B Capture",
"TDMIN_A IN 4", "TDM_B Loopback",
"TDMIN_B IN 1", "TDM_B Capture",
"TDMIN_B IN 4", "TDM_B Loopback",
"TODDR_A IN 0", "TDMIN_A OUT",
"TODDR_B IN 0", "TDMIN_A OUT",
"TODDR_C IN 0", "TDMIN_A OUT",
"TODDR_A IN 1", "TDMIN_B OUT",
"TODDR_B IN 1", "TDMIN_B OUT",
"TODDR_C IN 1", "TDMIN_B OUT";
clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
<&clkc CLKID_MPLL1>;
assigned-clock-parents = <0>, <0>, <0>;
assigned-clock-rates = <294912000>,
<270950400>,
<393216000>;
dai-link-0 {
sound-dai = <&frddr_a>;
};
dai-link-1 {
sound-dai = <&frddr_b>;
};
dai-link-2 {
sound-dai = <&frddr_c>;
};
dai-link-3 {
sound-dai = <&toddr_a>;
};
dai-link-4 {
sound-dai = <&toddr_b>;
};
dai-link-5 {
sound-dai = <&toddr_c>;
};
/* internal speaker interface */
dai-link-6 {
sound-dai = <&tdmif_a>;
dai-format = "i2s";
dai-tdm-slot-tx-mask-0 = <1 1>;
mclk-fs = <256>;
codec-0 {
sound-dai = <&mono_dac>;
};
codec-1 {
sound-dai = <&tohdmitx TOHDMITX_I2S_IN_A>;
};
};
/* 8ch hdmi interface */
dai-link-7 {
sound-dai = <&tdmif_b>;
dai-format = "i2s";
dai-tdm-slot-tx-mask-0 = <1 1>;
dai-tdm-slot-tx-mask-1 = <1 1>;
dai-tdm-slot-tx-mask-2 = <1 1>;
dai-tdm-slot-tx-mask-3 = <1 1>;
mclk-fs = <256>;
codec {
sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
};
};
/* internal digital mics */
dai-link-8 {
sound-dai = <&pdm>;
codec {
sound-dai = <&dmics>;
};
};
/* hdmi glue */
dai-link-9 {
sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
codec {
sound-dai = <&hdmi_tx>;
};
};
};
wifi32k: wifi32k {
compatible = "pwm-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz */
};
};
&arb {
status = "okay";
};
&cec_AO {
pinctrl-0 = <&cec_ao_a_h_pins>;
pinctrl-names = "default";
status = "disabled";
hdmi-phandle = <&hdmi_tx>;
};
&cecb_AO {
pinctrl-0 = <&cec_ao_b_h_pins>;
pinctrl-names = "default";
status = "okay";
hdmi-phandle = <&hdmi_tx>;
};
&clkc_audio {
status = "okay";
};
&cpu0 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU_CLK>;
clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU1_CLK>;
clock-latency = <50000>;
};
&cpu2 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU2_CLK>;
clock-latency = <50000>;
};
&cpu3 {
cpu-supply = <&vddcpu>;
operating-points-v2 = <&cpu_opp_table>;
clocks = <&clkc CLKID_CPU3_CLK>;
clock-latency = <50000>;
};
ðmac {
status = "okay";
phy-handle = <&internal_ephy>;
phy-mode = "rmii";
};
&frddr_a {
status = "okay";
};
&frddr_b {
status = "okay";
};
&frddr_c {
status = "okay";
};
&hdmi_tx {
status = "okay";
pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
pinctrl-names = "default";
};
&hdmi_tx_tmds_port {
hdmi_tx_tmds_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
&i2c3 {
status = "okay";
pinctrl-0 = <&i2c3_sda_a_pins>, <&i2c3_sck_a_pins>;
pinctrl-names = "default";
};
&ir {
status = "okay";
pinctrl-0 = <&remote_input_ao_pins>;
pinctrl-names = "default";
};
&pdm {
pinctrl-0 = <&pdm_din0_z_pins>, <&pdm_dclk_z_pins>;
pinctrl-names = "default";
status = "okay";
};
&pwm_AO_ab {
status = "okay";
pinctrl-0 = <&pwm_ao_a_pins>;
pinctrl-names = "default";
clocks = <&xtal>;
clock-names = "clkin0";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
clocks = <&xtal>;
clock-names = "clkin1";
status = "okay";
};
&pwm_ef {
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
clocks = <&xtal>;
clock-names = "clkin0";
};
&saradc {
status = "okay";
vref-supply = <&vddio_ao1v8>;
};
/* SDIO */
&sd_emmc_a {
status = "okay";
pinctrl-0 = <&sdio_pins>;
pinctrl-1 = <&sdio_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
#address-cells = <1>;
#size-cells = <0>;
bus-width = <4>;
cap-sd-highspeed;
sd-uhs-sdr50;
max-frequency = <100000000>;
non-removable;
disable-wp;
/* WiFi firmware requires power to be kept while in suspend */
keep-power-in-suspend;
mmc-pwrseq = <&sdio_pwrseq>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddio_ao1v8>;
brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
};
};
/* SD card */
&sd_emmc_b {
status = "okay";
pinctrl-0 = <&sdcard_c_pins>;
pinctrl-1 = <&sdcard_clk_gate_c_pins>;
pinctrl-names = "default", "clk-gate";
bus-width = <4>;
cap-sd-highspeed;
max-frequency = <50000000>;
disable-wp;
cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&vddao_3v3>;
};
/* eMMC */
&sd_emmc_c {
status = "okay";
pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
bus-width = <8>;
cap-mmc-highspeed;
mmc-ddr-1_8v;
mmc-hs200-1_8v;
max-frequency = <200000000>;
non-removable;
disable-wp;
mmc-pwrseq = <&emmc_pwrseq>;
vmmc-supply = <&vddao_3v3>;
vqmmc-supply = <&emmc_1v8>;
};
&tdmif_a {
pinctrl-0 = <&tdm_a_dout0_pins>, <&tdm_a_fs_pins>, <&tdm_a_sclk_pins>;
pinctrl-names = "default";
status = "okay";
assigned-clocks = <&clkc_audio AUD_CLKID_TDM_SCLK_PAD0>,
<&clkc_audio AUD_CLKID_TDM_LRCLK_PAD0>;
assigned-clock-parents = <&clkc_audio AUD_CLKID_MST_A_SCLK>,
<&clkc_audio AUD_CLKID_MST_A_LRCLK>;
assigned-clock-rates = <0>, <0>;
};
&tdmif_b {
status = "okay";
};
&tdmin_a {
status = "okay";
};
&tdmin_b {
status = "okay";
};
&tdmout_a {
status = "okay";
};
&tdmout_b {
status = "okay";
};
&toddr_a {
status = "okay";
};
&toddr_b {
status = "okay";
};
&toddr_c {
status = "okay";
};
&tohdmitx {
status = "okay";
};
&uart_A {
status = "okay";
pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
pinctrl-names = "default";
uart-has-rtscts;
bluetooth {
compatible = "brcm,bcm43438-bt";
interrupt-parent = <&gpio_intc>;
interrupts = <IRQID_GPIOX_18 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "host-wakeup";
shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
max-speed = <2000000>;
clocks = <&wifi32k>;
clock-names = "lpo";
vbat-supply = <&vddao_3v3>;
vddio-supply = <&vddio_ao1v8>;
};
};
/* Exposed via the on-board USB to Serial FT232RL IC */
&uart_AO {
status = "okay";
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
&usb {
status = "okay";
dr_mode = "otg";
};
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Maxime Ripard. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
#include "ccu_div.h"
#include "ccu_gate.h"
#include "ccu_mp.h"
#include "ccu_mult.h"
#include "ccu_nk.h"
#include "ccu_nkm.h"
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
#include "ccu-sun50i-a64.h"
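/*
 * pll-cpux is an NKMP PLL: the effective rate is
 * parent (24 MHz) * N * K / (M * P), with P handled as a power-of-two
 * post-divider by the ccu_nkmp code.
 */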
static struct ccu_nkmp pll_cpux_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT(8, 5),
.k = _SUNXI_CCU_MULT(4, 2),
.m = _SUNXI_CCU_DIV(0, 2),
.p = _SUNXI_CCU_DIV_MAX(16, 2, 4),
.common = {
.reg = 0x000,
.hw.init = CLK_HW_INIT("pll-cpux",
"osc24M",
&ccu_nkmp_ops,
CLK_SET_RATE_UNGATE),
},
};
/*
* The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
* With sigma-delta modulation for fractional-N on the audio PLL,
* we have to use specific dividers. This means the variable divider
* can no longer be used, as the audio codec requests the exact clock
* rates we support through this mechanism. So we now hard code the
* variable divider to 1. This means the clock rates will no longer
* match the clock names.
*/
#define SUN50I_A64_PLL_AUDIO_REG 0x008
static struct ccu_sdm_setting pll_audio_sdm_table[] = {
{ .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
{ .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
};
static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
"osc24M", 0x008,
8, 7, /* N */
0, 5, /* M */
pll_audio_sdm_table, BIT(24),
0x284, BIT(31),
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX_CLOSEST(pll_video0_clk, "pll-video0",
"osc24M", 0x010,
192000000, /* Minimum rate */
1008000000, /* Maximum rate */
8, 7, /* N */
0, 4, /* M */
BIT(24), /* frac enable */
BIT(25), /* frac select */
270000000, /* frac rate 0 */
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
"osc24M", 0x018,
8, 7, /* N */
0, 4, /* M */
BIT(24), /* frac enable */
BIT(25), /* frac select */
270000000, /* frac rate 0 */
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0",
"osc24M", 0x020,
8, 5, /* N */
4, 2, /* K */
0, 2, /* M */
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
static struct ccu_nk pll_periph0_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT(8, 5),
.k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
.fixed_post_div = 2,
.common = {
.reg = 0x028,
.features = CCU_FEATURE_FIXED_POSTDIV,
.hw.init = CLK_HW_INIT("pll-periph0", "osc24M",
&ccu_nk_ops, CLK_SET_RATE_UNGATE),
},
};
static struct ccu_nk pll_periph1_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT(8, 5),
.k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
.fixed_post_div = 2,
.common = {
.reg = 0x02c,
.features = CCU_FEATURE_FIXED_POSTDIV,
.hw.init = CLK_HW_INIT("pll-periph1", "osc24M",
&ccu_nk_ops, CLK_SET_RATE_UNGATE),
},
};
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video1_clk, "pll-video1",
"osc24M", 0x030,
192000000, /* Minimum rate */
1008000000, /* Maximum rate */
8, 7, /* N */
0, 4, /* M */
BIT(24), /* frac enable */
BIT(25), /* frac select */
270000000, /* frac rate 0 */
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
"osc24M", 0x038,
8, 7, /* N */
0, 4, /* M */
BIT(24), /* frac enable */
BIT(25), /* frac select */
270000000, /* frac rate 0 */
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
/*
 * The output function can be changed to something more complex that
 * we do not handle yet.
 *
 * Hardcode the mode so that we don't fall into that case.
 */
#define SUN50I_A64_PLL_MIPI_REG 0x040
static struct ccu_nkm pll_mipi_clk = {
/*
	 * Bits 23 and 22 are called "LDO{1,2}_EN" in the SoC's user
	 * manual, and experiments show that the PLL doesn't work unless
	 * these bits are set.
*/
.enable = BIT(31) | BIT(23) | BIT(22),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT(8, 4),
.k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
.m = _SUNXI_CCU_DIV(0, 4),
.max_m_n_ratio = 3,
.min_parent_m_ratio = 24000000,
.common = {
.reg = 0x040,
.hw.init = CLK_HW_INIT("pll-mipi", "pll-video0",
&ccu_nkm_ops,
CLK_SET_RATE_UNGATE | CLK_SET_RATE_PARENT),
.features = CCU_FEATURE_CLOSEST_RATE,
.min_rate = 500000000,
.max_rate = 1400000000,
},
};
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_hsic_clk, "pll-hsic",
"osc24M", 0x044,
8, 7, /* N */
0, 4, /* M */
BIT(24), /* frac enable */
BIT(25), /* frac select */
270000000, /* frac rate 0 */
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
"osc24M", 0x048,
8, 7, /* N */
0, 4, /* M */
BIT(24), /* frac enable */
BIT(25), /* frac select */
270000000, /* frac rate 0 */
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1",
"osc24M", 0x04c,
8, 7, /* N */
0, 2, /* M */
BIT(31), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
static const char * const cpux_parents[] = { "osc32k", "osc24M",
"pll-cpux", "pll-cpux" };
static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
0x050, 16, 2, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph0" };
static const struct ccu_mux_var_prediv ahb1_predivs[] = {
{ .index = 3, .shift = 6, .width = 2 },
};
static struct ccu_div ahb1_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
.mux = {
.shift = 12,
.width = 2,
.var_predivs = ahb1_predivs,
.n_var_predivs = ARRAY_SIZE(ahb1_predivs),
},
.common = {
.reg = 0x054,
.features = CCU_FEATURE_VARIABLE_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("ahb1",
ahb1_parents,
&ccu_div_ops,
0),
},
};
static struct clk_div_table apb1_div_table[] = {
{ .val = 0, .div = 2 },
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 8 },
{ /* Sentinel */ },
};
static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1",
0x054, 8, 2, apb1_div_table, 0);
static const char * const apb2_parents[] = { "osc32k", "osc24M",
"pll-periph0-2x",
"pll-periph0-2x" };
static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
0, 5, /* M */
16, 2, /* P */
24, 2, /* mux */
0);
static const char * const ahb2_parents[] = { "ahb1", "pll-periph0" };
static const struct ccu_mux_fixed_prediv ahb2_fixed_predivs[] = {
{ .index = 1, .div = 2 },
};
static struct ccu_mux ahb2_clk = {
.mux = {
.shift = 0,
.width = 1,
.fixed_predivs = ahb2_fixed_predivs,
.n_predivs = ARRAY_SIZE(ahb2_fixed_predivs),
},
.common = {
.reg = 0x05c,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("ahb2",
ahb2_parents,
&ccu_mux_ops,
0),
},
};
static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1",
0x060, BIT(1), 0);
static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1",
0x060, BIT(5), 0);
static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1",
0x060, BIT(6), 0);
static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1",
0x060, BIT(8), 0);
static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1",
0x060, BIT(9), 0);
static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1",
0x060, BIT(10), 0);
static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1",
0x060, BIT(13), 0);
static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1",
0x060, BIT(14), 0);
static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb2",
0x060, BIT(17), 0);
static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb1",
0x060, BIT(18), 0);
static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1",
0x060, BIT(19), 0);
static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1",
0x060, BIT(20), 0);
static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1",
0x060, BIT(21), 0);
static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1",
0x060, BIT(23), 0);
static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1",
0x060, BIT(24), 0);
static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb2",
0x060, BIT(25), 0);
static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1",
0x060, BIT(28), 0);
static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb2",
0x060, BIT(29), 0);
static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1",
0x064, BIT(0), 0);
static SUNXI_CCU_GATE(bus_tcon0_clk, "bus-tcon0", "ahb1",
0x064, BIT(3), 0);
static SUNXI_CCU_GATE(bus_tcon1_clk, "bus-tcon1", "ahb1",
0x064, BIT(4), 0);
static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb1",
0x064, BIT(5), 0);
static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1",
0x064, BIT(8), 0);
static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb1",
0x064, BIT(11), 0);
static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1",
0x064, BIT(12), 0);
static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1",
0x064, BIT(20), 0);
static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1",
0x064, BIT(21), 0);
static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1",
0x064, BIT(22), 0);
static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1",
0x068, BIT(0), 0);
static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1",
0x068, BIT(1), 0);
static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1",
0x068, BIT(5), 0);
static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1",
0x068, BIT(8), 0);
static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1",
0x068, BIT(12), 0);
static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1",
0x068, BIT(13), 0);
static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1",
0x068, BIT(14), 0);
static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
0x06c, BIT(0), 0);
static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
0x06c, BIT(1), 0);
static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
0x06c, BIT(2), 0);
static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2",
0x06c, BIT(5), 0);
static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
0x06c, BIT(16), 0);
static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
0x06c, BIT(17), 0);
static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2",
0x06c, BIT(18), 0);
static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2",
0x06c, BIT(19), 0);
static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2",
0x06c, BIT(20), 0);
static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1",
0x070, BIT(7), 0);
static struct clk_div_table ths_div_table[] = {
{ .val = 0, .div = 1 },
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 6 },
{ /* Sentinel */ },
};
static const char * const ths_parents[] = { "osc24M" };
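/* The mux field is 2 bits wide, but osc24M is the only parent we know of. */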
static struct ccu_div ths_clk = {
.enable = BIT(31),
.div = _SUNXI_CCU_DIV_TABLE(0, 2, ths_div_table),
.mux = _SUNXI_CCU_MUX(24, 2),
.common = {
.reg = 0x074,
.hw.init = CLK_HW_INIT_PARENTS("ths",
ths_parents,
&ccu_div_ops,
0),
},
};
static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0",
"pll-periph1" };
static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080,
0, 4, /* M */
16, 2, /* P */
24, 2, /* mux */
BIT(31), /* gate */
0);
/*
* MMC clocks are the new timing mode (see A83T & H3) variety, but without
* the mode switch. This means they have a 2x post divider between the clock
* and the MMC module. This is not documented in the manual, but is taken
* into consideration when setting the mmc module clocks in the BSP kernel.
* Without it, MMC performance is degraded.
*
* We model it here to be consistent with other SoCs supporting this mode.
* The alternative would be to add the 2x multiplier when setting the MMC
* module clock in the MMC driver, just for the A64.
*/
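/*
 * For example, requesting 50 MHz here makes the M/P dividers produce
 * 100 MHz, which the fixed /2 post divider brings back down to 50 MHz
 * at the MMC module.
 */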
static const char * const mmc_default_parents[] = { "osc24M", "pll-periph0-2x",
"pll-periph1-2x" };
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
mmc_default_parents, 0x088,
0, 4, /* M */
16, 2, /* P */
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
mmc_default_parents, 0x08c,
0, 4, /* M */
16, 2, /* P */
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2",
mmc_default_parents, 0x090,
0, 4, /* M */
16, 2, /* P */
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static const char * const ts_parents[] = { "osc24M", "pll-periph0", };
static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x098,
0, 4, /* M */
16, 2, /* P */
24, 4, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", mmc_default_parents, 0x09c,
0, 4, /* M */
16, 2, /* P */
24, 2, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
0, 4, /* M */
16, 2, /* P */
24, 2, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
0, 4, /* M */
16, 2, /* P */
24, 2, /* mux */
BIT(31), /* gate */
0);
static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
"pll-audio-2x", "pll-audio" };
static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents,
0x0b8, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
0x0cc, BIT(8), 0);
static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
0x0cc, BIT(9), 0);
static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic",
0x0cc, BIT(10), 0);
static SUNXI_CCU_GATE(usb_hsic_12m_clk, "usb-hsic-12M", "osc12M",
0x0cc, BIT(11), 0);
static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M",
0x0cc, BIT(16), 0);
static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "usb-ohci0",
0x0cc, BIT(17), 0);
static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1" };
static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents,
0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL);
static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram",
0x100, BIT(0), 0);
static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram",
0x100, BIT(1), 0);
static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram",
0x100, BIT(2), 0);
static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
0x100, BIT(3), 0);
static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
0x104, 0, 4, 24, 3, BIT(31),
CLK_SET_RATE_PARENT);
/*
 * DSI output seems to work only when PLL_MIPI is selected. Set it and
 * prevent the mux from reparenting.
 */
#define SUN50I_A64_TCON0_CLK_REG 0x118
static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
static const u8 tcon0_table[] = { 0, 2, };
static SUNXI_CCU_MUX_TABLE_WITH_GATE_CLOSEST(tcon0_clk, "tcon0", tcon0_parents,
tcon0_table, 0x118, 24, 3, BIT(31),
CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT);
static const char * const tcon1_parents[] = { "pll-video0", "pll-video1" };
static const u8 tcon1_table[] = { 0, 2, };
static SUNXI_CCU_M_WITH_MUX_TABLE_GATE_CLOSEST(tcon1_clk, "tcon1", tcon1_parents,
tcon1_table, 0x11c,
0, 4, /* M */
24, 2, /* mux */
BIT(31), /* gate */
CLK_SET_RATE_PARENT);
static const char * const deinterlace_parents[] = { "pll-periph0", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace", deinterlace_parents,
0x124, 0, 4, 24, 3, BIT(31), 0);
static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M",
0x130, BIT(31), 0);
static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
0x134, 16, 4, 24, 3, BIT(31), 0);
static const char * const csi_mclk_parents[] = { "osc24M", "pll-video1", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents,
0x134, 0, 5, 8, 3, BIT(15), 0);
static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
0x140, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x",
0x140, BIT(30), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
0x144, BIT(31), 0);
static const char * const hdmi_parents[] = { "pll-video0", "pll-video1" };
static SUNXI_CCU_M_WITH_MUX_GATE_CLOSEST(hdmi_clk, "hdmi", hdmi_parents,
0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M",
0x154, BIT(31), 0);
static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x",
"pll-ddr0", "pll-ddr1" };
static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
static const char * const dsi_dphy_parents[] = { "pll-video0", "pll-periph0" };
static const u8 dsi_dphy_table[] = { 0, 2, };
static SUNXI_CCU_M_WITH_MUX_TABLE_GATE_CLOSEST(dsi_dphy_clk, "dsi-dphy",
dsi_dphy_parents, dsi_dphy_table,
0x168, 0, 4, 8, 2, BIT(15), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
/* Fixed Factor clocks */
static CLK_FIXED_FACTOR_FW_NAME(osc12M_clk, "osc12M", "hosc", 2, 1, 0);
static const struct clk_hw *clk_parent_pll_audio[] = {
&pll_audio_base_clk.common.hw
};
/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio",
clk_parent_pll_audio,
1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x",
clk_parent_pll_audio,
2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x",
clk_parent_pll_audio,
1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x",
clk_parent_pll_audio,
1, 2, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HW(pll_periph0_2x_clk, "pll-periph0-2x",
&pll_periph0_clk.common.hw,
1, 2, 0);
static CLK_FIXED_FACTOR_HW(pll_periph1_2x_clk, "pll-periph1-2x",
&pll_periph1_clk.common.hw,
1, 2, 0);
static CLK_FIXED_FACTOR_HW(pll_video0_2x_clk, "pll-video0-2x",
&pll_video0_clk.common.hw,
1, 2, CLK_SET_RATE_PARENT);
static struct ccu_common *sun50i_a64_ccu_clks[] = {
&pll_cpux_clk.common,
&pll_audio_base_clk.common,
&pll_video0_clk.common,
&pll_ve_clk.common,
&pll_ddr0_clk.common,
&pll_periph0_clk.common,
&pll_periph1_clk.common,
&pll_video1_clk.common,
&pll_gpu_clk.common,
&pll_mipi_clk.common,
&pll_hsic_clk.common,
&pll_de_clk.common,
&pll_ddr1_clk.common,
&cpux_clk.common,
&axi_clk.common,
&ahb1_clk.common,
&apb1_clk.common,
&apb2_clk.common,
&ahb2_clk.common,
&bus_mipi_dsi_clk.common,
&bus_ce_clk.common,
&bus_dma_clk.common,
&bus_mmc0_clk.common,
&bus_mmc1_clk.common,
&bus_mmc2_clk.common,
&bus_nand_clk.common,
&bus_dram_clk.common,
&bus_emac_clk.common,
&bus_ts_clk.common,
&bus_hstimer_clk.common,
&bus_spi0_clk.common,
&bus_spi1_clk.common,
&bus_otg_clk.common,
&bus_ehci0_clk.common,
&bus_ehci1_clk.common,
&bus_ohci0_clk.common,
&bus_ohci1_clk.common,
&bus_ve_clk.common,
&bus_tcon0_clk.common,
&bus_tcon1_clk.common,
&bus_deinterlace_clk.common,
&bus_csi_clk.common,
&bus_hdmi_clk.common,
&bus_de_clk.common,
&bus_gpu_clk.common,
&bus_msgbox_clk.common,
&bus_spinlock_clk.common,
&bus_codec_clk.common,
&bus_spdif_clk.common,
&bus_pio_clk.common,
&bus_ths_clk.common,
&bus_i2s0_clk.common,
&bus_i2s1_clk.common,
&bus_i2s2_clk.common,
&bus_i2c0_clk.common,
&bus_i2c1_clk.common,
&bus_i2c2_clk.common,
&bus_scr_clk.common,
&bus_uart0_clk.common,
&bus_uart1_clk.common,
&bus_uart2_clk.common,
&bus_uart3_clk.common,
&bus_uart4_clk.common,
&bus_dbg_clk.common,
&ths_clk.common,
&nand_clk.common,
&mmc0_clk.common,
&mmc1_clk.common,
&mmc2_clk.common,
&ts_clk.common,
&ce_clk.common,
&spi0_clk.common,
&spi1_clk.common,
&i2s0_clk.common,
&i2s1_clk.common,
&i2s2_clk.common,
&spdif_clk.common,
&usb_phy0_clk.common,
&usb_phy1_clk.common,
&usb_hsic_clk.common,
&usb_hsic_12m_clk.common,
&usb_ohci0_clk.common,
&usb_ohci1_clk.common,
&dram_clk.common,
&dram_ve_clk.common,
&dram_csi_clk.common,
&dram_deinterlace_clk.common,
&dram_ts_clk.common,
&de_clk.common,
&tcon0_clk.common,
&tcon1_clk.common,
&deinterlace_clk.common,
&csi_misc_clk.common,
&csi_sclk_clk.common,
&csi_mclk_clk.common,
&ve_clk.common,
&ac_dig_clk.common,
&ac_dig_4x_clk.common,
&avs_clk.common,
&hdmi_clk.common,
&hdmi_ddc_clk.common,
&mbus_clk.common,
&dsi_dphy_clk.common,
&gpu_clk.common,
};
static struct clk_hw_onecell_data sun50i_a64_hw_clks = {
.hws = {
[CLK_OSC_12M] = &osc12M_clk.hw,
[CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
[CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
[CLK_PLL_AUDIO] = &pll_audio_clk.hw,
[CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
[CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
[CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
[CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
[CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
[CLK_PLL_VE] = &pll_ve_clk.common.hw,
[CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
[CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
[CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
[CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
[CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
[CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
[CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
[CLK_PLL_MIPI] = &pll_mipi_clk.common.hw,
[CLK_PLL_HSIC] = &pll_hsic_clk.common.hw,
[CLK_PLL_DE] = &pll_de_clk.common.hw,
[CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
[CLK_CPUX] = &cpux_clk.common.hw,
[CLK_AXI] = &axi_clk.common.hw,
[CLK_AHB1] = &ahb1_clk.common.hw,
[CLK_APB1] = &apb1_clk.common.hw,
[CLK_APB2] = &apb2_clk.common.hw,
[CLK_AHB2] = &ahb2_clk.common.hw,
[CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
[CLK_BUS_CE] = &bus_ce_clk.common.hw,
[CLK_BUS_DMA] = &bus_dma_clk.common.hw,
[CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
[CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
[CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
[CLK_BUS_NAND] = &bus_nand_clk.common.hw,
[CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
[CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
[CLK_BUS_TS] = &bus_ts_clk.common.hw,
[CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
[CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
[CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
[CLK_BUS_OTG] = &bus_otg_clk.common.hw,
[CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
[CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
[CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
[CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
[CLK_BUS_VE] = &bus_ve_clk.common.hw,
[CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw,
[CLK_BUS_TCON1] = &bus_tcon1_clk.common.hw,
[CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
[CLK_BUS_CSI] = &bus_csi_clk.common.hw,
[CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
[CLK_BUS_DE] = &bus_de_clk.common.hw,
[CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
[CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
[CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
[CLK_BUS_CODEC] = &bus_codec_clk.common.hw,
[CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
[CLK_BUS_PIO] = &bus_pio_clk.common.hw,
[CLK_BUS_THS] = &bus_ths_clk.common.hw,
[CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
[CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
[CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
[CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
[CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
[CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
[CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
[CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
[CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
[CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
[CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
[CLK_BUS_SCR] = &bus_scr_clk.common.hw,
[CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
[CLK_THS] = &ths_clk.common.hw,
[CLK_NAND] = &nand_clk.common.hw,
[CLK_MMC0] = &mmc0_clk.common.hw,
[CLK_MMC1] = &mmc1_clk.common.hw,
[CLK_MMC2] = &mmc2_clk.common.hw,
[CLK_TS] = &ts_clk.common.hw,
[CLK_CE] = &ce_clk.common.hw,
[CLK_SPI0] = &spi0_clk.common.hw,
[CLK_SPI1] = &spi1_clk.common.hw,
[CLK_I2S0] = &i2s0_clk.common.hw,
[CLK_I2S1] = &i2s1_clk.common.hw,
[CLK_I2S2] = &i2s2_clk.common.hw,
[CLK_SPDIF] = &spdif_clk.common.hw,
[CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
[CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
[CLK_USB_HSIC] = &usb_hsic_clk.common.hw,
[CLK_USB_HSIC_12M] = &usb_hsic_12m_clk.common.hw,
[CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
[CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
[CLK_DRAM] = &dram_clk.common.hw,
[CLK_DRAM_VE] = &dram_ve_clk.common.hw,
[CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
[CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw,
[CLK_DRAM_TS] = &dram_ts_clk.common.hw,
[CLK_DE] = &de_clk.common.hw,
[CLK_TCON0] = &tcon0_clk.common.hw,
[CLK_TCON1] = &tcon1_clk.common.hw,
[CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
[CLK_CSI_MISC] = &csi_misc_clk.common.hw,
[CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
[CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
[CLK_VE] = &ve_clk.common.hw,
[CLK_AC_DIG] = &ac_dig_clk.common.hw,
[CLK_AC_DIG_4X] = &ac_dig_4x_clk.common.hw,
[CLK_AVS] = &avs_clk.common.hw,
[CLK_HDMI] = &hdmi_clk.common.hw,
[CLK_HDMI_DDC] = &hdmi_ddc_clk.common.hw,
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_DSI_DPHY] = &dsi_dphy_clk.common.hw,
[CLK_GPU] = &gpu_clk.common.hw,
},
.num = CLK_NUMBER,
};
static const struct ccu_reset_map sun50i_a64_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
[RST_DRAM] = { 0x0f4, BIT(31) },
[RST_MBUS] = { 0x0fc, BIT(31) },
[RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) },
[RST_BUS_CE] = { 0x2c0, BIT(5) },
[RST_BUS_DMA] = { 0x2c0, BIT(6) },
[RST_BUS_MMC0] = { 0x2c0, BIT(8) },
[RST_BUS_MMC1] = { 0x2c0, BIT(9) },
[RST_BUS_MMC2] = { 0x2c0, BIT(10) },
[RST_BUS_NAND] = { 0x2c0, BIT(13) },
[RST_BUS_DRAM] = { 0x2c0, BIT(14) },
[RST_BUS_EMAC] = { 0x2c0, BIT(17) },
[RST_BUS_TS] = { 0x2c0, BIT(18) },
[RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
[RST_BUS_SPI0] = { 0x2c0, BIT(20) },
[RST_BUS_SPI1] = { 0x2c0, BIT(21) },
[RST_BUS_OTG] = { 0x2c0, BIT(23) },
[RST_BUS_EHCI0] = { 0x2c0, BIT(24) },
[RST_BUS_EHCI1] = { 0x2c0, BIT(25) },
[RST_BUS_OHCI0] = { 0x2c0, BIT(28) },
[RST_BUS_OHCI1] = { 0x2c0, BIT(29) },
[RST_BUS_VE] = { 0x2c4, BIT(0) },
[RST_BUS_TCON0] = { 0x2c4, BIT(3) },
[RST_BUS_TCON1] = { 0x2c4, BIT(4) },
[RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) },
[RST_BUS_CSI] = { 0x2c4, BIT(8) },
[RST_BUS_HDMI0] = { 0x2c4, BIT(10) },
[RST_BUS_HDMI1] = { 0x2c4, BIT(11) },
[RST_BUS_DE] = { 0x2c4, BIT(12) },
[RST_BUS_GPU] = { 0x2c4, BIT(20) },
[RST_BUS_MSGBOX] = { 0x2c4, BIT(21) },
[RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) },
[RST_BUS_DBG] = { 0x2c4, BIT(31) },
[RST_BUS_LVDS] = { 0x2c8, BIT(0) },
[RST_BUS_CODEC] = { 0x2d0, BIT(0) },
[RST_BUS_SPDIF] = { 0x2d0, BIT(1) },
[RST_BUS_THS] = { 0x2d0, BIT(8) },
[RST_BUS_I2S0] = { 0x2d0, BIT(12) },
[RST_BUS_I2S1] = { 0x2d0, BIT(13) },
[RST_BUS_I2S2] = { 0x2d0, BIT(14) },
[RST_BUS_I2C0] = { 0x2d8, BIT(0) },
[RST_BUS_I2C1] = { 0x2d8, BIT(1) },
[RST_BUS_I2C2] = { 0x2d8, BIT(2) },
[RST_BUS_SCR] = { 0x2d8, BIT(5) },
[RST_BUS_UART0] = { 0x2d8, BIT(16) },
[RST_BUS_UART1] = { 0x2d8, BIT(17) },
[RST_BUS_UART2] = { 0x2d8, BIT(18) },
[RST_BUS_UART3] = { 0x2d8, BIT(19) },
[RST_BUS_UART4] = { 0x2d8, BIT(20) },
};
static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
.ccu_clks = sun50i_a64_ccu_clks,
.num_ccu_clks = ARRAY_SIZE(sun50i_a64_ccu_clks),
.hw_clks = &sun50i_a64_hw_clks,
.resets = sun50i_a64_ccu_resets,
.num_resets = ARRAY_SIZE(sun50i_a64_ccu_resets),
};
static struct ccu_pll_nb sun50i_a64_pll_cpu_nb = {
.common = &pll_cpux_clk.common,
	/* copied from pll_cpux_clk */
.enable = BIT(31),
.lock = BIT(28),
};
static struct ccu_mux_nb sun50i_a64_cpu_nb = {
.common = &cpux_clk.common,
.cm = &cpux_clk.mux,
.delay_us = 1, /* > 8 clock cycles at 24 MHz */
.bypass_index = 1, /* index of 24 MHz oscillator */
};
static int sun50i_a64_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
int ret;
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN50I_A64_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
writel(val | (0 << 16), reg + SUN50I_A64_PLL_AUDIO_REG);
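	/*
	 * Hardcode PLL_MIPI to the only output mode we handle (see the
	 * comment above SUN50I_A64_PLL_MIPI_REG).
	 */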
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
/* Set PLL MIPI as parent for TCON0 */
val = readl(reg + SUN50I_A64_TCON0_CLK_REG);
val &= ~GENMASK(26, 24);
writel(val | (0 << 24), reg + SUN50I_A64_TCON0_CLK_REG);
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
if (ret)
return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun50i_a64_pll_cpu_nb);
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
&sun50i_a64_cpu_nb);
return 0;
}
static const struct of_device_id sun50i_a64_ccu_ids[] = {
{ .compatible = "allwinner,sun50i-a64-ccu" },
{ }
};
MODULE_DEVICE_TABLE(of, sun50i_a64_ccu_ids);
static struct platform_driver sun50i_a64_ccu_driver = {
.probe = sun50i_a64_ccu_probe,
.driver = {
.name = "sun50i-a64-ccu",
.suppress_bind_attrs = true,
.of_match_table = sun50i_a64_ccu_ids,
},
};
module_platform_driver(sun50i_a64_ccu_driver);
MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A64 CCU");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/ch11.h>
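/*
 * Test-mode product IDs defined by the USB-IF Embedded Host High-Speed
 * Electrical Test (EHSET) specification.
 */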
#define TEST_SE0_NAK_PID 0x0101
#define TEST_J_PID 0x0102
#define TEST_K_PID 0x0103
#define TEST_PACKET_PID 0x0104
#define TEST_HS_HOST_PORT_SUSPEND_RESUME 0x0106
#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107
#define TEST_SINGLE_STEP_SET_FEATURE 0x0108
extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
const struct usb_device_id *id);
/*
 * A list of USB hubs that require the power to the port to be disabled
 * before starting the testing procedures.
 */
static const struct usb_device_id ehset_hub_list[] = {
{ USB_DEVICE(0x0424, 0x4502) },
{ USB_DEVICE(0x0424, 0x4913) },
{ USB_DEVICE(0x0451, 0x8027) },
{ }
};
static int ehset_prepare_port_for_testing(struct usb_device *hub_udev, u16 portnum)
{
int ret = 0;
	/*
	 * The USB2.0 spec chapter 11.24.2.13 says that the USB port under
	 * test needs to be put in suspend before sending the test command.
	 * Most hubs don't enforce this precondition, but there are some
	 * hubs which need the power to the port to be disabled before
	 * starting the test.
	 */
if (usb_device_match_id(hub_udev, ehset_hub_list)) {
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_CLEAR_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_ENABLE,
portnum, NULL, 0, 1000, GFP_KERNEL);
		/*
		 * Wait for the port to be disabled. The 100 ms delay is
		 * arbitrary, but it has worked every time.
		 */
msleep(100);
} else {
/*
* For the hubs which are compliant with the spec,
* put the port in SUSPEND.
*/
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_SUSPEND,
portnum, NULL, 0, 1000, GFP_KERNEL);
}
return ret;
}
static int ehset_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int ret = -EINVAL;
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_device *hub_udev = dev->parent;
struct usb_device_descriptor buf;
u8 portnum = dev->portnum;
u16 test_pid = le16_to_cpu(dev->descriptor.idProduct);
switch (test_pid) {
case TEST_SE0_NAK_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_SE0_NAK << 8) | portnum,
NULL, 0, 1000, GFP_KERNEL);
break;
case TEST_J_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_J << 8) | portnum, NULL, 0,
1000, GFP_KERNEL);
break;
case TEST_K_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_K << 8) | portnum, NULL, 0,
1000, GFP_KERNEL);
break;
case TEST_PACKET_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_PACKET << 8) | portnum,
NULL, 0, 1000, GFP_KERNEL);
break;
case TEST_HS_HOST_PORT_SUSPEND_RESUME:
/* Test: wait for 15secs -> suspend -> 15secs delay -> resume */
msleep(15 * 1000);
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_SUSPEND,
portnum, NULL, 0, 1000, GFP_KERNEL);
if (ret < 0)
break;
msleep(15 * 1000);
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_CLEAR_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_SUSPEND,
portnum, NULL, 0, 1000, GFP_KERNEL);
break;
case TEST_SINGLE_STEP_GET_DEV_DESC:
/* Test: wait for 15secs -> GetDescriptor request */
msleep(15 * 1000);
ret = usb_control_msg_recv(dev, 0, USB_REQ_GET_DESCRIPTOR,
USB_DIR_IN, USB_DT_DEVICE << 8, 0,
&buf, USB_DT_DEVICE_SIZE,
USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
break;
case TEST_SINGLE_STEP_SET_FEATURE:
/*
* GetDescriptor SETUP request -> 15secs delay -> IN & STATUS
*
* Note, this test is only supported on root hubs since the
* SetPortFeature handling can only be done inside the HCD's
* hub_control callback function.
*/
if (hub_udev != dev->bus->root_hub) {
dev_err(&intf->dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n");
break;
}
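		/*
		 * Test selector 6 has no USB_TEST_* define in ch11.h; it
		 * requests the EHSET SINGLE_STEP_SET_FEATURE sequence from
		 * the root hub's HCD.
		 */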
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(6 << 8) | portnum, NULL, 0,
60 * 1000, GFP_KERNEL);
break;
default:
dev_err(&intf->dev, "%s: unsupported PID: 0x%x\n",
__func__, test_pid);
}
return ret;
}
static void ehset_disconnect(struct usb_interface *intf)
{
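	/* Nothing is allocated in probe, so there is nothing to release here. */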
}
static const struct usb_device_id ehset_id_table[] = {
{ USB_DEVICE(0x1a0a, TEST_SE0_NAK_PID) },
{ USB_DEVICE(0x1a0a, TEST_J_PID) },
{ USB_DEVICE(0x1a0a, TEST_K_PID) },
{ USB_DEVICE(0x1a0a, TEST_PACKET_PID) },
{ USB_DEVICE(0x1a0a, TEST_HS_HOST_PORT_SUSPEND_RESUME) },
{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_GET_DEV_DESC) },
{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_SET_FEATURE) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ehset_id_table);
static struct usb_driver ehset_driver = {
.name = "usb_ehset_test",
.probe = ehset_probe,
.disconnect = ehset_disconnect,
.id_table = ehset_id_table,
};
module_usb_driver(ehset_driver);
MODULE_DESCRIPTION("USB Driver for EHSET Test Fixture");
MODULE_LICENSE("GPL v2");
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UV_UV_H
#define _ASM_X86_UV_UV_H
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC};
#ifdef CONFIG_X86_UV
#include <linux/efi.h>
#define UV_PROC_NODE "sgi_uv"
static inline int uv(int uvtype)
{
/* uv(0) is "any" */
if (uvtype >= 0 && uvtype <= 30)
return 1 << uvtype;
return 1;
}
extern unsigned long uv_systab_phys;
extern enum uv_system_type get_uv_system_type(void);
static inline bool is_early_uv_system(void)
{
return uv_systab_phys && uv_systab_phys != EFI_INVALID_TABLE_ADDR;
}
extern int is_uv_system(void);
extern int is_uv_hubbed(int uvtype);
extern void uv_cpu_init(void);
extern void uv_nmi_init(void);
extern void uv_system_init(void);
#else /* !X86_UV */
static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline bool is_early_uv_system(void) { return false; }
static inline int is_uv_system(void) { return 0; }
static inline int is_uv_hubbed(int uv) { return 0; }
static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
#endif /* X86_UV */
#endif /* _ASM_X86_UV_UV_H */
|
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static void
nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
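	/* One compression tag per 0x40-byte tile, split across the RAM partitions. */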
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
#ifdef __BIG_ENDIAN
tile->zcomp |= 0x40000000;
#endif
}
}
static const struct nvkm_fb_func
nv35_fb = {
.tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv35_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
};
int
nv35_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv35_fb, device, type, inst, pfb);
}
|
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Rafael Antognolli <[email protected]>
*
*/
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include "drm_dp_helper_internal.h"
struct drm_dp_aux_dev {
unsigned index;
struct drm_dp_aux *aux;
struct device *dev;
struct kref refcount;
atomic_t usecount;
};
#define DRM_AUX_MINORS 256
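/* DPCD addresses are 20 bits wide, so the AUX address space spans 1 MiB */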
#define AUX_MAX_OFFSET (1 << 20)
static DEFINE_IDR(aux_idr);
static DEFINE_MUTEX(aux_idr_mutex);
static struct class *drm_dp_aux_dev_class;
static int drm_dev_major = -1;
static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
{
struct drm_dp_aux_dev *aux_dev = NULL;
mutex_lock(&aux_idr_mutex);
aux_dev = idr_find(&aux_idr, index);
if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
aux_dev = NULL;
mutex_unlock(&aux_idr_mutex);
return aux_dev;
}
static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
int index;
aux_dev = kzalloc(sizeof(*aux_dev), GFP_KERNEL);
if (!aux_dev)
return ERR_PTR(-ENOMEM);
aux_dev->aux = aux;
atomic_set(&aux_dev->usecount, 1);
kref_init(&aux_dev->refcount);
mutex_lock(&aux_idr_mutex);
index = idr_alloc(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, GFP_KERNEL);
mutex_unlock(&aux_idr_mutex);
if (index < 0) {
kfree(aux_dev);
return ERR_PTR(index);
}
aux_dev->index = index;
return aux_dev;
}
static void release_drm_dp_aux_dev(struct kref *ref)
{
struct drm_dp_aux_dev *aux_dev =
container_of(ref, struct drm_dp_aux_dev, refcount);
kfree(aux_dev);
}
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t res;
struct drm_dp_aux_dev *aux_dev =
drm_dp_aux_dev_get_by_minor(MINOR(dev->devt));
if (!aux_dev)
return -ENODEV;
res = sprintf(buf, "%s\n", aux_dev->aux->name);
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
return res;
}
static DEVICE_ATTR_RO(name);
static struct attribute *drm_dp_aux_attrs[] = {
&dev_attr_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(drm_dp_aux);
static int auxdev_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct drm_dp_aux_dev *aux_dev;
aux_dev = drm_dp_aux_dev_get_by_minor(minor);
if (!aux_dev)
return -ENODEV;
file->private_data = aux_dev;
return 0;
}
static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence)
{
return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET);
}
static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
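	/* Clamp to the DPCD address space and read in AUX payload sized chunks. */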
iov_iter_truncate(to, AUX_MAX_OFFSET - pos);
while (iov_iter_count(to)) {
uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
ssize_t todo = min(iov_iter_count(to), sizeof(buf));
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
if (copy_to_iter(buf, res, to) != res) {
res = -EFAULT;
break;
}
pos += res;
}
if (pos != iocb->ki_pos)
res = pos - iocb->ki_pos;
iocb->ki_pos = pos;
if (atomic_dec_and_test(&aux_dev->usecount))
wake_up_var(&aux_dev->usecount);
return res;
}
static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data;
loff_t pos = iocb->ki_pos;
ssize_t res = 0;
if (!atomic_inc_not_zero(&aux_dev->usecount))
return -ENODEV;
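	/* Clamp to the DPCD address space and write in AUX payload sized chunks. */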
iov_iter_truncate(from, AUX_MAX_OFFSET - pos);
while (iov_iter_count(from)) {
uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES];
ssize_t todo = min(iov_iter_count(from), sizeof(buf));
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
if (!copy_from_iter_full(buf, todo, from)) {
res = -EFAULT;
break;
}
res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
if (res <= 0)
break;
pos += res;
}
if (pos != iocb->ki_pos)
res = pos - iocb->ki_pos;
iocb->ki_pos = pos;
if (atomic_dec_and_test(&aux_dev->usecount))
wake_up_var(&aux_dev->usecount);
return res;
}
static int auxdev_release(struct inode *inode, struct file *file)
{
struct drm_dp_aux_dev *aux_dev = file->private_data;
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
return 0;
}
static const struct file_operations auxdev_fops = {
.owner = THIS_MODULE,
.llseek = auxdev_llseek,
.read_iter = auxdev_read_iter,
.write_iter = auxdev_write_iter,
.open = auxdev_open,
.release = auxdev_release,
};
#define to_auxdev(d) container_of(d, struct drm_dp_aux_dev, aux)
static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *iter, *aux_dev = NULL;
int id;
/* don't increase kref count here because this function should only be
* used by drm_dp_aux_unregister_devnode. Thus, it will always have at
* least one reference - the one that drm_dp_aux_register_devnode
* created
*/
mutex_lock(&aux_idr_mutex);
idr_for_each_entry(&aux_idr, iter, id) {
if (iter->aux == aux) {
aux_dev = iter;
break;
}
}
mutex_unlock(&aux_idr_mutex);
return aux_dev;
}
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
unsigned int minor;
aux_dev = drm_dp_aux_dev_get_by_aux(aux);
if (!aux_dev) /* attach must have failed */
return;
/*
* As some AUX adapters may exist as platform devices which outlive their respective DRM
* devices, we clear drm_dev to ensure that we never accidentally reference a stale pointer
*/
aux->drm_dev = NULL;
mutex_lock(&aux_idr_mutex);
idr_remove(&aux_idr, aux_dev->index);
mutex_unlock(&aux_idr_mutex);
atomic_dec(&aux_dev->usecount);
wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount));
minor = aux_dev->index;
if (aux_dev->dev)
device_destroy(drm_dp_aux_dev_class,
MKDEV(drm_dev_major, minor));
DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
}
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
int res;
aux_dev = alloc_drm_dp_aux_dev(aux);
if (IS_ERR(aux_dev))
return PTR_ERR(aux_dev);
aux_dev->dev = device_create(drm_dp_aux_dev_class, aux->dev,
MKDEV(drm_dev_major, aux_dev->index), NULL,
"drm_dp_aux%d", aux_dev->index);
if (IS_ERR(aux_dev->dev)) {
res = PTR_ERR(aux_dev->dev);
aux_dev->dev = NULL;
goto error;
}
DRM_DEBUG("drm_dp_aux_dev: aux [%s] registered as minor %d\n",
aux->name, aux_dev->index);
return 0;
error:
drm_dp_aux_unregister_devnode(aux);
return res;
}
int drm_dp_aux_dev_init(void)
{
int res;
	drm_dp_aux_dev_class = class_create("drm_dp_aux_dev");
	if (IS_ERR(drm_dp_aux_dev_class))
		return PTR_ERR(drm_dp_aux_dev_class);
drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
res = register_chrdev(0, "aux", &auxdev_fops);
if (res < 0)
goto out;
drm_dev_major = res;
return 0;
out:
class_destroy(drm_dp_aux_dev_class);
return res;
}
void drm_dp_aux_dev_exit(void)
{
unregister_chrdev(drm_dev_major, "aux");
class_destroy(drm_dp_aux_dev_class);
}
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Linaro Ltd.
* Author: Sam Protsenko <[email protected]>
*
* Common Clock Framework support for Exynos850 SoC.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/exynos850.h>
#include "clk.h"
#include "clk-cpu.h"
#include "clk-exynos-arm64.h"
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (CLK_DOUT_CPUCL1_SWITCH + 1)
#define CLKS_NR_APM (CLK_GOUT_SYSREG_APM_PCLK + 1)
#define CLKS_NR_AUD (CLK_GOUT_AUD_CMU_AUD_PCLK + 1)
#define CLKS_NR_CMGP (CLK_GOUT_SYSREG_CMGP_PCLK + 1)
#define CLKS_NR_CPUCL0 (CLK_CLUSTER0_SCLK + 1)
#define CLKS_NR_CPUCL1 (CLK_CLUSTER1_SCLK + 1)
#define CLKS_NR_G3D (CLK_GOUT_G3D_SYSREG_PCLK + 1)
#define CLKS_NR_HSI (CLK_GOUT_HSI_CMU_HSI_PCLK + 1)
#define CLKS_NR_IS (CLK_GOUT_IS_SYSREG_PCLK + 1)
#define CLKS_NR_MFCMSCL (CLK_GOUT_MFCMSCL_SYSREG_PCLK + 1)
#define CLKS_NR_PERI (CLK_GOUT_BUSIF_TMU_PCLK + 1)
#define CLKS_NR_CORE (CLK_GOUT_SPDMA_CORE_ACLK + 1)
#define CLKS_NR_DPU (CLK_GOUT_DPU_SYSREG_PCLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
/* Register Offset definitions for CMU_TOP (0x120e0000) */
#define PLL_LOCKTIME_PLL_MMC 0x0000
#define PLL_LOCKTIME_PLL_SHARED0 0x0004
#define PLL_LOCKTIME_PLL_SHARED1 0x0008
#define PLL_CON0_PLL_MMC 0x0100
#define PLL_CON3_PLL_MMC 0x010c
#define PLL_CON0_PLL_SHARED0 0x0140
#define PLL_CON3_PLL_SHARED0 0x014c
#define PLL_CON0_PLL_SHARED1 0x0180
#define PLL_CON3_PLL_SHARED1 0x018c
#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1000
#define CLK_CON_MUX_MUX_CLKCMU_AUD 0x1004
#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
#define CLK_CON_MUX_MUX_CLKCMU_CORE_SSS 0x1020
#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG 0x1024
#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x1028
#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG 0x102c
#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1030
#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1034
#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x1038
#define CLK_CON_MUX_MUX_CLKCMU_HSI_BUS 0x103c
#define CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD 0x1040
#define CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD 0x1044
#define CLK_CON_MUX_MUX_CLKCMU_IS_BUS 0x1048
#define CLK_CON_MUX_MUX_CLKCMU_IS_GDC 0x104c
#define CLK_CON_MUX_MUX_CLKCMU_IS_ITP 0x1050
#define CLK_CON_MUX_MUX_CLKCMU_IS_VRA 0x1054
#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG 0x1058
#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M 0x105c
#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC 0x1060
#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC 0x1064
#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1070
#define CLK_CON_MUX_MUX_CLKCMU_PERI_IP 0x1074
#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART 0x1078
#define CLK_CON_DIV_CLKCMU_APM_BUS 0x180c
#define CLK_CON_DIV_CLKCMU_AUD 0x1810
#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1820
#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824
#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
#define CLK_CON_DIV_CLKCMU_CORE_SSS 0x182c
#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG 0x1830
#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1834
#define CLK_CON_DIV_CLKCMU_CPUCL1_DBG 0x1838
#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
#define CLK_CON_DIV_CLKCMU_DPU 0x1840
#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1844
#define CLK_CON_DIV_CLKCMU_HSI_BUS 0x1848
#define CLK_CON_DIV_CLKCMU_HSI_MMC_CARD 0x184c
#define CLK_CON_DIV_CLKCMU_HSI_USB20DRD 0x1850
#define CLK_CON_DIV_CLKCMU_IS_BUS 0x1854
#define CLK_CON_DIV_CLKCMU_IS_GDC 0x1858
#define CLK_CON_DIV_CLKCMU_IS_ITP 0x185c
#define CLK_CON_DIV_CLKCMU_IS_VRA 0x1860
#define CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG 0x1864
#define CLK_CON_DIV_CLKCMU_MFCMSCL_M2M 0x1868
#define CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC 0x186c
#define CLK_CON_DIV_CLKCMU_MFCMSCL_MFC 0x1870
#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x187c
#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1880
#define CLK_CON_DIV_CLKCMU_PERI_UART 0x1884
#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x188c
#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x1890
#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x1894
#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1898
#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x189c
#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18a0
#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
#define CLK_CON_GAT_GATE_CLKCMU_AUD 0x200c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
#define CLK_CON_GAT_GATE_CLKCMU_CORE_SSS 0x2028
#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG 0x202c
#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2030
#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG 0x2034
#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2038
#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x203c
#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2040
#define CLK_CON_GAT_GATE_CLKCMU_HSI_BUS 0x2044
#define CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD 0x2048
#define CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD 0x204c
#define CLK_CON_GAT_GATE_CLKCMU_IS_BUS 0x2050
#define CLK_CON_GAT_GATE_CLKCMU_IS_GDC 0x2054
#define CLK_CON_GAT_GATE_CLKCMU_IS_ITP 0x2058
#define CLK_CON_GAT_GATE_CLKCMU_IS_VRA 0x205c
#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG 0x2060
#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M 0x2064
#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC 0x2068
#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC 0x206c
#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x2080
#define CLK_CON_GAT_GATE_CLKCMU_PERI_IP 0x2084
#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART 0x2088
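/* CMU_TOP registers handled by the common Samsung CMU code (e.g. saved and restored across suspend) */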
static const unsigned long top_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_MMC,
PLL_LOCKTIME_PLL_SHARED0,
PLL_LOCKTIME_PLL_SHARED1,
PLL_CON0_PLL_MMC,
PLL_CON3_PLL_MMC,
PLL_CON0_PLL_SHARED0,
PLL_CON3_PLL_SHARED0,
PLL_CON0_PLL_SHARED1,
PLL_CON3_PLL_SHARED1,
CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
CLK_CON_MUX_MUX_CLKCMU_AUD,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
CLK_CON_MUX_MUX_CLKCMU_CORE_SSS,
CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG,
CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG,
CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
CLK_CON_MUX_MUX_CLKCMU_DPU,
CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
CLK_CON_MUX_MUX_CLKCMU_HSI_BUS,
CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD,
CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD,
CLK_CON_MUX_MUX_CLKCMU_IS_BUS,
CLK_CON_MUX_MUX_CLKCMU_IS_GDC,
CLK_CON_MUX_MUX_CLKCMU_IS_ITP,
CLK_CON_MUX_MUX_CLKCMU_IS_VRA,
CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG,
CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M,
CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC,
CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC,
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
CLK_CON_MUX_MUX_CLKCMU_PERI_IP,
CLK_CON_MUX_MUX_CLKCMU_PERI_UART,
CLK_CON_DIV_CLKCMU_APM_BUS,
CLK_CON_DIV_CLKCMU_AUD,
CLK_CON_DIV_CLKCMU_CORE_BUS,
CLK_CON_DIV_CLKCMU_CORE_CCI,
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
CLK_CON_DIV_CLKCMU_CORE_SSS,
CLK_CON_DIV_CLKCMU_CPUCL0_DBG,
CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
CLK_CON_DIV_CLKCMU_CPUCL1_DBG,
CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
CLK_CON_DIV_CLKCMU_DPU,
CLK_CON_DIV_CLKCMU_G3D_SWITCH,
CLK_CON_DIV_CLKCMU_HSI_BUS,
CLK_CON_DIV_CLKCMU_HSI_MMC_CARD,
CLK_CON_DIV_CLKCMU_HSI_USB20DRD,
CLK_CON_DIV_CLKCMU_IS_BUS,
CLK_CON_DIV_CLKCMU_IS_GDC,
CLK_CON_DIV_CLKCMU_IS_ITP,
CLK_CON_DIV_CLKCMU_IS_VRA,
CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG,
CLK_CON_DIV_CLKCMU_MFCMSCL_M2M,
CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC,
CLK_CON_DIV_CLKCMU_MFCMSCL_MFC,
CLK_CON_DIV_CLKCMU_PERI_BUS,
CLK_CON_DIV_CLKCMU_PERI_IP,
CLK_CON_DIV_CLKCMU_PERI_UART,
CLK_CON_DIV_PLL_SHARED0_DIV2,
CLK_CON_DIV_PLL_SHARED0_DIV3,
CLK_CON_DIV_PLL_SHARED0_DIV4,
CLK_CON_DIV_PLL_SHARED1_DIV2,
CLK_CON_DIV_PLL_SHARED1_DIV3,
CLK_CON_DIV_PLL_SHARED1_DIV4,
CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
CLK_CON_GAT_GATE_CLKCMU_AUD,
CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
CLK_CON_GAT_GATE_CLKCMU_CORE_SSS,
CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG,
CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG,
CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
CLK_CON_GAT_GATE_CLKCMU_DPU,
CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
CLK_CON_GAT_GATE_CLKCMU_HSI_BUS,
CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD,
CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD,
CLK_CON_GAT_GATE_CLKCMU_IS_BUS,
CLK_CON_GAT_GATE_CLKCMU_IS_GDC,
CLK_CON_GAT_GATE_CLKCMU_IS_ITP,
CLK_CON_GAT_GATE_CLKCMU_IS_VRA,
CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG,
CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M,
CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC,
CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC,
CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
CLK_CON_GAT_GATE_CLKCMU_PERI_IP,
CLK_CON_GAT_GATE_CLKCMU_PERI_UART,
};
/*
* Do not provide PLL tables to core PLLs, as MANUAL_PLL_CTRL bit is not set
* for those PLLs by default, so set_rate operation would fail.
*/
static const struct samsung_pll_clock top_pll_clks[] __initconst = {
/* CMU_TOP_PURECLKCOMP */
PLL(pll_0822x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0,
NULL),
PLL(pll_0822x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1,
NULL),
PLL(pll_0831x, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
};
/* List of parent clocks for Muxes in CMU_TOP */
PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_APM */
PNAME(mout_clkcmu_apm_bus_p) = { "dout_shared0_div4", "dout_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_AUD */
PNAME(mout_aud_p) = { "fout_shared1_pll", "dout_shared0_div2",
"dout_shared1_div2", "dout_shared0_div3" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
PNAME(mout_core_bus_p) = { "dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "dout_shared0_div4" };
PNAME(mout_core_cci_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared1_div3" };
PNAME(mout_core_mmc_embd_p) = { "oscclk", "dout_shared0_div2",
"dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "mout_mmc_pll",
"oscclk", "oscclk" };
PNAME(mout_core_sss_p) = { "dout_shared0_div3", "dout_shared1_div3",
"dout_shared0_div4", "dout_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CPUCL0 */
PNAME(mout_cpucl0_switch_p) = { "fout_shared0_pll", "fout_shared1_pll",
"dout_shared0_div2", "dout_shared1_div2" };
PNAME(mout_cpucl0_dbg_p) = { "dout_shared0_div4", "dout_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CPUCL1 */
PNAME(mout_cpucl1_switch_p) = { "fout_shared0_pll", "fout_shared1_pll",
"dout_shared0_div2", "dout_shared1_div2" };
PNAME(mout_cpucl1_dbg_p) = { "dout_shared0_div4", "dout_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_G3D */
PNAME(mout_g3d_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared1_div3" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_HSI */
PNAME(mout_hsi_bus_p) = { "dout_shared0_div2", "dout_shared1_div2" };
PNAME(mout_hsi_mmc_card_p) = { "oscclk", "dout_shared0_div2",
"dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "mout_mmc_pll",
"oscclk", "oscclk" };
PNAME(mout_hsi_usb20drd_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_IS */
PNAME(mout_is_bus_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared1_div3" };
PNAME(mout_is_itp_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared1_div3" };
PNAME(mout_is_vra_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared1_div3" };
PNAME(mout_is_gdc_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared1_div3" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_MFCMSCL */
PNAME(mout_mfcmscl_mfc_p) = { "dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "dout_shared0_div4" };
PNAME(mout_mfcmscl_m2m_p) = { "dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "dout_shared0_div4" };
PNAME(mout_mfcmscl_mcsc_p) = { "dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "dout_shared0_div4" };
PNAME(mout_mfcmscl_jpeg_p) = { "dout_shared0_div3", "dout_shared1_div3",
"dout_shared0_div4", "dout_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_PERI */
PNAME(mout_peri_bus_p) = { "dout_shared0_div4", "dout_shared1_div4" };
PNAME(mout_peri_uart_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
PNAME(mout_peri_ip_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_DPU */
PNAME(mout_dpu_p) = { "dout_shared0_div3", "dout_shared1_div3",
"dout_shared0_div4", "dout_shared1_div4" };
static const struct samsung_mux_clock top_mux_clks[] __initconst = {
/* CMU_TOP_PURECLKCOMP */
MUX(CLK_MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p,
PLL_CON0_PLL_SHARED0, 4, 1),
MUX(CLK_MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p,
PLL_CON0_PLL_SHARED1, 4, 1),
MUX(CLK_MOUT_MMC_PLL, "mout_mmc_pll", mout_mmc_pll_p,
PLL_CON0_PLL_MMC, 4, 1),
/* APM */
MUX(CLK_MOUT_CLKCMU_APM_BUS, "mout_clkcmu_apm_bus",
mout_clkcmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
/* AUD */
MUX(CLK_MOUT_AUD, "mout_aud", mout_aud_p,
CLK_CON_MUX_MUX_CLKCMU_AUD, 0, 2),
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
MUX(CLK_MOUT_CORE_CCI, "mout_core_cci", mout_core_cci_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI, 0, 2),
MUX(CLK_MOUT_CORE_MMC_EMBD, "mout_core_mmc_embd", mout_core_mmc_embd_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD, 0, 3),
MUX(CLK_MOUT_CORE_SSS, "mout_core_sss", mout_core_sss_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_SSS, 0, 2),
/* CPUCL0 */
MUX(CLK_MOUT_CPUCL0_DBG, "mout_cpucl0_dbg", mout_cpucl0_dbg_p,
CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG, 0, 1),
MUX(CLK_MOUT_CPUCL0_SWITCH, "mout_cpucl0_switch", mout_cpucl0_switch_p,
CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, 0, 2),
/* CPUCL1 */
MUX(CLK_MOUT_CPUCL1_DBG, "mout_cpucl1_dbg", mout_cpucl1_dbg_p,
CLK_CON_MUX_MUX_CLKCMU_CPUCL1_DBG, 0, 1),
MUX(CLK_MOUT_CPUCL1_SWITCH, "mout_cpucl1_switch", mout_cpucl1_switch_p,
CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, 0, 2),
/* DPU */
MUX(CLK_MOUT_DPU, "mout_dpu", mout_dpu_p,
CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 2),
/* G3D */
MUX(CLK_MOUT_G3D_SWITCH, "mout_g3d_switch", mout_g3d_switch_p,
CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH, 0, 2),
/* HSI */
MUX(CLK_MOUT_HSI_BUS, "mout_hsi_bus", mout_hsi_bus_p,
CLK_CON_MUX_MUX_CLKCMU_HSI_BUS, 0, 1),
MUX(CLK_MOUT_HSI_MMC_CARD, "mout_hsi_mmc_card", mout_hsi_mmc_card_p,
CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD, 0, 3),
MUX(CLK_MOUT_HSI_USB20DRD, "mout_hsi_usb20drd", mout_hsi_usb20drd_p,
CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD, 0, 2),
/* IS */
MUX(CLK_MOUT_IS_BUS, "mout_is_bus", mout_is_bus_p,
CLK_CON_MUX_MUX_CLKCMU_IS_BUS, 0, 2),
MUX(CLK_MOUT_IS_ITP, "mout_is_itp", mout_is_itp_p,
CLK_CON_MUX_MUX_CLKCMU_IS_ITP, 0, 2),
MUX(CLK_MOUT_IS_VRA, "mout_is_vra", mout_is_vra_p,
CLK_CON_MUX_MUX_CLKCMU_IS_VRA, 0, 2),
MUX(CLK_MOUT_IS_GDC, "mout_is_gdc", mout_is_gdc_p,
CLK_CON_MUX_MUX_CLKCMU_IS_GDC, 0, 2),
/* MFCMSCL */
MUX(CLK_MOUT_MFCMSCL_MFC, "mout_mfcmscl_mfc", mout_mfcmscl_mfc_p,
CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC, 0, 2),
MUX(CLK_MOUT_MFCMSCL_M2M, "mout_mfcmscl_m2m", mout_mfcmscl_m2m_p,
CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M, 0, 2),
MUX(CLK_MOUT_MFCMSCL_MCSC, "mout_mfcmscl_mcsc", mout_mfcmscl_mcsc_p,
CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC, 0, 2),
MUX(CLK_MOUT_MFCMSCL_JPEG, "mout_mfcmscl_jpeg", mout_mfcmscl_jpeg_p,
CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG, 0, 2),
/* PERI */
MUX(CLK_MOUT_PERI_BUS, "mout_peri_bus", mout_peri_bus_p,
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS, 0, 1),
MUX(CLK_MOUT_PERI_UART, "mout_peri_uart", mout_peri_uart_p,
CLK_CON_MUX_MUX_CLKCMU_PERI_UART, 0, 2),
MUX(CLK_MOUT_PERI_IP, "mout_peri_ip", mout_peri_ip_p,
CLK_CON_MUX_MUX_CLKCMU_PERI_IP, 0, 2),
};
static const struct samsung_div_clock top_div_clks[] __initconst = {
/* CMU_TOP_PURECLKCOMP */
DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "mout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "mout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "mout_shared1_pll",
CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "mout_shared1_pll",
CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "dout_shared0_div2",
CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
/* APM */
DIV(CLK_DOUT_CLKCMU_APM_BUS, "dout_clkcmu_apm_bus",
"gout_clkcmu_apm_bus", CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
/* AUD */
DIV(CLK_DOUT_AUD, "dout_aud", "gout_aud",
CLK_CON_DIV_CLKCMU_AUD, 0, 4),
/* CORE */
DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
DIV(CLK_DOUT_CORE_CCI, "dout_core_cci", "gout_core_cci",
CLK_CON_DIV_CLKCMU_CORE_CCI, 0, 4),
DIV(CLK_DOUT_CORE_MMC_EMBD, "dout_core_mmc_embd", "gout_core_mmc_embd",
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD, 0, 9),
DIV(CLK_DOUT_CORE_SSS, "dout_core_sss", "gout_core_sss",
CLK_CON_DIV_CLKCMU_CORE_SSS, 0, 4),
/* CPUCL0 */
DIV(CLK_DOUT_CPUCL0_DBG, "dout_cpucl0_dbg", "gout_cpucl0_dbg",
CLK_CON_DIV_CLKCMU_CPUCL0_DBG, 0, 3),
DIV(CLK_DOUT_CPUCL0_SWITCH, "dout_cpucl0_switch", "gout_cpucl0_switch",
CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
/* CPUCL1 */
DIV(CLK_DOUT_CPUCL1_DBG, "dout_cpucl1_dbg", "gout_cpucl1_dbg",
CLK_CON_DIV_CLKCMU_CPUCL1_DBG, 0, 3),
DIV(CLK_DOUT_CPUCL1_SWITCH, "dout_cpucl1_switch", "gout_cpucl1_switch",
CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
/* DPU */
DIV(CLK_DOUT_DPU, "dout_dpu", "gout_dpu",
CLK_CON_DIV_CLKCMU_DPU, 0, 4),
/* G3D */
DIV(CLK_DOUT_G3D_SWITCH, "dout_g3d_switch", "gout_g3d_switch",
CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
/* HSI */
DIV(CLK_DOUT_HSI_BUS, "dout_hsi_bus", "gout_hsi_bus",
CLK_CON_DIV_CLKCMU_HSI_BUS, 0, 4),
DIV(CLK_DOUT_HSI_MMC_CARD, "dout_hsi_mmc_card", "gout_hsi_mmc_card",
CLK_CON_DIV_CLKCMU_HSI_MMC_CARD, 0, 9),
DIV(CLK_DOUT_HSI_USB20DRD, "dout_hsi_usb20drd", "gout_hsi_usb20drd",
CLK_CON_DIV_CLKCMU_HSI_USB20DRD, 0, 4),
/* IS */
DIV(CLK_DOUT_IS_BUS, "dout_is_bus", "gout_is_bus",
CLK_CON_DIV_CLKCMU_IS_BUS, 0, 4),
DIV(CLK_DOUT_IS_ITP, "dout_is_itp", "gout_is_itp",
CLK_CON_DIV_CLKCMU_IS_ITP, 0, 4),
DIV(CLK_DOUT_IS_VRA, "dout_is_vra", "gout_is_vra",
CLK_CON_DIV_CLKCMU_IS_VRA, 0, 4),
DIV(CLK_DOUT_IS_GDC, "dout_is_gdc", "gout_is_gdc",
CLK_CON_DIV_CLKCMU_IS_GDC, 0, 4),
/* MFCMSCL */
DIV(CLK_DOUT_MFCMSCL_MFC, "dout_mfcmscl_mfc", "gout_mfcmscl_mfc",
CLK_CON_DIV_CLKCMU_MFCMSCL_MFC, 0, 4),
DIV(CLK_DOUT_MFCMSCL_M2M, "dout_mfcmscl_m2m", "gout_mfcmscl_m2m",
CLK_CON_DIV_CLKCMU_MFCMSCL_M2M, 0, 4),
DIV(CLK_DOUT_MFCMSCL_MCSC, "dout_mfcmscl_mcsc", "gout_mfcmscl_mcsc",
CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC, 0, 4),
DIV(CLK_DOUT_MFCMSCL_JPEG, "dout_mfcmscl_jpeg", "gout_mfcmscl_jpeg",
CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG, 0, 4),
/* PERI */
DIV(CLK_DOUT_PERI_BUS, "dout_peri_bus", "gout_peri_bus",
CLK_CON_DIV_CLKCMU_PERI_BUS, 0, 4),
DIV(CLK_DOUT_PERI_UART, "dout_peri_uart", "gout_peri_uart",
CLK_CON_DIV_CLKCMU_PERI_UART, 0, 4),
DIV(CLK_DOUT_PERI_IP, "dout_peri_ip", "gout_peri_ip",
CLK_CON_DIV_CLKCMU_PERI_IP, 0, 4),
};
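/*
 * All CLK_CON_GAT gate registers in this CMU use bit 21 as their enable bit,
 * which is the fifth argument passed to every GATE() entry below.
 */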
static const struct samsung_gate_clock top_gate_clks[] __initconst = {
/* CORE */
GATE(CLK_GOUT_CORE_BUS, "gout_core_bus", "mout_core_bus",
CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, 0, 0),
GATE(CLK_GOUT_CORE_CCI, "gout_core_cci", "mout_core_cci",
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI, 21, 0, 0),
GATE(CLK_GOUT_CORE_MMC_EMBD, "gout_core_mmc_embd", "mout_core_mmc_embd",
CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD, 21, 0, 0),
GATE(CLK_GOUT_CORE_SSS, "gout_core_sss", "mout_core_sss",
CLK_CON_GAT_GATE_CLKCMU_CORE_SSS, 21, 0, 0),
/* APM */
GATE(CLK_GOUT_CLKCMU_APM_BUS, "gout_clkcmu_apm_bus",
"mout_clkcmu_apm_bus", CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, 0, 0),
/* AUD */
GATE(CLK_GOUT_AUD, "gout_aud", "mout_aud",
CLK_CON_GAT_GATE_CLKCMU_AUD, 21, 0, 0),
/* CPUCL0 */
GATE(CLK_GOUT_CPUCL0_DBG, "gout_cpucl0_dbg", "mout_cpucl0_dbg",
CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG, 21, 0, 0),
GATE(CLK_GOUT_CPUCL0_SWITCH, "gout_cpucl0_switch", "mout_cpucl0_switch",
CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, 21, 0, 0),
/* CPUCL1 */
GATE(CLK_GOUT_CPUCL1_DBG, "gout_cpucl1_dbg", "mout_cpucl1_dbg",
CLK_CON_GAT_GATE_CLKCMU_CPUCL1_DBG, 21, 0, 0),
GATE(CLK_GOUT_CPUCL1_SWITCH, "gout_cpucl1_switch", "mout_cpucl1_switch",
CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, 21, 0, 0),
/* DPU */
GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
/* G3D */
GATE(CLK_GOUT_G3D_SWITCH, "gout_g3d_switch", "mout_g3d_switch",
CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH, 21, 0, 0),
/* HSI */
GATE(CLK_GOUT_HSI_BUS, "gout_hsi_bus", "mout_hsi_bus",
CLK_CON_GAT_GATE_CLKCMU_HSI_BUS, 21, 0, 0),
GATE(CLK_GOUT_HSI_MMC_CARD, "gout_hsi_mmc_card", "mout_hsi_mmc_card",
CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD, 21, 0, 0),
GATE(CLK_GOUT_HSI_USB20DRD, "gout_hsi_usb20drd", "mout_hsi_usb20drd",
CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD, 21, 0, 0),
/* IS */
	/* TODO: These clocks must always be enabled to access CMU_IS registers */
GATE(CLK_GOUT_IS_BUS, "gout_is_bus", "mout_is_bus",
CLK_CON_GAT_GATE_CLKCMU_IS_BUS, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_IS_ITP, "gout_is_itp", "mout_is_itp",
CLK_CON_GAT_GATE_CLKCMU_IS_ITP, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_IS_VRA, "gout_is_vra", "mout_is_vra",
CLK_CON_GAT_GATE_CLKCMU_IS_VRA, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_IS_GDC, "gout_is_gdc", "mout_is_gdc",
CLK_CON_GAT_GATE_CLKCMU_IS_GDC, 21, CLK_IS_CRITICAL, 0),
/* MFCMSCL */
	/* TODO: These clocks must always be enabled to access CMU_MFCMSCL registers */
GATE(CLK_GOUT_MFCMSCL_MFC, "gout_mfcmscl_mfc", "mout_mfcmscl_mfc",
CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_MFCMSCL_M2M, "gout_mfcmscl_m2m", "mout_mfcmscl_m2m",
CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_MFCMSCL_MCSC, "gout_mfcmscl_mcsc", "mout_mfcmscl_mcsc",
CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_MFCMSCL_JPEG, "gout_mfcmscl_jpeg", "mout_mfcmscl_jpeg",
CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG, 21, CLK_IS_CRITICAL, 0),
/* PERI */
GATE(CLK_GOUT_PERI_BUS, "gout_peri_bus", "mout_peri_bus",
CLK_CON_GAT_GATE_CLKCMU_PERI_BUS, 21, 0, 0),
GATE(CLK_GOUT_PERI_UART, "gout_peri_uart", "mout_peri_uart",
CLK_CON_GAT_GATE_CLKCMU_PERI_UART, 21, 0, 0),
GATE(CLK_GOUT_PERI_IP, "gout_peri_ip", "mout_peri_ip",
CLK_CON_GAT_GATE_CLKCMU_PERI_IP, 21, 0, 0),
};
static const struct samsung_cmu_info top_cmu_info __initconst = {
.pll_clks = top_pll_clks,
.nr_pll_clks = ARRAY_SIZE(top_pll_clks),
.mux_clks = top_mux_clks,
.nr_mux_clks = ARRAY_SIZE(top_mux_clks),
.div_clks = top_div_clks,
.nr_div_clks = ARRAY_SIZE(top_div_clks),
.gate_clks = top_gate_clks,
.nr_gate_clks = ARRAY_SIZE(top_gate_clks),
.nr_clk_ids = CLKS_NR_TOP,
.clk_regs = top_clk_regs,
.nr_clk_regs = ARRAY_SIZE(top_clk_regs),
};
static void __init exynos850_cmu_top_init(struct device_node *np)
{
exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
}
/* Register CMU_TOP early, as it's a dependency for other early domains */
CLK_OF_DECLARE(exynos850_cmu_top, "samsung,exynos850-cmu-top",
exynos850_cmu_top_init);
/* ---- CMU_APM ------------------------------------------------------------- */
/* Register Offset definitions for CMU_APM (0x11800000) */
#define PLL_CON0_MUX_CLKCMU_APM_BUS_USER 0x0600
#define PLL_CON0_MUX_CLK_RCO_APM_I3C_USER 0x0610
#define PLL_CON0_MUX_CLK_RCO_APM_USER 0x0620
#define PLL_CON0_MUX_DLL_USER 0x0630
#define CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS 0x1000
#define CLK_CON_MUX_MUX_CLK_APM_BUS 0x1004
#define CLK_CON_MUX_MUX_CLK_APM_I3C 0x1008
#define CLK_CON_DIV_CLKCMU_CHUB_BUS 0x1800
#define CLK_CON_DIV_DIV_CLK_APM_BUS 0x1804
#define CLK_CON_DIV_DIV_CLK_APM_I3C 0x1808
#define CLK_CON_GAT_CLKCMU_CMGP_BUS 0x2000
#define CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS 0x2014
#define CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK 0x2018
#define CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK 0x2020
#define CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK 0x2024
#define CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK 0x2028
#define CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK 0x2034
#define CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK 0x2038
#define CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK 0x20bc
#define CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK 0x20c0
static const unsigned long apm_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_APM_BUS_USER,
PLL_CON0_MUX_CLK_RCO_APM_I3C_USER,
PLL_CON0_MUX_CLK_RCO_APM_USER,
PLL_CON0_MUX_DLL_USER,
CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS,
CLK_CON_MUX_MUX_CLK_APM_BUS,
CLK_CON_MUX_MUX_CLK_APM_I3C,
CLK_CON_DIV_CLKCMU_CHUB_BUS,
CLK_CON_DIV_DIV_CLK_APM_BUS,
CLK_CON_DIV_DIV_CLK_APM_I3C,
CLK_CON_GAT_CLKCMU_CMGP_BUS,
CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS,
CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK,
CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK,
CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK,
CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK,
CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK,
CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK,
CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK,
CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK,
};
/* List of parent clocks for Muxes in CMU_APM */
PNAME(mout_apm_bus_user_p) = { "oscclk_rco_apm", "dout_clkcmu_apm_bus" };
PNAME(mout_rco_apm_i3c_user_p) = { "oscclk_rco_apm", "clk_rco_i3c_pmic" };
PNAME(mout_rco_apm_user_p) = { "oscclk_rco_apm", "clk_rco_apm__alv" };
PNAME(mout_dll_user_p) = { "oscclk_rco_apm", "clk_dll_dco" };
PNAME(mout_clkcmu_chub_bus_p) = { "mout_apm_bus_user", "mout_dll_user" };
PNAME(mout_apm_bus_p) = { "mout_rco_apm_user", "mout_apm_bus_user",
"mout_dll_user", "oscclk_rco_apm" };
PNAME(mout_apm_i3c_p) = { "dout_apm_i3c", "mout_rco_apm_i3c_user" };
static const struct samsung_fixed_rate_clock apm_fixed_clks[] __initconst = {
FRATE(CLK_RCO_I3C_PMIC, "clk_rco_i3c_pmic", NULL, 0, 491520000),
FRATE(OSCCLK_RCO_APM, "oscclk_rco_apm", NULL, 0, 24576000),
FRATE(CLK_RCO_APM__ALV, "clk_rco_apm__alv", NULL, 0, 49152000),
FRATE(CLK_DLL_DCO, "clk_dll_dco", NULL, 0, 360000000),
};
static const struct samsung_mux_clock apm_mux_clks[] __initconst = {
MUX(CLK_MOUT_APM_BUS_USER, "mout_apm_bus_user", mout_apm_bus_user_p,
PLL_CON0_MUX_CLKCMU_APM_BUS_USER, 4, 1),
MUX(CLK_MOUT_RCO_APM_I3C_USER, "mout_rco_apm_i3c_user",
mout_rco_apm_i3c_user_p, PLL_CON0_MUX_CLK_RCO_APM_I3C_USER, 4, 1),
MUX(CLK_MOUT_RCO_APM_USER, "mout_rco_apm_user", mout_rco_apm_user_p,
PLL_CON0_MUX_CLK_RCO_APM_USER, 4, 1),
MUX(CLK_MOUT_DLL_USER, "mout_dll_user", mout_dll_user_p,
PLL_CON0_MUX_DLL_USER, 4, 1),
MUX(CLK_MOUT_CLKCMU_CHUB_BUS, "mout_clkcmu_chub_bus",
mout_clkcmu_chub_bus_p, CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS, 0, 1),
MUX(CLK_MOUT_APM_BUS, "mout_apm_bus", mout_apm_bus_p,
CLK_CON_MUX_MUX_CLK_APM_BUS, 0, 2),
MUX(CLK_MOUT_APM_I3C, "mout_apm_i3c", mout_apm_i3c_p,
CLK_CON_MUX_MUX_CLK_APM_I3C, 0, 1),
};
static const struct samsung_div_clock apm_div_clks[] __initconst = {
DIV(CLK_DOUT_CLKCMU_CHUB_BUS, "dout_clkcmu_chub_bus",
"gout_clkcmu_chub_bus",
CLK_CON_DIV_CLKCMU_CHUB_BUS, 0, 3),
DIV(CLK_DOUT_APM_BUS, "dout_apm_bus", "mout_apm_bus",
CLK_CON_DIV_DIV_CLK_APM_BUS, 0, 3),
DIV(CLK_DOUT_APM_I3C, "dout_apm_i3c", "mout_apm_bus",
CLK_CON_DIV_DIV_CLK_APM_I3C, 0, 3),
};
static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus",
CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus",
"mout_clkcmu_chub_bus",
CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0),
GATE(CLK_GOUT_RTC_PCLK, "gout_rtc_pclk", "dout_apm_bus",
CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK, 21, 0, 0),
GATE(CLK_GOUT_TOP_RTC_PCLK, "gout_top_rtc_pclk", "dout_apm_bus",
CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I3C_PCLK, "gout_i3c_pclk", "dout_apm_bus",
CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I3C_SCLK, "gout_i3c_sclk", "mout_apm_i3c",
CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK, 21, 0, 0),
GATE(CLK_GOUT_SPEEDY_PCLK, "gout_speedy_pclk", "dout_apm_bus",
CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK, 21, 0, 0),
/* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_GPIO_ALIVE_PCLK, "gout_gpio_alive_pclk", "dout_apm_bus",
CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK, 21, CLK_IGNORE_UNUSED,
0),
GATE(CLK_GOUT_PMU_ALIVE_PCLK, "gout_pmu_alive_pclk", "dout_apm_bus",
CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_SYSREG_APM_PCLK, "gout_sysreg_apm_pclk", "dout_apm_bus",
CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info apm_cmu_info __initconst = {
.mux_clks = apm_mux_clks,
.nr_mux_clks = ARRAY_SIZE(apm_mux_clks),
.div_clks = apm_div_clks,
.nr_div_clks = ARRAY_SIZE(apm_div_clks),
.gate_clks = apm_gate_clks,
.nr_gate_clks = ARRAY_SIZE(apm_gate_clks),
.fixed_clks = apm_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(apm_fixed_clks),
.nr_clk_ids = CLKS_NR_APM,
.clk_regs = apm_clk_regs,
.nr_clk_regs = ARRAY_SIZE(apm_clk_regs),
.clk_name = "dout_clkcmu_apm_bus",
};
/* ---- CMU_AUD ------------------------------------------------------------- */
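/* Register Offset definitions for CMU_AUD */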
#define PLL_LOCKTIME_PLL_AUD 0x0000
#define PLL_CON0_PLL_AUD 0x0100
#define PLL_CON3_PLL_AUD 0x010c
#define PLL_CON0_MUX_CLKCMU_AUD_CPU_USER 0x0600
#define PLL_CON0_MUX_TICK_USB_USER 0x0610
#define CLK_CON_MUX_MUX_CLK_AUD_CPU 0x1000
#define CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH 0x1004
#define CLK_CON_MUX_MUX_CLK_AUD_FM 0x1008
#define CLK_CON_MUX_MUX_CLK_AUD_UAIF0 0x100c
#define CLK_CON_MUX_MUX_CLK_AUD_UAIF1 0x1010
#define CLK_CON_MUX_MUX_CLK_AUD_UAIF2 0x1014
#define CLK_CON_MUX_MUX_CLK_AUD_UAIF3 0x1018
#define CLK_CON_MUX_MUX_CLK_AUD_UAIF4 0x101c
#define CLK_CON_MUX_MUX_CLK_AUD_UAIF5 0x1020
#define CLK_CON_MUX_MUX_CLK_AUD_UAIF6 0x1024
#define CLK_CON_DIV_DIV_CLK_AUD_MCLK 0x1800
#define CLK_CON_DIV_DIV_CLK_AUD_AUDIF 0x1804
#define CLK_CON_DIV_DIV_CLK_AUD_BUSD 0x1808
#define CLK_CON_DIV_DIV_CLK_AUD_BUSP 0x180c
#define CLK_CON_DIV_DIV_CLK_AUD_CNT 0x1810
#define CLK_CON_DIV_DIV_CLK_AUD_CPU 0x1814
#define CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK 0x1818
#define CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG 0x181c
#define CLK_CON_DIV_DIV_CLK_AUD_FM 0x1820
#define CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY 0x1824
#define CLK_CON_DIV_DIV_CLK_AUD_UAIF0 0x1828
#define CLK_CON_DIV_DIV_CLK_AUD_UAIF1 0x182c
#define CLK_CON_DIV_DIV_CLK_AUD_UAIF2 0x1830
#define CLK_CON_DIV_DIV_CLK_AUD_UAIF3 0x1834
#define CLK_CON_DIV_DIV_CLK_AUD_UAIF4 0x1838
#define CLK_CON_DIV_DIV_CLK_AUD_UAIF5 0x183c
#define CLK_CON_DIV_DIV_CLK_AUD_UAIF6 0x1840
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT 0x2000
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0 0x2004
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1 0x2008
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2 0x200c
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3 0x2010
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4 0x2014
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5 0x2018
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6 0x201c
#define CLK_CON_GAT_CLK_AUD_CMU_AUD_PCLK 0x2020
#define CLK_CON_GAT_GOUT_AUD_ABOX_ACLK 0x2048
#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY 0x204c
#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB 0x2050
#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32 0x2054
#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP 0x2058
#define CLK_CON_GAT_GOUT_AUD_CODEC_MCLK 0x206c
#define CLK_CON_GAT_GOUT_AUD_TZPC_PCLK 0x2070
#define CLK_CON_GAT_GOUT_AUD_GPIO_PCLK 0x2074
#define CLK_CON_GAT_GOUT_AUD_PPMU_ACLK 0x2088
#define CLK_CON_GAT_GOUT_AUD_PPMU_PCLK 0x208c
#define CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1 0x20b4
#define CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK 0x20b8
#define CLK_CON_GAT_GOUT_AUD_WDT_PCLK 0x20bc
static const unsigned long aud_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_AUD,
PLL_CON0_PLL_AUD,
PLL_CON3_PLL_AUD,
PLL_CON0_MUX_CLKCMU_AUD_CPU_USER,
PLL_CON0_MUX_TICK_USB_USER,
CLK_CON_MUX_MUX_CLK_AUD_CPU,
CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH,
CLK_CON_MUX_MUX_CLK_AUD_FM,
CLK_CON_MUX_MUX_CLK_AUD_UAIF0,
CLK_CON_MUX_MUX_CLK_AUD_UAIF1,
CLK_CON_MUX_MUX_CLK_AUD_UAIF2,
CLK_CON_MUX_MUX_CLK_AUD_UAIF3,
CLK_CON_MUX_MUX_CLK_AUD_UAIF4,
CLK_CON_MUX_MUX_CLK_AUD_UAIF5,
CLK_CON_MUX_MUX_CLK_AUD_UAIF6,
CLK_CON_DIV_DIV_CLK_AUD_MCLK,
CLK_CON_DIV_DIV_CLK_AUD_AUDIF,
CLK_CON_DIV_DIV_CLK_AUD_BUSD,
CLK_CON_DIV_DIV_CLK_AUD_BUSP,
CLK_CON_DIV_DIV_CLK_AUD_CNT,
CLK_CON_DIV_DIV_CLK_AUD_CPU,
CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK,
CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG,
CLK_CON_DIV_DIV_CLK_AUD_FM,
CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY,
CLK_CON_DIV_DIV_CLK_AUD_UAIF0,
CLK_CON_DIV_DIV_CLK_AUD_UAIF1,
CLK_CON_DIV_DIV_CLK_AUD_UAIF2,
CLK_CON_DIV_DIV_CLK_AUD_UAIF3,
CLK_CON_DIV_DIV_CLK_AUD_UAIF4,
CLK_CON_DIV_DIV_CLK_AUD_UAIF5,
CLK_CON_DIV_DIV_CLK_AUD_UAIF6,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6,
CLK_CON_GAT_CLK_AUD_CMU_AUD_PCLK,
CLK_CON_GAT_GOUT_AUD_ABOX_ACLK,
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY,
CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB,
CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32,
CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP,
CLK_CON_GAT_GOUT_AUD_CODEC_MCLK,
CLK_CON_GAT_GOUT_AUD_TZPC_PCLK,
CLK_CON_GAT_GOUT_AUD_GPIO_PCLK,
CLK_CON_GAT_GOUT_AUD_PPMU_ACLK,
CLK_CON_GAT_GOUT_AUD_PPMU_PCLK,
CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1,
CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK,
CLK_CON_GAT_GOUT_AUD_WDT_PCLK,
};
/* List of parent clocks for Muxes in CMU_AUD */
PNAME(mout_aud_pll_p) = { "oscclk", "fout_aud_pll" };
PNAME(mout_aud_cpu_user_p) = { "oscclk", "dout_aud" };
PNAME(mout_aud_cpu_p) = { "dout_aud_cpu", "mout_aud_cpu_user" };
PNAME(mout_aud_cpu_hch_p) = { "mout_aud_cpu", "oscclk" };
PNAME(mout_aud_uaif0_p) = { "dout_aud_uaif0", "ioclk_audiocdclk0" };
PNAME(mout_aud_uaif1_p) = { "dout_aud_uaif1", "ioclk_audiocdclk1" };
PNAME(mout_aud_uaif2_p) = { "dout_aud_uaif2", "ioclk_audiocdclk2" };
PNAME(mout_aud_uaif3_p) = { "dout_aud_uaif3", "ioclk_audiocdclk3" };
PNAME(mout_aud_uaif4_p) = { "dout_aud_uaif4", "ioclk_audiocdclk4" };
PNAME(mout_aud_uaif5_p) = { "dout_aud_uaif5", "ioclk_audiocdclk5" };
PNAME(mout_aud_uaif6_p) = { "dout_aud_uaif6", "ioclk_audiocdclk6" };
PNAME(mout_aud_tick_usb_user_p) = { "oscclk", "tick_usb" };
PNAME(mout_aud_fm_p) = { "oscclk", "dout_aud_fm_spdy" };
/*
 * Do not provide a rate table for PLL_AUD: the MANUAL_PLL_CTRL bit is not
 * set for that PLL by default, so the set_rate operation would fail.
 */
static const struct samsung_pll_clock aud_pll_clks[] __initconst = {
PLL(pll_0831x, CLK_FOUT_AUD_PLL, "fout_aud_pll", "oscclk",
PLL_LOCKTIME_PLL_AUD, PLL_CON3_PLL_AUD, NULL),
};
static const struct samsung_fixed_rate_clock aud_fixed_clks[] __initconst = {
FRATE(IOCLK_AUDIOCDCLK0, "ioclk_audiocdclk0", NULL, 0, 25000000),
FRATE(IOCLK_AUDIOCDCLK1, "ioclk_audiocdclk1", NULL, 0, 25000000),
FRATE(IOCLK_AUDIOCDCLK2, "ioclk_audiocdclk2", NULL, 0, 25000000),
FRATE(IOCLK_AUDIOCDCLK3, "ioclk_audiocdclk3", NULL, 0, 25000000),
FRATE(IOCLK_AUDIOCDCLK4, "ioclk_audiocdclk4", NULL, 0, 25000000),
FRATE(IOCLK_AUDIOCDCLK5, "ioclk_audiocdclk5", NULL, 0, 25000000),
FRATE(IOCLK_AUDIOCDCLK6, "ioclk_audiocdclk6", NULL, 0, 25000000),
FRATE(TICK_USB, "tick_usb", NULL, 0, 60000000),
};
static const struct samsung_mux_clock aud_mux_clks[] __initconst = {
MUX(CLK_MOUT_AUD_PLL, "mout_aud_pll", mout_aud_pll_p,
PLL_CON0_PLL_AUD, 4, 1),
MUX(CLK_MOUT_AUD_CPU_USER, "mout_aud_cpu_user", mout_aud_cpu_user_p,
PLL_CON0_MUX_CLKCMU_AUD_CPU_USER, 4, 1),
MUX(CLK_MOUT_AUD_TICK_USB_USER, "mout_aud_tick_usb_user",
mout_aud_tick_usb_user_p,
PLL_CON0_MUX_TICK_USB_USER, 4, 1),
MUX(CLK_MOUT_AUD_CPU, "mout_aud_cpu", mout_aud_cpu_p,
CLK_CON_MUX_MUX_CLK_AUD_CPU, 0, 1),
MUX(CLK_MOUT_AUD_CPU_HCH, "mout_aud_cpu_hch", mout_aud_cpu_hch_p,
CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH, 0, 1),
MUX(CLK_MOUT_AUD_UAIF0, "mout_aud_uaif0", mout_aud_uaif0_p,
CLK_CON_MUX_MUX_CLK_AUD_UAIF0, 0, 1),
MUX(CLK_MOUT_AUD_UAIF1, "mout_aud_uaif1", mout_aud_uaif1_p,
CLK_CON_MUX_MUX_CLK_AUD_UAIF1, 0, 1),
MUX(CLK_MOUT_AUD_UAIF2, "mout_aud_uaif2", mout_aud_uaif2_p,
CLK_CON_MUX_MUX_CLK_AUD_UAIF2, 0, 1),
MUX(CLK_MOUT_AUD_UAIF3, "mout_aud_uaif3", mout_aud_uaif3_p,
CLK_CON_MUX_MUX_CLK_AUD_UAIF3, 0, 1),
MUX(CLK_MOUT_AUD_UAIF4, "mout_aud_uaif4", mout_aud_uaif4_p,
CLK_CON_MUX_MUX_CLK_AUD_UAIF4, 0, 1),
MUX(CLK_MOUT_AUD_UAIF5, "mout_aud_uaif5", mout_aud_uaif5_p,
CLK_CON_MUX_MUX_CLK_AUD_UAIF5, 0, 1),
MUX(CLK_MOUT_AUD_UAIF6, "mout_aud_uaif6", mout_aud_uaif6_p,
CLK_CON_MUX_MUX_CLK_AUD_UAIF6, 0, 1),
MUX(CLK_MOUT_AUD_FM, "mout_aud_fm", mout_aud_fm_p,
CLK_CON_MUX_MUX_CLK_AUD_FM, 0, 1),
};
static const struct samsung_div_clock aud_div_clks[] __initconst = {
DIV(CLK_DOUT_AUD_CPU, "dout_aud_cpu", "mout_aud_pll",
CLK_CON_DIV_DIV_CLK_AUD_CPU, 0, 4),
DIV(CLK_DOUT_AUD_BUSD, "dout_aud_busd", "mout_aud_pll",
CLK_CON_DIV_DIV_CLK_AUD_BUSD, 0, 4),
DIV(CLK_DOUT_AUD_BUSP, "dout_aud_busp", "mout_aud_pll",
CLK_CON_DIV_DIV_CLK_AUD_BUSP, 0, 4),
DIV(CLK_DOUT_AUD_AUDIF, "dout_aud_audif", "mout_aud_pll",
CLK_CON_DIV_DIV_CLK_AUD_AUDIF, 0, 9),
DIV(CLK_DOUT_AUD_CPU_ACLK, "dout_aud_cpu_aclk", "mout_aud_cpu_hch",
CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK, 0, 3),
DIV(CLK_DOUT_AUD_CPU_PCLKDBG, "dout_aud_cpu_pclkdbg",
"mout_aud_cpu_hch",
CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG, 0, 3),
DIV(CLK_DOUT_AUD_MCLK, "dout_aud_mclk", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_MCLK, 0, 2),
DIV(CLK_DOUT_AUD_CNT, "dout_aud_cnt", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_CNT, 0, 10),
DIV(CLK_DOUT_AUD_UAIF0, "dout_aud_uaif0", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_UAIF0, 0, 10),
DIV(CLK_DOUT_AUD_UAIF1, "dout_aud_uaif1", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_UAIF1, 0, 10),
DIV(CLK_DOUT_AUD_UAIF2, "dout_aud_uaif2", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_UAIF2, 0, 10),
DIV(CLK_DOUT_AUD_UAIF3, "dout_aud_uaif3", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_UAIF3, 0, 10),
DIV(CLK_DOUT_AUD_UAIF4, "dout_aud_uaif4", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_UAIF4, 0, 10),
DIV(CLK_DOUT_AUD_UAIF5, "dout_aud_uaif5", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_UAIF5, 0, 10),
DIV(CLK_DOUT_AUD_UAIF6, "dout_aud_uaif6", "dout_aud_audif",
CLK_CON_DIV_DIV_CLK_AUD_UAIF6, 0, 10),
DIV(CLK_DOUT_AUD_FM_SPDY, "dout_aud_fm_spdy", "mout_aud_tick_usb_user",
CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY, 0, 1),
DIV(CLK_DOUT_AUD_FM, "dout_aud_fm", "mout_aud_fm",
CLK_CON_DIV_DIV_CLK_AUD_FM, 0, 10),
};
static const struct samsung_gate_clock aud_gate_clks[] __initconst = {
GATE(CLK_GOUT_AUD_CMU_AUD_PCLK, "gout_aud_cmu_aud_pclk",
"dout_aud_busd",
CLK_CON_GAT_CLK_AUD_CMU_AUD_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_AUD_CA32_CCLK, "gout_aud_ca32_cclk", "mout_aud_cpu_hch",
CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32, 21, 0, 0),
GATE(CLK_GOUT_AUD_ASB_CCLK, "gout_aud_asb_cclk", "dout_aud_cpu_aclk",
CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB, 21, 0, 0),
GATE(CLK_GOUT_AUD_DAP_CCLK, "gout_aud_dap_cclk", "dout_aud_cpu_pclkdbg",
CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP, 21, 0, 0),
/* TODO: Should be enabled in ABOX driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_AUD_ABOX_ACLK, "gout_aud_abox_aclk", "dout_aud_busd",
CLK_CON_GAT_GOUT_AUD_ABOX_ACLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_AUD_GPIO_PCLK, "gout_aud_gpio_pclk", "dout_aud_busd",
CLK_CON_GAT_GOUT_AUD_GPIO_PCLK, 21, 0, 0),
GATE(CLK_GOUT_AUD_PPMU_ACLK, "gout_aud_ppmu_aclk", "dout_aud_busd",
CLK_CON_GAT_GOUT_AUD_PPMU_ACLK, 21, 0, 0),
GATE(CLK_GOUT_AUD_PPMU_PCLK, "gout_aud_ppmu_pclk", "dout_aud_busd",
CLK_CON_GAT_GOUT_AUD_PPMU_PCLK, 21, 0, 0),
GATE(CLK_GOUT_AUD_SYSMMU_CLK, "gout_aud_sysmmu_clk", "dout_aud_busd",
CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1, 21, 0, 0),
GATE(CLK_GOUT_AUD_SYSREG_PCLK, "gout_aud_sysreg_pclk", "dout_aud_busd",
CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK, 21, 0, 0),
GATE(CLK_GOUT_AUD_WDT_PCLK, "gout_aud_wdt_pclk", "dout_aud_busd",
CLK_CON_GAT_GOUT_AUD_WDT_PCLK, 21, 0, 0),
GATE(CLK_GOUT_AUD_TZPC_PCLK, "gout_aud_tzpc_pclk", "dout_aud_busp",
CLK_CON_GAT_GOUT_AUD_TZPC_PCLK, 21, 0, 0),
GATE(CLK_GOUT_AUD_CODEC_MCLK, "gout_aud_codec_mclk", "dout_aud_mclk",
CLK_CON_GAT_GOUT_AUD_CODEC_MCLK, 21, 0, 0),
GATE(CLK_GOUT_AUD_CNT_BCLK, "gout_aud_cnt_bclk", "dout_aud_cnt",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT, 21, 0, 0),
GATE(CLK_GOUT_AUD_UAIF0_BCLK, "gout_aud_uaif0_bclk", "mout_aud_uaif0",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0, 21, 0, 0),
GATE(CLK_GOUT_AUD_UAIF1_BCLK, "gout_aud_uaif1_bclk", "mout_aud_uaif1",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1, 21, 0, 0),
GATE(CLK_GOUT_AUD_UAIF2_BCLK, "gout_aud_uaif2_bclk", "mout_aud_uaif2",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2, 21, 0, 0),
GATE(CLK_GOUT_AUD_UAIF3_BCLK, "gout_aud_uaif3_bclk", "mout_aud_uaif3",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3, 21, 0, 0),
GATE(CLK_GOUT_AUD_UAIF4_BCLK, "gout_aud_uaif4_bclk", "mout_aud_uaif4",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4, 21, 0, 0),
GATE(CLK_GOUT_AUD_UAIF5_BCLK, "gout_aud_uaif5_bclk", "mout_aud_uaif5",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5, 21, 0, 0),
GATE(CLK_GOUT_AUD_UAIF6_BCLK, "gout_aud_uaif6_bclk", "mout_aud_uaif6",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6, 21, 0, 0),
GATE(CLK_GOUT_AUD_SPDY_BCLK, "gout_aud_spdy_bclk", "dout_aud_fm",
CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY, 21, 0, 0),
};
static const struct samsung_cmu_info aud_cmu_info __initconst = {
.pll_clks = aud_pll_clks,
.nr_pll_clks = ARRAY_SIZE(aud_pll_clks),
.mux_clks = aud_mux_clks,
.nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
.div_clks = aud_div_clks,
.nr_div_clks = ARRAY_SIZE(aud_div_clks),
.gate_clks = aud_gate_clks,
.nr_gate_clks = ARRAY_SIZE(aud_gate_clks),
.fixed_clks = aud_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(aud_fixed_clks),
.nr_clk_ids = CLKS_NR_AUD,
.clk_regs = aud_clk_regs,
.nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
.clk_name = "dout_aud",
};
/* ---- CMU_CMGP ------------------------------------------------------------ */
/* Register Offset definitions for CMU_CMGP (0x11c00000) */
#define CLK_CON_MUX_CLK_CMGP_ADC 0x1000
#define CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0 0x1004
#define CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1 0x1008
#define CLK_CON_DIV_DIV_CLK_CMGP_ADC 0x1800
#define CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0 0x1804
#define CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1 0x1808
#define CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0 0x200c
#define CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1 0x2010
#define CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK 0x2018
#define CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK 0x2040
#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK 0x2044
#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK 0x2048
#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK 0x204c
#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK 0x2050
static const unsigned long cmgp_clk_regs[] __initconst = {
CLK_CON_MUX_CLK_CMGP_ADC,
CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0,
CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1,
CLK_CON_DIV_DIV_CLK_CMGP_ADC,
CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0,
CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1,
CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0,
CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1,
CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK,
CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK,
CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK,
CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK,
CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK,
CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK,
};
/* List of parent clocks for Muxes in CMU_CMGP */
PNAME(mout_cmgp_usi0_p) = { "clk_rco_cmgp", "gout_clkcmu_cmgp_bus" };
PNAME(mout_cmgp_usi1_p) = { "clk_rco_cmgp", "gout_clkcmu_cmgp_bus" };
PNAME(mout_cmgp_adc_p) = { "oscclk", "dout_cmgp_adc" };
static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = {
FRATE(CLK_RCO_CMGP, "clk_rco_cmgp", NULL, 0, 49152000),
};
static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p,
CLK_CON_MUX_CLK_CMGP_ADC, 0, 1),
MUX_F(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1, CLK_SET_RATE_PARENT, 0),
MUX_F(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus",
CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4),
DIV_F(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5, CLK_SET_RATE_PARENT, 0),
DIV_F(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
GATE(CLK_GOUT_CMGP_ADC_S0_PCLK, "gout_adc_s0_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0, 21, 0, 0),
GATE(CLK_GOUT_CMGP_ADC_S1_PCLK, "gout_adc_s1_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1, 21, 0, 0),
/* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_CMGP_GPIO_PCLK, "gout_gpio_cmgp_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0",
CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1",
CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SYSREG_CMGP_PCLK, "gout_sysreg_cmgp_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info cmgp_cmu_info __initconst = {
.mux_clks = cmgp_mux_clks,
.nr_mux_clks = ARRAY_SIZE(cmgp_mux_clks),
.div_clks = cmgp_div_clks,
.nr_div_clks = ARRAY_SIZE(cmgp_div_clks),
.gate_clks = cmgp_gate_clks,
.nr_gate_clks = ARRAY_SIZE(cmgp_gate_clks),
.fixed_clks = cmgp_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(cmgp_fixed_clks),
.nr_clk_ids = CLKS_NR_CMGP,
.clk_regs = cmgp_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cmgp_clk_regs),
.clk_name = "gout_clkcmu_cmgp_bus",
};
/* ---- CMU_CPUCL0 ---------------------------------------------------------- */
/* Register Offset definitions for CMU_CPUCL0 (0x10900000) */
#define PLL_LOCKTIME_PLL_CPUCL0 0x0000
#define PLL_CON0_PLL_CPUCL0 0x0100
#define PLL_CON1_PLL_CPUCL0 0x0104
#define PLL_CON3_PLL_CPUCL0 0x010c
#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0600
#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0610
#define CLK_CON_MUX_MUX_CLK_CPUCL0_PLL 0x100c
#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800
#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1808
#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG 0x180c
#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810
#define CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF 0x1814
#define CLK_CON_DIV_DIV_CLK_CPUCL0_CPU 0x1818
#define CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK 0x181c
#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK 0x2000
#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK 0x2004
#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK 0x2008
#define CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK 0x200c
#define CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK 0x2010
#define CLK_CON_GAT_GATE_CLK_CPUCL0_CPU 0x2020
static const unsigned long cpucl0_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_CPUCL0,
PLL_CON0_PLL_CPUCL0,
PLL_CON1_PLL_CPUCL0,
PLL_CON3_PLL_CPUCL0,
PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER,
PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER,
CLK_CON_MUX_MUX_CLK_CPUCL0_PLL,
CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK,
CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK,
CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG,
CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK,
CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF,
CLK_CON_DIV_DIV_CLK_CPUCL0_CPU,
CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK,
CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK,
CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK,
CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK,
CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK,
CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK,
CLK_CON_GAT_GATE_CLK_CPUCL0_CPU,
};
/* List of parent clocks for Muxes in CMU_CPUCL0 */
PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" };
PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_cpucl0_switch" };
PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_cpucl0_dbg" };
PNAME(mout_cpucl0_pll_p) = { "mout_pll_cpucl0",
"mout_cpucl0_switch_user" };
static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = {
PLL_35XX_RATE(26 * MHZ, 2210000000U, 255, 3, 0),
PLL_35XX_RATE(26 * MHZ, 2106000000U, 243, 3, 0),
PLL_35XX_RATE(26 * MHZ, 2002000000U, 231, 3, 0),
PLL_35XX_RATE(26 * MHZ, 1846000000U, 213, 3, 0),
PLL_35XX_RATE(26 * MHZ, 1742000000U, 201, 3, 0),
PLL_35XX_RATE(26 * MHZ, 1586000000U, 183, 3, 0),
PLL_35XX_RATE(26 * MHZ, 1456000000U, 168, 3, 0),
PLL_35XX_RATE(26 * MHZ, 1300000000U, 150, 3, 0),
PLL_35XX_RATE(26 * MHZ, 1157000000U, 267, 3, 1),
PLL_35XX_RATE(26 * MHZ, 1053000000U, 243, 3, 1),
PLL_35XX_RATE(26 * MHZ, 949000000U, 219, 3, 1),
PLL_35XX_RATE(26 * MHZ, 806000000U, 186, 3, 1),
PLL_35XX_RATE(26 * MHZ, 650000000U, 150, 3, 1),
PLL_35XX_RATE(26 * MHZ, 546000000U, 252, 3, 2),
PLL_35XX_RATE(26 * MHZ, 442000000U, 204, 3, 2),
PLL_35XX_RATE(26 * MHZ, 351000000U, 162, 3, 2),
PLL_35XX_RATE(26 * MHZ, 247000000U, 114, 3, 2),
PLL_35XX_RATE(26 * MHZ, 182000000U, 168, 3, 3),
PLL_35XX_RATE(26 * MHZ, 130000000U, 120, 3, 3),
};
static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = {
PLL(pll_0822x, CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk",
PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates),
};
static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = {
MUX_F(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p,
PLL_CON0_PLL_CPUCL0, 4, 1,
CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
MUX_F(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user",
mout_cpucl0_switch_user_p,
PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1,
CLK_SET_RATE_PARENT, 0),
MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user",
mout_cpucl0_dbg_user_p,
PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1),
MUX_F(CLK_MOUT_CPUCL0_PLL, "mout_cpucl0_pll", mout_cpucl0_pll_p,
CLK_CON_MUX_MUX_CLK_CPUCL0_PLL, 0, 1, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_div_clock cpucl0_div_clks[] __initconst = {
DIV_F(CLK_DOUT_CPUCL0_CPU, "dout_cpucl0_cpu", "mout_cpucl0_pll",
CLK_CON_DIV_DIV_CLK_CPUCL0_CPU, 0, 1,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CPUCL0_CMUREF, "dout_cpucl0_cmuref", "dout_cpucl0_cpu",
CLK_CON_DIV_DIV_CLK_CPUCL0_CMUREF, 0, 3,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CPUCL0_PCLK, "dout_cpucl0_pclk", "dout_cpucl0_cpu",
CLK_CON_DIV_DIV_CLK_CPUCL0_PCLK, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
/* EMBEDDED_CMU_CPUCL0 */
DIV_F(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk", "gout_cluster0_cpu",
CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk",
"gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CLUSTER0_PCLKDBG, "dout_cluster0_pclkdbg",
"gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLKDBG, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk",
"gout_cluster0_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
};
static const struct samsung_gate_clock cpucl0_gate_clks[] __initconst = {
GATE(CLK_GOUT_CPUCL0_CMU_CPUCL0_PCLK, "gout_cpucl0_cmu_cpucl0_pclk",
"dout_cpucl0_pclk",
CLK_CON_GAT_CLK_CPUCL0_CMU_CPUCL0_PCLK, 21, CLK_IGNORE_UNUSED, 0),
/* EMBEDDED_CMU_CPUCL0 */
GATE(CLK_GOUT_CLUSTER0_CPU, "gout_cluster0_cpu", "dout_cpucl0_cpu",
CLK_CON_GAT_GATE_CLK_CPUCL0_CPU, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CLUSTER0_SCLK, "gout_cluster0_sclk", "gout_cluster0_cpu",
CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_SCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CLUSTER0_ATCLK, "gout_cluster0_atclk",
"dout_cluster0_atclk",
CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_ATCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CLUSTER0_PERIPHCLK, "gout_cluster0_periphclk",
"dout_cluster0_periphclk",
CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PERIPHCLK, 21,
CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CLUSTER0_PCLK, "gout_cluster0_pclk",
"dout_cluster0_pclkdbg",
CLK_CON_GAT_CLK_CPUCL0_CLUSTER0_PCLK, 21, CLK_IGNORE_UNUSED, 0),
};
/*
 * Each parameter is written into the corresponding DIV register, so the
 * resulting divider for each parameter is (param + 1). All these parameters
 * must be in the range of 0..15, as the divider range for all of these DIV
 * clocks is 1..16. The default values for these dividers are (1, 3, 3, 1).
 */
#define E850_CPU_DIV0(aclk, atclk, pclkdbg, periphclk) \
(((aclk) << 16) | ((atclk) << 12) | ((pclkdbg) << 8) | \
((periphclk) << 4))
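/*
 * For example, E850_CPU_DIV0(1, 3, 3, 1) evaluates to 0x13310 and requests
 * ACLK = cpu/2, ATCLK = cpu/4, PCLKDBG = cpu/4 and PERIPHCLK = cpu/2.
 */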
static const struct exynos_cpuclk_cfg_data exynos850_cluster_clk_d[] __initconst
= {
{ 2210000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 2106000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 2002000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 1846000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 1742000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 1586000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 1456000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 1300000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 1157000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 1053000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 949000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 806000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 650000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 546000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 442000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 351000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 247000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 182000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 130000, E850_CPU_DIV0(1, 3, 3, 1) },
{ 0 }
};
static const struct samsung_cpu_clock cpucl0_cpu_clks[] __initconst = {
CPU_CLK(CLK_CLUSTER0_SCLK, "cluster0_clk", CLK_MOUT_PLL_CPUCL0,
CLK_MOUT_CPUCL0_SWITCH_USER, 0, 0x0, CPUCLK_LAYOUT_E850_CL0,
exynos850_cluster_clk_d),
};
static const struct samsung_cmu_info cpucl0_cmu_info __initconst = {
.pll_clks = cpucl0_pll_clks,
.nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks),
.mux_clks = cpucl0_mux_clks,
.nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks),
.div_clks = cpucl0_div_clks,
.nr_div_clks = ARRAY_SIZE(cpucl0_div_clks),
.gate_clks = cpucl0_gate_clks,
.nr_gate_clks = ARRAY_SIZE(cpucl0_gate_clks),
.cpu_clks = cpucl0_cpu_clks,
.nr_cpu_clks = ARRAY_SIZE(cpucl0_cpu_clks),
.nr_clk_ids = CLKS_NR_CPUCL0,
.clk_regs = cpucl0_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs),
.clk_name = "dout_cpucl0_switch",
.manual_plls = true,
};
static void __init exynos850_cmu_cpucl0_init(struct device_node *np)
{
exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info);
}
/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */
CLK_OF_DECLARE(exynos850_cmu_cpucl0, "samsung,exynos850-cmu-cpucl0",
exynos850_cmu_cpucl0_init);
/* ---- CMU_CPUCL1 ---------------------------------------------------------- */
/* Register Offset definitions for CMU_CPUCL1 (0x10800000) */
#define PLL_LOCKTIME_PLL_CPUCL1 0x0000
#define PLL_CON0_PLL_CPUCL1 0x0100
#define PLL_CON1_PLL_CPUCL1 0x0104
#define PLL_CON3_PLL_CPUCL1 0x010c
#define PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER 0x0600
#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610
#define CLK_CON_MUX_MUX_CLK_CPUCL1_PLL 0x1000
#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800
#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1808
#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG 0x180c
#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810
#define CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF 0x1814
#define CLK_CON_DIV_DIV_CLK_CPUCL1_CPU 0x1818
#define CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK 0x181c
#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK 0x2000
#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK 0x2004
#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK 0x2008
#define CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK 0x200c
#define CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK 0x2010
#define CLK_CON_GAT_GATE_CLK_CPUCL1_CPU 0x2020
static const unsigned long cpucl1_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_CPUCL1,
PLL_CON0_PLL_CPUCL1,
PLL_CON1_PLL_CPUCL1,
PLL_CON3_PLL_CPUCL1,
PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER,
PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER,
CLK_CON_MUX_MUX_CLK_CPUCL1_PLL,
CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK,
CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK,
CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG,
CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK,
CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF,
CLK_CON_DIV_DIV_CLK_CPUCL1_CPU,
CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK,
CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK,
CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK,
CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK,
CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK,
CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK,
CLK_CON_GAT_GATE_CLK_CPUCL1_CPU,
};
/* List of parent clocks for Muxes in CMU_CPUCL1 */
PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_cpucl1_switch" };
PNAME(mout_cpucl1_dbg_user_p) = { "oscclk", "dout_cpucl1_dbg" };
PNAME(mout_cpucl1_pll_p) = { "mout_pll_cpucl1",
"mout_cpucl1_switch_user" };
static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = {
PLL(pll_0822x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk",
PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates),
};
static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = {
MUX_F(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p,
PLL_CON0_PLL_CPUCL1, 4, 1,
CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
MUX_F(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user",
mout_cpucl1_switch_user_p,
PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1,
CLK_SET_RATE_PARENT, 0),
MUX(CLK_MOUT_CPUCL1_DBG_USER, "mout_cpucl1_dbg_user",
mout_cpucl1_dbg_user_p,
PLL_CON0_MUX_CLKCMU_CPUCL1_DBG_USER, 4, 1),
MUX_F(CLK_MOUT_CPUCL1_PLL, "mout_cpucl1_pll", mout_cpucl1_pll_p,
CLK_CON_MUX_MUX_CLK_CPUCL1_PLL, 0, 1, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_div_clock cpucl1_div_clks[] __initconst = {
DIV_F(CLK_DOUT_CPUCL1_CPU, "dout_cpucl1_cpu", "mout_cpucl1_pll",
CLK_CON_DIV_DIV_CLK_CPUCL1_CPU, 0, 1,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CPUCL1_CMUREF, "dout_cpucl1_cmuref", "dout_cpucl1_cpu",
CLK_CON_DIV_DIV_CLK_CPUCL1_CMUREF, 0, 3,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CPUCL1_PCLK, "dout_cpucl1_pclk", "dout_cpucl1_cpu",
CLK_CON_DIV_DIV_CLK_CPUCL1_PCLK, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
/* EMBEDDED_CMU_CPUCL1 */
DIV_F(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk", "gout_cluster1_cpu",
CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk",
"gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CLUSTER1_PCLKDBG, "dout_cluster1_pclkdbg",
"gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLKDBG, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
DIV_F(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk",
"gout_cluster1_cpu", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4,
CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
};
static const struct samsung_gate_clock cpucl1_gate_clks[] __initconst = {
GATE(CLK_GOUT_CPUCL1_CMU_CPUCL1_PCLK, "gout_cpucl1_cmu_cpucl1_pclk",
"dout_cpucl1_pclk",
CLK_CON_GAT_CLK_CPUCL1_CMU_CPUCL1_PCLK, 21, CLK_IGNORE_UNUSED, 0),
/* EMBEDDED_CMU_CPUCL1 */
GATE(CLK_GOUT_CLUSTER1_CPU, "gout_cluster1_cpu", "dout_cpucl1_cpu",
CLK_CON_GAT_GATE_CLK_CPUCL1_CPU, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CLUSTER1_SCLK, "gout_cluster1_sclk", "gout_cluster1_cpu",
CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_SCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CLUSTER1_ATCLK, "gout_cluster1_atclk",
"dout_cluster1_atclk",
CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_ATCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CLUSTER1_PERIPHCLK, "gout_cluster1_periphclk",
"dout_cluster1_periphclk",
CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PERIPHCLK, 21,
CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CLUSTER1_PCLK, "gout_cluster1_pclk",
"dout_cluster1_pclkdbg",
CLK_CON_GAT_CLK_CPUCL1_CLUSTER1_PCLK, 21, CLK_IGNORE_UNUSED, 0),
};
static const struct samsung_cpu_clock cpucl1_cpu_clks[] __initconst = {
CPU_CLK(CLK_CLUSTER1_SCLK, "cluster1_clk", CLK_MOUT_PLL_CPUCL1,
CLK_MOUT_CPUCL1_SWITCH_USER, 0, 0x0, CPUCLK_LAYOUT_E850_CL1,
exynos850_cluster_clk_d),
};
static const struct samsung_cmu_info cpucl1_cmu_info __initconst = {
.pll_clks = cpucl1_pll_clks,
.nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks),
.mux_clks = cpucl1_mux_clks,
.nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks),
.div_clks = cpucl1_div_clks,
.nr_div_clks = ARRAY_SIZE(cpucl1_div_clks),
.gate_clks = cpucl1_gate_clks,
.nr_gate_clks = ARRAY_SIZE(cpucl1_gate_clks),
.cpu_clks = cpucl1_cpu_clks,
.nr_cpu_clks = ARRAY_SIZE(cpucl1_cpu_clks),
.nr_clk_ids = CLKS_NR_CPUCL1,
.clk_regs = cpucl1_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs),
.clk_name = "dout_cpucl1_switch",
.manual_plls = true,
};
static void __init exynos850_cmu_cpucl1_init(struct device_node *np)
{
exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info);
}
/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */
CLK_OF_DECLARE(exynos850_cmu_cpucl1, "samsung,exynos850-cmu-cpucl1",
exynos850_cmu_cpucl1_init);
/* ---- CMU_G3D ------------------------------------------------------------- */
/* Register Offset definitions for CMU_G3D (0x11400000) */
#define PLL_LOCKTIME_PLL_G3D 0x0000
#define PLL_CON0_PLL_G3D 0x0100
#define PLL_CON3_PLL_G3D 0x010c
#define PLL_CON0_MUX_CLKCMU_G3D_SWITCH_USER 0x0600
#define CLK_CON_MUX_MUX_CLK_G3D_BUSD 0x1000
#define CLK_CON_DIV_DIV_CLK_G3D_BUSP 0x1804
#define CLK_CON_GAT_CLK_G3D_CMU_G3D_PCLK 0x2000
#define CLK_CON_GAT_CLK_G3D_GPU_CLK 0x2004
#define CLK_CON_GAT_GOUT_G3D_TZPC_PCLK 0x200c
#define CLK_CON_GAT_GOUT_G3D_GRAY2BIN_CLK 0x2010
#define CLK_CON_GAT_GOUT_G3D_BUSD_CLK 0x2024
#define CLK_CON_GAT_GOUT_G3D_BUSP_CLK 0x2028
#define CLK_CON_GAT_GOUT_G3D_SYSREG_PCLK 0x202c
static const unsigned long g3d_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_G3D,
PLL_CON0_PLL_G3D,
PLL_CON3_PLL_G3D,
PLL_CON0_MUX_CLKCMU_G3D_SWITCH_USER,
CLK_CON_MUX_MUX_CLK_G3D_BUSD,
CLK_CON_DIV_DIV_CLK_G3D_BUSP,
CLK_CON_GAT_CLK_G3D_CMU_G3D_PCLK,
CLK_CON_GAT_CLK_G3D_GPU_CLK,
CLK_CON_GAT_GOUT_G3D_TZPC_PCLK,
CLK_CON_GAT_GOUT_G3D_GRAY2BIN_CLK,
CLK_CON_GAT_GOUT_G3D_BUSD_CLK,
CLK_CON_GAT_GOUT_G3D_BUSP_CLK,
CLK_CON_GAT_GOUT_G3D_SYSREG_PCLK,
};
/* List of parent clocks for Muxes in CMU_G3D */
PNAME(mout_g3d_pll_p) = { "oscclk", "fout_g3d_pll" };
PNAME(mout_g3d_switch_user_p) = { "oscclk", "dout_g3d_switch" };
PNAME(mout_g3d_busd_p) = { "mout_g3d_pll", "mout_g3d_switch_user" };
/*
 * Do not provide a rate table for PLL_G3D: the MANUAL_PLL_CTRL bit is not
 * set for that PLL by default, so the set_rate operation would fail.
 */
static const struct samsung_pll_clock g3d_pll_clks[] __initconst = {
PLL(pll_0818x, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
PLL_LOCKTIME_PLL_G3D, PLL_CON3_PLL_G3D, NULL),
};
static const struct samsung_mux_clock g3d_mux_clks[] __initconst = {
MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
PLL_CON0_PLL_G3D, 4, 1),
MUX(CLK_MOUT_G3D_SWITCH_USER, "mout_g3d_switch_user",
mout_g3d_switch_user_p,
PLL_CON0_MUX_CLKCMU_G3D_SWITCH_USER, 4, 1),
MUX(CLK_MOUT_G3D_BUSD, "mout_g3d_busd", mout_g3d_busd_p,
CLK_CON_MUX_MUX_CLK_G3D_BUSD, 0, 1),
};
static const struct samsung_div_clock g3d_div_clks[] __initconst = {
DIV(CLK_DOUT_G3D_BUSP, "dout_g3d_busp", "mout_g3d_busd",
CLK_CON_DIV_DIV_CLK_G3D_BUSP, 0, 3),
};
static const struct samsung_gate_clock g3d_gate_clks[] __initconst = {
GATE(CLK_GOUT_G3D_CMU_G3D_PCLK, "gout_g3d_cmu_g3d_pclk",
"dout_g3d_busp",
CLK_CON_GAT_CLK_G3D_CMU_G3D_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_G3D_GPU_CLK, "gout_g3d_gpu_clk", "mout_g3d_busd",
CLK_CON_GAT_CLK_G3D_GPU_CLK, 21, 0, 0),
GATE(CLK_GOUT_G3D_TZPC_PCLK, "gout_g3d_tzpc_pclk", "dout_g3d_busp",
CLK_CON_GAT_GOUT_G3D_TZPC_PCLK, 21, 0, 0),
GATE(CLK_GOUT_G3D_GRAY2BIN_CLK, "gout_g3d_gray2bin_clk",
"mout_g3d_busd",
CLK_CON_GAT_GOUT_G3D_GRAY2BIN_CLK, 21, 0, 0),
GATE(CLK_GOUT_G3D_BUSD_CLK, "gout_g3d_busd_clk", "mout_g3d_busd",
CLK_CON_GAT_GOUT_G3D_BUSD_CLK, 21, 0, 0),
GATE(CLK_GOUT_G3D_BUSP_CLK, "gout_g3d_busp_clk", "dout_g3d_busp",
CLK_CON_GAT_GOUT_G3D_BUSP_CLK, 21, 0, 0),
GATE(CLK_GOUT_G3D_SYSREG_PCLK, "gout_g3d_sysreg_pclk", "dout_g3d_busp",
CLK_CON_GAT_GOUT_G3D_SYSREG_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info g3d_cmu_info __initconst = {
.pll_clks = g3d_pll_clks,
.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks),
.mux_clks = g3d_mux_clks,
.nr_mux_clks = ARRAY_SIZE(g3d_mux_clks),
.div_clks = g3d_div_clks,
.nr_div_clks = ARRAY_SIZE(g3d_div_clks),
.gate_clks = g3d_gate_clks,
.nr_gate_clks = ARRAY_SIZE(g3d_gate_clks),
.nr_clk_ids = CLKS_NR_G3D,
.clk_regs = g3d_clk_regs,
.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs),
.clk_name = "dout_g3d_switch",
};
/* ---- CMU_HSI ------------------------------------------------------------- */
/* Register Offset definitions for CMU_HSI (0x13400000) */
#define PLL_CON0_MUX_CLKCMU_HSI_BUS_USER 0x0600
#define PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER 0x0610
#define PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER 0x0620
#define CLK_CON_MUX_MUX_CLK_HSI_RTC 0x1000
#define CLK_CON_GAT_CLK_HSI_CMU_HSI_PCLK 0x2000
#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV 0x2008
#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50 0x200c
#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26 0x2010
#define CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK 0x2018
#define CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK 0x2024
#define CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN 0x2028
#define CLK_CON_GAT_GOUT_HSI_PPMU_ACLK 0x202c
#define CLK_CON_GAT_GOUT_HSI_PPMU_PCLK 0x2030
#define CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK 0x2038
#define CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20 0x203c
#define CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY 0x2040
static const unsigned long hsi_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_HSI_BUS_USER,
PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER,
PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER,
CLK_CON_MUX_MUX_CLK_HSI_RTC,
CLK_CON_GAT_CLK_HSI_CMU_HSI_PCLK,
CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV,
CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50,
CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26,
CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK,
CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK,
CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN,
CLK_CON_GAT_GOUT_HSI_PPMU_ACLK,
CLK_CON_GAT_GOUT_HSI_PPMU_PCLK,
CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK,
CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20,
CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY,
};
/* List of parent clocks for Muxes in CMU_HSI */
PNAME(mout_hsi_bus_user_p) = { "oscclk", "dout_hsi_bus" };
PNAME(mout_hsi_mmc_card_user_p) = { "oscclk", "dout_hsi_mmc_card" };
PNAME(mout_hsi_usb20drd_user_p) = { "oscclk", "dout_hsi_usb20drd" };
PNAME(mout_hsi_rtc_p) = { "rtcclk", "oscclk" };
static const struct samsung_mux_clock hsi_mux_clks[] __initconst = {
MUX(CLK_MOUT_HSI_BUS_USER, "mout_hsi_bus_user", mout_hsi_bus_user_p,
PLL_CON0_MUX_CLKCMU_HSI_BUS_USER, 4, 1),
MUX_F(CLK_MOUT_HSI_MMC_CARD_USER, "mout_hsi_mmc_card_user",
mout_hsi_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER,
4, 1, CLK_SET_RATE_PARENT, 0),
MUX(CLK_MOUT_HSI_USB20DRD_USER, "mout_hsi_usb20drd_user",
mout_hsi_usb20drd_user_p, PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER,
4, 1),
MUX(CLK_MOUT_HSI_RTC, "mout_hsi_rtc", mout_hsi_rtc_p,
CLK_CON_MUX_MUX_CLK_HSI_RTC, 0, 1),
};
static const struct samsung_gate_clock hsi_gate_clks[] __initconst = {
/* TODO: Should be enabled in corresponding driver */
GATE(CLK_GOUT_HSI_CMU_HSI_PCLK, "gout_hsi_cmu_hsi_pclk",
"mout_hsi_bus_user",
CLK_CON_GAT_CLK_HSI_CMU_HSI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_USB_RTC_CLK, "gout_usb_rtc", "mout_hsi_rtc",
CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV, 21, 0, 0),
GATE(CLK_GOUT_USB_REF_CLK, "gout_usb_ref", "mout_hsi_usb20drd_user",
CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50, 21, 0, 0),
GATE(CLK_GOUT_USB_PHY_REF_CLK, "gout_usb_phy_ref", "oscclk",
CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26, 21, 0, 0),
/* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_GPIO_HSI_PCLK, "gout_gpio_hsi_pclk", "mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
"mout_hsi_mmc_card_user",
CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_HSI_PPMU_ACLK, "gout_hsi_ppmu_aclk", "mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_PPMU_ACLK, 21, 0, 0),
GATE(CLK_GOUT_HSI_PPMU_PCLK, "gout_hsi_ppmu_pclk", "mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_PPMU_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SYSREG_HSI_PCLK, "gout_sysreg_hsi_pclk",
"mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK, 21, 0, 0),
GATE(CLK_GOUT_USB_PHY_ACLK, "gout_usb_phy_aclk", "mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20, 21, 0, 0),
GATE(CLK_GOUT_USB_BUS_EARLY_CLK, "gout_usb_bus_early",
"mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY, 21, 0, 0),
};
static const struct samsung_cmu_info hsi_cmu_info __initconst = {
.mux_clks = hsi_mux_clks,
.nr_mux_clks = ARRAY_SIZE(hsi_mux_clks),
.gate_clks = hsi_gate_clks,
.nr_gate_clks = ARRAY_SIZE(hsi_gate_clks),
.nr_clk_ids = CLKS_NR_HSI,
.clk_regs = hsi_clk_regs,
.nr_clk_regs = ARRAY_SIZE(hsi_clk_regs),
.clk_name = "dout_hsi_bus",
};
/* ---- CMU_IS -------------------------------------------------------------- */
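/* Register Offset definitions for CMU_IS */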
#define PLL_CON0_MUX_CLKCMU_IS_BUS_USER 0x0600
#define PLL_CON0_MUX_CLKCMU_IS_GDC_USER 0x0610
#define PLL_CON0_MUX_CLKCMU_IS_ITP_USER 0x0620
#define PLL_CON0_MUX_CLKCMU_IS_VRA_USER 0x0630
#define CLK_CON_DIV_DIV_CLK_IS_BUSP 0x1800
#define CLK_CON_GAT_CLK_IS_CMU_IS_PCLK 0x2000
#define CLK_CON_GAT_GOUT_IS_CSIS0_ACLK 0x2040
#define CLK_CON_GAT_GOUT_IS_CSIS1_ACLK 0x2044
#define CLK_CON_GAT_GOUT_IS_CSIS2_ACLK 0x2048
#define CLK_CON_GAT_GOUT_IS_TZPC_PCLK 0x204c
#define CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA 0x2050
#define CLK_CON_GAT_GOUT_IS_CLK_GDC 0x2054
#define CLK_CON_GAT_GOUT_IS_CLK_IPP 0x2058
#define CLK_CON_GAT_GOUT_IS_CLK_ITP 0x205c
#define CLK_CON_GAT_GOUT_IS_CLK_MCSC 0x2060
#define CLK_CON_GAT_GOUT_IS_CLK_VRA 0x2064
#define CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK 0x2074
#define CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK 0x2078
#define CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK 0x207c
#define CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK 0x2080
#define CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1 0x2098
#define CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1 0x209c
#define CLK_CON_GAT_GOUT_IS_SYSREG_PCLK 0x20a0
static const unsigned long is_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_IS_BUS_USER,
PLL_CON0_MUX_CLKCMU_IS_GDC_USER,
PLL_CON0_MUX_CLKCMU_IS_ITP_USER,
PLL_CON0_MUX_CLKCMU_IS_VRA_USER,
CLK_CON_DIV_DIV_CLK_IS_BUSP,
CLK_CON_GAT_CLK_IS_CMU_IS_PCLK,
CLK_CON_GAT_GOUT_IS_CSIS0_ACLK,
CLK_CON_GAT_GOUT_IS_CSIS1_ACLK,
CLK_CON_GAT_GOUT_IS_CSIS2_ACLK,
CLK_CON_GAT_GOUT_IS_TZPC_PCLK,
CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA,
CLK_CON_GAT_GOUT_IS_CLK_GDC,
CLK_CON_GAT_GOUT_IS_CLK_IPP,
CLK_CON_GAT_GOUT_IS_CLK_ITP,
CLK_CON_GAT_GOUT_IS_CLK_MCSC,
CLK_CON_GAT_GOUT_IS_CLK_VRA,
CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK,
CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK,
CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK,
CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK,
CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1,
CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1,
CLK_CON_GAT_GOUT_IS_SYSREG_PCLK,
};
/* List of parent clocks for Muxes in CMU_IS */
PNAME(mout_is_bus_user_p) = { "oscclk", "dout_is_bus" };
PNAME(mout_is_itp_user_p) = { "oscclk", "dout_is_itp" };
PNAME(mout_is_vra_user_p) = { "oscclk", "dout_is_vra" };
PNAME(mout_is_gdc_user_p) = { "oscclk", "dout_is_gdc" };
static const struct samsung_mux_clock is_mux_clks[] __initconst = {
MUX(CLK_MOUT_IS_BUS_USER, "mout_is_bus_user", mout_is_bus_user_p,
PLL_CON0_MUX_CLKCMU_IS_BUS_USER, 4, 1),
MUX(CLK_MOUT_IS_ITP_USER, "mout_is_itp_user", mout_is_itp_user_p,
PLL_CON0_MUX_CLKCMU_IS_ITP_USER, 4, 1),
MUX(CLK_MOUT_IS_VRA_USER, "mout_is_vra_user", mout_is_vra_user_p,
PLL_CON0_MUX_CLKCMU_IS_VRA_USER, 4, 1),
MUX(CLK_MOUT_IS_GDC_USER, "mout_is_gdc_user", mout_is_gdc_user_p,
PLL_CON0_MUX_CLKCMU_IS_GDC_USER, 4, 1),
};
static const struct samsung_div_clock is_div_clks[] __initconst = {
DIV(CLK_DOUT_IS_BUSP, "dout_is_busp", "mout_is_bus_user",
CLK_CON_DIV_DIV_CLK_IS_BUSP, 0, 2),
};
static const struct samsung_gate_clock is_gate_clks[] __initconst = {
/* TODO: Should be enabled in IS driver */
GATE(CLK_GOUT_IS_CMU_IS_PCLK, "gout_is_cmu_is_pclk", "dout_is_busp",
CLK_CON_GAT_CLK_IS_CMU_IS_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_IS_CSIS0_ACLK, "gout_is_csis0_aclk", "mout_is_bus_user",
CLK_CON_GAT_GOUT_IS_CSIS0_ACLK, 21, 0, 0),
GATE(CLK_GOUT_IS_CSIS1_ACLK, "gout_is_csis1_aclk", "mout_is_bus_user",
CLK_CON_GAT_GOUT_IS_CSIS1_ACLK, 21, 0, 0),
GATE(CLK_GOUT_IS_CSIS2_ACLK, "gout_is_csis2_aclk", "mout_is_bus_user",
CLK_CON_GAT_GOUT_IS_CSIS2_ACLK, 21, 0, 0),
GATE(CLK_GOUT_IS_TZPC_PCLK, "gout_is_tzpc_pclk", "dout_is_busp",
CLK_CON_GAT_GOUT_IS_TZPC_PCLK, 21, 0, 0),
GATE(CLK_GOUT_IS_CSIS_DMA_CLK, "gout_is_csis_dma_clk",
"mout_is_bus_user",
CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA, 21, 0, 0),
GATE(CLK_GOUT_IS_GDC_CLK, "gout_is_gdc_clk", "mout_is_gdc_user",
CLK_CON_GAT_GOUT_IS_CLK_GDC, 21, 0, 0),
GATE(CLK_GOUT_IS_IPP_CLK, "gout_is_ipp_clk", "mout_is_bus_user",
CLK_CON_GAT_GOUT_IS_CLK_IPP, 21, 0, 0),
GATE(CLK_GOUT_IS_ITP_CLK, "gout_is_itp_clk", "mout_is_itp_user",
CLK_CON_GAT_GOUT_IS_CLK_ITP, 21, 0, 0),
GATE(CLK_GOUT_IS_MCSC_CLK, "gout_is_mcsc_clk", "mout_is_itp_user",
CLK_CON_GAT_GOUT_IS_CLK_MCSC, 21, 0, 0),
GATE(CLK_GOUT_IS_VRA_CLK, "gout_is_vra_clk", "mout_is_vra_user",
CLK_CON_GAT_GOUT_IS_CLK_VRA, 21, 0, 0),
GATE(CLK_GOUT_IS_PPMU_IS0_ACLK, "gout_is_ppmu_is0_aclk",
"mout_is_bus_user",
CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK, 21, 0, 0),
GATE(CLK_GOUT_IS_PPMU_IS0_PCLK, "gout_is_ppmu_is0_pclk", "dout_is_busp",
CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_IS_PPMU_IS1_ACLK, "gout_is_ppmu_is1_aclk",
"mout_is_itp_user",
CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK, 21, 0, 0),
GATE(CLK_GOUT_IS_PPMU_IS1_PCLK, "gout_is_ppmu_is1_pclk", "dout_is_busp",
CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK, 21, 0, 0),
GATE(CLK_GOUT_IS_SYSMMU_IS0_CLK, "gout_is_sysmmu_is0_clk",
"mout_is_bus_user",
CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1, 21, 0, 0),
GATE(CLK_GOUT_IS_SYSMMU_IS1_CLK, "gout_is_sysmmu_is1_clk",
"mout_is_itp_user",
CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1, 21, 0, 0),
GATE(CLK_GOUT_IS_SYSREG_PCLK, "gout_is_sysreg_pclk", "dout_is_busp",
CLK_CON_GAT_GOUT_IS_SYSREG_PCLK, 21, 0, 0),
};
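/*
 * The clock named in .clk_name is this CMU's bus clock coming from
 * CMU_TOP; the common Exynos ARM64 CMU code keeps that clock enabled
 * so the CMU registers stay accessible (including across
 * suspend/resume).
 */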
static const struct samsung_cmu_info is_cmu_info __initconst = {
.mux_clks = is_mux_clks,
.nr_mux_clks = ARRAY_SIZE(is_mux_clks),
.div_clks = is_div_clks,
.nr_div_clks = ARRAY_SIZE(is_div_clks),
.gate_clks = is_gate_clks,
.nr_gate_clks = ARRAY_SIZE(is_gate_clks),
.nr_clk_ids = CLKS_NR_IS,
.clk_regs = is_clk_regs,
.nr_clk_regs = ARRAY_SIZE(is_clk_regs),
.clk_name = "dout_is_bus",
};
/* ---- CMU_MFCMSCL --------------------------------------------------------- */
/* Register Offset definitions for CMU_MFCMSCL (0x12c00000) */
#define PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER 0x0600
#define PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER 0x0610
#define PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER 0x0620
#define PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER 0x0630
#define CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP 0x1800
#define CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK 0x2000
#define CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK 0x2038
#define CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK 0x203c
#define CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK 0x2048
#define CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK 0x204c
#define CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK 0x2050
#define CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK 0x2054
#define CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK 0x2058
#define CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1 0x2074
#define CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK 0x2078
static const unsigned long mfcmscl_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER,
PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER,
PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER,
PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER,
CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP,
CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK,
CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK,
CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK,
CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK,
CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK,
CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK,
CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK,
CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK,
CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1,
CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK,
};
/* List of parent clocks for Muxes in CMU_MFCMSCL */
PNAME(mout_mfcmscl_mfc_user_p) = { "oscclk", "dout_mfcmscl_mfc" };
PNAME(mout_mfcmscl_m2m_user_p) = { "oscclk", "dout_mfcmscl_m2m" };
PNAME(mout_mfcmscl_mcsc_user_p) = { "oscclk", "dout_mfcmscl_mcsc" };
PNAME(mout_mfcmscl_jpeg_user_p) = { "oscclk", "dout_mfcmscl_jpeg" };
static const struct samsung_mux_clock mfcmscl_mux_clks[] __initconst = {
MUX(CLK_MOUT_MFCMSCL_MFC_USER, "mout_mfcmscl_mfc_user",
mout_mfcmscl_mfc_user_p,
PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER, 4, 1),
MUX(CLK_MOUT_MFCMSCL_M2M_USER, "mout_mfcmscl_m2m_user",
mout_mfcmscl_m2m_user_p,
PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER, 4, 1),
MUX(CLK_MOUT_MFCMSCL_MCSC_USER, "mout_mfcmscl_mcsc_user",
mout_mfcmscl_mcsc_user_p,
PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER, 4, 1),
MUX(CLK_MOUT_MFCMSCL_JPEG_USER, "mout_mfcmscl_jpeg_user",
mout_mfcmscl_jpeg_user_p,
PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER, 4, 1),
};
static const struct samsung_div_clock mfcmscl_div_clks[] __initconst = {
DIV(CLK_DOUT_MFCMSCL_BUSP, "dout_mfcmscl_busp", "mout_mfcmscl_mfc_user",
CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP, 0, 3),
};
static const struct samsung_gate_clock mfcmscl_gate_clks[] __initconst = {
/* TODO: Should be enabled in MFC driver */
GATE(CLK_GOUT_MFCMSCL_CMU_MFCMSCL_PCLK, "gout_mfcmscl_cmu_mfcmscl_pclk",
"dout_mfcmscl_busp", CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK,
21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_MFCMSCL_TZPC_PCLK, "gout_mfcmscl_tzpc_pclk",
"dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK,
21, 0, 0),
GATE(CLK_GOUT_MFCMSCL_JPEG_ACLK, "gout_mfcmscl_jpeg_aclk",
"mout_mfcmscl_jpeg_user", CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK,
21, 0, 0),
GATE(CLK_GOUT_MFCMSCL_M2M_ACLK, "gout_mfcmscl_m2m_aclk",
"mout_mfcmscl_m2m_user", CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK,
21, 0, 0),
GATE(CLK_GOUT_MFCMSCL_MCSC_CLK, "gout_mfcmscl_mcsc_clk",
"mout_mfcmscl_mcsc_user", CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK,
21, 0, 0),
GATE(CLK_GOUT_MFCMSCL_MFC_ACLK, "gout_mfcmscl_mfc_aclk",
"mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK,
21, 0, 0),
GATE(CLK_GOUT_MFCMSCL_PPMU_ACLK, "gout_mfcmscl_ppmu_aclk",
"mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK,
21, 0, 0),
GATE(CLK_GOUT_MFCMSCL_PPMU_PCLK, "gout_mfcmscl_ppmu_pclk",
"dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK,
21, 0, 0),
GATE(CLK_GOUT_MFCMSCL_SYSMMU_CLK, "gout_mfcmscl_sysmmu_clk",
"mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1,
21, 0, 0),
GATE(CLK_GOUT_MFCMSCL_SYSREG_PCLK, "gout_mfcmscl_sysreg_pclk",
"dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK,
21, 0, 0),
};
static const struct samsung_cmu_info mfcmscl_cmu_info __initconst = {
.mux_clks = mfcmscl_mux_clks,
.nr_mux_clks = ARRAY_SIZE(mfcmscl_mux_clks),
.div_clks = mfcmscl_div_clks,
.nr_div_clks = ARRAY_SIZE(mfcmscl_div_clks),
.gate_clks = mfcmscl_gate_clks,
.nr_gate_clks = ARRAY_SIZE(mfcmscl_gate_clks),
.nr_clk_ids = CLKS_NR_MFCMSCL,
.clk_regs = mfcmscl_clk_regs,
.nr_clk_regs = ARRAY_SIZE(mfcmscl_clk_regs),
.clk_name = "dout_mfcmscl_mfc",
};
/* ---- CMU_PERI ------------------------------------------------------------ */
/* Register Offset definitions for CMU_PERI (0x10030000) */
#define PLL_CON0_MUX_CLKCMU_PERI_BUS_USER 0x0600
#define PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER 0x0610
#define PLL_CON0_MUX_CLKCMU_PERI_SPI_USER 0x0620
#define PLL_CON0_MUX_CLKCMU_PERI_UART_USER 0x0630
#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0 0x1800
#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1 0x1804
#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2 0x1808
#define CLK_CON_DIV_DIV_CLK_PERI_SPI_0 0x180c
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0 0x200c
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1 0x2010
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2 0x2014
#define CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK 0x2018
#define CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK 0x2020
#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK 0x2024
#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK 0x2028
#define CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK 0x202c
#define CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK 0x2030
#define CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK 0x2034
#define CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK 0x2038
#define CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK 0x203c
#define CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK 0x2040
#define CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK 0x2044
#define CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK 0x2048
#define CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK 0x204c
#define CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK 0x2050
#define CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK 0x2054
#define CLK_CON_GAT_GOUT_PERI_MCT_PCLK 0x205c
#define CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK 0x2064
#define CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK 0x209c
#define CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK 0x20a0
#define CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK 0x20a4
#define CLK_CON_GAT_GOUT_PERI_UART_IPCLK 0x20a8
#define CLK_CON_GAT_GOUT_PERI_UART_PCLK 0x20ac
#define CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK 0x20b0
#define CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK 0x20b4
static const unsigned long peri_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_PERI_BUS_USER,
PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER,
PLL_CON0_MUX_CLKCMU_PERI_SPI_USER,
PLL_CON0_MUX_CLKCMU_PERI_UART_USER,
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0,
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1,
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2,
CLK_CON_DIV_DIV_CLK_PERI_SPI_0,
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0,
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1,
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2,
CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK,
CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK,
CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK,
CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK,
CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK,
CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK,
CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK,
CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK,
CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK,
CLK_CON_GAT_GOUT_PERI_MCT_PCLK,
CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK,
CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK,
CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK,
CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK,
CLK_CON_GAT_GOUT_PERI_UART_IPCLK,
CLK_CON_GAT_GOUT_PERI_UART_PCLK,
CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK,
CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK,
};
/* List of parent clocks for Muxes in CMU_PERI */
PNAME(mout_peri_bus_user_p) = { "oscclk", "dout_peri_bus" };
PNAME(mout_peri_uart_user_p) = { "oscclk", "dout_peri_uart" };
PNAME(mout_peri_hsi2c_user_p) = { "oscclk", "dout_peri_ip" };
PNAME(mout_peri_spi_user_p) = { "oscclk", "dout_peri_ip" };
static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
MUX(CLK_MOUT_PERI_BUS_USER, "mout_peri_bus_user", mout_peri_bus_user_p,
PLL_CON0_MUX_CLKCMU_PERI_BUS_USER, 4, 1),
MUX(CLK_MOUT_PERI_UART_USER, "mout_peri_uart_user",
mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1),
MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user",
mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1),
MUX_F(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user",
mout_peri_spi_user_p, PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1,
CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_div_clock peri_div_clks[] __initconst = {
DIV(CLK_DOUT_PERI_HSI2C0, "dout_peri_hsi2c0", "gout_peri_hsi2c0",
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0, 0, 5),
DIV(CLK_DOUT_PERI_HSI2C1, "dout_peri_hsi2c1", "gout_peri_hsi2c1",
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5),
DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2",
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5),
DIV_F(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERI_HSI2C0, "gout_peri_hsi2c0", "mout_peri_hsi2c_user",
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0, 21, 0, 0),
GATE(CLK_GOUT_PERI_HSI2C1, "gout_peri_hsi2c1", "mout_peri_hsi2c_user",
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1, 21, 0, 0),
GATE(CLK_GOUT_PERI_HSI2C2, "gout_peri_hsi2c2", "mout_peri_hsi2c_user",
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2, 21, 0, 0),
GATE(CLK_GOUT_HSI2C0_IPCLK, "gout_hsi2c0_ipclk", "dout_peri_hsi2c0",
CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK, 21, 0, 0),
GATE(CLK_GOUT_HSI2C0_PCLK, "gout_hsi2c0_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_HSI2C1_IPCLK, "gout_hsi2c1_ipclk", "dout_peri_hsi2c1",
CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK, 21, 0, 0),
GATE(CLK_GOUT_HSI2C1_PCLK, "gout_hsi2c1_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK, 21, 0, 0),
GATE(CLK_GOUT_HSI2C2_IPCLK, "gout_hsi2c2_ipclk", "dout_peri_hsi2c2",
CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK, 21, 0, 0),
GATE(CLK_GOUT_HSI2C2_PCLK, "gout_hsi2c2_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I2C0_PCLK, "gout_i2c0_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I2C1_PCLK, "gout_i2c1_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I2C2_PCLK, "gout_i2c2_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I2C3_PCLK, "gout_i2c3_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I2C4_PCLK, "gout_i2c4_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I2C5_PCLK, "gout_i2c5_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK, 21, 0, 0),
GATE(CLK_GOUT_I2C6_PCLK, "gout_i2c6_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK, 21, 0, 0),
GATE(CLK_GOUT_MCT_PCLK, "gout_mct_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_MCT_PCLK, 21, 0, 0),
GATE(CLK_GOUT_PWM_MOTOR_PCLK, "gout_pwm_motor_pclk",
"mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0",
CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
"mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK, 21, 0, 0),
GATE(CLK_GOUT_UART_IPCLK, "gout_uart_ipclk", "mout_peri_uart_user",
CLK_CON_GAT_GOUT_PERI_UART_IPCLK, 21, 0, 0),
GATE(CLK_GOUT_UART_PCLK, "gout_uart_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_UART_PCLK, 21, 0, 0),
GATE(CLK_GOUT_WDT0_PCLK, "gout_wdt0_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_WDT1_PCLK, "gout_wdt1_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK, 21, 0, 0),
/* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_GPIO_PERI_PCLK, "gout_gpio_peri_pclk",
"mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_BUSIF_TMU_PCLK, "gout_busif_tmu_pclk",
"mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info peri_cmu_info __initconst = {
.mux_clks = peri_mux_clks,
.nr_mux_clks = ARRAY_SIZE(peri_mux_clks),
.div_clks = peri_div_clks,
.nr_div_clks = ARRAY_SIZE(peri_div_clks),
.gate_clks = peri_gate_clks,
.nr_gate_clks = ARRAY_SIZE(peri_gate_clks),
.nr_clk_ids = CLKS_NR_PERI,
.clk_regs = peri_clk_regs,
.nr_clk_regs = ARRAY_SIZE(peri_clk_regs),
.clk_name = "dout_peri_bus",
};
static void __init exynos850_cmu_peri_init(struct device_node *np)
{
exynos_arm64_register_cmu(NULL, np, &peri_cmu_info);
}
/* Register CMU_PERI early, as it's needed for MCT timer */
CLK_OF_DECLARE(exynos850_cmu_peri, "samsung,exynos850-cmu-peri",
exynos850_cmu_peri_init);
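/*
 * CLK_OF_DECLARE() hooks this CMU into of_clk_init(), which runs during
 * early boot, well before the platform bus probes any devices, so the
 * MCT clocksource can acquire its PCLK at timer-init time.
 */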
/* ---- CMU_CORE ------------------------------------------------------------ */
/* Register Offset definitions for CMU_CORE (0x12000000) */
#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0600
#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0610
#define PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER 0x0620
#define PLL_CON0_MUX_CLKCMU_CORE_SSS_USER 0x0630
#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2038
#define CLK_CON_GAT_GOUT_CORE_GIC_CLK 0x2040
#define CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK 0x2044
#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK 0x20e8
#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN 0x20ec
#define CLK_CON_GAT_GOUT_CORE_PDMA_ACLK 0x20f0
#define CLK_CON_GAT_GOUT_CORE_SPDMA_ACLK 0x2124
#define CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK 0x2128
#define CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK 0x212c
#define CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK 0x2130
static const unsigned long core_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
PLL_CON0_MUX_CLKCMU_CORE_CCI_USER,
PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER,
PLL_CON0_MUX_CLKCMU_CORE_SSS_USER,
CLK_CON_MUX_MUX_CLK_CORE_GIC,
CLK_CON_DIV_DIV_CLK_CORE_BUSP,
CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
CLK_CON_GAT_GOUT_CORE_GIC_CLK,
CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK,
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK,
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
CLK_CON_GAT_GOUT_CORE_PDMA_ACLK,
CLK_CON_GAT_GOUT_CORE_SPDMA_ACLK,
CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK,
CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK,
CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK,
};
/* List of parent clocks for Muxes in CMU_CORE */
PNAME(mout_core_bus_user_p) = { "oscclk", "dout_core_bus" };
PNAME(mout_core_cci_user_p) = { "oscclk", "dout_core_cci" };
PNAME(mout_core_mmc_embd_user_p) = { "oscclk", "dout_core_mmc_embd" };
PNAME(mout_core_sss_user_p) = { "oscclk", "dout_core_sss" };
PNAME(mout_core_gic_p) = { "dout_core_busp", "oscclk" };
static const struct samsung_mux_clock core_mux_clks[] __initconst = {
MUX(CLK_MOUT_CORE_BUS_USER, "mout_core_bus_user", mout_core_bus_user_p,
PLL_CON0_MUX_CLKCMU_CORE_BUS_USER, 4, 1),
MUX(CLK_MOUT_CORE_CCI_USER, "mout_core_cci_user", mout_core_cci_user_p,
PLL_CON0_MUX_CLKCMU_CORE_CCI_USER, 4, 1),
MUX_F(CLK_MOUT_CORE_MMC_EMBD_USER, "mout_core_mmc_embd_user",
mout_core_mmc_embd_user_p, PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER,
4, 1, CLK_SET_RATE_PARENT, 0),
MUX(CLK_MOUT_CORE_SSS_USER, "mout_core_sss_user", mout_core_sss_user_p,
PLL_CON0_MUX_CLKCMU_CORE_SSS_USER, 4, 1),
MUX(CLK_MOUT_CORE_GIC, "mout_core_gic", mout_core_gic_p,
CLK_CON_MUX_MUX_CLK_CORE_GIC, 0, 1),
};
static const struct samsung_div_clock core_div_clks[] __initconst = {
DIV(CLK_DOUT_CORE_BUSP, "dout_core_busp", "mout_core_bus_user",
CLK_CON_DIV_DIV_CLK_CORE_BUSP, 0, 2),
};
static const struct samsung_gate_clock core_gate_clks[] __initconst = {
/* CCI (interconnect) clock must be always running */
GATE(CLK_GOUT_CCI_ACLK, "gout_cci_aclk", "mout_core_cci_user",
CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, CLK_IS_CRITICAL, 0),
/* GIC (interrupt controller) clock must be always running */
GATE(CLK_GOUT_GIC_CLK, "gout_gic_clk", "mout_core_gic",
CLK_CON_GAT_GOUT_CORE_GIC_CLK, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_MMC_EMBD_ACLK, "gout_mmc_embd_aclk", "dout_core_busp",
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_MMC_EMBD_SDCLKIN, "gout_mmc_embd_sdclkin",
"mout_core_mmc_embd_user", CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PDMA_CORE_ACLK, "gout_pdma_core_aclk",
"mout_core_bus_user", CLK_CON_GAT_GOUT_CORE_PDMA_ACLK, 21, 0, 0),
GATE(CLK_GOUT_SPDMA_CORE_ACLK, "gout_spdma_core_aclk",
"mout_core_bus_user", CLK_CON_GAT_GOUT_CORE_SPDMA_ACLK, 21, 0, 0),
GATE(CLK_GOUT_SSS_ACLK, "gout_sss_aclk", "mout_core_sss_user",
CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_SSS_PCLK, "gout_sss_pclk", "dout_core_busp",
CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK, 21, 0, 0),
/* TODO: Should be enabled in GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_GPIO_CORE_PCLK, "gout_gpio_core_pclk", "dout_core_busp",
CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_SYSREG_CORE_PCLK, "gout_sysreg_core_pclk",
"dout_core_busp",
CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info core_cmu_info __initconst = {
.mux_clks = core_mux_clks,
.nr_mux_clks = ARRAY_SIZE(core_mux_clks),
.div_clks = core_div_clks,
.nr_div_clks = ARRAY_SIZE(core_div_clks),
.gate_clks = core_gate_clks,
.nr_gate_clks = ARRAY_SIZE(core_gate_clks),
.nr_clk_ids = CLKS_NR_CORE,
.clk_regs = core_clk_regs,
.nr_clk_regs = ARRAY_SIZE(core_clk_regs),
.clk_name = "dout_core_bus",
};
/* ---- CMU_DPU ------------------------------------------------------------- */
/* Register Offset definitions for CMU_DPU (0x13000000) */
#define PLL_CON0_MUX_CLKCMU_DPU_USER 0x0600
#define CLK_CON_DIV_DIV_CLK_DPU_BUSP 0x1800
#define CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK 0x2004
#define CLK_CON_GAT_GOUT_DPU_ACLK_DECON0 0x2010
#define CLK_CON_GAT_GOUT_DPU_ACLK_DMA 0x2014
#define CLK_CON_GAT_GOUT_DPU_ACLK_DPP 0x2018
#define CLK_CON_GAT_GOUT_DPU_PPMU_ACLK 0x2028
#define CLK_CON_GAT_GOUT_DPU_PPMU_PCLK 0x202c
#define CLK_CON_GAT_GOUT_DPU_SMMU_CLK 0x2038
#define CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK 0x203c
static const unsigned long dpu_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_DPU_USER,
CLK_CON_DIV_DIV_CLK_DPU_BUSP,
CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK,
CLK_CON_GAT_GOUT_DPU_ACLK_DECON0,
CLK_CON_GAT_GOUT_DPU_ACLK_DMA,
CLK_CON_GAT_GOUT_DPU_ACLK_DPP,
CLK_CON_GAT_GOUT_DPU_PPMU_ACLK,
CLK_CON_GAT_GOUT_DPU_PPMU_PCLK,
CLK_CON_GAT_GOUT_DPU_SMMU_CLK,
CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK,
};
/* List of parent clocks for Muxes in CMU_DPU */
PNAME(mout_dpu_user_p) = { "oscclk", "dout_dpu" };
static const struct samsung_mux_clock dpu_mux_clks[] __initconst = {
MUX(CLK_MOUT_DPU_USER, "mout_dpu_user", mout_dpu_user_p,
PLL_CON0_MUX_CLKCMU_DPU_USER, 4, 1),
};
static const struct samsung_div_clock dpu_div_clks[] __initconst = {
DIV(CLK_DOUT_DPU_BUSP, "dout_dpu_busp", "mout_dpu_user",
CLK_CON_DIV_DIV_CLK_DPU_BUSP, 0, 3),
};
static const struct samsung_gate_clock dpu_gate_clks[] __initconst = {
/* TODO: Should be enabled in DSIM driver */
GATE(CLK_GOUT_DPU_CMU_DPU_PCLK, "gout_dpu_cmu_dpu_pclk",
"dout_dpu_busp",
CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_DPU_DECON0_ACLK, "gout_dpu_decon0_aclk", "mout_dpu_user",
CLK_CON_GAT_GOUT_DPU_ACLK_DECON0, 21, 0, 0),
GATE(CLK_GOUT_DPU_DMA_ACLK, "gout_dpu_dma_aclk", "mout_dpu_user",
CLK_CON_GAT_GOUT_DPU_ACLK_DMA, 21, 0, 0),
GATE(CLK_GOUT_DPU_DPP_ACLK, "gout_dpu_dpp_aclk", "mout_dpu_user",
CLK_CON_GAT_GOUT_DPU_ACLK_DPP, 21, 0, 0),
GATE(CLK_GOUT_DPU_PPMU_ACLK, "gout_dpu_ppmu_aclk", "mout_dpu_user",
CLK_CON_GAT_GOUT_DPU_PPMU_ACLK, 21, 0, 0),
GATE(CLK_GOUT_DPU_PPMU_PCLK, "gout_dpu_ppmu_pclk", "dout_dpu_busp",
CLK_CON_GAT_GOUT_DPU_PPMU_PCLK, 21, 0, 0),
GATE(CLK_GOUT_DPU_SMMU_CLK, "gout_dpu_smmu_clk", "mout_dpu_user",
CLK_CON_GAT_GOUT_DPU_SMMU_CLK, 21, 0, 0),
GATE(CLK_GOUT_DPU_SYSREG_PCLK, "gout_dpu_sysreg_pclk", "dout_dpu_busp",
CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info dpu_cmu_info __initconst = {
.mux_clks = dpu_mux_clks,
.nr_mux_clks = ARRAY_SIZE(dpu_mux_clks),
.div_clks = dpu_div_clks,
.nr_div_clks = ARRAY_SIZE(dpu_div_clks),
.gate_clks = dpu_gate_clks,
.nr_gate_clks = ARRAY_SIZE(dpu_gate_clks),
.nr_clk_ids = CLKS_NR_DPU,
.clk_regs = dpu_clk_regs,
.nr_clk_regs = ARRAY_SIZE(dpu_clk_regs),
.clk_name = "dout_dpu",
};
/* ---- platform_driver ----------------------------------------------------- */
static int __init exynos850_cmu_probe(struct platform_device *pdev)
{
const struct samsung_cmu_info *info;
struct device *dev = &pdev->dev;
info = of_device_get_match_data(dev);
exynos_arm64_register_cmu(dev, dev->of_node, info);
return 0;
}
static const struct of_device_id exynos850_cmu_of_match[] = {
{
.compatible = "samsung,exynos850-cmu-apm",
.data = &apm_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-aud",
.data = &aud_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-cmgp",
.data = &cmgp_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-g3d",
.data = &g3d_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-hsi",
.data = &hsi_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-is",
.data = &is_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-mfcmscl",
.data = &mfcmscl_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-core",
.data = &core_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-dpu",
.data = &dpu_cmu_info,
}, {
},
};
static struct platform_driver exynos850_cmu_driver __refdata = {
.driver = {
.name = "exynos850-cmu",
.of_match_table = exynos850_cmu_of_match,
.suppress_bind_attrs = true,
},
.probe = exynos850_cmu_probe,
};
static int __init exynos850_cmu_init(void)
{
return platform_driver_register(&exynos850_cmu_driver);
}
core_initcall(exynos850_cmu_init);
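/*
 * Illustrative (hypothetical) consumer sketch, not taken from a real
 * board file: a device node references these clocks through the IDs
 * exported in dt-bindings/clock/exynos850.h, e.g.
 *
 *	serial_0: serial@... {
 *		clocks = <&cmu_peri CLK_GOUT_UART_PCLK>,
 *			 <&cmu_peri CLK_GOUT_UART_IPCLK>;
 *		clock-names = "uart", "clk_uart_baud0";
 *	};
 */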
|
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2018 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_STLB_MASKS_H_
#define ASIC_REG_STLB_MASKS_H_
/*
*****************************************
* STLB (Prototype: STLB)
*****************************************
*/
/* STLB_CACHE_INV */
#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF
#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8
#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00
/* STLB_CACHE_INV_BASE_39_8 */
#define STLB_CACHE_INV_BASE_39_8_PA_SHIFT 0
#define STLB_CACHE_INV_BASE_39_8_PA_MASK 0xFFFFFFFF
/* STLB_CACHE_INV_BASE_49_40 */
#define STLB_CACHE_INV_BASE_49_40_PA_SHIFT 0
#define STLB_CACHE_INV_BASE_49_40_PA_MASK 0x3FF
/* STLB_STLB_FEATURE_EN */
#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_SHIFT 0
#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_MASK 0x1
#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_SHIFT 1
#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_MASK 0x2
#define STLB_STLB_FEATURE_EN_LOOKUP_EN_SHIFT 2
#define STLB_STLB_FEATURE_EN_LOOKUP_EN_MASK 0x4
#define STLB_STLB_FEATURE_EN_BYPASS_SHIFT 3
#define STLB_STLB_FEATURE_EN_BYPASS_MASK 0x8
#define STLB_STLB_FEATURE_EN_BANK_STOP_SHIFT 4
#define STLB_STLB_FEATURE_EN_BANK_STOP_MASK 0x10
#define STLB_STLB_FEATURE_EN_TRACE_EN_SHIFT 5
#define STLB_STLB_FEATURE_EN_TRACE_EN_MASK 0x20
#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_SHIFT 6
#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK 0x40
#define STLB_STLB_FEATURE_EN_CACHING_EN_SHIFT 7
#define STLB_STLB_FEATURE_EN_CACHING_EN_MASK 0xF80
/* STLB_STLB_AXI_CACHE */
#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_SHIFT 0
#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_MASK 0xF
#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_SHIFT 4
#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_MASK 0xF0
#define STLB_STLB_AXI_CACHE_INV_ARCACHE_SHIFT 8
#define STLB_STLB_AXI_CACHE_INV_ARCACHE_MASK 0xF00
/* STLB_HOP_CONFIGURATION */
#define STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT 0
#define STLB_HOP_CONFIGURATION_FIRST_HOP_MASK 0x7
#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SHIFT 4
#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_MASK 0x70
#define STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT 8
#define STLB_HOP_CONFIGURATION_LAST_HOP_MASK 0x700
/* STLB_LINK_LIST_LOOKUP_MASK_49_32 */
#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_SHIFT 0
#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_MASK 0x3FFFF
/* STLB_LINK_LIST_LOOKUP_MASK_31_0 */
#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_SHIFT 0
#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_MASK 0xFFFFFFFF
/* STLB_LINK_LIST */
#define STLB_LINK_LIST_CLEAR_SHIFT 0
#define STLB_LINK_LIST_CLEAR_MASK 0x1
#define STLB_LINK_LIST_EN_SHIFT 1
#define STLB_LINK_LIST_EN_MASK 0x2
/* STLB_INV_ALL_START */
#define STLB_INV_ALL_START_R_SHIFT 0
#define STLB_INV_ALL_START_R_MASK 0x1
/* STLB_INV_ALL_SET */
#define STLB_INV_ALL_SET_R_SHIFT 0
#define STLB_INV_ALL_SET_R_MASK 0xFF
/* STLB_INV_PS */
#define STLB_INV_PS_R_SHIFT 0
#define STLB_INV_PS_R_MASK 0x3
/* STLB_INV_CONSUMER_INDEX */
#define STLB_INV_CONSUMER_INDEX_R_SHIFT 0
#define STLB_INV_CONSUMER_INDEX_R_MASK 0xFF
/* STLB_INV_HIT_COUNT */
#define STLB_INV_HIT_COUNT_R_SHIFT 0
#define STLB_INV_HIT_COUNT_R_MASK 0x7FF
/* STLB_INV_SET */
#define STLB_INV_SET_R_SHIFT 0
#define STLB_INV_SET_R_MASK 0xFF
/* STLB_SRAM_INIT */
#define STLB_SRAM_INIT_BUSY_TAG_SHIFT 0
#define STLB_SRAM_INIT_BUSY_TAG_MASK 0x3
#define STLB_SRAM_INIT_BUSY_SLICE_SHIFT 2
#define STLB_SRAM_INIT_BUSY_SLICE_MASK 0xC
#define STLB_SRAM_INIT_BUSY_DATA_SHIFT 4
#define STLB_SRAM_INIT_BUSY_DATA_MASK 0x10
#endif /* ASIC_REG_STLB_MASKS_H_ */
|
// SPDX-License-Identifier: GPL-2.0
/*
* Industrial I/O driver for Microchip digital potentiometers
* Copyright (c) 2018 Axentia Technologies AB
* Author: Peter Rosin <[email protected]>
*
* Datasheet: http://www.microchip.com/downloads/en/DeviceDoc/22147a.pdf
*
* DEVID #Wipers #Positions Resistor Opts (kOhm)
* mcp4017 1 128 5, 10, 50, 100
* mcp4018 1 128 5, 10, 50, 100
* mcp4019 1 128 5, 10, 50, 100
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#define MCP4018_WIPER_MAX 127
struct mcp4018_cfg {
int kohms;
};
enum mcp4018_type {
MCP4018_502,
MCP4018_103,
MCP4018_503,
MCP4018_104,
};
static const struct mcp4018_cfg mcp4018_cfg[] = {
[MCP4018_502] = { .kohms = 5, },
[MCP4018_103] = { .kohms = 10, },
[MCP4018_503] = { .kohms = 50, },
[MCP4018_104] = { .kohms = 100, },
};
struct mcp4018_data {
struct i2c_client *client;
const struct mcp4018_cfg *cfg;
};
static const struct iio_chan_spec mcp4018_channel = {
.type = IIO_RESISTANCE,
.indexed = 1,
.output = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
};
static int mcp4018_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mcp4018_data *data = iio_priv(indio_dev);
s32 ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = i2c_smbus_read_byte(data->client);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
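	/*
	 * Scale is the full-scale resistance in ohms (kohms * 1000)
	 * divided by the maximum wiper code, i.e. ohms per wiper step,
	 * reported as a fraction.
	 */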
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = MCP4018_WIPER_MAX;
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int mcp4018_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct mcp4018_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val > MCP4018_WIPER_MAX || val < 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
return i2c_smbus_write_byte(data->client, val);
}
static const struct iio_info mcp4018_info = {
.read_raw = mcp4018_read_raw,
.write_raw = mcp4018_write_raw,
};
#define MCP4018_ID_TABLE(_name, cfg) { \
.name = _name, \
.driver_data = (kernel_ulong_t)&mcp4018_cfg[cfg], \
}
static const struct i2c_device_id mcp4018_id[] = {
MCP4018_ID_TABLE("mcp4017-502", MCP4018_502),
MCP4018_ID_TABLE("mcp4017-103", MCP4018_103),
MCP4018_ID_TABLE("mcp4017-503", MCP4018_503),
MCP4018_ID_TABLE("mcp4017-104", MCP4018_104),
MCP4018_ID_TABLE("mcp4018-502", MCP4018_502),
MCP4018_ID_TABLE("mcp4018-103", MCP4018_103),
MCP4018_ID_TABLE("mcp4018-503", MCP4018_503),
MCP4018_ID_TABLE("mcp4018-104", MCP4018_104),
MCP4018_ID_TABLE("mcp4019-502", MCP4018_502),
MCP4018_ID_TABLE("mcp4019-103", MCP4018_103),
MCP4018_ID_TABLE("mcp4019-503", MCP4018_503),
MCP4018_ID_TABLE("mcp4019-104", MCP4018_104),
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, mcp4018_id);
#define MCP4018_COMPATIBLE(of_compatible, cfg) { \
.compatible = of_compatible, \
.data = &mcp4018_cfg[cfg], \
}
static const struct of_device_id mcp4018_of_match[] = {
MCP4018_COMPATIBLE("microchip,mcp4017-502", MCP4018_502),
MCP4018_COMPATIBLE("microchip,mcp4017-103", MCP4018_103),
MCP4018_COMPATIBLE("microchip,mcp4017-503", MCP4018_503),
MCP4018_COMPATIBLE("microchip,mcp4017-104", MCP4018_104),
MCP4018_COMPATIBLE("microchip,mcp4018-502", MCP4018_502),
MCP4018_COMPATIBLE("microchip,mcp4018-103", MCP4018_103),
MCP4018_COMPATIBLE("microchip,mcp4018-503", MCP4018_503),
MCP4018_COMPATIBLE("microchip,mcp4018-104", MCP4018_104),
MCP4018_COMPATIBLE("microchip,mcp4019-502", MCP4018_502),
MCP4018_COMPATIBLE("microchip,mcp4019-103", MCP4018_103),
MCP4018_COMPATIBLE("microchip,mcp4019-503", MCP4018_503),
MCP4018_COMPATIBLE("microchip,mcp4019-104", MCP4018_104),
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mcp4018_of_match);
static int mcp4018_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct mcp4018_data *data;
struct iio_dev *indio_dev;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE)) {
dev_err(dev, "SMBUS Byte transfers not supported\n");
return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
data->cfg = i2c_get_match_data(client);
indio_dev->info = &mcp4018_info;
indio_dev->channels = &mcp4018_channel;
indio_dev->num_channels = 1;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static struct i2c_driver mcp4018_driver = {
.driver = {
.name = "mcp4018",
.of_match_table = mcp4018_of_match,
},
.probe = mcp4018_probe,
.id_table = mcp4018_id,
};
module_i2c_driver(mcp4018_driver);
MODULE_AUTHOR("Peter Rosin <[email protected]>");
MODULE_DESCRIPTION("MCP4018 digital potentiometer");
MODULE_LICENSE("GPL v2");
|
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_OOO_H
#define _QED_OOO_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "qed.h"
#define QED_MAX_NUM_ISLES 256
#define QED_MAX_NUM_OOO_HISTORY_ENTRIES 512
#define QED_OOO_LEFT_BUF 0
#define QED_OOO_RIGHT_BUF 1
struct qed_ooo_buffer {
struct list_head list_entry;
void *rx_buffer_virt_addr;
dma_addr_t rx_buffer_phys_addr;
u32 rx_buffer_size;
u16 packet_length;
u16 parse_flags;
u16 vlan;
u8 placement_offset;
};
struct qed_ooo_isle {
struct list_head list_entry;
struct list_head buffers_list;
};
struct qed_ooo_archipelago {
struct list_head isles_list;
};
struct qed_ooo_history {
struct ooo_opaque *p_cqes;
u32 head_idx;
u32 num_of_cqes;
};
struct qed_ooo_info {
struct list_head free_buffers_list;
struct list_head ready_buffers_list;
struct list_head free_isles_list;
struct qed_ooo_archipelago *p_archipelagos_mem;
struct qed_ooo_isle *p_isles_mem;
struct qed_ooo_history ooo_history;
u32 cur_isles_number;
u32 max_isles_number;
u32 gen_isles_number;
u16 max_num_archipelagos;
u16 cid_base;
};
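/*
 * Terminology sketch (inferred from the API below): a contiguous run of
 * out-of-order Rx buffers forms an "isle"; all isles of one connection
 * (cid) form an "archipelago". Isles are joined once the gap between
 * two runs is filled.
 */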
#if IS_ENABLED(CONFIG_QED_OOO)
void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
int qed_ooo_alloc(struct qed_hwfn *p_hwfn);
void qed_ooo_setup(struct qed_hwfn *p_hwfn);
void qed_ooo_free(struct qed_hwfn *p_hwfn);
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid);
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer);
struct qed_ooo_buffer *
qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer, u8 on_tail);
struct qed_ooo_buffer *
qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size);
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle, struct qed_ooo_buffer *p_buffer);
void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle,
struct qed_ooo_buffer *p_buffer, u8 buffer_side);
void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid,
u8 left_isle);
#else /* !IS_ENABLED(CONFIG_QED_OOO) */
static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe) {}
static inline int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn) {}
static inline void qed_ooo_free(struct qed_hwfn *p_hwfn) {}
static inline void
qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid) {}
static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{}
static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer) {}
static inline struct qed_ooo_buffer *
qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) { return NULL; }
static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer,
u8 on_tail) {}
static inline struct qed_ooo_buffer *
qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info) { return NULL; }
static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size) {}
static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct qed_ooo_buffer *p_buffer) {}
static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 ooo_isle,
struct qed_ooo_buffer *p_buffer,
u8 buffer_side) {}
static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid,
u8 left_isle) {}
#endif /* IS_ENABLED(CONFIG_QED_OOO) */
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
*
* Note: This driver is a cleanroom reimplementation based on reverse
* engineered documentation written by Carl-Daniel Hailfinger
* and Andrew de Quincey.
*
* NVIDIA, nForce and other NVIDIA marks are trademarks or registered
* trademarks of NVIDIA Corporation in the United States and other
* countries.
*
* Copyright (C) 2003,4,5 Manfred Spraul
* Copyright (C) 2004 Andrew de Quincey (wol support)
* Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
* IRQ rate fixes, bigendian fixes, cleanups, verification)
* Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
* This means recovery from netif_stop_queue only happens if the hw timer
* interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
* and the timer is active in the IRQMask, or if a rx packet arrives by chance.
* If your hardware reliably generates tx done interrupts, then you can remove
* DEV_NEED_TIMERIRQ from the driver_data flags.
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#include <linux/io.h>
#include <asm/irq.h>
#define TX_WORK_PER_LOOP NAPI_POLL_WEIGHT
#define RX_WORK_PER_LOOP NAPI_POLL_WEIGHT
/*
* Hardware access:
*/
#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
enum {
NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
/*
* NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
* NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
*/
NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
NvRegMSIMap0 = 0x020,
NvRegMSIMap1 = 0x024,
NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01
NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10
NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01
NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff
NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
NvRegMacAddrA = 0xA8,
NvRegMacAddrB = 0xAC,
NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
NvRegMulticastAddrB = 0xB4,
NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff
NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12
NvRegTxRingPhysAddr = 0x100,
NvRegRxRingPhysAddr = 0x104,
NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008
NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
NvRegMIIData = 0x194,
NvRegTxUnicast = 0x1a0,
NvRegTxMulticast = 0x1a4,
NvRegTxBroadcast = 0x1a8,
NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
NvRegTxCnt = 0x280,
NvRegTxZeroReXmt = 0x284,
NvRegTxOneReXmt = 0x288,
NvRegTxManyReXmt = 0x28c,
NvRegTxLateCol = 0x290,
NvRegTxUnderflow = 0x294,
NvRegTxLossCarrier = 0x298,
NvRegTxExcessDef = 0x29c,
NvRegTxRetryErr = 0x2a0,
NvRegRxFrameErr = 0x2a4,
NvRegRxExtraByte = 0x2a8,
NvRegRxLateCol = 0x2ac,
NvRegRxRunt = 0x2b0,
NvRegRxFrameTooLong = 0x2b4,
NvRegRxOverflow = 0x2b8,
NvRegRxFCSErr = 0x2bc,
NvRegRxFrameAlignErr = 0x2c0,
NvRegRxLenErr = 0x2c4,
NvRegRxUnicast = 0x2c8,
NvRegRxMulticast = 0x2cc,
NvRegRxBroadcast = 0x2d0,
NvRegTxDef = 0x2d4,
NvRegTxFrame = 0x2d8,
NvRegRxCnt = 0x2dc,
NvRegTxPause = 0x2e0,
NvRegRxPause = 0x2e4,
NvRegRxDropFrame = 0x2e8,
NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
NvRegMSIXMap0 = 0x3e0,
NvRegMSIXMap1 = 0x3e4,
NvRegMSIXIrqStatus = 0x3f0,
NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};
/* Big endian: should work, but is untested */
struct ring_desc {
__le32 buf;
__le32 flaglen;
};
struct ring_desc_ex {
__le32 bufhigh;
__le32 buflow;
__le32 txvlan;
__le32 flaglen;
};
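/*
 * Which layout applies depends on the descriptor version (see the
 * desc_ver comment below): DESC_VER_1/2 use the two-word ring_desc,
 * while DESC_VER_3 uses the extended ring_desc_ex with 64-bit buffer
 * addresses and a vlan word, hence the union.
 */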
union ring_type {
struct ring_desc *orig;
struct ring_desc_ex *ex;
};
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)
#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)
#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64
#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)
#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)
/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)
/*
* desc_ver values:
* The nic supports three different descriptor types:
* - DESC_VER_1: Original
* - DESC_VER_2: support for jumbo frames.
* - DESC_VER_3: 64-bit format.
*/
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003
#define PHY_GIGABIT 0x0100
#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2
#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100
#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080
#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff
#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2
#define NV_TX_LIMIT_COUNT 16
#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048
/* statistics */
struct nv_ethtool_str {
char name[ETH_GSTRING_LEN];
};
static const struct nv_ethtool_str nv_estats_str[] = {
{ "tx_bytes" }, /* includes Ethernet FCS CRC */
{ "tx_zero_rexmt" },
{ "tx_one_rexmt" },
{ "tx_many_rexmt" },
{ "tx_late_collision" },
{ "tx_fifo_errors" },
{ "tx_carrier_errors" },
{ "tx_excess_deferral" },
{ "tx_retry_error" },
{ "rx_frame_error" },
{ "rx_extra_byte" },
{ "rx_late_collision" },
{ "rx_runt" },
{ "rx_frame_too_long" },
{ "rx_over_errors" },
{ "rx_crc_errors" },
{ "rx_frame_align_error" },
{ "rx_length_error" },
{ "rx_unicast" },
{ "rx_multicast" },
{ "rx_broadcast" },
{ "rx_packets" },
{ "rx_errors_total" },
{ "tx_errors_total" },
/* version 2 stats */
{ "tx_deferral" },
{ "tx_packets" },
{ "rx_bytes" }, /* includes Ethernet FCS CRC */
{ "tx_pause" },
{ "rx_pause" },
{ "rx_drop_frame" },
/* version 3 stats */
{ "tx_unicast" },
{ "tx_multicast" },
{ "tx_broadcast" }
};
struct nv_ethtool_stats {
u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
u64 tx_zero_rexmt;
u64 tx_one_rexmt;
u64 tx_many_rexmt;
u64 tx_late_collision;
u64 tx_fifo_errors;
u64 tx_carrier_errors;
u64 tx_excess_deferral;
u64 tx_retry_error;
u64 rx_frame_error;
u64 rx_extra_byte;
u64 rx_late_collision;
u64 rx_runt;
u64 rx_frame_too_long;
u64 rx_over_errors;
u64 rx_crc_errors;
u64 rx_frame_align_error;
u64 rx_length_error;
u64 rx_unicast;
u64 rx_multicast;
u64 rx_broadcast;
u64 rx_packets; /* should be ifconfig->rx_packets */
u64 rx_errors_total;
u64 tx_errors_total;
/* version 2 stats */
u64 tx_deferral;
u64 tx_packets; /* should be ifconfig->tx_packets */
u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */
u64 tx_pause;
u64 rx_pause;
u64 rx_drop_frame;
/* version 3 stats */
u64 tx_unicast;
u64 tx_multicast;
u64 tx_broadcast;
};
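/*
 * The per-version counts below follow from struct nv_ethtool_stats:
 * V3 covers every field, V2 drops the three "version 3" counters
 * (tx_unicast/tx_multicast/tx_broadcast), and V1 additionally drops
 * the six "version 2" counters.
 */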
#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4
static const struct nv_ethtool_str nv_etests_str[] = {
{ "link (online/offline)" },
{ "register (offline) " },
{ "interrupt (offline) " },
{ "loopback (offline) " }
};
struct register_test {
__u32 reg;
__u32 mask;
};
static const struct register_test nv_registers_test[] = {
{ NvRegUnknownSetupReg6, 0x01 },
{ NvRegMisc1, 0x03c },
{ NvRegOffloadConfig, 0x03ff },
{ NvRegMulticastAddrA, 0xffffffff },
{ NvRegTxWatermark, 0x0ff },
{ NvRegWakeUpFlags, 0x07777 },
{ 0, 0 }
};
struct nv_skb_map {
struct sk_buff *skb;
dma_addr_t dma;
unsigned int dma_len:31;
unsigned int dma_single:1;
struct ring_desc_ex *first_tx_desc;
struct nv_skb_map *next_tx_ctx;
};
struct nv_txrx_stats {
u64 stat_rx_packets;
u64 stat_rx_bytes; /* not always available in HW */
u64 stat_rx_missed_errors;
u64 stat_rx_dropped;
u64 stat_tx_packets; /* not always available in HW */
u64 stat_tx_bytes;
u64 stat_tx_dropped;
};
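/*
 * Per-cpu software stat helpers. Callers bracket these with
 * u64_stats_update_begin()/u64_stats_update_end() on the matching
 * swstats syncp.
 */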
#define nv_txrx_stats_inc(member) \
__this_cpu_inc(np->txrx_stats->member)
#define nv_txrx_stats_add(member, count) \
__this_cpu_add(np->txrx_stats->member, (count))
/*
* SMP locking:
* All hardware access under netdev_priv(dev)->lock, except the performance
* critical parts:
* - rx is (pseudo-) lockless: it relies on the single-threading provided
* by the arch code for interrupts.
* - tx setup is lockless: it relies on netif_tx_lock. Actual submission
* needs netdev_priv(dev)->lock :-(
* - set_multicast_list: preparation lockless, relies on netif_tx_lock.
*
* Hardware stats updates are protected by hwstats_lock:
* - updated by nv_do_stats_poll (timer). This is meant to avoid
* integer wraparound in the NIC stats registers, at low frequency
* (0.1 Hz)
* - updated by nv_get_ethtool_stats + nv_get_stats64
*
* Software stats are accessed only through 64b synchronization points
* and are not subject to other synchronization techniques (single
* update thread on the TX or RX paths).
*/
/* in dev: base, irq */
struct fe_priv {
spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
/* hardware stats are updated in syscall and timer */
spinlock_t hwstats_lock;
struct nv_ethtool_stats estats;
int in_shutdown;
u32 linkspeed;
int duplex;
int autoneg;
int fixed_mode;
int phyaddr;
int wolenabled;
unsigned int phy_oui;
unsigned int phy_model;
unsigned int phy_rev;
u16 gigabit;
int intr_test;
int recover_error;
int quiet_count;
/* General data: RO fields */
dma_addr_t ring_addr;
struct pci_dev *pci_dev;
u32 orig_mac[2];
u32 events;
u32 irqmask;
u32 desc_ver;
u32 txrxctl_bits;
u32 vlanctl_bits;
u32 driver_data;
u32 device_id;
u32 register_size;
u32 mac_in_use;
int mgmt_version;
int mgmt_sema;
void __iomem *base;
/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
union ring_type get_rx, put_rx, last_rx;
struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
struct nv_skb_map *last_rx_ctx;
struct nv_skb_map *rx_skb;
union ring_type rx_ring;
unsigned int rx_buf_sz;
unsigned int pkt_limit;
struct timer_list oom_kick;
struct timer_list nic_poll;
struct timer_list stats_poll;
u32 nic_poll_irq;
int rx_ring_size;
/* RX software stats */
struct u64_stats_sync swstats_rx_syncp;
struct nv_txrx_stats __percpu *txrx_stats;
/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
int need_linktimer;
unsigned long link_timeout;
/*
* tx specific fields.
*/
union ring_type get_tx, put_tx, last_tx;
struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
struct nv_skb_map *last_tx_ctx;
struct nv_skb_map *tx_skb;
union ring_type tx_ring;
u32 tx_flags;
int tx_ring_size;
int tx_limit;
u32 tx_pkts_in_progress;
struct nv_skb_map *tx_change_owner;
struct nv_skb_map *tx_end_flip;
int tx_stop;
/* TX software stats */
struct u64_stats_sync swstats_tx_syncp;
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
/* flow control */
u32 pause_flags;
/* power saved state */
u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
/* for different msi-x irq type */
char name_rx[IFNAMSIZ + 3]; /* -rx */
char name_tx[IFNAMSIZ + 3]; /* -tx */
char name_other[IFNAMSIZ + 6]; /* -other */
};
/*
* Maximum number of loops until we assume that a bit in the irq mask
* is stuck. Overridable with module param.
*/
static int max_interrupt_work = 4;
/*
 * Optimization can be throughput mode, cpu mode, or dynamic mode.
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 * Dynamic Mode: Switches between the two based on the observed
 * interrupt load (see the quiet_count handling).
 */
enum {
NV_OPTIMIZATION_MODE_THROUGHPUT,
NV_OPTIMIZATION_MODE_CPU,
NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)].
 * Min = 0, Max = 65535
 */
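/*
 * Example, per the formula above: poll_interval = 97 corresponds to
 * an interrupt period of roughly 97 * 2^10 / 100 ~= 993 microseconds.
 */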
static int poll_interval = -1;
/*
* MSI interrupts
*/
enum {
NV_MSI_INT_DISABLED,
NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;
/*
* MSIX interrupts
*/
enum {
NV_MSIX_INT_DISABLED,
NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;
/*
* DMA 64bit
*/
enum {
NV_DMA_64BIT_DISABLED,
NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
/*
* Debug output control for tx_timeout
*/
static bool debug_tx_timeout = false;
/*
* Crossover Detection
* Realtek 8201 phy + some OEM boards do not work properly.
*/
enum {
NV_CROSSOVER_DETECTION_DISABLED,
NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
/*
* Power down phy when interface is down (persists through reboot;
* older Linux and other OSes may not power it up again)
*/
static int phy_power_down;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
return netdev_priv(dev);
}
static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
return ((struct fe_priv *)netdev_priv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
{
/* force out pending posted writes */
readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
return le32_to_cpu(prd->flaglen)
& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}
static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static bool nv_optimized(struct fe_priv *np)
{
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
return false;
return true;
}
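/*
 * reg_delay: busy-wait, in steps of @delay microseconds up to
 * @delaymax total, until (readl(base + offset) & mask) == target.
 * Returns 0 on success, 1 on timeout.
 */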
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
int delay, int delaymax)
{
u8 __iomem *base = get_hwbase(dev);
pci_push(base);
do {
udelay(delay);
delaymax -= delay;
if (delaymax < 0)
return 1;
} while ((readl(base + offset) & mask) != target);
return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02
static inline u32 dma_low(dma_addr_t addr)
{
return addr;
}
static inline u32 dma_high(dma_addr_t addr)
{
return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */
}
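/*
 * Program the rx/tx descriptor ring base addresses into the NIC.
 * The tx ring sits directly after the rx ring in the same coherent
 * allocation; extended (ex) descriptors also need the high 32 bits.
 */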
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
if (!nv_optimized(np)) {
if (rxtx_flags & NV_SETUP_RX_RING)
writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
if (rxtx_flags & NV_SETUP_TX_RING)
writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
} else {
if (rxtx_flags & NV_SETUP_RX_RING) {
writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
}
if (rxtx_flags & NV_SETUP_TX_RING) {
writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
}
}
}
static void free_rings(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
if (!nv_optimized(np)) {
if (np->rx_ring.orig)
dma_free_coherent(&np->pci_dev->dev,
sizeof(struct ring_desc) *
(np->rx_ring_size +
np->tx_ring_size),
np->rx_ring.orig, np->ring_addr);
} else {
if (np->rx_ring.ex)
dma_free_coherent(&np->pci_dev->dev,
sizeof(struct ring_desc_ex) *
(np->rx_ring_size +
np->tx_ring_size),
np->rx_ring.ex, np->ring_addr);
}
kfree(np->rx_skb);
kfree(np->tx_skb);
}
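/* True only when MSI-X is enabled with more than one vector. */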
static int using_multi_irqs(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))
return 0;
else
return 1;
}
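/*
 * Gate or ungate the tx/rx clocks to save power, but only when we
 * own the MAC and the device exposes power control.
 */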
static void nv_txrx_gate(struct net_device *dev, bool gate)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 powerstate;
if (!np->mac_in_use &&
(np->driver_data & DEV_HAS_POWER_CNTRL)) {
powerstate = readl(base + NvRegPowerState2);
if (gate)
powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
else
powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
writel(powerstate, base + NvRegPowerState2);
}
}
static void nv_enable_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
enable_irq(np->pci_dev->irq);
} else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
}
static void nv_disable_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
disable_irq(np->pci_dev->irq);
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
u8 __iomem *base = get_hwbase(dev);
writel(mask, base + NvRegIrqMask);
}
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
if (np->msi_flags & NV_MSI_X_ENABLED) {
writel(mask, base + NvRegIrqMask);
} else {
if (np->msi_flags & NV_MSI_ENABLED)
writel(0, base + NvRegMSIIrqMask);
writel(0, base + NvRegIrqMask);
}
}
static void nv_napi_enable(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
napi_enable(&np->napi);
}
static void nv_napi_disable(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
napi_disable(&np->napi);
}
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
* Caller must guarantee serialization
*/
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
u8 __iomem *base = get_hwbase(dev);
u32 reg;
int retval;
writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
reg = readl(base + NvRegMIIControl);
if (reg & NVREG_MIICTL_INUSE) {
writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
udelay(NV_MIIBUSY_DELAY);
}
reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
if (value != MII_READ) {
writel(value, base + NvRegMIIData);
reg |= NVREG_MIICTL_WRITE;
}
writel(reg, base + NvRegMIIControl);
if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
retval = -1;
} else if (value != MII_READ) {
/* it was a write operation - fewer failures are detectable */
retval = 0;
} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
retval = -1;
} else {
retval = readl(base + NvRegMIIData);
}
return retval;
}
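/*
 * Assert BMCR_RESET (together with @bmcr_setup) and poll until the
 * PHY deasserts it. Returns 0 on success, -1 on write failure or
 * timeout.
 */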
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
struct fe_priv *np = netdev_priv(dev);
u32 miicontrol;
unsigned int tries = 0;
miicontrol = BMCR_RESET | bmcr_setup;
if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
return -1;
/* wait for 500ms */
msleep(500);
/* must wait till reset is deasserted */
while (miicontrol & BMCR_RESET) {
usleep_range(10000, 20000);
miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
/* FIXME: 100 tries seem excessive */
if (tries++ > 100)
return -1;
}
return 0;
}
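/*
 * Vendor init sequence for the Realtek 8211B: a fixed list of
 * (register, value) MII writes; any failed write aborts with
 * PHY_ERROR.
 */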
static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
static const struct {
int reg;
int init;
} ri[] = {
{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
};
int i;
for (i = 0; i < ARRAY_SIZE(ri); i++) {
if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
return PHY_ERROR;
}
return 0;
}
static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
u32 reg;
u8 __iomem *base = get_hwbase(dev);
u32 powerstate = readl(base + NvRegPowerState2);
/* need to perform hw phy reset */
powerstate |= NVREG_POWERSTATE2_PHY_RESET;
writel(powerstate, base + NvRegPowerState2);
msleep(25);
powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
writel(powerstate, base + NvRegPowerState2);
msleep(25);
reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
reg |= PHY_REALTEK_INIT9;
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
return PHY_ERROR;
reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
if (!(reg & PHY_REALTEK_INIT11)) {
reg |= PHY_REALTEK_INIT11;
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr,
PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
return PHY_ERROR;
return 0;
}
static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
u32 phy_reserved;
if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
phy_reserved = mii_rw(dev, np->phyaddr,
PHY_REALTEK_INIT_REG6, MII_READ);
phy_reserved |= PHY_REALTEK_INIT7;
if (mii_rw(dev, np->phyaddr,
PHY_REALTEK_INIT_REG6, phy_reserved))
return PHY_ERROR;
}
return 0;
}
static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
u32 phy_reserved;
if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
if (mii_rw(dev, np->phyaddr,
PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
return PHY_ERROR;
phy_reserved = mii_rw(dev, np->phyaddr,
PHY_REALTEK_INIT_REG2, MII_READ);
phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
phy_reserved |= PHY_REALTEK_INIT3;
if (mii_rw(dev, np->phyaddr,
PHY_REALTEK_INIT_REG2, phy_reserved))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
return PHY_ERROR;
}
return 0;
}
static int init_cicada(struct net_device *dev, struct fe_priv *np,
u32 phyinterface)
{
u32 phy_reserved;
if (phyinterface & PHY_RGMII) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
return PHY_ERROR;
phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
phy_reserved |= PHY_CICADA_INIT5;
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
phy_reserved |= PHY_CICADA_INIT6;
if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
return PHY_ERROR;
return 0;
}
static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
u32 phy_reserved;
if (mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
return PHY_ERROR;
phy_reserved = mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG4, MII_READ);
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
return PHY_ERROR;
phy_reserved = mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG3, MII_READ);
phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
phy_reserved |= PHY_VITESSE_INIT3;
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
return PHY_ERROR;
phy_reserved = mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG4, MII_READ);
phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
phy_reserved |= PHY_VITESSE_INIT3;
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
return PHY_ERROR;
phy_reserved = mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG3, MII_READ);
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
return PHY_ERROR;
phy_reserved = mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG4, MII_READ);
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
return PHY_ERROR;
phy_reserved = mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG3, MII_READ);
phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
phy_reserved |= PHY_VITESSE_INIT8;
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
return PHY_ERROR;
if (mii_rw(dev, np->phyaddr,
PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
return PHY_ERROR;
return 0;
}
static int phy_init(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 phyinterface;
u32 mii_status, mii_control, mii_control_1000, reg;
/* phy errata for E3016 phy */
if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
reg &= ~PHY_MARVELL_E3016_INITMASK;
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
netdev_info(dev, "%s: phy write to errata reg failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
}
if (np->phy_oui == PHY_OUI_REALTEK) {
if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
np->phy_rev == PHY_REV_REALTEK_8211B) {
if (init_realtek_8211b(dev, np)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
np->phy_rev == PHY_REV_REALTEK_8211C) {
if (init_realtek_8211c(dev, np)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
if (init_realtek_8201(dev, np)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
}
}
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL |
ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
netdev_info(dev, "%s: phy write to advertise failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
/* get phy interface type */
phyinterface = readl(base + NvRegPhyInterface);
/* see if gigabit phy */
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
mii_control_1000 = mii_rw(dev, np->phyaddr,
MII_CTRL1000, MII_READ);
mii_control_1000 &= ~ADVERTISE_1000HALF;
if (phyinterface & PHY_RGMII)
mii_control_1000 |= ADVERTISE_1000FULL;
else
mii_control_1000 &= ~ADVERTISE_1000FULL;
if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
} else
np->gigabit = 0;
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
mii_control |= BMCR_ANENABLE;
if (np->phy_oui == PHY_OUI_REALTEK &&
np->phy_model == PHY_MODEL_REALTEK_8211 &&
np->phy_rev == PHY_REV_REALTEK_8211C) {
/* start autoneg since we already performed hw reset above */
mii_control |= BMCR_ANRESTART;
if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
} else {
		/* reset the phy
		 * (certain phys need bmcr to be set up with reset)
		 */
if (phy_reset(dev, mii_control)) {
netdev_info(dev, "%s: phy reset failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
}
/* phy vendor specific configuration */
if (np->phy_oui == PHY_OUI_CICADA) {
if (init_cicada(dev, np, phyinterface)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
} else if (np->phy_oui == PHY_OUI_VITESSE) {
if (init_vitesse(dev, np)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
} else if (np->phy_oui == PHY_OUI_REALTEK) {
if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
np->phy_rev == PHY_REV_REALTEK_8211B) {
/* reset could have cleared these out, set them back */
if (init_realtek_8211b(dev, np)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
if (init_realtek_8201(dev, np) ||
init_realtek_8201_cross(dev, np)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
return PHY_ERROR;
}
}
}
/* some phys clear out pause advertisement on reset, set it back */
mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
	/* restart auto negotiation, and optionally power down the phy */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
if (phy_power_down)
mii_control |= BMCR_PDOWN;
if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
return PHY_ERROR;
return 0;
}
static void nv_start_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 rx_ctrl = readl(base + NvRegReceiverControl);
/* Already running? Stop it. */
if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
rx_ctrl &= ~NVREG_RCVCTL_START;
writel(rx_ctrl, base + NvRegReceiverControl);
pci_push(base);
}
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
rx_ctrl |= NVREG_RCVCTL_START;
if (np->mac_in_use)
rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
writel(rx_ctrl, base + NvRegReceiverControl);
pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 rx_ctrl = readl(base + NvRegReceiverControl);
if (!np->mac_in_use)
rx_ctrl &= ~NVREG_RCVCTL_START;
else
rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
writel(rx_ctrl, base + NvRegReceiverControl);
if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
netdev_info(dev, "%s: ReceiverStatus remained busy\n",
__func__);
udelay(NV_RXSTOP_DELAY2);
if (!np->mac_in_use)
writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 tx_ctrl = readl(base + NvRegTransmitterControl);
tx_ctrl |= NVREG_XMITCTL_START;
if (np->mac_in_use)
tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
writel(tx_ctrl, base + NvRegTransmitterControl);
pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 tx_ctrl = readl(base + NvRegTransmitterControl);
if (!np->mac_in_use)
tx_ctrl &= ~NVREG_XMITCTL_START;
else
tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
writel(tx_ctrl, base + NvRegTransmitterControl);
if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
netdev_info(dev, "%s: TransmitterStatus remained busy\n",
__func__);
udelay(NV_TXSTOP_DELAY2);
if (!np->mac_in_use)
writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
nv_start_rx(dev);
nv_start_tx(dev);
}
static void nv_stop_rxtx(struct net_device *dev)
{
nv_stop_rx(dev);
nv_stop_tx(dev);
}
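/* Pulse the tx/rx logic reset bit in NvRegTxRxControl */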
static void nv_txrx_reset(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
udelay(NV_TXRX_RESET_DELAY);
writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 temp1, temp2, temp3;
writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
/* save registers since they will be cleared on reset */
temp1 = readl(base + NvRegMacAddrA);
temp2 = readl(base + NvRegMacAddrB);
temp3 = readl(base + NvRegTransmitPoll);
writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
pci_push(base);
udelay(NV_MAC_RESET_DELAY);
writel(0, base + NvRegMacReset);
pci_push(base);
udelay(NV_MAC_RESET_DELAY);
/* restore saved registers */
writel(temp1, base + NvRegMacAddrA);
writel(temp2, base + NvRegMacAddrB);
writel(temp3, base + NvRegTransmitPoll);
writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
}
/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
static void nv_update_stats(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
lockdep_assert_held(&np->hwstats_lock);
/* query hardware */
np->estats.tx_bytes += readl(base + NvRegTxCnt);
np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
np->estats.rx_runt += readl(base + NvRegRxRunt);
np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
np->estats.rx_length_error += readl(base + NvRegRxLenErr);
np->estats.rx_unicast += readl(base + NvRegRxUnicast);
np->estats.rx_multicast += readl(base + NvRegRxMulticast);
np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
np->estats.rx_packets =
np->estats.rx_unicast +
np->estats.rx_multicast +
np->estats.rx_broadcast;
np->estats.rx_errors_total =
np->estats.rx_crc_errors +
np->estats.rx_over_errors +
np->estats.rx_frame_error +
(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
np->estats.rx_late_collision +
np->estats.rx_runt +
np->estats.rx_frame_too_long;
np->estats.tx_errors_total =
np->estats.tx_late_collision +
np->estats.tx_fifo_errors +
np->estats.tx_carrier_errors +
np->estats.tx_excess_deferral +
np->estats.tx_retry_error;
if (np->driver_data & DEV_HAS_STATISTICS_V2) {
np->estats.tx_deferral += readl(base + NvRegTxDef);
np->estats.tx_packets += readl(base + NvRegTxFrame);
np->estats.rx_bytes += readl(base + NvRegRxCnt);
np->estats.tx_pause += readl(base + NvRegTxPause);
np->estats.rx_pause += readl(base + NvRegRxPause);
np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
np->estats.rx_errors_total += np->estats.rx_drop_frame;
}
if (np->driver_data & DEV_HAS_STATISTICS_V3) {
np->estats.tx_unicast += readl(base + NvRegTxUnicast);
np->estats.tx_multicast += readl(base + NvRegTxMulticast);
np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
}
}
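/*
 * Fold one CPU's software rx/tx counters into @storage, using the
 * u64_stats retry loops to get consistent 64-bit reads on 32-bit
 * hosts.
 */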
static void nv_get_stats(int cpu, struct fe_priv *np,
struct rtnl_link_stats64 *storage)
{
struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
unsigned int syncp_start;
u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
u64 tx_packets, tx_bytes, tx_dropped;
do {
syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
rx_packets = src->stat_rx_packets;
rx_bytes = src->stat_rx_bytes;
rx_dropped = src->stat_rx_dropped;
rx_missed_errors = src->stat_rx_missed_errors;
} while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start));
storage->rx_packets += rx_packets;
storage->rx_bytes += rx_bytes;
storage->rx_dropped += rx_dropped;
storage->rx_missed_errors += rx_missed_errors;
do {
syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp);
tx_packets = src->stat_tx_packets;
tx_bytes = src->stat_tx_bytes;
tx_dropped = src->stat_tx_dropped;
} while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start));
storage->tx_packets += tx_packets;
storage->tx_bytes += tx_bytes;
storage->tx_dropped += tx_dropped;
}
/*
* nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
* Called with rcu_read_lock() held -
* only synchronized against unregister_netdevice.
*/
static void
nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
int cpu;
/*
* Note: because HW stats are not always available and for
* consistency reasons, the following ifconfig stats are
* managed by software: rx_bytes, tx_bytes, rx_packets and
* tx_packets. The related hardware stats reported by ethtool
* should be equivalent to these ifconfig stats, with 4
* additional bytes per packet (Ethernet FCS CRC), except for
* tx_packets when TSO kicks in.
*/
/* software stats */
for_each_online_cpu(cpu)
nv_get_stats(cpu, np, storage);
/* If the nic supports hw counters then retrieve latest values */
if (np->driver_data & DEV_HAS_STATISTICS_V123) {
spin_lock_bh(&np->hwstats_lock);
nv_update_stats(dev);
/* generic stats */
storage->rx_errors = np->estats.rx_errors_total;
storage->tx_errors = np->estats.tx_errors_total;
/* meaningful only when NIC supports stats v3 */
storage->multicast = np->estats.rx_multicast;
/* detailed rx_errors */
storage->rx_length_errors = np->estats.rx_length_error;
storage->rx_over_errors = np->estats.rx_over_errors;
storage->rx_crc_errors = np->estats.rx_crc_errors;
storage->rx_frame_errors = np->estats.rx_frame_align_error;
storage->rx_fifo_errors = np->estats.rx_drop_frame;
/* detailed tx_errors */
storage->tx_carrier_errors = np->estats.tx_carrier_errors;
storage->tx_fifo_errors = np->estats.tx_fifo_errors;
spin_unlock_bh(&np->hwstats_lock);
}
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if an skb allocation failed and the rx engine is left
 * without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
struct ring_desc *less_rx;
less_rx = np->get_rx.orig;
if (less_rx-- == np->rx_ring.orig)
less_rx = np->last_rx.orig;
while (np->put_rx.orig != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&np->pci_dev->dev,
np->put_rx_ctx->dma))) {
kfree_skb(skb);
goto packet_dropped;
}
np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
wmb();
np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
np->put_rx.orig = np->rx_ring.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->rx_skb;
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
}
}
return 0;
}
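/*
 * Same as nv_alloc_rx(), but for the extended descriptor format,
 * which splits the buffer address into high and low 32-bit halves.
 */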
static int nv_alloc_rx_optimized(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
struct ring_desc_ex *less_rx;
less_rx = np->get_rx.ex;
if (less_rx-- == np->rx_ring.ex)
less_rx = np->last_rx.ex;
while (np->put_rx.ex != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&np->pci_dev->dev,
np->put_rx_ctx->dma))) {
kfree_skb(skb);
goto packet_dropped;
}
np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
wmb();
np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
np->put_rx.ex = np->rx_ring.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->rx_skb;
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
}
}
return 0;
}
/* If rx buffers are exhausted, this timer fires after 50 ms to retry the refill */
static void nv_do_rx_refill(struct timer_list *t)
{
struct fe_priv *np = from_timer(np, t, oom_kick);
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
}
static void nv_init_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
np->get_rx = np->rx_ring;
np->put_rx = np->rx_ring;
if (!nv_optimized(np))
np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
else
np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
np->get_rx_ctx = np->rx_skb;
np->put_rx_ctx = np->rx_skb;
np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
for (i = 0; i < np->rx_ring_size; i++) {
if (!nv_optimized(np)) {
np->rx_ring.orig[i].flaglen = 0;
np->rx_ring.orig[i].buf = 0;
} else {
np->rx_ring.ex[i].flaglen = 0;
np->rx_ring.ex[i].txvlan = 0;
np->rx_ring.ex[i].bufhigh = 0;
np->rx_ring.ex[i].buflow = 0;
}
np->rx_skb[i].skb = NULL;
np->rx_skb[i].dma = 0;
}
}
static void nv_init_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
np->get_tx = np->tx_ring;
np->put_tx = np->tx_ring;
if (!nv_optimized(np))
np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
else
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
np->get_tx_ctx = np->tx_skb;
np->put_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
netdev_reset_queue(np->dev);
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
np->tx_stop = 0;
for (i = 0; i < np->tx_ring_size; i++) {
if (!nv_optimized(np)) {
np->tx_ring.orig[i].flaglen = 0;
np->tx_ring.orig[i].buf = 0;
} else {
np->tx_ring.ex[i].flaglen = 0;
np->tx_ring.ex[i].txvlan = 0;
np->tx_ring.ex[i].bufhigh = 0;
np->tx_ring.ex[i].buflow = 0;
}
np->tx_skb[i].skb = NULL;
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
np->tx_skb[i].dma_single = 0;
np->tx_skb[i].first_tx_desc = NULL;
np->tx_skb[i].next_tx_ctx = NULL;
}
}
static int nv_init_ring(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
nv_init_tx(dev);
nv_init_rx(dev);
if (!nv_optimized(np))
return nv_alloc_rx(dev);
else
return nv_alloc_rx_optimized(dev);
}
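/* Undo the DMA mapping of one tx buffer (single or page mapping) */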
static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
if (tx_skb->dma) {
if (tx_skb->dma_single)
dma_unmap_single(&np->pci_dev->dev, tx_skb->dma,
tx_skb->dma_len,
DMA_TO_DEVICE);
else
dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
tx_skb->dma_len,
DMA_TO_DEVICE);
tx_skb->dma = 0;
}
}
static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
nv_unmap_txskb(np, tx_skb);
if (tx_skb->skb) {
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
return 1;
}
return 0;
}
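/*
 * Clear the whole tx ring and release every pending skb, counting
 * each released packet as dropped.
 */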
static void nv_drain_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
unsigned int i;
for (i = 0; i < np->tx_ring_size; i++) {
if (!nv_optimized(np)) {
np->tx_ring.orig[i].flaglen = 0;
np->tx_ring.orig[i].buf = 0;
} else {
np->tx_ring.ex[i].flaglen = 0;
np->tx_ring.ex[i].txvlan = 0;
np->tx_ring.ex[i].bufhigh = 0;
np->tx_ring.ex[i].buflow = 0;
}
if (nv_release_txskb(np, &np->tx_skb[i])) {
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
}
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
np->tx_skb[i].dma_single = 0;
np->tx_skb[i].first_tx_desc = NULL;
np->tx_skb[i].next_tx_ctx = NULL;
}
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
}
static void nv_drain_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
for (i = 0; i < np->rx_ring_size; i++) {
if (!nv_optimized(np)) {
np->rx_ring.orig[i].flaglen = 0;
np->rx_ring.orig[i].buf = 0;
} else {
np->rx_ring.ex[i].flaglen = 0;
np->rx_ring.ex[i].txvlan = 0;
np->rx_ring.ex[i].bufhigh = 0;
np->rx_ring.ex[i].buflow = 0;
}
wmb();
if (np->rx_skb[i].skb) {
dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma,
(skb_end_pointer(np->rx_skb[i].skb) -
np->rx_skb[i].skb->data),
DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_skb[i].skb);
np->rx_skb[i].skb = NULL;
}
}
}
static void nv_drain_rxtx(struct net_device *dev)
{
nv_drain_tx(dev);
nv_drain_rx(dev);
}
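/*
 * Free tx descriptors: ring size minus the number currently in
 * flight between put_tx_ctx (producer) and get_tx_ctx (consumer),
 * taken modulo the ring size.
 */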
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
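/*
 * Re-seed the legacy backoff LFSR with random slot-time bits; tx
 * (and rx) are briefly stopped so the new seed takes effect.
 */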
static void nv_legacybackoff_reseed(struct net_device *dev)
{
u8 __iomem *base = get_hwbase(dev);
u32 reg;
u32 low;
int tx_status = 0;
reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
get_random_bytes(&low, sizeof(low));
reg |= low & NVREG_SLOTTIME_MASK;
	/* Need to stop tx before the change takes effect.
	 * Caller already holds np->lock.
	 */
tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
if (tx_status)
nv_stop_tx(dev);
nv_stop_rx(dev);
writel(reg, base + NvRegSlotTime);
if (tx_status)
nv_start_tx(dev);
nv_start_rx(dev);
}
/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS 8
#define BACKOFF_SEEDSET_LFSRS 15
/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
static void nv_gear_backoff_reseed(struct net_device *dev)
{
u8 __iomem *base = get_hwbase(dev);
u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
u32 temp, seedset, combinedSeed;
int i;
/* Setup seed for free running LFSR */
	/* We gather three random 12-bit miniseeds and swizzle their
	   bits around to increase randomness */
get_random_bytes(&miniseed1, sizeof(miniseed1));
miniseed1 &= 0x0fff;
if (miniseed1 == 0)
miniseed1 = 0xabc;
get_random_bytes(&miniseed2, sizeof(miniseed2));
miniseed2 &= 0x0fff;
if (miniseed2 == 0)
miniseed2 = 0xabc;
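	/* swap the top and bottom nibbles of the 12-bit miniseed,
	 * e.g. 0xabc -> 0xcba (same for miniseed3 below)
	 */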
miniseed2_reversed =
((miniseed2 & 0xF00) >> 8) |
(miniseed2 & 0x0F0) |
((miniseed2 & 0x00F) << 8);
get_random_bytes(&miniseed3, sizeof(miniseed3));
miniseed3 &= 0x0fff;
if (miniseed3 == 0)
miniseed3 = 0xabc;
miniseed3_reversed =
((miniseed3 & 0xF00) >> 8) |
(miniseed3 & 0x0F0) |
((miniseed3 & 0x00F) << 8);
combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
(miniseed2 ^ miniseed3_reversed);
	/* Seeds cannot be zero */
if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
combinedSeed |= 0x08;
if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
combinedSeed |= 0x8000;
/* No need to disable tx here */
temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
writel(temp, base + NvRegBackOffControl);
/* Setup seeds for all gear LFSRs. */
get_random_bytes(&seedset, sizeof(seedset));
seedset = seedset % BACKOFF_SEEDSET_ROWS;
for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
temp |= main_seedset[seedset][i-1] & 0x3ff;
temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
writel(temp, base + NvRegBackOffControl);
}
}
/*
* nv_start_xmit: dev->hard_start_xmit function
* Called with netif_tx_lock held.
*/
static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u32 tx_flags = 0;
u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
unsigned int fragments = skb_shinfo(skb)->nr_frags;
unsigned int i;
u32 offset = 0;
u32 bcnt;
u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc *put_tx;
struct ring_desc *start_tx;
struct ring_desc *prev_tx;
struct nv_skb_map *prev_tx_ctx;
struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
unsigned long flags;
netdev_tx_t ret = NETDEV_TX_OK;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
empty_slots = nv_get_empty_tx_slots(np);
if (unlikely(empty_slots <= entries)) {
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
		/* The ring may already hold packets queued via xmit_more,
		 * so the NIC tx kick register must still be written even
		 * though this packet could not be queued.
		 */
ret = NETDEV_TX_BUSY;
goto txkick;
}
spin_unlock_irqrestore(&np->lock, flags);
start_tx = put_tx = np->put_tx.orig;
/* setup the header buffer */
do {
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data + offset, bcnt,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&np->pci_dev->dev,
np->put_tx_ctx->dma))) {
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
ret = NETDEV_TX_OK;
goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
tx_flags = np->tx_flags;
offset += bcnt;
size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
put_tx = np->tx_ring.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
np->put_tx_ctx = np->tx_skb;
} while (size);
/* setup the fragments */
for (i = 0; i < fragments; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 frag_size = skb_frag_size(frag);
offset = 0;
do {
if (!start_tx_ctx)
start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
np->put_tx_ctx->dma = skb_frag_dma_map(
&np->pci_dev->dev,
frag, offset,
bcnt,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&np->pci_dev->dev,
np->put_tx_ctx->dma))) {
/* Unwind the mapped fragments */
do {
nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
ret = NETDEV_TX_OK;
goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
offset += bcnt;
frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
put_tx = np->tx_ring.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
np->put_tx_ctx = np->tx_skb;
} while (frag_size);
}
if (unlikely(put_tx == np->tx_ring.orig))
prev_tx = np->last_tx.orig;
else
prev_tx = put_tx - 1;
if (unlikely(np->put_tx_ctx == np->tx_skb))
prev_tx_ctx = np->last_tx_ctx;
else
prev_tx_ctx = np->put_tx_ctx - 1;
/* set last fragment flag */
prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
/* save skb in this slot's context area */
prev_tx_ctx->skb = skb;
if (skb_is_gso(skb))
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
spin_lock_irqsave(&np->lock, flags);
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
netdev_sent_queue(np->dev, skb->len);
skb_tx_timestamp(skb);
np->put_tx.orig = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
txkick:
if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
u32 txrxctl_kick;
dma_error:
txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
}
return ret;
}
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u32 tx_flags = 0;
u32 tx_flags_extra;
unsigned int fragments = skb_shinfo(skb)->nr_frags;
unsigned int i;
u32 offset = 0;
u32 bcnt;
u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc_ex *put_tx;
struct ring_desc_ex *start_tx;
struct ring_desc_ex *prev_tx;
struct nv_skb_map *prev_tx_ctx;
struct nv_skb_map *start_tx_ctx = NULL;
struct nv_skb_map *tmp_tx_ctx = NULL;
unsigned long flags;
netdev_tx_t ret = NETDEV_TX_OK;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
empty_slots = nv_get_empty_tx_slots(np);
if (unlikely(empty_slots <= entries)) {
netif_stop_queue(dev);
np->tx_stop = 1;
spin_unlock_irqrestore(&np->lock, flags);
		/* The ring may already hold packets queued via xmit_more,
		 * so the NIC tx kick register must still be written even
		 * though this packet could not be queued.
		 */
ret = NETDEV_TX_BUSY;
goto txkick;
}
spin_unlock_irqrestore(&np->lock, flags);
start_tx = put_tx = np->put_tx.ex;
start_tx_ctx = np->put_tx_ctx;
/* setup the header buffer */
do {
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data + offset, bcnt,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&np->pci_dev->dev,
np->put_tx_ctx->dma))) {
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
ret = NETDEV_TX_OK;
goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
tx_flags = NV_TX2_VALID;
offset += bcnt;
size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
np->put_tx_ctx = np->tx_skb;
} while (size);
/* setup the fragments */
for (i = 0; i < fragments; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 frag_size = skb_frag_size(frag);
offset = 0;
do {
bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
if (!start_tx_ctx)
start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
np->put_tx_ctx->dma = skb_frag_dma_map(
&np->pci_dev->dev,
frag, offset,
bcnt,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&np->pci_dev->dev,
np->put_tx_ctx->dma))) {
/* Unwind the mapped fragments */
do {
nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
ret = NETDEV_TX_OK;
goto dma_error;
}
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
offset += bcnt;
frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
np->put_tx_ctx = np->tx_skb;
} while (frag_size);
}
if (unlikely(put_tx == np->tx_ring.ex))
prev_tx = np->last_tx.ex;
else
prev_tx = put_tx - 1;
if (unlikely(np->put_tx_ctx == np->tx_skb))
prev_tx_ctx = np->last_tx_ctx;
else
prev_tx_ctx = np->put_tx_ctx - 1;
/* set last fragment flag */
prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
/* save skb in this slot's context area */
prev_tx_ctx->skb = skb;
if (skb_is_gso(skb))
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
/* vlan tag */
if (skb_vlan_tag_present(skb))
start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
skb_vlan_tag_get(skb));
else
start_tx->txvlan = 0;
spin_lock_irqsave(&np->lock, flags);
if (np->tx_limit) {
		/* Limit the number of outstanding tx. Set up all fragments,
		 * but do not set the VALID bit on the first descriptor.
		 * Save a pointer to that descriptor and also to the next
		 * skb_map element.
		 */
if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
if (!np->tx_change_owner)
np->tx_change_owner = start_tx_ctx;
/* remove VALID bit */
tx_flags &= ~NV_TX2_VALID;
start_tx_ctx->first_tx_desc = start_tx;
start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
np->tx_end_flip = np->put_tx_ctx;
} else {
np->tx_pkts_in_progress++;
}
}
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
netdev_sent_queue(np->dev, skb->len);
skb_tx_timestamp(skb);
np->put_tx.ex = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
txkick:
if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
u32 txrxctl_kick;
dma_error:
txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
}
return ret;
}
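/*
 * Hand one deferred packet back to the hardware: set the VALID bit
 * on its first descriptor and kick tx. Used with tx_limit to cap
 * the number of packets the NIC owns at once.
 */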
static inline void nv_tx_flip_ownership(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
np->tx_pkts_in_progress--;
if (np->tx_change_owner) {
np->tx_change_owner->first_tx_desc->flaglen |=
cpu_to_le32(NV_TX2_VALID);
np->tx_pkts_in_progress++;
np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
if (np->tx_change_owner == np->tx_end_flip)
np->tx_change_owner = NULL;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
}
}
/*
* nv_tx_done: check for completed packets, release the skbs.
*
* Caller must own np->lock.
*/
static int nv_tx_done(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
u32 flags;
int tx_work = 0;
struct ring_desc *orig_get_tx = np->get_tx.orig;
unsigned int bytes_compl = 0;
while ((np->get_tx.orig != np->put_tx.orig) &&
!((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
(tx_work < limit)) {
nv_unmap_txskb(np, np->get_tx_ctx);
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
if (unlikely(flags & NV_TX_ERROR)) {
if ((flags & NV_TX_RETRYERROR)
&& !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
unsigned int len;
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_packets);
len = np->get_tx_ctx->skb->len;
nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
}
} else {
if (flags & NV_TX2_LASTPACKET) {
if (unlikely(flags & NV_TX2_ERROR)) {
if ((flags & NV_TX2_RETRYERROR)
&& !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
unsigned int len;
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_packets);
len = np->get_tx_ctx->skb->len;
nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
}
}
if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
np->get_tx.orig = np->tx_ring.orig;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->tx_skb;
}
netdev_completed_queue(np->dev, tx_work, bytes_compl);
if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
}
return tx_work;
}
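/*
 * Extended-descriptor variant of nv_tx_done(); same locking rules.
 * Also hands deferred packets back to the hardware when tx_limit
 * is active.
 */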
static int nv_tx_done_optimized(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
u32 flags;
int tx_work = 0;
struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
unsigned long bytes_cleaned = 0;
while ((np->get_tx.ex != np->put_tx.ex) &&
!((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
(tx_work < limit)) {
nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
if (unlikely(flags & NV_TX2_ERROR)) {
if ((flags & NV_TX2_RETRYERROR)
&& !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
nv_gear_backoff_reseed(dev);
else
nv_legacybackoff_reseed(dev);
}
} else {
unsigned int len;
u64_stats_update_begin(&np->swstats_tx_syncp);
nv_txrx_stats_inc(stat_tx_packets);
len = np->get_tx_ctx->skb->len;
nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_cleaned += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
if (np->tx_limit)
nv_tx_flip_ownership(dev);
}
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->get_tx.ex = np->tx_ring.ex;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->tx_skb;
}
netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
}
return tx_work;
}
/*
* nv_tx_timeout: dev->tx_timeout function
* Called with netif_tx_lock held.
*/
static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 status;
union ring_type put_tx;
int saved_tx_limit;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
else
status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
if (unlikely(debug_tx_timeout)) {
int i;
netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
netdev_info(dev, "Dumping tx registers\n");
for (i = 0; i <= np->register_size; i += 32) {
netdev_info(dev,
"%3x: %08x %08x %08x %08x "
"%08x %08x %08x %08x\n",
i,
readl(base + i + 0), readl(base + i + 4),
readl(base + i + 8), readl(base + i + 12),
readl(base + i + 16), readl(base + i + 20),
readl(base + i + 24), readl(base + i + 28));
}
netdev_info(dev, "Dumping tx ring\n");
for (i = 0; i < np->tx_ring_size; i += 4) {
if (!nv_optimized(np)) {
netdev_info(dev,
"%03x: %08x %08x // %08x %08x "
"// %08x %08x // %08x %08x\n",
i,
le32_to_cpu(np->tx_ring.orig[i].buf),
le32_to_cpu(np->tx_ring.orig[i].flaglen),
le32_to_cpu(np->tx_ring.orig[i+1].buf),
le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
le32_to_cpu(np->tx_ring.orig[i+2].buf),
le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
le32_to_cpu(np->tx_ring.orig[i+3].buf),
le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
} else {
netdev_info(dev,
"%03x: %08x %08x %08x "
"// %08x %08x %08x "
"// %08x %08x %08x "
"// %08x %08x %08x\n",
i,
le32_to_cpu(np->tx_ring.ex[i].bufhigh),
le32_to_cpu(np->tx_ring.ex[i].buflow),
le32_to_cpu(np->tx_ring.ex[i].flaglen),
le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
le32_to_cpu(np->tx_ring.ex[i+1].buflow),
le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
le32_to_cpu(np->tx_ring.ex[i+2].buflow),
le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
le32_to_cpu(np->tx_ring.ex[i+3].buflow),
le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
}
}
}
spin_lock_irq(&np->lock);
/* 1) stop tx engine */
nv_stop_tx(dev);
/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
saved_tx_limit = np->tx_limit;
np->tx_limit = 0; /* prevent giving HW any limited pkts */
np->tx_stop = 0; /* prevent waking tx queue */
if (!nv_optimized(np))
nv_tx_done(dev, np->tx_ring_size);
else
nv_tx_done_optimized(dev, np->tx_ring_size);
/* save current HW position */
if (np->tx_change_owner)
put_tx.ex = np->tx_change_owner->first_tx_desc;
else
put_tx = np->put_tx;
/* 3) clear all tx state */
nv_drain_tx(dev);
nv_init_tx(dev);
/* 4) restore state to current HW position */
np->get_tx = np->put_tx = put_tx;
np->tx_limit = saved_tx_limit;
/* 5) restart tx engine */
nv_start_tx(dev);
netif_wake_queue(dev);
spin_unlock_irq(&np->lock);
}
/*
* Called when the nic notices a mismatch between the actual data len on the
* wire and the len indicated in the 802 header
*/
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
int hdrlen; /* length of the 802 header */
int protolen; /* length as stored in the proto field */
/* 1) calculate len according to header */
if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
hdrlen = VLAN_HLEN;
} else {
protolen = ntohs(((struct ethhdr *)packet)->h_proto);
hdrlen = ETH_HLEN;
}
if (protolen > ETH_DATA_LEN)
return datalen; /* Value in proto field not a len, no checks possible */
protolen += hdrlen;
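	/* Example: a length field of 100 in an untagged frame implies
	 * 114 bytes on the wire (100 + ETH_HLEN); a 120 byte frame is
	 * trimmed to 114 and a 110 byte frame is discarded.
	 */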
/* consistency checks: */
if (datalen > ETH_ZLEN) {
if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
return protolen;
} else {
/* less data on wire than mentioned in header.
* Discard the packet.
*/
return -1;
}
} else {
/* short packet. Accept only if 802 values are also short */
if (protolen > ETH_ZLEN) {
return -1;
}
return datalen;
}
}
static void rx_missing_handler(u32 flags, struct fe_priv *np)
{
if (flags & NV_RX_MISSEDFRAME) {
u64_stats_update_begin(&np->swstats_rx_syncp);
nv_txrx_stats_inc(stat_rx_missed_errors);
u64_stats_update_end(&np->swstats_rx_syncp);
}
}
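/*
 * nv_rx_process: process up to @limit received packets from the
 * DESC_VER_1/2 rx ring: unmap each buffer, validate it against the
 * descriptor flags and hand good packets to napi_gro_receive().
 * Returns the number of packets processed.
 */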
static int nv_rx_process(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
u32 flags;
int rx_work = 0;
struct sk_buff *skb;
int len;
while ((np->get_rx.orig != np->put_rx.orig) &&
!((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
(rx_work < limit)) {
/*
* the packet is for us - immediately tear down the pci mapping.
* TODO: check if a prefetch of the first cacheline improves
* the performance.
*/
dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
np->get_rx_ctx->dma_len,
DMA_FROM_DEVICE);
skb = np->get_rx_ctx->skb;
np->get_rx_ctx->skb = NULL;
/* look at what we actually got: */
if (np->desc_ver == DESC_VER_1) {
if (likely(flags & NV_RX_DESCRIPTORVALID)) {
len = flags & LEN_MASK_V1;
if (unlikely(flags & NV_RX_ERROR)) {
if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
len = nv_getlen(dev, skb->data, len);
if (len < 0) {
dev_kfree_skb(skb);
goto next_pkt;
}
}
/* framing errors are soft errors */
else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
if (flags & NV_RX_SUBTRACT1)
len--;
}
/* the rest are hard errors */
else {
rx_missing_handler(flags, np);
dev_kfree_skb(skb);
goto next_pkt;
}
}
} else {
dev_kfree_skb(skb);
goto next_pkt;
}
} else {
if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
len = flags & LEN_MASK_V2;
if (unlikely(flags & NV_RX2_ERROR)) {
if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
len = nv_getlen(dev, skb->data, len);
if (len < 0) {
dev_kfree_skb(skb);
goto next_pkt;
}
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
if (flags & NV_RX2_SUBTRACT1)
len--;
}
/* the rest are hard errors */
else {
dev_kfree_skb(skb);
goto next_pkt;
}
}
if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
dev_kfree_skb(skb);
goto next_pkt;
}
}
/* got a valid packet - forward it to the network core */
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
nv_txrx_stats_inc(stat_rx_packets);
nv_txrx_stats_add(stat_rx_bytes, len);
u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
np->get_rx.orig = np->rx_ring.orig;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
np->get_rx_ctx = np->rx_skb;
rx_work++;
}
return rx_work;
}
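/*
 * nv_rx_process_optimized: DESC_VER_3 variant of nv_rx_process that
 * additionally extracts the hardware vlan tag from the descriptor's
 * buflow word before passing the packet up.
 */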
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
u32 flags;
u32 vlanflags = 0;
int rx_work = 0;
struct sk_buff *skb;
int len;
while ((np->get_rx.ex != np->put_rx.ex) &&
!((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
(rx_work < limit)) {
/*
* the packet is for us - immediately tear down the pci mapping.
* TODO: check if a prefetch of the first cacheline improves
* the performance.
*/
dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
np->get_rx_ctx->dma_len,
DMA_FROM_DEVICE);
skb = np->get_rx_ctx->skb;
np->get_rx_ctx->skb = NULL;
/* look at what we actually got: */
if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
len = flags & LEN_MASK_V2;
if (unlikely(flags & NV_RX2_ERROR)) {
if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
len = nv_getlen(dev, skb->data, len);
if (len < 0) {
dev_kfree_skb(skb);
goto next_pkt;
}
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
if (flags & NV_RX2_SUBTRACT1)
len--;
}
/* the rest are hard errors */
else {
dev_kfree_skb(skb);
goto next_pkt;
}
}
if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* got a valid packet - forward it to the network core */
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
prefetch(skb->data);
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
			/*
			 * We need to check for NETIF_F_HW_VLAN_CTAG_RX here:
			 * even when vlan rx acceleration is disabled,
			 * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
			 */
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
nv_txrx_stats_inc(stat_rx_packets);
nv_txrx_stats_add(stat_rx_bytes, len);
u64_stats_update_end(&np->swstats_rx_syncp);
} else {
dev_kfree_skb(skb);
}
next_pkt:
if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
np->get_rx.ex = np->rx_ring.ex;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
np->get_rx_ctx = np->rx_skb;
rx_work++;
}
return rx_work;
}
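/*
 * Size rx buffers for the current MTU plus NV_RX_HEADERS of link
 * level overhead; all MTUs up to ETH_DATA_LEN share the standard
 * buffer size.
 */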
static void set_bufsize(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
if (dev->mtu <= ETH_DATA_LEN)
np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
else
np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
/*
* nv_change_mtu: dev->change_mtu function
* Called with RTNL held for read.
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
struct fe_priv *np = netdev_priv(dev);
int old_mtu;
old_mtu = dev->mtu;
WRITE_ONCE(dev->mtu, new_mtu);
/* return early if the buffer sizes will not change */
if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
return 0;
/* synchronized against open : rtnl_lock() held by caller */
if (netif_running(dev)) {
u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed; there is probably a simpler approach.
		 * Changing the MTU is a rare event, so it shouldn't matter.
		 */
nv_disable_irq(dev);
nv_napi_disable(dev);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain rx queue */
nv_drain_rxtx(dev);
/* reinit driver view of the rx queue */
set_bufsize(dev);
if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
}
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
/* restart rx engine */
nv_start_rxtx(dev);
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
nv_napi_enable(dev);
nv_enable_irq(dev);
}
return 0;
}
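/*
 * Write dev->dev_addr into the two MAC address registers. The address
 * is packed little-endian: for example, 00:11:22:33:44:55 becomes
 * NvRegMacAddrA = 0x33221100 and NvRegMacAddrB = 0x00005544.
 */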
static void nv_copy_mac_to_hw(struct net_device *dev)
{
u8 __iomem *base = get_hwbase(dev);
u32 mac[2];
mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
writel(mac[0], base + NvRegMacAddrA);
writel(mac[1], base + NvRegMacAddrB);
}
/*
* nv_set_mac_address: dev->set_mac_address function
* Called with rtnl_lock() held.
*/
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
struct fe_priv *np = netdev_priv(dev);
struct sockaddr *macaddr = (struct sockaddr *)addr;
if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
/* synchronized against open : rtnl_lock() held by caller */
eth_hw_addr_set(dev, macaddr->sa_data);
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
/* stop rx engine */
nv_stop_rx(dev);
/* set mac address */
nv_copy_mac_to_hw(dev);
/* restart rx engine */
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
} else {
nv_copy_mac_to_hw(dev);
}
return 0;
}
/*
* nv_set_multicast: dev->set_multicast function
* Called with netif_tx_lock held.
*/
static void nv_set_multicast(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 addr[2];
u32 mask[2];
u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
memset(addr, 0, sizeof(addr));
memset(mask, 0, sizeof(mask));
if (dev->flags & IFF_PROMISC) {
pff |= NVREG_PFF_PROMISC;
} else {
pff |= NVREG_PFF_MYADDR;
if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
u32 alwaysOff[2];
u32 alwaysOn[2];
alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
if (dev->flags & IFF_ALLMULTI) {
alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
} else {
struct netdev_hw_addr *ha;
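				/*
				 * alwaysOn accumulates the bits set in
				 * every listed address, alwaysOff the bits
				 * clear in every address; the addr/mask
				 * pair below thus describes the bit values
				 * that all addresses have in common.
				 */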
netdev_for_each_mc_addr(ha, dev) {
unsigned char *hw_addr = ha->addr;
u32 a, b;
a = le32_to_cpu(*(__le32 *) hw_addr);
b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
alwaysOn[0] &= a;
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
alwaysOff[1] &= ~b;
}
}
addr[0] = alwaysOn[0];
addr[1] = alwaysOn[1];
mask[0] = alwaysOn[0] | alwaysOff[0];
mask[1] = alwaysOn[1] | alwaysOff[1];
} else {
mask[0] = NVREG_MCASTMASKA_NONE;
mask[1] = NVREG_MCASTMASKB_NONE;
}
}
addr[0] |= NVREG_MCASTADDRA_FORCE;
pff |= NVREG_PFF_ALWAYS;
spin_lock_irq(&np->lock);
nv_stop_rx(dev);
writel(addr[0], base + NvRegMulticastAddrA);
writel(addr[1], base + NvRegMulticastAddrB);
writel(mask[0], base + NvRegMulticastMaskA);
writel(mask[1], base + NvRegMulticastMaskB);
writel(pff, base + NvRegPacketFilterFlags);
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
}
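/*
 * nv_update_pause: apply the requested pause frame configuration.
 * Rx pause is controlled through the packet filter flags, tx pause
 * through NvRegTxPauseFrame and the pause bit in NvRegMisc1.
 */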
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
} else {
writel(pff, base + NvRegPacketFilterFlags);
}
}
if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
/* limit the number of tx pause frames to a default of 8 */
writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
}
writel(pause_enable, base + NvRegTxPauseFrame);
writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
} else {
writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
writel(regmisc, base + NvRegMisc1);
}
}
}
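/*
 * nv_force_linkspeed: program the MAC for a fixed speed/duplex
 * without consulting the link partner; used when the PHY is put
 * into loopback mode.
 */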
static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 phyreg, txreg;
int mii_status;
np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
np->duplex = duplex;
/* see if gigabit phy */
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
phyreg = readl(base + NvRegSlotTime);
phyreg &= ~(0x3FF00);
if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
phyreg |= NVREG_SLOTTIME_10_100_FULL;
else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
phyreg |= NVREG_SLOTTIME_10_100_FULL;
else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
phyreg |= NVREG_SLOTTIME_1000_FULL;
writel(phyreg, base + NvRegSlotTime);
}
phyreg = readl(base + NvRegPhyInterface);
phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
if (np->duplex == 0)
phyreg |= PHY_HALF;
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
phyreg |= PHY_100;
else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
NVREG_LINKSPEED_1000)
phyreg |= PHY_1000;
writel(phyreg, base + NvRegPhyInterface);
if (phyreg & PHY_RGMII) {
if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
NVREG_LINKSPEED_1000)
txreg = NVREG_TX_DEFERRAL_RGMII_1000;
else
txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
} else {
txreg = NVREG_TX_DEFERRAL_DEFAULT;
}
writel(txreg, base + NvRegTxDeferral);
if (np->desc_ver == DESC_VER_1) {
txreg = NVREG_TX_WM_DESC1_DEFAULT;
} else {
if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
NVREG_LINKSPEED_1000)
txreg = NVREG_TX_WM_DESC2_3_1000;
else
txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
}
writel(txreg, base + NvRegTxWatermark);
writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
base + NvRegMisc1);
pci_push(base);
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
}
/**
* nv_update_linkspeed - Setup the MAC according to the link partner
* @dev: Network device to be configured
*
* The function queries the PHY and checks if there is a link partner.
* If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
* set to 10 MBit HD.
*
* The function returns 0 if there is no link partner and 1 if there is
* a good link partner.
*/
static int nv_update_linkspeed(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int adv = 0;
int lpa = 0;
int adv_lpa, adv_pause, lpa_pause;
int newls = np->linkspeed;
int newdup = np->duplex;
int mii_status;
u32 bmcr;
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
u32 txrxFlags = 0;
u32 phy_exp;
/* If device loopback is enabled, set carrier on and enable max link
* speed.
*/
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
if (bmcr & BMCR_LOOPBACK) {
if (netif_running(dev)) {
nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
}
return 1;
}
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
*/
mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (!(mii_status & BMSR_LSTATUS)) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
retval = 0;
goto set_speed;
}
if (np->autoneg == 0) {
if (np->fixed_mode & LPA_100FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 1;
} else if (np->fixed_mode & LPA_100HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 0;
} else if (np->fixed_mode & LPA_10FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 1;
} else {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
retval = 1;
goto set_speed;
}
/* check auto negotiation is complete */
if (!(mii_status & BMSR_ANEGCOMPLETE)) {
/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
retval = 0;
goto set_speed;
}
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
retval = 1;
if (np->gigabit == PHY_GIGABIT) {
control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
newdup = 1;
goto set_speed;
}
}
/* FIXME: handle parallel detection properly */
adv_lpa = lpa & adv;
if (adv_lpa & LPA_100FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 1;
} else if (adv_lpa & LPA_100HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 0;
} else if (adv_lpa & LPA_10FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 1;
} else if (adv_lpa & LPA_10HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
} else {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
set_speed:
if (np->duplex == newdup && np->linkspeed == newls)
return retval;
np->duplex = newdup;
np->linkspeed = newls;
/* The transmitter and receiver must be restarted for safe update */
if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
txrxFlags |= NV_RESTART_TX;
nv_stop_tx(dev);
}
if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
txrxFlags |= NV_RESTART_RX;
nv_stop_rx(dev);
}
if (np->gigabit == PHY_GIGABIT) {
phyreg = readl(base + NvRegSlotTime);
phyreg &= ~(0x3FF00);
if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
phyreg |= NVREG_SLOTTIME_10_100_FULL;
else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
phyreg |= NVREG_SLOTTIME_1000_FULL;
writel(phyreg, base + NvRegSlotTime);
}
phyreg = readl(base + NvRegPhyInterface);
phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
if (np->duplex == 0)
phyreg |= PHY_HALF;
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
phyreg |= PHY_100;
else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
phyreg |= PHY_1000;
writel(phyreg, base + NvRegPhyInterface);
phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
if (phyreg & PHY_RGMII) {
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
txreg = NVREG_TX_DEFERRAL_RGMII_1000;
} else {
if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
else
txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
} else {
txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
}
}
} else {
if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
else
txreg = NVREG_TX_DEFERRAL_DEFAULT;
}
writel(txreg, base + NvRegTxDeferral);
if (np->desc_ver == DESC_VER_1) {
txreg = NVREG_TX_WM_DESC1_DEFAULT;
} else {
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
txreg = NVREG_TX_WM_DESC2_3_1000;
else
txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
}
writel(txreg, base + NvRegTxWatermark);
writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
base + NvRegMisc1);
pci_push(base);
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
pause_flags = 0;
/* setup pause frame */
if (netif_running(dev) && (np->duplex != 0)) {
if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
switch (adv_pause) {
case ADVERTISE_PAUSE_CAP:
if (lpa_pause & LPA_PAUSE_CAP) {
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
break;
case ADVERTISE_PAUSE_ASYM:
if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
break;
case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
if (lpa_pause & LPA_PAUSE_CAP) {
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
if (lpa_pause == LPA_PAUSE_ASYM)
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
break;
}
} else {
pause_flags = np->pause_flags;
}
}
nv_update_pause(dev, pause_flags);
if (txrxFlags & NV_RESTART_TX)
nv_start_tx(dev);
if (txrxFlags & NV_RESTART_RX)
nv_start_rx(dev);
return retval;
}
static void nv_linkchange(struct net_device *dev)
{
if (nv_update_linkspeed(dev)) {
if (!netif_carrier_ok(dev)) {
netif_carrier_on(dev);
netdev_info(dev, "link up\n");
nv_txrx_gate(dev, false);
nv_start_rx(dev);
}
} else {
if (netif_carrier_ok(dev)) {
netif_carrier_off(dev);
netdev_info(dev, "link down\n");
nv_txrx_gate(dev, true);
nv_stop_rx(dev);
}
}
}
static void nv_link_irq(struct net_device *dev)
{
u8 __iomem *base = get_hwbase(dev);
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
if (miistat & (NVREG_MIISTAT_LINKCHANGE))
nv_linkchange(dev);
}
static void nv_msi_workaround(struct fe_priv *np)
{
	/* Need to toggle the msi irq mask within the ethernet device;
	 * otherwise, future interrupts will not be detected.
	 */
if (np->msi_flags & NV_MSI_ENABLED) {
u8 __iomem *base = np->base;
writel(0, base + NvRegMSIIrqMask);
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
}
}
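/*
 * In dynamic optimization mode, switch between the CPU (poll based)
 * and throughput (per tx/rx packet) irq masks depending on how much
 * work recent interrupts produced. Returns 1 if np->irqmask changed.
 */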
static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
{
struct fe_priv *np = netdev_priv(dev);
if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
if (total_work > NV_DYNAMIC_THRESHOLD) {
/* transition to poll based interrupts */
np->quiet_count = 0;
if (np->irqmask != NVREG_IRQMASK_CPU) {
np->irqmask = NVREG_IRQMASK_CPU;
return 1;
}
} else {
if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
np->quiet_count++;
} else {
/* reached a period of low activity, switch
to per tx/rx packet interrupts */
if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
np->irqmask = NVREG_IRQMASK_THROUGHPUT;
return 1;
}
}
}
}
return 0;
}
static irqreturn_t nv_nic_irq(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
np->events = readl(base + NvRegIrqStatus);
writel(np->events, base + NvRegIrqStatus);
} else {
np->events = readl(base + NvRegMSIXIrqStatus);
writel(np->events, base + NvRegMSIXIrqStatus);
}
if (!(np->events & np->irqmask))
return IRQ_NONE;
nv_msi_workaround(np);
if (napi_schedule_prep(&np->napi)) {
		/*
		 * Disable further irqs (msix not enabled with napi)
		 */
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
}
return IRQ_HANDLED;
}
/* All _optimized functions are used to help increase performance
 * (reduce CPU usage and increase throughput). They use descriptor
 * version 3, compiler directives, and fewer memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
np->events = readl(base + NvRegIrqStatus);
writel(np->events, base + NvRegIrqStatus);
} else {
np->events = readl(base + NvRegMSIXIrqStatus);
writel(np->events, base + NvRegMSIXIrqStatus);
}
if (!(np->events & np->irqmask))
return IRQ_NONE;
nv_msi_workaround(np);
if (napi_schedule_prep(&np->napi)) {
		/*
		 * Disable further irqs (msix not enabled with napi)
		 */
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
}
return IRQ_HANDLED;
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
unsigned long flags;
for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
writel(events, base + NvRegMSIXIrqStatus);
netdev_dbg(dev, "tx irq events: %08x\n", events);
if (!(events & np->irqmask))
break;
spin_lock_irqsave(&np->lock, flags);
nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
spin_unlock_irqrestore(&np->lock, flags);
if (unlikely(i > max_interrupt_work)) {
spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
pci_push(base);
if (!np->in_shutdown) {
np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
netdev_dbg(dev, "%s: too many iterations (%d)\n",
__func__, i);
break;
}
}
return IRQ_RETVAL(i);
}
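/*
 * nv_napi_poll: reap tx completions and process rx packets until the
 * budget is exhausted or the rings run dry, refilling rx buffers as
 * we go; interrupts are re-enabled once less than a full budget of
 * rx work remains.
 */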
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
struct fe_priv *np = container_of(napi, struct fe_priv, napi);
struct net_device *dev = np->dev;
u8 __iomem *base = get_hwbase(dev);
unsigned long flags;
int retcode;
int rx_count, tx_work = 0, rx_work = 0;
do {
if (!nv_optimized(np)) {
spin_lock_irqsave(&np->lock, flags);
tx_work += nv_tx_done(dev, np->tx_ring_size);
spin_unlock_irqrestore(&np->lock, flags);
rx_count = nv_rx_process(dev, budget - rx_work);
retcode = nv_alloc_rx(dev);
} else {
spin_lock_irqsave(&np->lock, flags);
tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
spin_unlock_irqrestore(&np->lock, flags);
rx_count = nv_rx_process_optimized(dev,
budget - rx_work);
retcode = nv_alloc_rx_optimized(dev);
}
} while (retcode == 0 &&
rx_count > 0 && (rx_work += rx_count) < budget);
if (retcode) {
spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock_irqrestore(&np->lock, flags);
}
nv_change_interrupt_mode(dev, tx_work + rx_work);
if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock_irqsave(&np->lock, flags);
nv_link_irq(dev);
spin_unlock_irqrestore(&np->lock, flags);
}
if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
spin_lock_irqsave(&np->lock, flags);
nv_linkchange(dev);
spin_unlock_irqrestore(&np->lock, flags);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown) {
np->nic_poll_irq = np->irqmask;
np->recover_error = 1;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
napi_complete(napi);
return rx_work;
}
if (rx_work < budget) {
/* re-enable interrupts
(msix not enabled in napi) */
napi_complete_done(napi, rx_work);
writel(np->irqmask, base + NvRegIrqMask);
}
return rx_work;
}
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
unsigned long flags;
for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
writel(events, base + NvRegMSIXIrqStatus);
netdev_dbg(dev, "rx irq events: %08x\n", events);
if (!(events & np->irqmask))
break;
if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
if (unlikely(nv_alloc_rx_optimized(dev))) {
spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock_irqrestore(&np->lock, flags);
}
}
if (unlikely(i > max_interrupt_work)) {
spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base);
if (!np->in_shutdown) {
np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
netdev_dbg(dev, "%s: too many iterations (%d)\n",
__func__, i);
break;
}
}
return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
unsigned long flags;
for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
writel(events, base + NvRegMSIXIrqStatus);
netdev_dbg(dev, "irq events: %08x\n", events);
if (!(events & np->irqmask))
break;
/* check tx in case we reached max loop limit in tx isr */
spin_lock_irqsave(&np->lock, flags);
nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
spin_unlock_irqrestore(&np->lock, flags);
if (events & NVREG_IRQ_LINK) {
spin_lock_irqsave(&np->lock, flags);
nv_link_irq(dev);
spin_unlock_irqrestore(&np->lock, flags);
}
if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
spin_lock_irqsave(&np->lock, flags);
nv_linkchange(dev);
spin_unlock_irqrestore(&np->lock, flags);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & NVREG_IRQ_RECOVER_ERROR) {
spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
if (!np->in_shutdown) {
np->nic_poll_irq |= NVREG_IRQ_OTHER;
np->recover_error = 1;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
break;
}
if (unlikely(i > max_interrupt_work)) {
spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
if (!np->in_shutdown) {
np->nic_poll_irq |= NVREG_IRQ_OTHER;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
netdev_dbg(dev, "%s: too many iterations (%d)\n",
__func__, i);
break;
}
}
return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
} else {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
}
pci_push(base);
if (!(events & NVREG_IRQ_TIMER))
return IRQ_RETVAL(0);
nv_msi_workaround(np);
spin_lock(&np->lock);
np->intr_test = 1;
spin_unlock(&np->lock);
return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
u8 __iomem *base = get_hwbase(dev);
int i;
u32 msixmap = 0;
/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
* MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
* the remaining 8 interrupts.
*/
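	/* For example, mapping vector 1 onto interrupt bits 0 and 2
	 * yields msixmap = 0x00000101 for NvRegMSIXMap0.
	 */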
for (i = 0; i < 8; i++) {
if ((irqmask >> i) & 0x1)
msixmap |= vector << (i << 2);
}
writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
msixmap = 0;
for (i = 0; i < 8; i++) {
if ((irqmask >> (i + 8)) & 0x1)
msixmap |= vector << (i << 2);
}
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
static int nv_request_irq(struct net_device *dev, int intr_test)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret;
int i;
irqreturn_t (*handler)(int foo, void *data);
if (intr_test) {
handler = nv_nic_irq_test;
} else {
if (nv_optimized(np))
handler = nv_nic_irq_optimized;
else
handler = nv_nic_irq;
}
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
np->msi_x_entry[i].entry = i;
ret = pci_enable_msix_range(np->pci_dev,
np->msi_x_entry,
np->msi_flags & NV_MSI_X_VECTORS_MASK,
np->msi_flags & NV_MSI_X_VECTORS_MASK);
if (ret > 0) {
np->msi_flags |= NV_MSI_X_ENABLED;
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
/* Request irq for rx handling */
sprintf(np->name_rx, "%s-rx", dev->name);
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
if (ret) {
netdev_info(dev,
"request_irq failed for rx %d\n",
ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
}
/* Request irq for tx handling */
sprintf(np->name_tx, "%s-tx", dev->name);
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
if (ret) {
netdev_info(dev,
"request_irq failed for tx %d\n",
ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_rx;
}
/* Request irq for link and timer handling */
sprintf(np->name_other, "%s-other", dev->name);
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
if (ret) {
netdev_info(dev,
"request_irq failed for link %d\n",
ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_tx;
}
/* map interrupts to their respective vector */
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
} else {
/* Request irq for all interrupts */
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
handler, IRQF_SHARED, dev->name, dev);
if (ret) {
netdev_info(dev,
"request_irq failed %d\n",
ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
}
/* map interrupts to vector 0 */
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
}
netdev_info(dev, "MSI-X enabled\n");
return 0;
}
}
if (np->msi_flags & NV_MSI_CAPABLE) {
ret = pci_enable_msi(np->pci_dev);
if (ret == 0) {
np->msi_flags |= NV_MSI_ENABLED;
ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
if (ret) {
netdev_info(dev, "request_irq failed %d\n",
ret);
pci_disable_msi(np->pci_dev);
np->msi_flags &= ~NV_MSI_ENABLED;
goto out_err;
}
/* map interrupts to vector 0 */
writel(0, base + NvRegMSIMap0);
writel(0, base + NvRegMSIMap1);
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
netdev_info(dev, "MSI enabled\n");
return 0;
}
}
if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
goto out_err;
return 0;
out_free_tx:
free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
return 1;
}
static void nv_free_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
int i;
if (np->msi_flags & NV_MSI_X_ENABLED) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
free_irq(np->msi_x_entry[i].vector, dev);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
} else {
free_irq(np->pci_dev->irq, dev);
if (np->msi_flags & NV_MSI_ENABLED) {
pci_disable_msi(np->pci_dev);
np->msi_flags &= ~NV_MSI_ENABLED;
}
}
}
static void nv_do_nic_poll(struct timer_list *t)
{
struct fe_priv *np = from_timer(np, t, nic_poll);
struct net_device *dev = np->dev;
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
unsigned long flags;
unsigned int irq = 0;
	/*
	 * First disable the irq(s) and then re-enable interrupts on the
	 * nic. We have to do this before calling nv_nic_irq because that
	 * handler may decide to do otherwise.
	 */
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
else
irq = np->pci_dev->irq;
mask = np->irqmask;
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
mask |= NVREG_IRQ_RX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
mask |= NVREG_IRQ_TX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
mask |= NVREG_IRQ_OTHER;
}
}
disable_irq_nosync_lockdep_irqsave(irq, &flags);
synchronize_irq(irq);
if (np->recover_error) {
np->recover_error = 0;
netdev_info(dev, "MAC in recoverable error state\n");
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
if (np->driver_data & DEV_HAS_POWER_CNTRL)
nv_mac_reset(dev);
nv_txrx_reset(dev);
/* drain rx queue */
nv_drain_rxtx(dev);
/* reinit driver view of the rx queue */
set_bufsize(dev);
if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
}
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
/* clear interrupts */
if (!(np->msi_flags & NV_MSI_X_ENABLED))
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
else
writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
/* restart rx engine */
nv_start_rxtx(dev);
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
}
writel(mask, base + NvRegIrqMask);
pci_push(base);
if (!using_multi_irqs(dev)) {
np->nic_poll_irq = 0;
if (nv_optimized(np))
nv_nic_irq_optimized(0, dev);
else
nv_nic_irq(0, dev);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
nv_nic_irq_rx(0, dev);
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
nv_nic_irq_tx(0, dev);
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
nv_nic_irq_other(0, dev);
}
}
enable_irq_lockdep_irqrestore(irq, &flags);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
nv_do_nic_poll(&np->nic_poll);
}
#endif
static void nv_do_stats_poll(struct timer_list *t)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = from_timer(np, t, stats_poll);
struct net_device *dev = np->dev;
	/* If the lock is currently taken, the stats are being refreshed
	 * and hence are fresh enough */
if (spin_trylock(&np->hwstats_lock)) {
nv_update_stats(dev);
spin_unlock(&np->hwstats_lock);
}
if (!np->in_shutdown)
mod_timer(&np->stats_poll,
round_jiffies(jiffies + STATS_INTERVAL));
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
strscpy(info->driver, DRV_NAME, sizeof(info->driver));
strscpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
struct fe_priv *np = netdev_priv(dev);
wolinfo->supported = WAKE_MAGIC;
spin_lock_irq(&np->lock);
if (np->wolenabled)
wolinfo->wolopts = WAKE_MAGIC;
spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 flags = 0;
if (wolinfo->wolopts == 0) {
np->wolenabled = 0;
} else if (wolinfo->wolopts & WAKE_MAGIC) {
np->wolenabled = 1;
flags = NVREG_WAKEUPFLAGS_ENABLE;
}
if (netif_running(dev)) {
spin_lock_irq(&np->lock);
writel(flags, base + NvRegWakeUpFlags);
spin_unlock_irq(&np->lock);
}
device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
return 0;
}
static int nv_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct fe_priv *np = netdev_priv(dev);
u32 speed, supported, advertising;
int adv;
spin_lock_irq(&np->lock);
cmd->base.port = PORT_MII;
if (!netif_running(dev)) {
/* We do not track link speed / duplex setting if the
* interface is disabled. Force a link check */
if (nv_update_linkspeed(dev)) {
netif_carrier_on(dev);
} else {
netif_carrier_off(dev);
}
}
if (netif_carrier_ok(dev)) {
switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
case NVREG_LINKSPEED_10:
speed = SPEED_10;
break;
case NVREG_LINKSPEED_100:
speed = SPEED_100;
break;
case NVREG_LINKSPEED_1000:
speed = SPEED_1000;
break;
default:
speed = -1;
break;
}
cmd->base.duplex = DUPLEX_HALF;
if (np->duplex)
cmd->base.duplex = DUPLEX_FULL;
} else {
speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
}
cmd->base.speed = speed;
cmd->base.autoneg = np->autoneg;
advertising = ADVERTISED_MII;
if (np->autoneg) {
advertising |= ADVERTISED_Autoneg;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
if (adv & ADVERTISE_10HALF)
advertising |= ADVERTISED_10baseT_Half;
if (adv & ADVERTISE_10FULL)
advertising |= ADVERTISED_10baseT_Full;
if (adv & ADVERTISE_100HALF)
advertising |= ADVERTISED_100baseT_Half;
if (adv & ADVERTISE_100FULL)
advertising |= ADVERTISED_100baseT_Full;
if (np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
if (adv & ADVERTISE_1000FULL)
advertising |= ADVERTISED_1000baseT_Full;
}
}
supported = (SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_MII);
if (np->gigabit == PHY_GIGABIT)
supported |= SUPPORTED_1000baseT_Full;
cmd->base.phy_address = np->phyaddr;
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);
/* ignore maxtxpkt, maxrxpkt for now */
spin_unlock_irq(&np->lock);
return 0;
}
static int nv_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct fe_priv *np = netdev_priv(dev);
u32 speed = cmd->base.speed;
u32 advertising;
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
if (cmd->base.port != PORT_MII)
return -EINVAL;
if (cmd->base.phy_address != np->phyaddr) {
/* TODO: support switching between multiple phys. Should be
* trivial, but not enabled due to lack of test hardware. */
return -EINVAL;
}
if (cmd->base.autoneg == AUTONEG_ENABLE) {
u32 mask;
mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
if (np->gigabit == PHY_GIGABIT)
mask |= ADVERTISED_1000baseT_Full;
if ((advertising & mask) == 0)
return -EINVAL;
} else if (cmd->base.autoneg == AUTONEG_DISABLE) {
		/* Note: with autonegotiation disabled, forcing speed 1000 is
		 * intentionally forbidden - no one should need that. */
if (speed != SPEED_10 && speed != SPEED_100)
return -EINVAL;
if (cmd->base.duplex != DUPLEX_HALF &&
cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else {
return -EINVAL;
}
netif_carrier_off(dev);
if (netif_running(dev)) {
unsigned long flags;
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
/* with plain spinlock lockdep complains */
spin_lock_irqsave(&np->lock, flags);
/* stop engines */
/* FIXME:
* this can take some time, and interrupts are disabled
* due to spin_lock_irqsave, but let's hope no daemon
* is going to change the settings very often...
* Worst case:
* NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which adds up to approximately one second
*/
nv_stop_rxtx(dev);
spin_unlock_irqrestore(&np->lock, flags);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
if (cmd->base.autoneg == AUTONEG_ENABLE) {
int adv, bmcr;
np->autoneg = 1;
/* advertise only what has been requested */
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (advertising & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (advertising & ADVERTISED_10baseT_Full)
adv |= ADVERTISE_10FULL;
if (advertising & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (advertising & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
adv |= ADVERTISE_PAUSE_ASYM;
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
if (np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
if (advertising & ADVERTISED_1000baseT_Full)
adv |= ADVERTISE_1000FULL;
mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
if (netif_running(dev))
netdev_info(dev, "link down\n");
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
bmcr |= BMCR_ANENABLE;
/* reset the phy in order for settings to stick,
* and cause autoneg to start */
if (phy_reset(dev, bmcr)) {
netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
}
} else {
int adv, bmcr;
np->autoneg = 0;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF)
adv |= ADVERTISE_10HALF;
if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL)
adv |= ADVERTISE_10FULL;
if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF)
adv |= ADVERTISE_100HALF;
if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL)
adv |= ADVERTISE_100FULL;
np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
}
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
adv |= ADVERTISE_PAUSE_ASYM;
np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
np->fixed_mode = adv;
if (np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
bmcr |= BMCR_FULLDPLX;
if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
bmcr |= BMCR_SPEED100;
if (np->phy_oui == PHY_OUI_MARVELL) {
/* reset the phy in order for forced mode settings to stick */
if (phy_reset(dev, bmcr)) {
netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
if (netif_running(dev)) {
/* Wait a bit and then reconfigure the nic. */
udelay(10);
nv_linkchange(dev);
}
}
}
if (netif_running(dev)) {
nv_start_rxtx(dev);
nv_enable_irq(dev);
}
return 0;
}
#define FORCEDETH_REGS_VER 1
static int nv_get_regs_len(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
return np->register_size;
}
static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 *rbuf = buf;
int i;
regs->version = FORCEDETH_REGS_VER;
spin_lock_irq(&np->lock);
for (i = 0; i < np->register_size/sizeof(u32); i++)
rbuf[i] = readl(base + i*sizeof(u32));
spin_unlock_irq(&np->lock);
}
static int nv_nway_reset(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int ret;
if (np->autoneg) {
int bmcr;
netif_carrier_off(dev);
if (netif_running(dev)) {
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
netdev_info(dev, "link down\n");
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
if (phy_reset(dev, bmcr)) {
netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
}
if (netif_running(dev)) {
nv_start_rxtx(dev);
nv_enable_irq(dev);
}
ret = 0;
} else {
ret = -EINVAL;
}
return ret;
}
static void nv_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct fe_priv *np = netdev_priv(dev);
ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
ring->rx_pending = np->rx_ring_size;
ring->tx_pending = np->tx_ring_size;
}
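/*
 * nv_set_ringparam: resize the rx/tx rings. The new descriptor ring
 * and skb maps are allocated first, so the old rings are only torn
 * down once every allocation has succeeded.
 */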
static int nv_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
dma_addr_t ring_addr;
if (ring->rx_pending < RX_RING_MIN ||
ring->tx_pending < TX_RING_MIN ||
ring->rx_mini_pending != 0 ||
ring->rx_jumbo_pending != 0 ||
(np->desc_ver == DESC_VER_1 &&
(ring->rx_pending > RING_MAX_DESC_VER_1 ||
ring->tx_pending > RING_MAX_DESC_VER_1)) ||
(np->desc_ver != DESC_VER_1 &&
(ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
return -EINVAL;
}
/* allocate new rings */
if (!nv_optimized(np)) {
rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
sizeof(struct ring_desc) *
(ring->rx_pending +
ring->tx_pending),
&ring_addr, GFP_ATOMIC);
} else {
rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
sizeof(struct ring_desc_ex) *
(ring->rx_pending +
ring->tx_pending),
&ring_addr, GFP_ATOMIC);
}
rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map),
GFP_KERNEL);
tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map),
GFP_KERNEL);
if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
/* fall back to old rings */
if (!nv_optimized(np)) {
if (rxtx_ring)
dma_free_coherent(&np->pci_dev->dev,
sizeof(struct ring_desc) *
(ring->rx_pending +
ring->tx_pending),
rxtx_ring, ring_addr);
} else {
if (rxtx_ring)
dma_free_coherent(&np->pci_dev->dev,
sizeof(struct ring_desc_ex) *
(ring->rx_pending +
ring->tx_pending),
rxtx_ring, ring_addr);
}
kfree(rx_skbuff);
kfree(tx_skbuff);
goto exit;
}
if (netif_running(dev)) {
nv_disable_irq(dev);
nv_napi_disable(dev);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain queues */
nv_drain_rxtx(dev);
/* delete queues */
free_rings(dev);
}
/* set new values */
np->rx_ring_size = ring->rx_pending;
np->tx_ring_size = ring->tx_pending;
if (!nv_optimized(np)) {
np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
np->rx_skb = (struct nv_skb_map *)rx_skbuff;
np->tx_skb = (struct nv_skb_map *)tx_skbuff;
np->ring_addr = ring_addr;
memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
if (netif_running(dev)) {
/* reinit driver view of the queues */
set_bufsize(dev);
if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
}
/* reinit nic view of the queues */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
/* restart engines */
nv_start_rxtx(dev);
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
nv_napi_enable(dev);
nv_enable_irq(dev);
}
return 0;
exit:
return -ENOMEM;
}
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
struct fe_priv *np = netdev_priv(dev);
pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
struct fe_priv *np = netdev_priv(dev);
int adv, bmcr;
if ((!np->autoneg && np->duplex == 0) ||
(np->autoneg && !pause->autoneg && np->duplex == 0)) {
netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
return -EINVAL;
}
if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
netdev_info(dev, "hardware does not support tx pause frames\n");
return -EINVAL;
}
netif_carrier_off(dev);
if (netif_running(dev)) {
nv_disable_irq(dev);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rxtx(dev);
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
if (pause->rx_pause)
np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
if (pause->tx_pause)
np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
if (np->autoneg && pause->autoneg) {
np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
adv |= ADVERTISE_PAUSE_ASYM;
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
if (netif_running(dev))
netdev_info(dev, "link down\n");
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
} else {
np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
if (pause->rx_pause)
np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
if (pause->tx_pause)
np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
if (!netif_running(dev))
nv_update_linkspeed(dev);
else
nv_update_pause(dev, np->pause_flags);
}
if (netif_running(dev)) {
nv_start_rxtx(dev);
nv_enable_irq(dev);
}
return 0;
}
static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = netdev_priv(dev);
unsigned long flags;
u32 miicontrol;
int err, retval = 0;
spin_lock_irqsave(&np->lock, flags);
miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
if (features & NETIF_F_LOOPBACK) {
if (miicontrol & BMCR_LOOPBACK) {
spin_unlock_irqrestore(&np->lock, flags);
netdev_info(dev, "Loopback already enabled\n");
return 0;
}
nv_disable_irq(dev);
/* Turn on loopback mode */
miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
if (err) {
retval = PHY_ERROR;
spin_unlock_irqrestore(&np->lock, flags);
phy_init(dev);
} else {
if (netif_running(dev)) {
/* Force 1000 Mbps full-duplex */
nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
1);
/* Force link up */
netif_carrier_on(dev);
}
spin_unlock_irqrestore(&np->lock, flags);
netdev_info(dev,
"Internal PHY loopback mode enabled.\n");
}
} else {
if (!(miicontrol & BMCR_LOOPBACK)) {
spin_unlock_irqrestore(&np->lock, flags);
netdev_info(dev, "Loopback already disabled\n");
return 0;
}
nv_disable_irq(dev);
/* Turn off loopback */
spin_unlock_irqrestore(&np->lock, flags);
netdev_info(dev, "Internal PHY loopback mode disabled.\n");
phy_init(dev);
}
msleep(500);
spin_lock_irqsave(&np->lock, flags);
nv_enable_irq(dev);
spin_unlock_irqrestore(&np->lock, flags);
return retval;
}
static netdev_features_t nv_fix_features(struct net_device *dev,
netdev_features_t features)
{
/* vlan is dependent on rx checksum offload */
if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
features |= NETIF_F_RXCSUM;
return features;
}
static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = get_nvpriv(dev);
spin_lock_irq(&np->lock);
if (features & NETIF_F_HW_VLAN_CTAG_RX)
np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
else
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
if (features & NETIF_F_HW_VLAN_CTAG_TX)
np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
else
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
spin_unlock_irq(&np->lock);
}
static int nv_set_features(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
netdev_features_t changed = dev->features ^ features;
int retval;
if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
retval = nv_set_loopback(dev, features);
if (retval != 0)
return retval;
}
if (changed & NETIF_F_RXCSUM) {
spin_lock_irq(&np->lock);
if (features & NETIF_F_RXCSUM)
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
else
np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
if (netif_running(dev))
writel(np->txrxctl_bits, base + NvRegTxRxControl);
spin_unlock_irq(&np->lock);
}
if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
nv_vlan_mode(dev, features);
return 0;
}
static int nv_get_sset_count(struct net_device *dev, int sset)
{
struct fe_priv *np = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
if (np->driver_data & DEV_HAS_TEST_EXTENDED)
return NV_TEST_COUNT_EXTENDED;
else
return NV_TEST_COUNT_BASE;
case ETH_SS_STATS:
if (np->driver_data & DEV_HAS_STATISTICS_V3)
return NV_DEV_STATISTICS_V3_COUNT;
else if (np->driver_data & DEV_HAS_STATISTICS_V2)
return NV_DEV_STATISTICS_V2_COUNT;
else if (np->driver_data & DEV_HAS_STATISTICS_V1)
return NV_DEV_STATISTICS_V1_COUNT;
else
return 0;
default:
return -EOPNOTSUPP;
}
}
static void nv_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *buffer)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
spin_lock_bh(&np->hwstats_lock);
nv_update_stats(dev);
memcpy(buffer, &np->estats,
nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
spin_unlock_bh(&np->hwstats_lock);
}
static int nv_link_test(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int mii_status;
mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
/* check phy link status */
if (!(mii_status & BMSR_LSTATUS))
return 0;
else
return 1;
}
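/* Register read/write self-test: for each entry in nv_registers_test[],
 * the maskable bits are toggled by XOR, written back and re-read; a
 * mismatch fails the test. The original value is restored afterwards.
 * Returns 1 on pass, 0 on failure.
 */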
static int nv_register_test(struct net_device *dev)
{
u8 __iomem *base = get_hwbase(dev);
int i = 0;
u32 orig_read, new_read;
do {
orig_read = readl(base + nv_registers_test[i].reg);
/* xor with mask to toggle bits */
orig_read ^= nv_registers_test[i].mask;
writel(orig_read, base + nv_registers_test[i].reg);
new_read = readl(base + nv_registers_test[i].reg);
if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
return 0;
/* restore original value */
orig_read ^= nv_registers_test[i].mask;
writel(orig_read, base + nv_registers_test[i].reg);
} while (nv_registers_test[++i].reg != 0);
return 1;
}
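/* Interrupt self-test: temporarily switch to a single test vector,
 * enable only the timer interrupt and wait for the ISR to set
 * np->intr_test. Returns 1 on pass, 2 if no interrupt was observed, and
 * 0 if the test irq could not be set up; the original irq configuration
 * is restored before returning.
 */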
static int nv_interrupt_test(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret = 1;
int testcnt;
u32 save_msi_flags, save_poll_interval = 0;
if (netif_running(dev)) {
/* free current irq */
nv_free_irq(dev);
save_poll_interval = readl(base+NvRegPollingInterval);
}
/* flag to test interrupt handler */
np->intr_test = 0;
/* setup test irq */
save_msi_flags = np->msi_flags;
np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
np->msi_flags |= 0x001; /* setup 1 vector */
if (nv_request_irq(dev, 1))
return 0;
/* setup timer interrupt */
writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
/* wait for at least one interrupt */
msleep(100);
spin_lock_irq(&np->lock);
/* flag should be set within ISR */
testcnt = np->intr_test;
if (!testcnt)
ret = 2;
nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
if (!(np->msi_flags & NV_MSI_X_ENABLED))
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
else
writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
spin_unlock_irq(&np->lock);
nv_free_irq(dev);
np->msi_flags = save_msi_flags;
if (netif_running(dev)) {
writel(save_poll_interval, base + NvRegPollingInterval);
writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
/* restore original irq */
if (nv_request_irq(dev, 0))
return 0;
}
return ret;
}
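/* MAC-level loopback self-test: put the packet filter into loopback
 * mode, transmit one ETH_DATA_LEN frame filled with a 0x00..0xff byte
 * pattern through descriptor 0, and verify that the same payload shows
 * up on the receive ring. Returns 1 on pass, 0 on failure.
 */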
static int nv_loopback_test(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
struct sk_buff *tx_skb, *rx_skb;
dma_addr_t test_dma_addr;
u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
u32 flags;
int len, i, pkt_len;
u8 *pkt_data;
u32 filter_flags = 0;
u32 misc1_flags = 0;
int ret = 1;
if (netif_running(dev)) {
nv_disable_irq(dev);
filter_flags = readl(base + NvRegPacketFilterFlags);
misc1_flags = readl(base + NvRegMisc1);
} else {
nv_txrx_reset(dev);
}
/* reinit driver view of the rx queue */
set_bufsize(dev);
nv_init_ring(dev);
/* setup hardware for loopback */
writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
/* restart rx engine */
nv_start_rxtx(dev);
/* setup packet for tx */
pkt_len = ETH_DATA_LEN;
tx_skb = netdev_alloc_skb(dev, pkt_len);
if (!tx_skb) {
ret = 0;
goto out;
}
test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
skb_tailroom(tx_skb),
DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&np->pci_dev->dev,
				       test_dma_addr))) {
		dev_kfree_skb_any(tx_skb);
		/* treat a failed mapping as a test failure, not a pass */
		ret = 0;
		goto out;
	}
pkt_data = skb_put(tx_skb, pkt_len);
for (i = 0; i < pkt_len; i++)
pkt_data[i] = (u8)(i & 0xff);
if (!nv_optimized(np)) {
np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
} else {
np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
}
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(get_hwbase(dev));
msleep(500);
/* check for rx of the packet */
if (!nv_optimized(np)) {
flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
} else {
flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
}
if (flags & NV_RX_AVAIL) {
ret = 0;
} else if (np->desc_ver == DESC_VER_1) {
if (flags & NV_RX_ERROR)
ret = 0;
} else {
if (flags & NV_RX2_ERROR)
ret = 0;
}
if (ret) {
if (len != pkt_len) {
ret = 0;
} else {
rx_skb = np->rx_skb[0].skb;
for (i = 0; i < pkt_len; i++) {
if (rx_skb->data[i] != (u8)(i & 0xff)) {
ret = 0;
break;
}
}
}
}
dma_unmap_single(&np->pci_dev->dev, test_dma_addr,
(skb_end_pointer(tx_skb) - tx_skb->data),
DMA_TO_DEVICE);
dev_kfree_skb_any(tx_skb);
out:
/* stop engines */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain rx queue */
nv_drain_rxtx(dev);
if (netif_running(dev)) {
writel(misc1_flags, base + NvRegMisc1);
writel(filter_flags, base + NvRegPacketFilterFlags);
nv_enable_irq(dev);
}
return ret;
}
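/* ethtool self-test entry point. buffer[0..3] report the link, register,
 * interrupt and (where supported) loopback results, 0 = pass and 1 = fail;
 * everything past the link test runs only for an offline test, with the
 * device fully stopped and reinitialized around it. Typically invoked via
 * e.g. (interface name assumed):
 *
 *	ethtool -t eth0 offline
 */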
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int result, count;
count = nv_get_sset_count(dev, ETH_SS_TEST);
memset(buffer, 0, count * sizeof(u64));
if (!nv_link_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
buffer[0] = 1;
}
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (netif_running(dev)) {
netif_stop_queue(dev);
nv_napi_disable(dev);
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
nv_disable_hw_interrupts(dev, np->irqmask);
if (!(np->msi_flags & NV_MSI_X_ENABLED))
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
else
writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
/* stop engines */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain rx queue */
nv_drain_rxtx(dev);
spin_unlock_irq(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
}
if (!nv_register_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
buffer[1] = 1;
}
result = nv_interrupt_test(dev);
if (result != 1) {
test->flags |= ETH_TEST_FL_FAILED;
buffer[2] = 1;
}
if (result == 0) {
/* bail out */
return;
}
if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
buffer[3] = 1;
}
if (netif_running(dev)) {
/* reinit driver view of the rx queue */
set_bufsize(dev);
if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
}
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
/* restart rx engine */
nv_start_rxtx(dev);
netif_start_queue(dev);
nv_napi_enable(dev);
nv_enable_hw_interrupts(dev, np->irqmask);
}
}
}
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
break;
case ETH_SS_TEST:
memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
break;
}
}
static const struct ethtool_ops ops = {
.get_drvinfo = nv_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_wol = nv_get_wol,
.set_wol = nv_set_wol,
.get_regs_len = nv_get_regs_len,
.get_regs = nv_get_regs,
.nway_reset = nv_nway_reset,
.get_ringparam = nv_get_ringparam,
.set_ringparam = nv_set_ringparam,
.get_pauseparam = nv_get_pauseparam,
.set_pauseparam = nv_set_pauseparam,
.get_strings = nv_get_strings,
.get_ethtool_stats = nv_get_ethtool_stats,
.get_sset_count = nv_get_sset_count,
.self_test = nv_self_test,
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = nv_get_link_ksettings,
.set_link_ksettings = nv_set_link_ksettings,
};
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i;
u32 tx_ctrl, mgmt_sema;
for (i = 0; i < 10; i++) {
mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
break;
msleep(500);
}
if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
return 0;
for (i = 0; i < 2; i++) {
tx_ctrl = readl(base + NvRegTransmitterControl);
tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
writel(tx_ctrl, base + NvRegTransmitterControl);
/* verify that semaphore was acquired */
tx_ctrl = readl(base + NvRegTransmitterControl);
if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
np->mgmt_sema = 1;
return 1;
} else
udelay(50);
}
return 0;
}
static void nv_mgmt_release_sema(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 tx_ctrl;
if (np->driver_data & DEV_HAS_MGMT_UNIT) {
if (np->mgmt_sema) {
tx_ctrl = readl(base + NvRegTransmitterControl);
tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
writel(tx_ctrl, base + NvRegTransmitterControl);
}
}
}
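/* Query the management unit firmware version: toggle the DATA_START bit
 * and wait up to 5 seconds for the DATA_READY bit to flip, then read
 * NvRegMgmtUnitVersion. Returns 1 on success, 0 on timeout or error.
 */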
static int nv_mgmt_get_version(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 data_ready = readl(base + NvRegTransmitterControl);
u32 data_ready2 = 0;
unsigned long start;
int ready = 0;
writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
start = jiffies;
while (time_before(jiffies, start + 5*HZ)) {
data_ready2 = readl(base + NvRegTransmitterControl);
if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
ready = 1;
break;
}
schedule_timeout_uninterruptible(1);
}
if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
return 0;
np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
return 1;
}
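/* Bring the device up: power up the PHY, reset the MAC to a known state,
 * initialize and publish the descriptor rings, program link speed,
 * watermarks, slot time and the polling interval, request the irq(s),
 * then force one link speed update before starting the rx/tx engines,
 * the queue and napi.
 */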
static int nv_open(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret = 1;
int oom, i;
u32 low;
/* power up phy */
mii_rw(dev, np->phyaddr, MII_BMCR,
mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
nv_txrx_gate(dev, false);
/* erase previous misconfiguration */
if (np->driver_data & DEV_HAS_POWER_CNTRL)
nv_mac_reset(dev);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(0, base + NvRegPacketFilterFlags);
writel(0, base + NvRegTransmitterControl);
writel(0, base + NvRegReceiverControl);
writel(0, base + NvRegAdapterControl);
if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
/* initialize descriptor rings */
set_bufsize(dev);
oom = nv_init_ring(dev);
writel(0, base + NvRegLinkSpeed);
writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
nv_txrx_reset(dev);
writel(0, base + NvRegUnknownSetupReg6);
np->in_shutdown = 0;
/* give hw rings */
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
writel(np->linkspeed, base + NvRegLinkSpeed);
if (np->desc_ver == DESC_VER_1)
writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
else
writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
writel(np->txrxctl_bits, base + NvRegTxRxControl);
writel(np->vlanctl_bits, base + NvRegVlanControl);
pci_push(base);
writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
if (reg_delay(dev, NvRegUnknownSetupReg5,
NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
netdev_info(dev,
"%s: SetupReg5, Bit 31 remained off\n", __func__);
writel(0, base + NvRegMIIMask);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
get_random_bytes(&low, sizeof(low));
low &= NVREG_SLOTTIME_MASK;
if (np->desc_ver == DESC_VER_1) {
writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
} else {
if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
/* setup legacy backoff */
writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
} else {
writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
nv_gear_backoff_reseed(dev);
}
}
writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
if (poll_interval == -1) {
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
else
writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
} else
writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
base + NvRegAdapterControl);
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
if (np->wolenabled)
writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
i = readl(base + NvRegPowerState);
if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
pci_push(base);
udelay(10);
writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
if (nv_request_irq(dev, 0))
goto out_drain;
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
*/
readl(base + NvRegMIIStatus);
writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
/* set linkspeed to invalid value, thus force nv_update_linkspeed
* to init hw */
np->linkspeed = 0;
ret = nv_update_linkspeed(dev);
nv_start_rxtx(dev);
netif_start_queue(dev);
nv_napi_enable(dev);
if (ret) {
netif_carrier_on(dev);
} else {
netdev_info(dev, "no link during initialization\n");
netif_carrier_off(dev);
}
if (oom)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
/* start statistics timer */
if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
mod_timer(&np->stats_poll,
round_jiffies(jiffies + STATS_INTERVAL));
spin_unlock_irq(&np->lock);
/* If the loopback feature was set while the device was down, make sure
* that it's set correctly now.
*/
if (dev->features & NETIF_F_LOOPBACK)
nv_set_loopback(dev, dev->features);
return 0;
out_drain:
nv_drain_rxtx(dev);
return ret;
}
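/* Tear down in roughly the reverse order of nv_open(): flag shutdown,
 * quiesce napi and the timers, stop the engines with pause frames
 * disabled, mask hardware interrupts, free the irq and drain the rings.
 * If WoL is armed the receiver is left running so wake packets can be
 * seen; otherwise the PHY is powered down.
 */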
static int nv_close(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base;
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
spin_unlock_irq(&np->lock);
nv_napi_disable(dev);
synchronize_irq(np->pci_dev->irq);
del_timer_sync(&np->oom_kick);
del_timer_sync(&np->nic_poll);
del_timer_sync(&np->stats_poll);
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* disable interrupts on the nic or we will lock up */
base = get_hwbase(dev);
nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
spin_unlock_irq(&np->lock);
nv_free_irq(dev);
nv_drain_rxtx(dev);
if (np->wolenabled || !phy_power_down) {
nv_txrx_gate(dev, false);
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
nv_start_rx(dev);
} else {
/* power down phy */
mii_rw(dev, np->phyaddr, MII_BMCR,
mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
nv_txrx_gate(dev, true);
}
/* FIXME: power down nic */
return 0;
}
static const struct net_device_ops nv_netdev_ops = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
.ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
.ndo_fix_features = nv_fix_features,
.ndo_set_features = nv_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nv_set_mac_address,
.ndo_set_rx_mode = nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = nv_poll_controller,
#endif
};
static const struct net_device_ops nv_netdev_ops_optimized = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
.ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit_optimized,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
.ndo_fix_features = nv_fix_features,
.ndo_set_features = nv_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nv_set_mac_address,
.ndo_set_rx_mode = nv_set_multicast,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = nv_poll_controller,
#endif
};
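/* PCI probe: map the register window, pick the descriptor format and
 * feature set from the device table, recover the (possibly byte-reversed)
 * MAC address, take the NIC out of low power mode, locate the PHY on the
 * MII bus and register the netdev.
 */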
static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
struct net_device *dev;
struct fe_priv *np;
unsigned long addr;
u8 __iomem *base;
int err, i;
u32 powerstate, txreg;
u32 phystate_orig = 0, phystate;
int phyinitialized = 0;
static int printed_version;
u8 mac[ETH_ALEN];
if (!printed_version++)
pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
FORCEDETH_VERSION);
dev = alloc_etherdev(sizeof(struct fe_priv));
err = -ENOMEM;
if (!dev)
goto out;
np = netdev_priv(dev);
np->dev = dev;
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
spin_lock_init(&np->hwstats_lock);
SET_NETDEV_DEV(dev, &pci_dev->dev);
u64_stats_init(&np->swstats_rx_syncp);
u64_stats_init(&np->swstats_tx_syncp);
np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
if (!np->txrx_stats) {
pr_err("np->txrx_stats, alloc memory error.\n");
err = -ENOMEM;
goto out_alloc_percpu;
}
timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE);
err = pci_enable_device(pci_dev);
if (err)
goto out_free;
pci_set_master(pci_dev);
err = pci_request_regions(pci_dev, DRV_NAME);
if (err < 0)
goto out_disable;
if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
np->register_size = NV_PCI_REGSZ_VER3;
else if (id->driver_data & DEV_HAS_STATISTICS_V1)
np->register_size = NV_PCI_REGSZ_VER2;
else
np->register_size = NV_PCI_REGSZ_VER1;
err = -EINVAL;
addr = 0;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
pci_resource_len(pci_dev, i) >= np->register_size) {
addr = pci_resource_start(pci_dev, i);
break;
}
}
if (i == DEVICE_COUNT_RESOURCE) {
dev_info(&pci_dev->dev, "Couldn't find register window\n");
goto out_relreg;
}
/* copy of driver data */
np->driver_data = id->driver_data;
/* copy of device id */
np->device_id = id->device;
/* handle different descriptor versions */
if (id->driver_data & DEV_HAS_HIGH_DMA) {
/* packet format 3: supports 40-bit addressing */
np->desc_ver = DESC_VER_3;
np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
if (dma_64bit) {
if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(39)))
dev_info(&pci_dev->dev,
"64-bit DMA failed, using 32-bit addressing\n");
else
dev->features |= NETIF_F_HIGHDMA;
}
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2;
np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
} else {
/* original packet format */
np->desc_ver = DESC_VER_1;
np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
}
np->pkt_limit = NV_PKTLIMIT_1;
if (id->driver_data & DEV_HAS_LARGEDESC)
np->pkt_limit = NV_PKTLIMIT_2;
if (id->driver_data & DEV_HAS_CHECKSUM) {
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM;
}
np->vlanctl_bits = 0;
if (id->driver_data & DEV_HAS_VLAN) {
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX;
}
dev->features |= dev->hw_features;
/* Add loopback capability to the device. */
dev->hw_features |= NETIF_F_LOOPBACK;
/* MTU range: 64 - 1500 or 9100 */
dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
dev->max_mtu = np->pkt_limit;
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
}
err = -ENOMEM;
np->base = ioremap(addr, np->register_size);
if (!np->base)
goto out_relreg;
np->rx_ring_size = RX_RING_DEFAULT;
np->tx_ring_size = TX_RING_DEFAULT;
if (!nv_optimized(np)) {
np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
sizeof(struct ring_desc) *
(np->rx_ring_size +
np->tx_ring_size),
&np->ring_addr,
GFP_KERNEL);
if (!np->rx_ring.orig)
goto out_unmap;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
sizeof(struct ring_desc_ex) *
(np->rx_ring_size +
np->tx_ring_size),
&np->ring_addr, GFP_KERNEL);
if (!np->rx_ring.ex)
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
if (!np->rx_skb || !np->tx_skb)
goto out_freering;
if (!nv_optimized(np))
dev->netdev_ops = &nv_netdev_ops;
else
dev->netdev_ops = &nv_netdev_ops_optimized;
netif_napi_add(dev, &np->napi, nv_napi_poll);
dev->ethtool_ops = &ops;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
pci_set_drvdata(pci_dev, dev);
/* read the mac address */
base = get_hwbase(dev);
np->orig_mac[0] = readl(base + NvRegMacAddrA);
np->orig_mac[1] = readl(base + NvRegMacAddrB);
/* check the workaround bit for correct mac address order */
txreg = readl(base + NvRegTransmitPoll);
if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
/* mac address is already in correct order */
mac[0] = (np->orig_mac[0] >> 0) & 0xff;
mac[1] = (np->orig_mac[0] >> 8) & 0xff;
mac[2] = (np->orig_mac[0] >> 16) & 0xff;
mac[3] = (np->orig_mac[0] >> 24) & 0xff;
mac[4] = (np->orig_mac[1] >> 0) & 0xff;
mac[5] = (np->orig_mac[1] >> 8) & 0xff;
} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
/* mac address is already in correct order */
mac[0] = (np->orig_mac[0] >> 0) & 0xff;
mac[1] = (np->orig_mac[0] >> 8) & 0xff;
mac[2] = (np->orig_mac[0] >> 16) & 0xff;
mac[3] = (np->orig_mac[0] >> 24) & 0xff;
mac[4] = (np->orig_mac[1] >> 0) & 0xff;
mac[5] = (np->orig_mac[1] >> 8) & 0xff;
		/*
		 * Store the original mac address back in reversed byte order:
		 * the workaround flag is cleared during a low power
		 * transition, so the reversed address must always be the one
		 * put back.
		 */
np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) +
(mac[3] << 16) + (mac[2] << 24);
np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8);
} else {
/* need to reverse mac address to correct order */
mac[0] = (np->orig_mac[1] >> 8) & 0xff;
mac[1] = (np->orig_mac[1] >> 0) & 0xff;
mac[2] = (np->orig_mac[0] >> 24) & 0xff;
mac[3] = (np->orig_mac[0] >> 16) & 0xff;
mac[4] = (np->orig_mac[0] >> 8) & 0xff;
mac[5] = (np->orig_mac[0] >> 0) & 0xff;
writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
dev_dbg(&pci_dev->dev,
"%s: set workaround bit for reversed mac addr\n",
__func__);
}
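	/*
	 * Illustration (values assumed): with MAC 00:11:22:33:44:55 in
	 * correct order, NvRegMacAddrA reads 0x33221100 and NvRegMacAddrB
	 * reads 0x00005544; older parts without the workaround bit store
	 * the same address byte-reversed instead.
	 */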
if (is_valid_ether_addr(mac)) {
eth_hw_addr_set(dev, mac);
} else {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
*/
dev_err(&pci_dev->dev,
"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
mac);
eth_hw_addr_random(dev);
dev_err(&pci_dev->dev,
"Using random MAC address: %pM\n", dev->dev_addr);
}
/* set mac address */
nv_copy_mac_to_hw(dev);
/* disable WOL */
writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
device_set_wakeup_enable(&pci_dev->dev, false);
if (id->driver_data & DEV_HAS_POWER_CNTRL) {
/* take phy and nic out of low power mode */
powerstate = readl(base + NvRegPowerState2);
powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
pci_dev->revision >= 0xA3)
powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
writel(powerstate, base + NvRegPowerState2);
}
if (np->desc_ver == DESC_VER_1)
np->tx_flags = NV_TX_VALID;
else
np->tx_flags = NV_TX2_VALID;
np->msi_flags = 0;
if ((id->driver_data & DEV_HAS_MSI) && msi)
np->msi_flags |= NV_MSI_CAPABLE;
if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		/* MSI-X has had reported issues when the irqmask is
		 * modified, as happens with napi; therefore, disable
		 * it for now.
		 */
#if 0
np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
}
if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
np->irqmask = NVREG_IRQMASK_CPU;
if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
np->msi_flags |= 0x0001;
} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
!(id->driver_data & DEV_NEED_TIMERIRQ)) {
/* start off in throughput mode */
np->irqmask = NVREG_IRQMASK_THROUGHPUT;
/* remove support for msix mode */
np->msi_flags &= ~NV_MSI_X_CAPABLE;
} else {
optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
np->irqmask = NVREG_IRQMASK_THROUGHPUT;
if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
np->msi_flags |= 0x0003;
}
if (id->driver_data & DEV_NEED_TIMERIRQ)
np->irqmask |= NVREG_IRQ_TIMER;
if (id->driver_data & DEV_NEED_LINKTIMER) {
np->need_linktimer = 1;
np->link_timeout = jiffies + LINK_TIMEOUT;
} else {
np->need_linktimer = 0;
}
	/* Limit the number of outstanding tx descriptors to work around a hw bug */
if (id->driver_data & DEV_NEED_TX_LIMIT) {
np->tx_limit = 1;
if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
pci_dev->revision >= 0xA2)
np->tx_limit = 0;
}
/* clear phy state and temporarily halt phy interrupts */
writel(0, base + NvRegMIIMask);
phystate = readl(base + NvRegAdapterControl);
if (phystate & NVREG_ADAPTCTL_RUNNING) {
phystate_orig = 1;
phystate &= ~NVREG_ADAPTCTL_RUNNING;
writel(phystate, base + NvRegAdapterControl);
}
writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
if (id->driver_data & DEV_HAS_MGMT_UNIT) {
/* management unit running on the mac? */
if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
(readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
nv_mgmt_acquire_sema(dev) &&
nv_mgmt_get_version(dev)) {
np->mac_in_use = 1;
if (np->mgmt_version > 0)
np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
/* management unit setup the phy already? */
if (np->mac_in_use &&
((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
NVREG_XMITCTL_SYNC_PHY_INIT)) {
/* phy is inited by mgmt unit */
phyinitialized = 1;
} else {
/* we need to init the phy */
}
}
}
/* find a suitable phy */
for (i = 1; i <= 32; i++) {
int id1, id2;
int phyaddr = i & 0x1F;
spin_lock_irq(&np->lock);
id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
spin_unlock_irq(&np->lock);
if (id1 < 0 || id1 == 0xffff)
continue;
spin_lock_irq(&np->lock);
id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
spin_unlock_irq(&np->lock);
if (id2 < 0 || id2 == 0xffff)
continue;
np->phy_model = id2 & PHYID2_MODEL_MASK;
id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
np->phyaddr = phyaddr;
np->phy_oui = id1 | id2;
/* Realtek hardcoded phy id1 to all zero's on certain phys */
if (np->phy_oui == PHY_OUI_REALTEK2)
np->phy_oui = PHY_OUI_REALTEK;
/* Setup phy revision for Realtek */
if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
break;
}
if (i == 33) {
dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
goto out_error;
}
if (!phyinitialized) {
/* reset it */
phy_init(dev);
} else {
/* see if it is a gigabit phy */
u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT)
np->gigabit = PHY_GIGABIT;
}
/* set default link speed settings */
np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
np->duplex = 0;
np->autoneg = 1;
err = register_netdev(dev);
if (err) {
dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
goto out_error;
}
netif_carrier_off(dev);
/* Some NICs freeze when TX pause is enabled while NIC is
* down, and this stays across warm reboots. The sequence
* below should be enough to recover from that state.
*/
nv_update_pause(dev, 0);
nv_start_tx(dev);
nv_stop_tx(dev);
if (id->driver_data & DEV_HAS_VLAN)
nv_vlan_mode(dev, dev->features);
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "",
dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX) ?
"vlan " : "",
dev->features & (NETIF_F_LOOPBACK) ?
"loopback " : "",
id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
np->gigabit == PHY_GIGABIT ? "gbit " : "",
np->need_linktimer ? "lnktim " : "",
np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
np->desc_ver);
return 0;
out_error:
nv_mgmt_release_sema(dev);
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
out_freering:
free_rings(dev);
out_unmap:
iounmap(get_hwbase(dev));
out_relreg:
pci_release_regions(pci_dev);
out_disable:
pci_disable_device(pci_dev);
out_free:
free_percpu(np->txrx_stats);
out_alloc_percpu:
free_netdev(dev);
out:
return err;
}
static void nv_restore_phy(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u16 phy_reserved, mii_control;
if (np->phy_oui == PHY_OUI_REALTEK &&
np->phy_model == PHY_MODEL_REALTEK_8201 &&
phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
phy_reserved |= PHY_REALTEK_INIT8;
mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
/* restart auto negotiation */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
}
}
static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
/* special op: write back the misordered MAC address - otherwise
* the next nv_probe would see a wrong address.
*/
writel(np->orig_mac[0], base + NvRegMacAddrA);
writel(np->orig_mac[1], base + NvRegMacAddrB);
writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
base + NvRegTransmitPoll);
}
static void nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
struct fe_priv *np = netdev_priv(dev);
free_percpu(np->txrx_stats);
unregister_netdev(dev);
nv_restore_mac_addr(pci_dev);
/* restore any phy related changes */
nv_restore_phy(dev);
nv_mgmt_release_sema(dev);
/* free all structures */
free_rings(dev);
iounmap(get_hwbase(dev));
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
free_netdev(dev);
}
#ifdef CONFIG_PM_SLEEP
static int nv_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i;
if (netif_running(dev)) {
/* Gross. */
nv_close(dev);
}
netif_device_detach(dev);
/* save non-pci configuration space */
for (i = 0; i <= np->register_size/sizeof(u32); i++)
np->saved_config_space[i] = readl(base + i*sizeof(u32));
return 0;
}
static int nv_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i, rc = 0;
/* restore non-pci configuration space */
for (i = 0; i <= np->register_size/sizeof(u32); i++)
writel(np->saved_config_space[i], base+i*sizeof(u32));
if (np->driver_data & DEV_NEED_MSI_FIX)
pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
/* restore phy state, including autoneg */
phy_init(dev);
netif_device_attach(dev);
if (netif_running(dev)) {
rc = nv_open(dev);
nv_set_multicast(dev);
}
return rc;
}
static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
#define NV_PM_OPS (&nv_pm_ops)
#else
#define NV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static void nv_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct fe_priv *np = netdev_priv(dev);
if (netif_running(dev))
nv_close(dev);
/*
* Restore the MAC so a kernel started by kexec won't get confused.
* If we really go for poweroff, we must not restore the MAC,
* otherwise the MAC for WOL will be reversed at least on some boards.
*/
if (system_state != SYSTEM_POWER_OFF)
nv_restore_mac_addr(pdev);
pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * so only put the device into D3 if we really go for poweroff.
	 */
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, np->wolenabled);
pci_set_power_state(pdev, PCI_D3hot);
}
}
#else
#define nv_shutdown NULL
#endif /* CONFIG_PM */
static const struct pci_device_id pci_tbl[] = {
{ /* nForce Ethernet Controller */
PCI_DEVICE(0x10DE, 0x01C3),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce2 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0066),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x00D6),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0086),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x008C),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x00E6),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x00DF),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* CK804 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0056),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* CK804 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0057),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP04 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0037),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP04 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0038),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0268),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0269),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0372),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0373),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x03E5),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x03E6),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x03EE),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x03EF),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0450),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0451),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0452),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0453),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x054C),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x054D),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x054E),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x054F),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x07DC),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x07DD),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x07DE),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x07DF),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0760),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0761),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0762),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0763),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0AB0),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0AB1),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0AB2),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0AB3),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
},
{ /* MCP89 Ethernet Controller */
PCI_DEVICE(0x10DE, 0x0D7D),
.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
},
{0,},
};
static struct pci_driver forcedeth_pci_driver = {
.name = DRV_NAME,
.id_table = pci_tbl,
.probe = nv_probe,
.remove = nv_remove,
.shutdown = nv_shutdown,
.driver.pm = NV_PM_OPS,
};
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, computed as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
module_param(debug_tx_timeout, bool, 0);
MODULE_PARM_DESC(debug_tx_timeout,
"Dump tx related registers and ring when tx_timeout happens");
module_pci_driver(forcedeth_pci_driver);
MODULE_AUTHOR("Manfred Spraul <[email protected]>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pci_tbl);
|
// SPDX-License-Identifier: GPL-2.0+
/*
* IPv6 IOAM implementation
*
* Author:
* Justin Iurman <[email protected]>
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/ioam6.h>
#include <linux/ioam6_genl.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/ioam6.h>
#include <net/sch_generic.h>
static void ioam6_ns_release(struct ioam6_namespace *ns)
{
kfree_rcu(ns, rcu);
}
static void ioam6_sc_release(struct ioam6_schema *sc)
{
kfree_rcu(sc, rcu);
}
static void ioam6_free_ns(void *ptr, void *arg)
{
struct ioam6_namespace *ns = (struct ioam6_namespace *)ptr;
if (ns)
ioam6_ns_release(ns);
}
static void ioam6_free_sc(void *ptr, void *arg)
{
struct ioam6_schema *sc = (struct ioam6_schema *)ptr;
if (sc)
ioam6_sc_release(sc);
}
static int ioam6_ns_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
const struct ioam6_namespace *ns = obj;
return (ns->id != *(__be16 *)arg->key);
}
static int ioam6_sc_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
const struct ioam6_schema *sc = obj;
return (sc->id != *(u32 *)arg->key);
}
static const struct rhashtable_params rht_ns_params = {
.key_len = sizeof(__be16),
.key_offset = offsetof(struct ioam6_namespace, id),
.head_offset = offsetof(struct ioam6_namespace, head),
.automatic_shrinking = true,
.obj_cmpfn = ioam6_ns_cmpfn,
};
static const struct rhashtable_params rht_sc_params = {
.key_len = sizeof(u32),
.key_offset = offsetof(struct ioam6_schema, id),
.head_offset = offsetof(struct ioam6_schema, head),
.automatic_shrinking = true,
.obj_cmpfn = ioam6_sc_cmpfn,
};
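/* Both lookup tables live in the per-netns data: namespaces are keyed by
 * their big-endian 16-bit id, schemas by a host-order 32-bit id. The
 * custom obj_cmpfn callbacks compare the keys directly since the ids are
 * stored inline in the objects.
 */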
static struct genl_family ioam6_genl_family;
static const struct nla_policy ioam6_genl_policy_addns[] = {
[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
[IOAM6_ATTR_NS_DATA] = { .type = NLA_U32 },
[IOAM6_ATTR_NS_DATA_WIDE] = { .type = NLA_U64 },
};
static const struct nla_policy ioam6_genl_policy_delns[] = {
[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
};
static const struct nla_policy ioam6_genl_policy_addsc[] = {
[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
[IOAM6_ATTR_SC_DATA] = { .type = NLA_BINARY,
.len = IOAM6_MAX_SCHEMA_DATA_LEN },
};
static const struct nla_policy ioam6_genl_policy_delsc[] = {
[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
};
static const struct nla_policy ioam6_genl_policy_ns_sc[] = {
[IOAM6_ATTR_NS_ID] = { .type = NLA_U16 },
[IOAM6_ATTR_SC_ID] = { .type = NLA_U32 },
[IOAM6_ATTR_SC_NONE] = { .type = NLA_FLAG },
};
static int ioam6_genl_addns(struct sk_buff *skb, struct genl_info *info)
{
struct ioam6_pernet_data *nsdata;
struct ioam6_namespace *ns;
u64 data64;
u32 data32;
__be16 id;
int err;
if (!info->attrs[IOAM6_ATTR_NS_ID])
return -EINVAL;
id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
nsdata = ioam6_pernet(genl_info_net(info));
mutex_lock(&nsdata->lock);
ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
if (ns) {
err = -EEXIST;
goto out_unlock;
}
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
if (!ns) {
err = -ENOMEM;
goto out_unlock;
}
ns->id = id;
data32 = nla_get_u32_default(info->attrs[IOAM6_ATTR_NS_DATA],
IOAM6_U32_UNAVAILABLE);
data64 = nla_get_u64_default(info->attrs[IOAM6_ATTR_NS_DATA_WIDE],
IOAM6_U64_UNAVAILABLE);
ns->data = cpu_to_be32(data32);
ns->data_wide = cpu_to_be64(data64);
err = rhashtable_lookup_insert_fast(&nsdata->namespaces, &ns->head,
rht_ns_params);
if (err)
kfree(ns);
out_unlock:
mutex_unlock(&nsdata->lock);
return err;
}
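/* These genetlink commands are driven from userspace by the iproute2
 * "ip ioam" frontend; for example, adding a namespace (id and data
 * chosen arbitrarily):
 *
 *	ip ioam namespace add 123 data 0xdeadbeef wide 0xcafec0caf00dc0de
 */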
static int ioam6_genl_delns(struct sk_buff *skb, struct genl_info *info)
{
struct ioam6_pernet_data *nsdata;
struct ioam6_namespace *ns;
struct ioam6_schema *sc;
__be16 id;
int err;
if (!info->attrs[IOAM6_ATTR_NS_ID])
return -EINVAL;
id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
nsdata = ioam6_pernet(genl_info_net(info));
mutex_lock(&nsdata->lock);
ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
if (!ns) {
err = -ENOENT;
goto out_unlock;
}
sc = rcu_dereference_protected(ns->schema,
lockdep_is_held(&nsdata->lock));
err = rhashtable_remove_fast(&nsdata->namespaces, &ns->head,
rht_ns_params);
if (err)
goto out_unlock;
if (sc)
rcu_assign_pointer(sc->ns, NULL);
ioam6_ns_release(ns);
out_unlock:
mutex_unlock(&nsdata->lock);
return err;
}
static int __ioam6_genl_dumpns_element(struct ioam6_namespace *ns,
u32 portid,
u32 seq,
u32 flags,
struct sk_buff *skb,
u8 cmd)
{
struct ioam6_schema *sc;
u64 data64;
u32 data32;
void *hdr;
hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
if (!hdr)
return -ENOMEM;
data32 = be32_to_cpu(ns->data);
data64 = be64_to_cpu(ns->data_wide);
if (nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id)) ||
(data32 != IOAM6_U32_UNAVAILABLE &&
nla_put_u32(skb, IOAM6_ATTR_NS_DATA, data32)) ||
(data64 != IOAM6_U64_UNAVAILABLE &&
nla_put_u64_64bit(skb, IOAM6_ATTR_NS_DATA_WIDE,
data64, IOAM6_ATTR_PAD)))
goto nla_put_failure;
rcu_read_lock();
sc = rcu_dereference(ns->schema);
if (sc && nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id)) {
rcu_read_unlock();
goto nla_put_failure;
}
rcu_read_unlock();
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ioam6_genl_dumpns_start(struct netlink_callback *cb)
{
struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
if (!iter) {
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
cb->args[0] = (long)iter;
}
rhashtable_walk_enter(&nsdata->namespaces, iter);
return 0;
}
static int ioam6_genl_dumpns_done(struct netlink_callback *cb)
{
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
rhashtable_walk_exit(iter);
kfree(iter);
return 0;
}
static int ioam6_genl_dumpns(struct sk_buff *skb, struct netlink_callback *cb)
{
struct rhashtable_iter *iter;
struct ioam6_namespace *ns;
int err;
iter = (struct rhashtable_iter *)cb->args[0];
rhashtable_walk_start(iter);
for (;;) {
ns = rhashtable_walk_next(iter);
if (IS_ERR(ns)) {
if (PTR_ERR(ns) == -EAGAIN)
continue;
err = PTR_ERR(ns);
goto done;
} else if (!ns) {
break;
}
err = __ioam6_genl_dumpns_element(ns,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
skb,
IOAM6_CMD_DUMP_NAMESPACES);
if (err)
goto done;
}
err = skb->len;
done:
rhashtable_walk_stop(iter);
return err;
}
static int ioam6_genl_addsc(struct sk_buff *skb, struct genl_info *info)
{
struct ioam6_pernet_data *nsdata;
int len, len_aligned, err;
struct ioam6_schema *sc;
u32 id;
if (!info->attrs[IOAM6_ATTR_SC_ID] || !info->attrs[IOAM6_ATTR_SC_DATA])
return -EINVAL;
id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
nsdata = ioam6_pernet(genl_info_net(info));
mutex_lock(&nsdata->lock);
sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
if (sc) {
err = -EEXIST;
goto out_unlock;
}
len = nla_len(info->attrs[IOAM6_ATTR_SC_DATA]);
len_aligned = ALIGN(len, 4);
sc = kzalloc(sizeof(*sc) + len_aligned, GFP_KERNEL);
if (!sc) {
err = -ENOMEM;
goto out_unlock;
}
sc->id = id;
sc->len = len_aligned;
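	/*
	 * Pre-build the opaque state snapshot header placed in front of the
	 * schema data: length in 4-octet units in the top byte, the 24-bit
	 * schema id below (see RFC 9197).
	 */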
sc->hdr = cpu_to_be32(sc->id | ((u8)(sc->len / 4) << 24));
nla_memcpy(sc->data, info->attrs[IOAM6_ATTR_SC_DATA], len);
err = rhashtable_lookup_insert_fast(&nsdata->schemas, &sc->head,
rht_sc_params);
if (err)
goto free_sc;
out_unlock:
mutex_unlock(&nsdata->lock);
return err;
free_sc:
kfree(sc);
goto out_unlock;
}
static int ioam6_genl_delsc(struct sk_buff *skb, struct genl_info *info)
{
struct ioam6_pernet_data *nsdata;
struct ioam6_namespace *ns;
struct ioam6_schema *sc;
int err;
u32 id;
if (!info->attrs[IOAM6_ATTR_SC_ID])
return -EINVAL;
id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
nsdata = ioam6_pernet(genl_info_net(info));
mutex_lock(&nsdata->lock);
sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
if (!sc) {
err = -ENOENT;
goto out_unlock;
}
ns = rcu_dereference_protected(sc->ns, lockdep_is_held(&nsdata->lock));
err = rhashtable_remove_fast(&nsdata->schemas, &sc->head,
rht_sc_params);
if (err)
goto out_unlock;
if (ns)
rcu_assign_pointer(ns->schema, NULL);
ioam6_sc_release(sc);
out_unlock:
mutex_unlock(&nsdata->lock);
return err;
}
static int __ioam6_genl_dumpsc_element(struct ioam6_schema *sc,
u32 portid, u32 seq, u32 flags,
struct sk_buff *skb, u8 cmd)
{
struct ioam6_namespace *ns;
void *hdr;
hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
if (!hdr)
return -ENOMEM;
if (nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id) ||
nla_put(skb, IOAM6_ATTR_SC_DATA, sc->len, sc->data))
goto nla_put_failure;
rcu_read_lock();
ns = rcu_dereference(sc->ns);
if (ns && nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id))) {
rcu_read_unlock();
goto nla_put_failure;
}
rcu_read_unlock();
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
static int ioam6_genl_dumpsc_start(struct netlink_callback *cb)
{
struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
if (!iter) {
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
cb->args[0] = (long)iter;
}
rhashtable_walk_enter(&nsdata->schemas, iter);
return 0;
}
static int ioam6_genl_dumpsc_done(struct netlink_callback *cb)
{
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
rhashtable_walk_exit(iter);
kfree(iter);
return 0;
}
static int ioam6_genl_dumpsc(struct sk_buff *skb, struct netlink_callback *cb)
{
struct rhashtable_iter *iter;
struct ioam6_schema *sc;
int err;
iter = (struct rhashtable_iter *)cb->args[0];
rhashtable_walk_start(iter);
for (;;) {
sc = rhashtable_walk_next(iter);
if (IS_ERR(sc)) {
if (PTR_ERR(sc) == -EAGAIN)
continue;
err = PTR_ERR(sc);
goto done;
} else if (!sc) {
break;
}
err = __ioam6_genl_dumpsc_element(sc,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
skb,
IOAM6_CMD_DUMP_SCHEMAS);
if (err)
goto done;
}
err = skb->len;
done:
rhashtable_walk_stop(iter);
return err;
}
static int ioam6_genl_ns_set_schema(struct sk_buff *skb, struct genl_info *info)
{
struct ioam6_namespace *ns, *ns_ref;
struct ioam6_schema *sc, *sc_ref;
struct ioam6_pernet_data *nsdata;
__be16 ns_id;
u32 sc_id;
int err;
if (!info->attrs[IOAM6_ATTR_NS_ID] ||
(!info->attrs[IOAM6_ATTR_SC_ID] &&
!info->attrs[IOAM6_ATTR_SC_NONE]))
return -EINVAL;
ns_id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
nsdata = ioam6_pernet(genl_info_net(info));
mutex_lock(&nsdata->lock);
ns = rhashtable_lookup_fast(&nsdata->namespaces, &ns_id, rht_ns_params);
if (!ns) {
err = -ENOENT;
goto out_unlock;
}
if (info->attrs[IOAM6_ATTR_SC_NONE]) {
sc = NULL;
} else {
sc_id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
sc = rhashtable_lookup_fast(&nsdata->schemas, &sc_id,
rht_sc_params);
if (!sc) {
err = -ENOENT;
goto out_unlock;
}
}
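	/* Unlink any existing pairing on both sides before wiring the new
	 * one, so that a namespace references at most one schema and vice
	 * versa.
	 */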
sc_ref = rcu_dereference_protected(ns->schema,
lockdep_is_held(&nsdata->lock));
if (sc_ref)
rcu_assign_pointer(sc_ref->ns, NULL);
rcu_assign_pointer(ns->schema, sc);
if (sc) {
ns_ref = rcu_dereference_protected(sc->ns,
lockdep_is_held(&nsdata->lock));
if (ns_ref)
rcu_assign_pointer(ns_ref->schema, NULL);
rcu_assign_pointer(sc->ns, ns);
}
err = 0;
out_unlock:
mutex_unlock(&nsdata->lock);
return err;
}
static const struct genl_ops ioam6_genl_ops[] = {
{
.cmd = IOAM6_CMD_ADD_NAMESPACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ioam6_genl_addns,
.flags = GENL_ADMIN_PERM,
.policy = ioam6_genl_policy_addns,
.maxattr = ARRAY_SIZE(ioam6_genl_policy_addns) - 1,
},
{
.cmd = IOAM6_CMD_DEL_NAMESPACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ioam6_genl_delns,
.flags = GENL_ADMIN_PERM,
.policy = ioam6_genl_policy_delns,
.maxattr = ARRAY_SIZE(ioam6_genl_policy_delns) - 1,
},
{
.cmd = IOAM6_CMD_DUMP_NAMESPACES,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.start = ioam6_genl_dumpns_start,
.dumpit = ioam6_genl_dumpns,
.done = ioam6_genl_dumpns_done,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = IOAM6_CMD_ADD_SCHEMA,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ioam6_genl_addsc,
.flags = GENL_ADMIN_PERM,
.policy = ioam6_genl_policy_addsc,
.maxattr = ARRAY_SIZE(ioam6_genl_policy_addsc) - 1,
},
{
.cmd = IOAM6_CMD_DEL_SCHEMA,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ioam6_genl_delsc,
.flags = GENL_ADMIN_PERM,
.policy = ioam6_genl_policy_delsc,
.maxattr = ARRAY_SIZE(ioam6_genl_policy_delsc) - 1,
},
{
.cmd = IOAM6_CMD_DUMP_SCHEMAS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.start = ioam6_genl_dumpsc_start,
.dumpit = ioam6_genl_dumpsc,
.done = ioam6_genl_dumpsc_done,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = IOAM6_CMD_NS_SET_SCHEMA,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = ioam6_genl_ns_set_schema,
.flags = GENL_ADMIN_PERM,
.policy = ioam6_genl_policy_ns_sc,
.maxattr = ARRAY_SIZE(ioam6_genl_policy_ns_sc) - 1,
},
};
#define IOAM6_GENL_EV_GRP_OFFSET 0
static const struct genl_multicast_group ioam6_mcgrps[] = {
[IOAM6_GENL_EV_GRP_OFFSET] = { .name = IOAM6_GENL_EV_GRP_NAME,
.flags = GENL_MCAST_CAP_NET_ADMIN },
};
static int ioam6_event_put_trace(struct sk_buff *skb,
struct ioam6_trace_hdr *trace,
unsigned int len)
{
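	/* Only the filled-in part of the trace is reported: the first
	 * remlen * 4 octets of the data area are still pre-allocated free
	 * space and are skipped.
	 */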
if (nla_put_u16(skb, IOAM6_EVENT_ATTR_TRACE_NAMESPACE,
be16_to_cpu(trace->namespace_id)) ||
nla_put_u8(skb, IOAM6_EVENT_ATTR_TRACE_NODELEN, trace->nodelen) ||
nla_put_u32(skb, IOAM6_EVENT_ATTR_TRACE_TYPE,
be32_to_cpu(trace->type_be32)) ||
nla_put(skb, IOAM6_EVENT_ATTR_TRACE_DATA,
len - sizeof(struct ioam6_trace_hdr) - trace->remlen * 4,
trace->data + trace->remlen * 4))
return 1;
return 0;
}
void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
void *opt, unsigned int opt_len)
{
struct nlmsghdr *nlh;
struct sk_buff *skb;
if (!genl_has_listeners(&ioam6_genl_family, net,
IOAM6_GENL_EV_GRP_OFFSET))
return;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!skb)
return;
nlh = genlmsg_put(skb, 0, 0, &ioam6_genl_family, 0, type);
if (!nlh)
goto nla_put_failure;
switch (type) {
case IOAM6_EVENT_UNSPEC:
WARN_ON_ONCE(1);
break;
case IOAM6_EVENT_TRACE:
if (ioam6_event_put_trace(skb, (struct ioam6_trace_hdr *)opt,
opt_len))
goto nla_put_failure;
break;
}
genlmsg_end(skb, nlh);
genlmsg_multicast_netns(&ioam6_genl_family, net, skb, 0,
IOAM6_GENL_EV_GRP_OFFSET, gfp);
return;
nla_put_failure:
nlmsg_free(skb);
}
static struct genl_family ioam6_genl_family __ro_after_init = {
.name = IOAM6_GENL_NAME,
.version = IOAM6_GENL_VERSION,
.netnsok = true,
.parallel_ops = true,
.ops = ioam6_genl_ops,
.n_ops = ARRAY_SIZE(ioam6_genl_ops),
.resv_start_op = IOAM6_CMD_NS_SET_SCHEMA + 1,
.mcgrps = ioam6_mcgrps,
.n_mcgrps = ARRAY_SIZE(ioam6_mcgrps),
.module = THIS_MODULE,
};
struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id)
{
struct ioam6_pernet_data *nsdata = ioam6_pernet(net);
return rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
}
static void __ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_namespace *ns,
struct ioam6_trace_hdr *trace,
struct ioam6_schema *sc,
u8 sclen, bool is_input)
{
struct timespec64 ts;
ktime_t tstamp;
u64 raw64;
u32 raw32;
u16 raw16;
u8 *data;
u8 byte;
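	/* Nodes fill the pre-allocated area from the end towards the start:
	 * e.g. with remlen = 8 words, nodelen = 2 and sclen = 0, this node's
	 * slot starts at trace->data + (8 - 2) * 4 = byte offset 24.
	 */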
data = trace->data + trace->remlen * 4 - trace->nodelen * 4 - sclen * 4;
/* hop_lim and node_id */
if (trace->type.bit0) {
byte = ipv6_hdr(skb)->hop_limit;
if (is_input)
byte--;
raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id;
*(__be32 *)data = cpu_to_be32((byte << 24) | raw32);
data += sizeof(__be32);
}
/* ingress_if_id and egress_if_id */
if (trace->type.bit1) {
if (!skb->dev)
raw16 = IOAM6_U16_UNAVAILABLE;
else
raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw16 = IOAM6_U16_UNAVAILABLE;
else
raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
}
/* timestamp seconds */
if (trace->type.bit2) {
if (!skb->dev) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
} else {
tstamp = skb_tstamp_cond(skb, true);
ts = ktime_to_timespec64(tstamp);
*(__be32 *)data = cpu_to_be32((u32)ts.tv_sec);
}
data += sizeof(__be32);
}
/* timestamp subseconds */
if (trace->type.bit3) {
if (!skb->dev) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
} else {
if (!trace->type.bit2) {
tstamp = skb_tstamp_cond(skb, true);
ts = ktime_to_timespec64(tstamp);
}
*(__be32 *)data = cpu_to_be32((u32)(ts.tv_nsec / NSEC_PER_USEC));
}
data += sizeof(__be32);
}
/* transit delay */
if (trace->type.bit4) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* namespace data */
if (trace->type.bit5) {
*(__be32 *)data = ns->data;
data += sizeof(__be32);
}
/* queue depth */
if (trace->type.bit6) {
struct netdev_queue *queue;
struct Qdisc *qdisc;
__u32 qlen, backlog;
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
} else {
queue = skb_get_tx_queue(skb_dst(skb)->dev, skb);
qdisc = rcu_dereference(queue->qdisc);
qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);
*(__be32 *)data = cpu_to_be32(backlog);
}
data += sizeof(__be32);
}
/* checksum complement */
if (trace->type.bit7) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* hop_lim and node_id (wide) */
if (trace->type.bit8) {
byte = ipv6_hdr(skb)->hop_limit;
if (is_input)
byte--;
raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide;
*(__be64 *)data = cpu_to_be64(((u64)byte << 56) | raw64);
data += sizeof(__be64);
}
/* ingress_if_id and egress_if_id (wide) */
if (trace->type.bit9) {
if (!skb->dev)
raw32 = IOAM6_U32_UNAVAILABLE;
else
raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw32 = IOAM6_U32_UNAVAILABLE;
else
raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
}
/* namespace data (wide) */
if (trace->type.bit10) {
*(__be64 *)data = ns->data_wide;
data += sizeof(__be64);
}
/* buffer occupancy */
if (trace->type.bit11) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit12 undefined: filled with empty value */
if (trace->type.bit12) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit13 undefined: filled with empty value */
if (trace->type.bit13) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit14 undefined: filled with empty value */
if (trace->type.bit14) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit15 undefined: filled with empty value */
if (trace->type.bit15) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit16 undefined: filled with empty value */
if (trace->type.bit16) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit17 undefined: filled with empty value */
if (trace->type.bit17) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit18 undefined: filled with empty value */
if (trace->type.bit18) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit19 undefined: filled with empty value */
if (trace->type.bit19) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit20 undefined: filled with empty value */
if (trace->type.bit20) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* bit21 undefined: filled with empty value */
if (trace->type.bit21) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
data += sizeof(__be32);
}
/* opaque state snapshot */
if (trace->type.bit22) {
if (!sc) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE >> 8);
} else {
*(__be32 *)data = sc->hdr;
data += sizeof(__be32);
memcpy(data, sc->data, sc->len);
}
}
}
/* called with rcu_read_lock() */
void ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_namespace *ns,
struct ioam6_trace_hdr *trace,
bool is_input)
{
struct ioam6_schema *sc;
u8 sclen = 0;
/* Skip if Overflow flag is set
*/
if (trace->overflow)
return;
/* NodeLen does not include Opaque State Snapshot length. We need to
* take it into account if the corresponding bit is set (bit 22) and
* if the current IOAM namespace has an active schema attached to it
*/
sc = rcu_dereference(ns->schema);
if (trace->type.bit22) {
sclen = sizeof_field(struct ioam6_schema, hdr) / 4;
if (sc)
sclen += sc->len / 4;
}
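	/* For instance, a schema with 12 octets of data gives
	 * sclen = 4/4 + 12/4 = 4 words.
	 */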
/* If there is no space remaining, we set the Overflow flag and we
* skip without filling the trace
*/
if (!trace->remlen || trace->remlen < trace->nodelen + sclen) {
trace->overflow = 1;
return;
}
__ioam6_fill_trace_data(skb, ns, trace, sc, sclen, is_input);
trace->remlen -= trace->nodelen + sclen;
}
static int __net_init ioam6_net_init(struct net *net)
{
struct ioam6_pernet_data *nsdata;
int err = -ENOMEM;
nsdata = kzalloc(sizeof(*nsdata), GFP_KERNEL);
if (!nsdata)
goto out;
mutex_init(&nsdata->lock);
net->ipv6.ioam6_data = nsdata;
err = rhashtable_init(&nsdata->namespaces, &rht_ns_params);
if (err)
goto free_nsdata;
err = rhashtable_init(&nsdata->schemas, &rht_sc_params);
if (err)
goto free_rht_ns;
out:
return err;
free_rht_ns:
rhashtable_destroy(&nsdata->namespaces);
free_nsdata:
kfree(nsdata);
net->ipv6.ioam6_data = NULL;
goto out;
}
static void __net_exit ioam6_net_exit(struct net *net)
{
struct ioam6_pernet_data *nsdata = ioam6_pernet(net);
rhashtable_free_and_destroy(&nsdata->namespaces, ioam6_free_ns, NULL);
rhashtable_free_and_destroy(&nsdata->schemas, ioam6_free_sc, NULL);
kfree(nsdata);
}
static struct pernet_operations ioam6_net_ops = {
.init = ioam6_net_init,
.exit = ioam6_net_exit,
};
int __init ioam6_init(void)
{
int err = register_pernet_subsys(&ioam6_net_ops);
if (err)
goto out;
err = genl_register_family(&ioam6_genl_family);
if (err)
goto out_unregister_pernet_subsys;
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
err = ioam6_iptunnel_init();
if (err)
goto out_unregister_genl;
#endif
pr_info("In-situ OAM (IOAM) with IPv6\n");
out:
return err;
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
out_unregister_genl:
genl_unregister_family(&ioam6_genl_family);
#endif
out_unregister_pernet_subsys:
unregister_pernet_subsys(&ioam6_net_ops);
goto out;
}
void ioam6_exit(void)
{
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
ioam6_iptunnel_exit();
#endif
genl_unregister_family(&ioam6_genl_family);
unregister_pernet_subsys(&ioam6_net_ops);
}
|
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2020 Intel Corporation
*/
#ifndef __I915_DRM_CLIENT_H__
#define __I915_DRM_CLIENT_H__
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <uapi/drm/i915_drm.h>
#include "i915_file_private.h"
#include "gem/i915_gem_object_types.h"
#include "gt/intel_context_types.h"
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
struct drm_file;
struct drm_printer;
struct i915_drm_client {
struct kref kref;
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */
#ifdef CONFIG_PROC_FS
/**
* @objects_lock: lock protecting @objects_list
*/
spinlock_t objects_lock;
/**
* @objects_list: list of objects created by this client
*
* Protected by @objects_lock.
*/
struct list_head objects_list;
#endif
/**
* @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
*/
atomic64_t past_runtime[I915_LAST_UABI_ENGINE_CLASS + 1];
};
static inline struct i915_drm_client *
i915_drm_client_get(struct i915_drm_client *client)
{
kref_get(&client->kref);
return client;
}
void __i915_drm_client_free(struct kref *kref);
static inline void i915_drm_client_put(struct i915_drm_client *client)
{
kref_put(&client->kref, __i915_drm_client_free);
}
struct i915_drm_client *i915_drm_client_alloc(void);
void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
#ifdef CONFIG_PROC_FS
void i915_drm_client_add_object(struct i915_drm_client *client,
struct drm_i915_gem_object *obj);
void i915_drm_client_remove_object(struct drm_i915_gem_object *obj);
void i915_drm_client_add_context_objects(struct i915_drm_client *client,
struct intel_context *ce);
#else
static inline void i915_drm_client_add_object(struct i915_drm_client *client,
struct drm_i915_gem_object *obj)
{
}
static inline void
i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
{
}
static inline void
i915_drm_client_add_context_objects(struct i915_drm_client *client,
struct intel_context *ce)
{
}
#endif
#endif /* !__I915_DRM_CLIENT_H__ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* StarFive JH71XX PMU (Power Management Unit) Controller Driver
*
* Copyright (C) 2022-2023 StarFive Technology Co., Ltd.
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <dt-bindings/power/starfive,jh7110-pmu.h>
/* register offset */
#define JH71XX_PMU_SW_TURN_ON_POWER 0x0C
#define JH71XX_PMU_SW_TURN_OFF_POWER 0x10
#define JH71XX_PMU_SW_ENCOURAGE 0x44
#define JH71XX_PMU_TIMER_INT_MASK 0x48
#define JH71XX_PMU_CURR_POWER_MODE 0x80
#define JH71XX_PMU_EVENT_STATUS 0x88
#define JH71XX_PMU_INT_STATUS 0x8C
/* aon pmu register offset */
#define JH71XX_AON_PMU_SWITCH 0x00
/* sw encourage cfg */
#define JH71XX_PMU_SW_ENCOURAGE_EN_LO 0x05
#define JH71XX_PMU_SW_ENCOURAGE_EN_HI 0x50
#define JH71XX_PMU_SW_ENCOURAGE_DIS_LO 0x0A
#define JH71XX_PMU_SW_ENCOURAGE_DIS_HI 0xA0
#define JH71XX_PMU_SW_ENCOURAGE_ON 0xFF
/* pmu int status */
#define JH71XX_PMU_INT_SEQ_DONE BIT(0)
#define JH71XX_PMU_INT_HW_REQ BIT(1)
#define JH71XX_PMU_INT_SW_FAIL GENMASK(3, 2)
#define JH71XX_PMU_INT_HW_FAIL GENMASK(5, 4)
#define JH71XX_PMU_INT_PCH_FAIL GENMASK(8, 6)
#define JH71XX_PMU_INT_ALL_MASK GENMASK(8, 0)
/*
 * The time required to switch power state is bounded by the time to
 * turn on the largest domain's power, which is on the order of microseconds.
*/
#define JH71XX_PMU_TIMEOUT_US 100
struct jh71xx_domain_info {
const char * const name;
unsigned int flags;
u8 bit;
};
struct jh71xx_pmu;
struct jh71xx_pmu_dev;
struct jh71xx_pmu_match_data {
const struct jh71xx_domain_info *domain_info;
int num_domains;
unsigned int pmu_status;
int (*pmu_parse_irq)(struct platform_device *pdev,
struct jh71xx_pmu *pmu);
int (*pmu_set_state)(struct jh71xx_pmu_dev *pmd,
u32 mask, bool on);
};
struct jh71xx_pmu {
struct device *dev;
const struct jh71xx_pmu_match_data *match_data;
void __iomem *base;
struct generic_pm_domain **genpd;
struct genpd_onecell_data genpd_data;
int irq;
spinlock_t lock; /* protects pmu reg */
};
struct jh71xx_pmu_dev {
const struct jh71xx_domain_info *domain_info;
struct jh71xx_pmu *pmu;
struct generic_pm_domain genpd;
};
static int jh71xx_pmu_get_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool *is_on)
{
struct jh71xx_pmu *pmu = pmd->pmu;
if (!mask)
return -EINVAL;
*is_on = readl(pmu->base + pmu->match_data->pmu_status) & mask;
return 0;
}
static int jh7110_pmu_set_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool on)
{
struct jh71xx_pmu *pmu = pmd->pmu;
unsigned long flags;
u32 val;
u32 mode;
u32 encourage_lo;
u32 encourage_hi;
int ret;
spin_lock_irqsave(&pmu->lock, flags);
/*
* The PMU accepts software encourage to switch power mode in the following 2 steps:
*
	 * 1. Configure the register SW_TURN_ON_POWER (offset 0x0c) by writing 1 to
* the bit corresponding to the power domain that will be turned on
* and writing 0 to the others.
* Likewise, configure the register SW_TURN_OFF_POWER (offset 0x10) by
* writing 1 to the bit corresponding to the power domain that will be
* turned off and writing 0 to the others.
*/
if (on) {
mode = JH71XX_PMU_SW_TURN_ON_POWER;
encourage_lo = JH71XX_PMU_SW_ENCOURAGE_EN_LO;
encourage_hi = JH71XX_PMU_SW_ENCOURAGE_EN_HI;
} else {
mode = JH71XX_PMU_SW_TURN_OFF_POWER;
encourage_lo = JH71XX_PMU_SW_ENCOURAGE_DIS_LO;
encourage_hi = JH71XX_PMU_SW_ENCOURAGE_DIS_HI;
}
writel(mask, pmu->base + mode);
/*
	 * 2. Write the SW encourage command sequence to the Software Encourage Reg (offset 0x44)
	 *    First write JH71XX_PMU_SW_ENCOURAGE_ON to the register. This will reset
* the state machine which parses the command sequence. This register must be
* written every time software wants to power on/off a domain.
* Then write the lower bits of the command sequence, followed by the upper
* bits. The sequence differs between powering on & off a domain.
*/
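	/* For example, powering on the domain with mask = BIT(2): 0x4 was
	 * written to SW_TURN_ON_POWER above, and 0xff, 0x05 and 0x50 are now
	 * written in turn to SW_ENCOURAGE.
	 */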
writel(JH71XX_PMU_SW_ENCOURAGE_ON, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
writel(encourage_lo, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
writel(encourage_hi, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
spin_unlock_irqrestore(&pmu->lock, flags);
/* Wait for the power domain bit to be enabled / disabled */
if (on) {
ret = readl_poll_timeout_atomic(pmu->base + JH71XX_PMU_CURR_POWER_MODE,
val, val & mask,
1, JH71XX_PMU_TIMEOUT_US);
} else {
ret = readl_poll_timeout_atomic(pmu->base + JH71XX_PMU_CURR_POWER_MODE,
val, !(val & mask),
1, JH71XX_PMU_TIMEOUT_US);
}
if (ret) {
dev_err(pmu->dev, "%s: failed to power %s\n",
pmd->genpd.name, on ? "on" : "off");
return -ETIMEDOUT;
}
return 0;
}
static int jh7110_aon_pmu_set_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool on)
{
struct jh71xx_pmu *pmu = pmd->pmu;
unsigned long flags;
u32 val;
spin_lock_irqsave(&pmu->lock, flags);
val = readl(pmu->base + JH71XX_AON_PMU_SWITCH);
if (on)
val |= mask;
else
val &= ~mask;
writel(val, pmu->base + JH71XX_AON_PMU_SWITCH);
spin_unlock_irqrestore(&pmu->lock, flags);
return 0;
}
static int jh71xx_pmu_set_state(struct jh71xx_pmu_dev *pmd, u32 mask, bool on)
{
struct jh71xx_pmu *pmu = pmd->pmu;
const struct jh71xx_pmu_match_data *match_data = pmu->match_data;
bool is_on;
int ret;
ret = jh71xx_pmu_get_state(pmd, mask, &is_on);
if (ret) {
dev_dbg(pmu->dev, "unable to get current state for %s\n",
pmd->genpd.name);
return ret;
}
if (is_on == on) {
		dev_dbg(pmu->dev, "pm domain [%s] is already %sabled\n",
			pmd->genpd.name, on ? "en" : "dis");
return 0;
}
return match_data->pmu_set_state(pmd, mask, on);
}
static int jh71xx_pmu_on(struct generic_pm_domain *genpd)
{
struct jh71xx_pmu_dev *pmd = container_of(genpd,
struct jh71xx_pmu_dev, genpd);
u32 pwr_mask = BIT(pmd->domain_info->bit);
return jh71xx_pmu_set_state(pmd, pwr_mask, true);
}
static int jh71xx_pmu_off(struct generic_pm_domain *genpd)
{
struct jh71xx_pmu_dev *pmd = container_of(genpd,
struct jh71xx_pmu_dev, genpd);
u32 pwr_mask = BIT(pmd->domain_info->bit);
return jh71xx_pmu_set_state(pmd, pwr_mask, false);
}
static void jh71xx_pmu_int_enable(struct jh71xx_pmu *pmu, u32 mask, bool enable)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&pmu->lock, flags);
val = readl(pmu->base + JH71XX_PMU_TIMER_INT_MASK);
if (enable)
val &= ~mask;
else
val |= mask;
writel(val, pmu->base + JH71XX_PMU_TIMER_INT_MASK);
spin_unlock_irqrestore(&pmu->lock, flags);
}
static irqreturn_t jh71xx_pmu_interrupt(int irq, void *data)
{
struct jh71xx_pmu *pmu = data;
u32 val;
val = readl(pmu->base + JH71XX_PMU_INT_STATUS);
if (val & JH71XX_PMU_INT_SEQ_DONE)
dev_dbg(pmu->dev, "sequence done.\n");
if (val & JH71XX_PMU_INT_HW_REQ)
		dev_dbg(pmu->dev, "hardware encourage request.\n");
if (val & JH71XX_PMU_INT_SW_FAIL)
dev_err(pmu->dev, "software encourage fail.\n");
if (val & JH71XX_PMU_INT_HW_FAIL)
dev_err(pmu->dev, "hardware encourage fail.\n");
if (val & JH71XX_PMU_INT_PCH_FAIL)
dev_err(pmu->dev, "p-channel fail event.\n");
/* clear interrupts */
writel(val, pmu->base + JH71XX_PMU_INT_STATUS);
writel(val, pmu->base + JH71XX_PMU_EVENT_STATUS);
return IRQ_HANDLED;
}
static int jh7110_pmu_parse_irq(struct platform_device *pdev, struct jh71xx_pmu *pmu)
{
struct device *dev = &pdev->dev;
int ret;
pmu->irq = platform_get_irq(pdev, 0);
if (pmu->irq < 0)
return pmu->irq;
ret = devm_request_irq(dev, pmu->irq, jh71xx_pmu_interrupt,
0, pdev->name, pmu);
if (ret)
dev_err(dev, "failed to request irq\n");
jh71xx_pmu_int_enable(pmu, JH71XX_PMU_INT_ALL_MASK & ~JH71XX_PMU_INT_PCH_FAIL, true);
return 0;
}
static int jh71xx_pmu_init_domain(struct jh71xx_pmu *pmu, int index)
{
struct jh71xx_pmu_dev *pmd;
u32 pwr_mask;
int ret;
bool is_on = false;
pmd = devm_kzalloc(pmu->dev, sizeof(*pmd), GFP_KERNEL);
if (!pmd)
return -ENOMEM;
pmd->domain_info = &pmu->match_data->domain_info[index];
pmd->pmu = pmu;
pwr_mask = BIT(pmd->domain_info->bit);
pmd->genpd.name = pmd->domain_info->name;
pmd->genpd.flags = pmd->domain_info->flags;
ret = jh71xx_pmu_get_state(pmd, pwr_mask, &is_on);
if (ret)
dev_warn(pmu->dev, "unable to get current state for %s\n",
pmd->genpd.name);
pmd->genpd.power_on = jh71xx_pmu_on;
pmd->genpd.power_off = jh71xx_pmu_off;
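	/* The third argument of pm_genpd_init() is "is_off", hence the
	 * negation of the probed state.
	 */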
pm_genpd_init(&pmd->genpd, NULL, !is_on);
pmu->genpd_data.domains[index] = &pmd->genpd;
return 0;
}
static int jh71xx_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct jh71xx_pmu_match_data *match_data;
struct jh71xx_pmu *pmu;
unsigned int i;
int ret;
pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmu->base))
return PTR_ERR(pmu->base);
spin_lock_init(&pmu->lock);
match_data = of_device_get_match_data(dev);
if (!match_data)
return -EINVAL;
if (match_data->pmu_parse_irq) {
ret = match_data->pmu_parse_irq(pdev, pmu);
if (ret) {
dev_err(dev, "failed to parse irq\n");
return ret;
}
}
pmu->genpd = devm_kcalloc(dev, match_data->num_domains,
sizeof(struct generic_pm_domain *),
GFP_KERNEL);
if (!pmu->genpd)
return -ENOMEM;
pmu->dev = dev;
pmu->match_data = match_data;
pmu->genpd_data.domains = pmu->genpd;
pmu->genpd_data.num_domains = match_data->num_domains;
for (i = 0; i < match_data->num_domains; i++) {
ret = jh71xx_pmu_init_domain(pmu, i);
if (ret) {
dev_err(dev, "failed to initialize power domain\n");
return ret;
}
}
ret = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
if (ret) {
dev_err(dev, "failed to register genpd driver: %d\n", ret);
return ret;
}
dev_dbg(dev, "registered %u power domains\n", i);
return 0;
}
static const struct jh71xx_domain_info jh7110_power_domains[] = {
[JH7110_PD_SYSTOP] = {
.name = "SYSTOP",
.bit = 0,
.flags = GENPD_FLAG_ALWAYS_ON,
},
[JH7110_PD_CPU] = {
.name = "CPU",
.bit = 1,
.flags = GENPD_FLAG_ALWAYS_ON,
},
[JH7110_PD_GPUA] = {
.name = "GPUA",
.bit = 2,
},
[JH7110_PD_VDEC] = {
.name = "VDEC",
.bit = 3,
},
[JH7110_PD_VOUT] = {
.name = "VOUT",
.bit = 4,
},
[JH7110_PD_ISP] = {
.name = "ISP",
.bit = 5,
},
[JH7110_PD_VENC] = {
.name = "VENC",
.bit = 6,
},
};
static const struct jh71xx_pmu_match_data jh7110_pmu = {
.num_domains = ARRAY_SIZE(jh7110_power_domains),
.domain_info = jh7110_power_domains,
.pmu_status = JH71XX_PMU_CURR_POWER_MODE,
.pmu_parse_irq = jh7110_pmu_parse_irq,
.pmu_set_state = jh7110_pmu_set_state,
};
static const struct jh71xx_domain_info jh7110_aon_power_domains[] = {
[JH7110_AON_PD_DPHY_TX] = {
.name = "DPHY-TX",
.bit = 30,
},
[JH7110_AON_PD_DPHY_RX] = {
.name = "DPHY-RX",
.bit = 31,
},
};
static const struct jh71xx_pmu_match_data jh7110_aon_pmu = {
.num_domains = ARRAY_SIZE(jh7110_aon_power_domains),
.domain_info = jh7110_aon_power_domains,
.pmu_status = JH71XX_AON_PMU_SWITCH,
.pmu_set_state = jh7110_aon_pmu_set_state,
};
static const struct of_device_id jh71xx_pmu_of_match[] = {
{
.compatible = "starfive,jh7110-pmu",
.data = (void *)&jh7110_pmu,
}, {
.compatible = "starfive,jh7110-aon-syscon",
.data = (void *)&jh7110_aon_pmu,
}, {
/* sentinel */
}
};
static struct platform_driver jh71xx_pmu_driver = {
.probe = jh71xx_pmu_probe,
.driver = {
.name = "jh71xx-pmu",
.of_match_table = jh71xx_pmu_of_match,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(jh71xx_pmu_driver);
MODULE_AUTHOR("Walker Chen <[email protected]>");
MODULE_AUTHOR("Changhuang Liang <[email protected]>");
MODULE_DESCRIPTION("StarFive JH71XX PMU Driver");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_
#define ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_
/*
*****************************************
* ARC_FARM_ARC0_DUP_ENG_AXUSER
* (Prototype: AXUSER)
*****************************************
*/
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_ASID 0x4E89900
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_MMU_BP 0x4E89904
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_STRONG_ORDER 0x4E89908
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_NO_SNOOP 0x4E8990C
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_REDUCTION 0x4E89910
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_ATOMIC 0x4E89914
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_QOS 0x4E89918
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RSVD 0x4E8991C
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_EMEM_CPAGE 0x4E89920
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_CORE 0x4E89924
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_E2E_COORD 0x4E89928
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_OVRD_LO 0x4E89930
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_OVRD_HI 0x4E89934
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_OVRD_LO 0x4E89938
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_OVRD_HI 0x4E8993C
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_COORD 0x4E89940
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_LOCK 0x4E89944
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_RSVD 0x4E89948
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_OVRD 0x4E8994C
#endif /* ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_ */
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2016 Andreas Färber
*/
/dts-v1/;
#include "meson-gxbb-vega-s95.dtsi"
/ {
compatible = "tronsmart,vega-s95-meta", "tronsmart,vega-s95", "amlogic,meson-gxbb";
model = "Tronsmart Vega S95 Meta";
memory@0 {
device_type = "memory";
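		/* 2 GiB of DRAM starting at address 0 */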
reg = <0x0 0x0 0x0 0x80000000>;
};
};
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MixCom Watchdog: A Simple Hardware Watchdog Device
* Based on Softdog driver by Alan Cox and PC Watchdog driver by Ken Hollis
*
* Author: Gergely Madarasz <[email protected]>
*
* Copyright (c) 1999 ITConsult-Pro Co. <[email protected]>
*
* Version 0.1 (99/04/15):
* - first version
*
* Version 0.2 (99/06/16):
* - added kernel timer watchdog ping after close
* since the hardware does not support watchdog shutdown
*
* Version 0.3 (99/06/21):
* - added WDIOC_GETSTATUS and WDIOC_GETSUPPORT ioctl calls
*
* Version 0.3.1 (99/06/22):
* - allow module removal while internal timer is active,
* print warning about probable reset
*
* Version 0.4 (99/11/15):
* - support for one more type board
*
* Version 0.5 (2001/12/14) Matt Domsch <[email protected]>
* - added nowayout module option to override
* CONFIG_WATCHDOG_NOWAYOUT
*
* Version 0.6 (2002/04/12): Rob Radez <[email protected]>
* - make mixcomwd_opened unsigned,
* removed lock_kernel/unlock_kernel from mixcomwd_release,
* modified ioctl a bit to conform to API
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define VERSION "0.6"
#define WATCHDOG_NAME "mixcomwd"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/io.h>
/*
* We have two types of cards that can be probed:
* 1) The Mixcom cards: these cards can be found at addresses
* 0x180, 0x280, 0x380 with an additional offset of 0xc10.
* (Or 0xd90, 0xe90, 0xf90).
* 2) The FlashCOM cards: these cards can be set up at
* 0x300 -> 0x378, in 0x8 jumps with an offset of 0x04.
* (Or 0x304 -> 0x37c in 0x8 jumps).
 * Each card has its own ID.
*/
#define MIXCOM_ID 0x11
#define FLASHCOM_ID 0x18
static struct {
int ioport;
int id;
} mixcomwd_io_info[] = {
/* The Mixcom cards */
{0x0d90, MIXCOM_ID},
{0x0e90, MIXCOM_ID},
{0x0f90, MIXCOM_ID},
/* The FlashCOM cards */
{0x0304, FLASHCOM_ID},
{0x030c, FLASHCOM_ID},
{0x0314, FLASHCOM_ID},
{0x031c, FLASHCOM_ID},
{0x0324, FLASHCOM_ID},
{0x032c, FLASHCOM_ID},
{0x0334, FLASHCOM_ID},
{0x033c, FLASHCOM_ID},
{0x0344, FLASHCOM_ID},
{0x034c, FLASHCOM_ID},
{0x0354, FLASHCOM_ID},
{0x035c, FLASHCOM_ID},
{0x0364, FLASHCOM_ID},
{0x036c, FLASHCOM_ID},
{0x0374, FLASHCOM_ID},
{0x037c, FLASHCOM_ID},
/* The end of the list */
{0x0000, 0},
};
static void mixcomwd_timerfun(struct timer_list *unused);
static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */
static int watchdog_port;
static int mixcomwd_timer_alive;
static DEFINE_TIMER(mixcomwd_timer, mixcomwd_timerfun);
static char expect_close;
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static void mixcomwd_ping(void)
{
outb_p(55, watchdog_port);
}
static void mixcomwd_timerfun(struct timer_list *unused)
{
mixcomwd_ping();
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
}
/*
* Allow only one person to hold it open
*/
static int mixcomwd_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &mixcomwd_opened))
return -EBUSY;
mixcomwd_ping();
if (nowayout)
/*
* fops_get() code via open() has already done
* a try_module_get() so it is safe to do the
* __module_get().
*/
__module_get(THIS_MODULE);
else {
if (mixcomwd_timer_alive) {
del_timer(&mixcomwd_timer);
mixcomwd_timer_alive = 0;
}
}
return stream_open(inode, file);
}
static int mixcomwd_release(struct inode *inode, struct file *file)
{
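	/* expect_close is 42 only if the magic character 'V' was seen in
	 * the last write (see mixcomwd_write()); any other close is
	 * treated as unexpected.
	 */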
if (expect_close == 42) {
if (mixcomwd_timer_alive) {
pr_err("release called while internal timer alive\n");
return -EBUSY;
}
mixcomwd_timer_alive = 1;
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
} else
pr_crit("WDT device closed unexpectedly. WDT will not stop!\n");
clear_bit(0, &mixcomwd_opened);
expect_close = 0;
return 0;
}
static ssize_t mixcomwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
if (len) {
if (!nowayout) {
size_t i;
/* In case it was set long ago */
expect_close = 0;
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
mixcomwd_ping();
}
return len;
}
static long mixcomwd_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
int status;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "MixCOM watchdog",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
status = mixcomwd_opened;
if (!nowayout)
status |= mixcomwd_timer_alive;
return put_user(status, p);
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_KEEPALIVE:
mixcomwd_ping();
break;
default:
return -ENOTTY;
}
return 0;
}
static const struct file_operations mixcomwd_fops = {
.owner = THIS_MODULE,
.write = mixcomwd_write,
.unlocked_ioctl = mixcomwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = mixcomwd_open,
.release = mixcomwd_release,
};
static struct miscdevice mixcomwd_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &mixcomwd_fops,
};
static int __init checkcard(int port, int card_id)
{
int id;
if (!request_region(port, 1, "MixCOM watchdog"))
return 0;
id = inb_p(port);
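	/* Only the low 6 bits of the ID register identify a Mixcom board,
	 * hence the mask below.
	 */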
if (card_id == MIXCOM_ID)
id &= 0x3f;
if (id != card_id) {
release_region(port, 1);
return 0;
}
return 1;
}
static int __init mixcomwd_init(void)
{
int i, ret, found = 0;
for (i = 0; !found && mixcomwd_io_info[i].ioport != 0; i++) {
if (checkcard(mixcomwd_io_info[i].ioport,
mixcomwd_io_info[i].id)) {
found = 1;
watchdog_port = mixcomwd_io_info[i].ioport;
}
}
if (!found) {
pr_err("No card detected, or port not available\n");
return -ENODEV;
}
ret = misc_register(&mixcomwd_miscdev);
if (ret) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto error_misc_register_watchdog;
}
pr_info("MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",
VERSION, watchdog_port);
return 0;
error_misc_register_watchdog:
release_region(watchdog_port, 1);
watchdog_port = 0x0000;
return ret;
}
static void __exit mixcomwd_exit(void)
{
if (!nowayout) {
if (mixcomwd_timer_alive) {
pr_warn("I quit now, hardware will probably reboot!\n");
del_timer_sync(&mixcomwd_timer);
mixcomwd_timer_alive = 0;
}
}
misc_deregister(&mixcomwd_miscdev);
release_region(watchdog_port, 1);
}
module_init(mixcomwd_init);
module_exit(mixcomwd_exit);
MODULE_AUTHOR("Gergely Madarasz <[email protected]>");
MODULE_DESCRIPTION("MixCom Watchdog driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Linux MegaRAID device driver
*
* Copyright (c) 2002 LSI Logic Corporation.
*
* Copyright (c) 2002 Red Hat, Inc. All rights reserved.
* - fixes
* - speed-ups (list handling fixes, issued_list, optimizations.)
* - lots of cleanups.
*
* Copyright (c) 2003 Christoph Hellwig <[email protected]>
* - new-style, hotplug-aware pci probing and scsi registration
*
* Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
* <[email protected]>
*
* Description: Linux device driver for LSI Logic MegaRAID controller
*
* Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
* 518, 520, 531, 532
*
* This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
* and others. Please send updates to the mailing list
* [email protected] .
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include "megaraid.h"
#define MEGARAID_MODULE_VERSION "2.00.4"
MODULE_AUTHOR ("[email protected]");
MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
MODULE_LICENSE ("GPL");
MODULE_VERSION(MEGARAID_MODULE_VERSION);
static DEFINE_MUTEX(megadev_mutex);
static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
module_param(max_cmd_per_lun, uint, 0);
MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");
static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
module_param(max_sectors_per_io, ushort, 0);
MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");
static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param(max_mbox_busy_wait, ushort, 0);
MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
#define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
#define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
#define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
/*
* Global variables
*/
static int hba_count;
static adapter_t *hba_soft_state[MAX_CONTROLLERS];
static struct proc_dir_entry *mega_proc_dir_entry;
/* For controller re-ordering */
static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
static long
megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
/*
* The File Operations structure for the serial/ioctl interface of the driver
*/
static const struct file_operations megadev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = megadev_unlocked_ioctl,
.open = megadev_open,
.llseek = noop_llseek,
};
/*
 * Array of structures storing information about the controllers. This
 * information is sent to user-level applications when they issue an
 * ioctl for it.
*/
static struct mcontroller mcontroller[MAX_CONTROLLERS];
/* The current driver version */
static u32 driver_ver = 0x02000000;
/* major number used by the device for character interface */
static int major;
#define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01)
/*
* Debug variable to print some diagnostic messages
*/
static int trace_level;
/**
* mega_setup_mailbox()
* @adapter: pointer to our soft state
*
 * Allocates 8-byte aligned memory for the handshake mailbox.
*/
static int
mega_setup_mailbox(adapter_t *adapter)
{
unsigned long align;
adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
sizeof(mbox64_t),
&adapter->una_mbox64_dma,
GFP_KERNEL);
if( !adapter->una_mbox64 ) return -1;
adapter->mbox = &adapter->una_mbox64->mbox;
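	/*
	 * Align the mailbox up to the next 16-byte boundary, (x + 15) & ~0xf.
	 * The 64-bit mailbox wrapper sits 8 bytes before it, and "align"
	 * records how far the mailbox moved so the DMA address can be
	 * adjusted to match (the mailbox is assumed to live 8 bytes into
	 * the una_mbox64 allocation).
	 */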
adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
(~0UL ^ 0xFUL));
adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);
align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);
adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;
/*
* Register the mailbox if the controller is an io-mapped controller
*/
if( adapter->flag & BOARD_IOMAP ) {
outb(adapter->mbox_dma & 0xFF,
adapter->host->io_port + MBOX_PORT0);
outb((adapter->mbox_dma >> 8) & 0xFF,
adapter->host->io_port + MBOX_PORT1);
outb((adapter->mbox_dma >> 16) & 0xFF,
adapter->host->io_port + MBOX_PORT2);
outb((adapter->mbox_dma >> 24) & 0xFF,
adapter->host->io_port + MBOX_PORT3);
outb(ENABLE_MBOX_BYTE,
adapter->host->io_port + ENABLE_MBOX_REGION);
irq_ack(adapter);
irq_enable(adapter);
}
return 0;
}
/*
* mega_query_adapter()
 * @adapter: pointer to our soft state
 *
 * Issue the adapter inquiry commands to the controller and find out
 * information and parameters about the attached devices.
*/
static int
mega_query_adapter(adapter_t *adapter)
{
dma_addr_t prod_info_dma_handle;
mega_inquiry3 *inquiry3;
struct mbox_out mbox;
u8 *raw_mbox = (u8 *)&mbox;
int retval;
/* Initialize adapter inquiry mailbox */
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
memset(&mbox, 0, sizeof(mbox));
/*
	 * Try to issue the Inquiry3 command;
	 * if it does not succeed, issue the MEGA_MBOXCMD_ADAPTERINQ command
	 * and update the enquiry3 structure
*/
mbox.xferaddr = (u32)adapter->buf_dma_handle;
inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
/* Issue a blocking command to the card */
if (issue_scb_block(adapter, raw_mbox)) {
/* the adapter does not support 40ld */
mraid_ext_inquiry *ext_inq;
mraid_inquiry *inq;
dma_addr_t dma_handle;
ext_inq = dma_alloc_coherent(&adapter->dev->dev,
sizeof(mraid_ext_inquiry),
&dma_handle, GFP_KERNEL);
if( ext_inq == NULL ) return -1;
inq = &ext_inq->raid_inq;
mbox.xferaddr = (u32)dma_handle;
/*issue old 0x04 command to adapter */
mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ;
issue_scb_block(adapter, raw_mbox);
/*
* update Enquiry3 and ProductInfo structures with
* mraid_inquiry structure
*/
mega_8_to_40ld(inq, inquiry3,
(mega_product_info *)&adapter->product_info);
dma_free_coherent(&adapter->dev->dev,
sizeof(mraid_ext_inquiry), ext_inq,
dma_handle);
} else { /*adapter supports 40ld */
adapter->flag |= BOARD_40LD;
/*
* get product_info, which is static information and will be
* unchanged
*/
prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
(void *)&adapter->product_info,
sizeof(mega_product_info),
DMA_FROM_DEVICE);
mbox.xferaddr = prod_info_dma_handle;
raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
if ((retval = issue_scb_block(adapter, raw_mbox)))
dev_warn(&adapter->dev->dev,
"Product_info cmd failed with error: %d\n",
retval);
dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
sizeof(mega_product_info), DMA_FROM_DEVICE);
}
/*
* kernel scans the channels from 0 to <= max_channel
*/
adapter->host->max_channel =
adapter->product_info.nchannels + NVIRT_CHAN -1;
adapter->host->max_id = 16; /* max targets per channel */
adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */
adapter->host->cmd_per_lun = max_cmd_per_lun;
adapter->numldrv = inquiry3->num_ldrv;
adapter->max_cmds = adapter->product_info.max_commands;
if(adapter->max_cmds > MAX_COMMANDS)
adapter->max_cmds = MAX_COMMANDS;
adapter->host->can_queue = adapter->max_cmds - 1;
/*
* Get the maximum number of scatter-gather elements supported by this
* firmware
*/
mega_get_max_sgl(adapter);
adapter->host->sg_tablesize = adapter->sglen;
/* use HP firmware and bios version encoding
Note: fw_version[0|1] and bios_version[0|1] were originally shifted
right 8 bits making them zero. This 0 value was hardcoded to fix
sparse warnings. */
if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
snprintf(adapter->fw_version, sizeof(adapter->fw_version),
"%c%d%d.%d%d",
adapter->product_info.fw_version[2],
0,
adapter->product_info.fw_version[1] & 0x0f,
0,
adapter->product_info.fw_version[0] & 0x0f);
		snprintf(adapter->bios_version, sizeof(adapter->bios_version),
"%c%d%d.%d%d",
adapter->product_info.bios_version[2],
0,
adapter->product_info.bios_version[1] & 0x0f,
0,
adapter->product_info.bios_version[0] & 0x0f);
} else {
memcpy(adapter->fw_version,
(char *)adapter->product_info.fw_version, 4);
adapter->fw_version[4] = 0;
memcpy(adapter->bios_version,
(char *)adapter->product_info.bios_version, 4);
adapter->bios_version[4] = 0;
}
dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
adapter->fw_version, adapter->bios_version, adapter->numldrv);
/*
* Do we support extended (>10 bytes) cdbs
*/
adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
if (adapter->support_ext_cdb)
dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
return 0;
}
/**
* mega_runpendq()
* @adapter: pointer to our soft state
*
* Runs through the list of pending requests.
*/
static inline void
mega_runpendq(adapter_t *adapter)
{
if(!list_empty(&adapter->pending_list))
__mega_runpendq(adapter);
}
/*
* megaraid_queue()
* @scmd - Issue this scsi command
* @done - the callback hook into the scsi mid-layer
*
* The command queuing entry point for the mid-layer.
*/
static int megaraid_queue_lck(struct scsi_cmnd *scmd)
{
adapter_t *adapter;
scb_t *scb;
int busy=0;
unsigned long flags;
adapter = (adapter_t *)scmd->device->host->hostdata;
/*
* Allocate and build a SCB request
	 * The busy flag is set if mega_build_cmd() could not allocate an
	 * scb; we return a non-zero status in that case.
	 * NOTE: scb can be NULL even though certain commands completed
	 * successfully (e.g. MODE_SENSE and TEST_UNIT_READY); we return 0
	 * in that case.
*/
spin_lock_irqsave(&adapter->lock, flags);
scb = mega_build_cmd(adapter, scmd, &busy);
if (!scb)
goto out;
scb->state |= SCB_PENDQ;
list_add_tail(&scb->list, &adapter->pending_list);
/*
* Check if the HBA is in quiescent state, e.g., during a
	 * delete logical drive operation. If it is, don't run
* the pending_list.
*/
if (atomic_read(&adapter->quiescent) == 0)
mega_runpendq(adapter);
busy = 0;
out:
spin_unlock_irqrestore(&adapter->lock, flags);
return busy;
}
static DEF_SCSI_QCMD(megaraid_queue)
/**
* mega_allocate_scb()
* @adapter: pointer to our soft state
* @cmd: scsi command from the mid-layer
*
* Allocate a SCB structure. This is the central structure for controller
* commands.
*/
static inline scb_t *
mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
{
struct list_head *head = &adapter->free_list;
scb_t *scb;
/* Unlink command from Free List */
if( !list_empty(head) ) {
scb = list_entry(head->next, scb_t, list);
list_del_init(head->next);
scb->state = SCB_ACTIVE;
scb->cmd = cmd;
scb->dma_type = MEGA_DMA_TYPE_NONE;
return scb;
}
return NULL;
}
/**
* mega_get_ldrv_num()
* @adapter: pointer to our soft state
* @cmd: scsi mid layer command
* @channel: channel on the controller
*
* Calculate the logical drive number based on the information in scsi command
* and the channel number.
*/
static inline int
mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
{
int tgt;
int ldrv_num;
tgt = cmd->device->id;
if ( tgt > adapter->this_id )
		tgt--;		/* we do not get inquiries for initiator id */
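	/* 15 usable targets per channel once the initiator id is excluded:
	 * e.g. channel 1, target 4 maps to logical drive 19 (assuming
	 * this_id is 7, so no adjustment happens).
	 */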
ldrv_num = (channel * 15) + tgt;
/*
	 * If we have a logical drive with boot enabled, present it first
*/
if( adapter->boot_ldrv_enabled ) {
if( ldrv_num == 0 ) {
ldrv_num = adapter->boot_ldrv;
}
else {
if( ldrv_num <= adapter->boot_ldrv ) {
ldrv_num--;
}
}
}
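	/* e.g. with boot_ldrv = 2: drive 0 maps to 2, drives 1 and 2 shift
	 * down to 0 and 1, and drives above 2 are unchanged.
	 */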
/*
* If "delete logical drive" feature is enabled on this controller.
* Do only if at least one delete logical drive operation was done.
*
* Also, after logical drive deletion, instead of logical drive number,
* the value returned should be 0x80+logical drive id.
*
* These is valid only for IO commands.
*/
if (adapter->support_random_del && adapter->read_ldidmap )
switch (cmd->cmnd[0]) {
case READ_6:
case WRITE_6:
case READ_10:
case WRITE_10:
ldrv_num += 0x80;
}
return ldrv_num;
}
/**
* mega_build_cmd()
* @adapter: pointer to our soft state
* @cmd: Prepare using this scsi command
* @busy: busy flag if no resources
*
* Prepares a command and scatter gather list for the controller. This routine
* also finds out if the commands is intended for a logical drive or a
* physical device and prepares the controller command accordingly.
*
* We also re-order the logical drives and physical devices based on their
* boot settings.
*/
static scb_t *
mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
{
mega_passthru *pthru;
scb_t *scb;
mbox_t *mbox;
u32 seg;
char islogical;
int max_ldrv_num;
int channel = 0;
int target = 0;
int ldrv_num = 0; /* logical drive number */
/*
* We know what channels our logical drives are on - mega_find_card()
*/
islogical = adapter->logdrv_chan[cmd->device->channel];
/*
	 * The theory: if a physical drive is chosen for boot, all physical
	 * devices are exported before the logical drives; otherwise physical
	 * devices are pushed after the logical drives, in which case the
	 * kernel sees the physical devices on a virtual channel that is
	 * converted to the actual channel on the HBA.
*/
if( adapter->boot_pdrv_enabled ) {
if( islogical ) {
/* logical channel */
channel = cmd->device->channel -
adapter->product_info.nchannels;
}
else {
/* this is physical channel */
channel = cmd->device->channel;
target = cmd->device->id;
/*
			 * To boot from a physical disk, that disk needs to
			 * be exposed first. If both channels are SCSI,
			 * booting from the second channel is not allowed.
*/
if( target == 0 ) {
target = adapter->boot_pdrv_tgt;
}
else if( target == adapter->boot_pdrv_tgt ) {
target = 0;
}
}
}
else {
if( islogical ) {
/* this is the logical channel */
channel = cmd->device->channel;
}
else {
/* physical channel */
channel = cmd->device->channel - NVIRT_CHAN;
target = cmd->device->id;
}
}
if(islogical) {
/* have just LUN 0 for each target on virtual channels */
if (cmd->device->lun) {
cmd->result = (DID_BAD_TARGET << 16);
scsi_done(cmd);
return NULL;
}
ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);
max_ldrv_num = (adapter->flag & BOARD_40LD) ?
MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;
/*
* max_ldrv_num increases by 0x80 if some logical drive was
* deleted.
*/
if(adapter->read_ldidmap)
max_ldrv_num += 0x80;
if(ldrv_num > max_ldrv_num ) {
cmd->result = (DID_BAD_TARGET << 16);
scsi_done(cmd);
return NULL;
}
}
else {
if( cmd->device->lun > 7) {
/*
* Do not support lun >7 for physically accessed
* devices
*/
cmd->result = (DID_BAD_TARGET << 16);
scsi_done(cmd);
return NULL;
}
}
/*
*
* Logical drive commands
*
*/
if(islogical) {
switch (cmd->cmnd[0]) {
case TEST_UNIT_READY:
#if MEGA_HAVE_CLUSTERING
/*
			 * Do we support clustering and is the support
			 * enabled? If not, always return success.
*/
if( !adapter->has_cluster ) {
cmd->result = (DID_OK << 16);
scsi_done(cmd);
return NULL;
}
if(!(scb = mega_allocate_scb(adapter, cmd))) {
*busy = 1;
return NULL;
}
scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
scb->raw_mbox[3] = ldrv_num;
scb->dma_direction = DMA_NONE;
return scb;
#else
cmd->result = (DID_OK << 16);
scsi_done(cmd);
return NULL;
#endif
case MODE_SENSE: {
char *buf;
struct scatterlist *sg;
sg = scsi_sglist(cmd);
buf = kmap_atomic(sg_page(sg)) + sg->offset;
memset(buf, 0, cmd->cmnd[4]);
kunmap_atomic(buf - sg->offset);
cmd->result = (DID_OK << 16);
scsi_done(cmd);
return NULL;
}
case READ_CAPACITY:
case INQUIRY:
if(!(adapter->flag & (1L << cmd->device->channel))) {
dev_notice(&adapter->dev->dev,
"scsi%d: scanning scsi channel %d "
"for logical drives\n",
adapter->host->host_no,
cmd->device->channel);
adapter->flag |= (1L << cmd->device->channel);
}
/* Allocate a SCB and initialize passthru */
if(!(scb = mega_allocate_scb(adapter, cmd))) {
*busy = 1;
return NULL;
}
pthru = scb->pthru;
mbox = (mbox_t *)scb->raw_mbox;
memset(mbox, 0, sizeof(scb->raw_mbox));
memset(pthru, 0, sizeof(mega_passthru));
pthru->timeout = 0;
pthru->ars = 1;
pthru->reqsenselen = 14;
pthru->islogical = 1;
pthru->logdrv = ldrv_num;
pthru->cdblen = cmd->cmd_len;
memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
if( adapter->has_64bit_addr ) {
mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
}
else {
mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
}
scb->dma_direction = DMA_FROM_DEVICE;
pthru->numsgelements = mega_build_sglist(adapter, scb,
&pthru->dataxferaddr, &pthru->dataxferlen);
mbox->m_out.xferaddr = scb->pthru_dma_addr;
return scb;
case READ_6:
case WRITE_6:
case READ_10:
case WRITE_10:
case READ_12:
case WRITE_12:
/* Allocate a SCB and initialize mailbox */
if(!(scb = mega_allocate_scb(adapter, cmd))) {
*busy = 1;
return NULL;
}
mbox = (mbox_t *)scb->raw_mbox;
memset(mbox, 0, sizeof(scb->raw_mbox));
mbox->m_out.logdrv = ldrv_num;
/*
* A little hack: 2nd bit is zero for all scsi read
* commands and is set for all scsi write commands
*/
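			/* e.g. READ_10 = 0x28 (bit 1 clear) vs
			 * WRITE_10 = 0x2a (bit 1 set).
			 */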
if( adapter->has_64bit_addr ) {
mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
MEGA_MBOXCMD_LWRITE64:
MEGA_MBOXCMD_LREAD64 ;
}
else {
mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
MEGA_MBOXCMD_LWRITE:
MEGA_MBOXCMD_LREAD ;
}
/*
* 6-byte READ(0x08) or WRITE(0x0A) cdb
*/
if( cmd->cmd_len == 6 ) {
mbox->m_out.numsectors = (u32) cmd->cmnd[4];
mbox->m_out.lba =
((u32)cmd->cmnd[1] << 16) |
((u32)cmd->cmnd[2] << 8) |
(u32)cmd->cmnd[3];
mbox->m_out.lba &= 0x1FFFFF;
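				/* A 6-byte CDB carries a 21-bit LBA (the low
				 * 5 bits of byte 1 plus bytes 2 and 3), hence
				 * the 0x1fffff mask.
				 */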
#if MEGA_HAVE_STATS
/*
* Take modulo 0x80, since the logical drive
* number increases by 0x80 when a logical
* drive was deleted
*/
if (*cmd->cmnd == READ_6) {
adapter->nreads[ldrv_num%0x80]++;
adapter->nreadblocks[ldrv_num%0x80] +=
mbox->m_out.numsectors;
} else {
adapter->nwrites[ldrv_num%0x80]++;
adapter->nwriteblocks[ldrv_num%0x80] +=
mbox->m_out.numsectors;
}
#endif
}
/*
* 10-byte READ(0x28) or WRITE(0x2A) cdb
*/
if( cmd->cmd_len == 10 ) {
mbox->m_out.numsectors =
(u32)cmd->cmnd[8] |
((u32)cmd->cmnd[7] << 8);
mbox->m_out.lba =
((u32)cmd->cmnd[2] << 24) |
((u32)cmd->cmnd[3] << 16) |
((u32)cmd->cmnd[4] << 8) |
(u32)cmd->cmnd[5];
#if MEGA_HAVE_STATS
if (*cmd->cmnd == READ_10) {
adapter->nreads[ldrv_num%0x80]++;
adapter->nreadblocks[ldrv_num%0x80] +=
mbox->m_out.numsectors;
} else {
adapter->nwrites[ldrv_num%0x80]++;
adapter->nwriteblocks[ldrv_num%0x80] +=
mbox->m_out.numsectors;
}
#endif
}
/*
* 12-byte READ(0xA8) or WRITE(0xAA) cdb
*/
if( cmd->cmd_len == 12 ) {
mbox->m_out.lba =
((u32)cmd->cmnd[2] << 24) |
((u32)cmd->cmnd[3] << 16) |
((u32)cmd->cmnd[4] << 8) |
(u32)cmd->cmnd[5];
mbox->m_out.numsectors =
((u32)cmd->cmnd[6] << 24) |
((u32)cmd->cmnd[7] << 16) |
((u32)cmd->cmnd[8] << 8) |
(u32)cmd->cmnd[9];
#if MEGA_HAVE_STATS
if (*cmd->cmnd == READ_12) {
adapter->nreads[ldrv_num%0x80]++;
adapter->nreadblocks[ldrv_num%0x80] +=
mbox->m_out.numsectors;
} else {
adapter->nwrites[ldrv_num%0x80]++;
adapter->nwriteblocks[ldrv_num%0x80] +=
mbox->m_out.numsectors;
}
#endif
}
/*
* If it is a read command
*/
if( (*cmd->cmnd & 0x0F) == 0x08 ) {
scb->dma_direction = DMA_FROM_DEVICE;
}
else {
scb->dma_direction = DMA_TO_DEVICE;
}
/* Calculate Scatter-Gather info */
mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
(u32 *)&mbox->m_out.xferaddr, &seg);
return scb;
#if MEGA_HAVE_CLUSTERING
case RESERVE:
case RELEASE:
/*
			 * Do we support clustering and is the support enabled?
*/
if( ! adapter->has_cluster ) {
cmd->result = (DID_BAD_TARGET << 16);
scsi_done(cmd);
return NULL;
}
/* Allocate a SCB and initialize mailbox */
if(!(scb = mega_allocate_scb(adapter, cmd))) {
*busy = 1;
return NULL;
}
scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
MEGA_RESERVE_LD : MEGA_RELEASE_LD;
scb->raw_mbox[3] = ldrv_num;
scb->dma_direction = DMA_NONE;
return scb;
#endif
default:
cmd->result = (DID_BAD_TARGET << 16);
scsi_done(cmd);
return NULL;
}
}
/*
* Passthru drive commands
*/
else {
/* Allocate a SCB and initialize passthru */
if(!(scb = mega_allocate_scb(adapter, cmd))) {
*busy = 1;
return NULL;
}
mbox = (mbox_t *)scb->raw_mbox;
memset(mbox, 0, sizeof(scb->raw_mbox));
if( adapter->support_ext_cdb ) {
mega_prepare_extpassthru(adapter, scb, cmd,
channel, target);
mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
mbox->m_out.xferaddr = scb->epthru_dma_addr;
}
else {
pthru = mega_prepare_passthru(adapter, scb, cmd,
channel, target);
/* Initialize mailbox */
if( adapter->has_64bit_addr ) {
mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
}
else {
mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
}
mbox->m_out.xferaddr = scb->pthru_dma_addr;
}
return scb;
}
return NULL;
}
/**
* mega_prepare_passthru()
* @adapter: pointer to our soft state
* @scb: our scsi control block
* @cmd: scsi command from the mid-layer
* @channel: actual channel on the controller
* @target: actual id on the controller.
*
* prepare a command for the scsi physical devices.
*/
static mega_passthru *
mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
int channel, int target)
{
mega_passthru *pthru;
pthru = scb->pthru;
memset(pthru, 0, sizeof (mega_passthru));
/* 0=6sec/1=60sec/2=10min/3=3hrs */
pthru->timeout = 2;
pthru->ars = 1;
pthru->reqsenselen = 14;
pthru->islogical = 0;
pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
pthru->target = (adapter->flag & BOARD_40LD) ?
(channel << 4) | target : target;
pthru->cdblen = cmd->cmd_len;
pthru->logdrv = cmd->device->lun;
memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
/* Not sure about the direction */
scb->dma_direction = DMA_BIDIRECTIONAL;
/* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
switch (cmd->cmnd[0]) {
case INQUIRY:
case READ_CAPACITY:
if(!(adapter->flag & (1L << cmd->device->channel))) {
dev_notice(&adapter->dev->dev,
"scsi%d: scanning scsi channel %d [P%d] "
"for physical devices\n",
adapter->host->host_no,
cmd->device->channel, channel);
adapter->flag |= (1L << cmd->device->channel);
}
fallthrough;
default:
pthru->numsgelements = mega_build_sglist(adapter, scb,
&pthru->dataxferaddr, &pthru->dataxferlen);
break;
}
return pthru;
}
/**
* mega_prepare_extpassthru()
* @adapter: pointer to our soft state
* @scb: our scsi control block
* @cmd: scsi command from the mid-layer
* @channel: actual channel on the controller
* @target: actual id on the controller.
*
 * prepare a command for the scsi physical devices. This routine prepares
* commands for devices which can take extended CDBs (>10 bytes)
*/
static mega_ext_passthru *
mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
struct scsi_cmnd *cmd,
int channel, int target)
{
mega_ext_passthru *epthru;
epthru = scb->epthru;
memset(epthru, 0, sizeof(mega_ext_passthru));
/* 0=6sec/1=60sec/2=10min/3=3hrs */
epthru->timeout = 2;
epthru->ars = 1;
epthru->reqsenselen = 14;
epthru->islogical = 0;
epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
epthru->target = (adapter->flag & BOARD_40LD) ?
(channel << 4) | target : target;
epthru->cdblen = cmd->cmd_len;
epthru->logdrv = cmd->device->lun;
memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
/* Not sure about the direction */
scb->dma_direction = DMA_BIDIRECTIONAL;
switch(cmd->cmnd[0]) {
case INQUIRY:
case READ_CAPACITY:
if(!(adapter->flag & (1L << cmd->device->channel))) {
dev_notice(&adapter->dev->dev,
"scsi%d: scanning scsi channel %d [P%d] "
"for physical devices\n",
adapter->host->host_no,
cmd->device->channel, channel);
adapter->flag |= (1L << cmd->device->channel);
}
fallthrough;
default:
epthru->numsgelements = mega_build_sglist(adapter, scb,
&epthru->dataxferaddr, &epthru->dataxferlen);
break;
}
return epthru;
}
static void
__mega_runpendq(adapter_t *adapter)
{
scb_t *scb;
struct list_head *pos, *next;
/* Issue any pending commands to the card */
list_for_each_safe(pos, next, &adapter->pending_list) {
scb = list_entry(pos, scb_t, list);
if( !(scb->state & SCB_ISSUED) ) {
if( issue_scb(adapter, scb) != 0 )
return;
}
}
return;
}
/**
* issue_scb()
* @adapter: pointer to our soft state
* @scb: scsi control block
*
 * Post a command to the card if the mailbox is available, otherwise return
 * busy. The scb stays on the pending list, marked SCB_ISSUED, until its
 * completion is serviced.
*/
static int
issue_scb(adapter_t *adapter, scb_t *scb)
{
volatile mbox64_t *mbox64 = adapter->mbox64;
volatile mbox_t *mbox = adapter->mbox;
unsigned int i = 0;
if(unlikely(mbox->m_in.busy)) {
do {
udelay(1);
i++;
} while( mbox->m_in.busy && (i < max_mbox_busy_wait) );
if(mbox->m_in.busy) return -1;
}
/* Copy mailbox data into host structure */
memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
sizeof(struct mbox_out));
mbox->m_out.cmdid = scb->idx; /* Set cmdid */
mbox->m_in.busy = 1; /* Set busy */
/*
* Increment the pending queue counter
*/
atomic_inc(&adapter->pend_cmds);
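/*
 * Commands that use the 64-bit mailbox extension pass the transfer
 * address through mbox64; the 32-bit xferaddr field is set to
 * 0xFFFFFFFF for these commands.
 */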
switch (mbox->m_out.cmd) {
case MEGA_MBOXCMD_LREAD64:
case MEGA_MBOXCMD_LWRITE64:
case MEGA_MBOXCMD_PASSTHRU64:
case MEGA_MBOXCMD_EXTPTHRU:
mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
mbox64->xfer_segment_hi = 0;
mbox->m_out.xferaddr = 0xFFFFFFFF;
break;
default:
mbox64->xfer_segment_lo = 0;
mbox64->xfer_segment_hi = 0;
}
/*
* post the command
*/
scb->state |= SCB_ISSUED;
if( likely(adapter->flag & BOARD_MEMMAP) ) {
mbox->m_in.poll = 0;
mbox->m_in.ack = 0;
WRINDOOR(adapter, adapter->mbox_dma | 0x1);
}
else {
irq_enable(adapter);
issue_command(adapter);
}
return 0;
}
/*
* Wait until the controller's mailbox is available
*/
static inline int
mega_busywait_mbox (adapter_t *adapter)
{
if (adapter->mbox->m_in.busy)
return __mega_busywait_mbox(adapter);
return 0;
}
/**
* issue_scb_block()
* @adapter: pointer to our soft state
* @raw_mbox: the mailbox
*
* Issue a scb in synchronous and non-interrupt mode
*/
static int
issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
{
volatile mbox64_t *mbox64 = adapter->mbox64;
volatile mbox_t *mbox = adapter->mbox;
u8 byte;
/* Wait until mailbox is free */
if(mega_busywait_mbox (adapter))
goto bug_blocked_mailbox;
/* Copy mailbox data into host structure */
memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
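/* 0xFE identifies internally issued, synchronous (blocked-mode) commands */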
mbox->m_out.cmdid = 0xFE;
mbox->m_in.busy = 1;
switch (raw_mbox[0]) {
case MEGA_MBOXCMD_LREAD64:
case MEGA_MBOXCMD_LWRITE64:
case MEGA_MBOXCMD_PASSTHRU64:
case MEGA_MBOXCMD_EXTPTHRU:
mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
mbox64->xfer_segment_hi = 0;
mbox->m_out.xferaddr = 0xFFFFFFFF;
break;
default:
mbox64->xfer_segment_lo = 0;
mbox64->xfer_segment_hi = 0;
}
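/*
 * Memory-mapped boards complete blocked commands by polling: ring the
 * inbound doorbell, spin until numstatus and the 0x77 poll byte are
 * posted, then acknowledge through the second doorbell bit.
 */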
if( likely(adapter->flag & BOARD_MEMMAP) ) {
mbox->m_in.poll = 0;
mbox->m_in.ack = 0;
mbox->m_in.numstatus = 0xFF;
mbox->m_in.status = 0xFF;
WRINDOOR(adapter, adapter->mbox_dma | 0x1);
while((volatile u8)mbox->m_in.numstatus == 0xFF)
cpu_relax();
mbox->m_in.numstatus = 0xFF;
while( (volatile u8)mbox->m_in.poll != 0x77 )
cpu_relax();
mbox->m_in.poll = 0;
mbox->m_in.ack = 0x77;
WRINDOOR(adapter, adapter->mbox_dma | 0x2);
while(RDINDOOR(adapter) & 0x2)
cpu_relax();
}
else {
irq_disable(adapter);
issue_command(adapter);
while (!((byte = irq_state(adapter)) & INTR_VALID))
cpu_relax();
set_irq_state(adapter, byte);
irq_enable(adapter);
irq_ack(adapter);
}
return mbox->m_in.status;
bug_blocked_mailbox:
dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
udelay (1000);
return -1;
}
/**
* megaraid_isr_iomapped()
* @irq: irq
* @devp: pointer to our soft state
*
* Interrupt service routine for io-mapped controllers.
* Find out if our device is interrupting. If yes, acknowledge the interrupt
* and service the completed commands.
*/
static irqreturn_t
megaraid_isr_iomapped(int irq, void *devp)
{
adapter_t *adapter = devp;
unsigned long flags;
u8 status;
u8 nstatus;
u8 completed[MAX_FIRMWARE_STATUS];
u8 byte;
int handled = 0;
/*
 * Loop while the F/W has more commands for us to complete.
*/
spin_lock_irqsave(&adapter->lock, flags);
do {
/* Check if a valid interrupt is pending */
byte = irq_state(adapter);
if( (byte & VALID_INTR_BYTE) == 0 ) {
/*
* No more pending commands
*/
goto out_unlock;
}
set_irq_state(adapter, byte);
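/* Wait till the firmware posts a valid completion count; 0xFF means not ready yet */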
while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
== 0xFF)
cpu_relax();
adapter->mbox->m_in.numstatus = 0xFF;
status = adapter->mbox->m_in.status;
/*
* decrement the pending queue counter
*/
atomic_sub(nstatus, &adapter->pend_cmds);
memcpy(completed, (void *)adapter->mbox->m_in.completed,
nstatus);
/* Acknowledge interrupt */
irq_ack(adapter);
mega_cmd_done(adapter, completed, nstatus, status);
mega_rundoneq(adapter);
handled = 1;
/* Loop through any pending requests */
if(atomic_read(&adapter->quiescent) == 0) {
mega_runpendq(adapter);
}
} while(1);
out_unlock:
spin_unlock_irqrestore(&adapter->lock, flags);
return IRQ_RETVAL(handled);
}
/**
* megaraid_isr_memmapped()
* @irq: irq
* @devp: pointer to our soft state
*
* Interrupt service routine for memory-mapped controllers.
* Find out if our device is interrupting. If yes, acknowledge the interrupt
* and service the completed commands.
*/
static irqreturn_t
megaraid_isr_memmapped(int irq, void *devp)
{
adapter_t *adapter = devp;
unsigned long flags;
u8 status;
u32 dword = 0;
u8 nstatus;
u8 completed[MAX_FIRMWARE_STATUS];
int handled = 0;
/*
 * Loop while the F/W has more commands for us to complete.
*/
spin_lock_irqsave(&adapter->lock, flags);
do {
/* Check if a valid interrupt is pending */
dword = RDOUTDOOR(adapter);
if(dword != 0x10001234) {
/*
* No more pending commands
*/
goto out_unlock;
}
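/* Acknowledge by writing the completion magic back to the outbound doorbell */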
WROUTDOOR(adapter, 0x10001234);
while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
== 0xFF) {
cpu_relax();
}
adapter->mbox->m_in.numstatus = 0xFF;
status = adapter->mbox->m_in.status;
/*
* decrement the pending queue counter
*/
atomic_sub(nstatus, &adapter->pend_cmds);
memcpy(completed, (void *)adapter->mbox->m_in.completed,
nstatus);
/* Acknowledge interrupt */
WRINDOOR(adapter, 0x2);
handled = 1;
while( RDINDOOR(adapter) & 0x02 )
cpu_relax();
mega_cmd_done(adapter, completed, nstatus, status);
mega_rundoneq(adapter);
/* Loop through any pending requests */
if(atomic_read(&adapter->quiescent) == 0) {
mega_runpendq(adapter);
}
} while(1);
out_unlock:
spin_unlock_irqrestore(&adapter->lock, flags);
return IRQ_RETVAL(handled);
}
/**
* mega_cmd_done()
* @adapter: pointer to our soft state
* @completed: array of ids of completed commands
* @nstatus: number of completed commands
* @status: status of the last command completed
*
* Complete the commands and call the scsi mid-layer callback hooks.
*/
static void
mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
{
mega_ext_passthru *epthru = NULL;
struct scatterlist *sgl;
struct scsi_cmnd *cmd = NULL;
mega_passthru *pthru = NULL;
mbox_t *mbox = NULL;
u8 c;
scb_t *scb;
int islogical;
int cmdid;
int i;
/*
* for all the commands completed, call the mid-layer callback routine
* and free the scb.
*/
for( i = 0; i < nstatus; i++ ) {
cmdid = completed[i];
/*
 * Only free SCBs for the commands coming down from the
 * mid-layer, not for those issued internally.
 *
 * For internal commands, restore the status returned by the
 * firmware so that the user can interpret it.
*/
if (cmdid == CMDID_INT_CMDS) {
scb = &adapter->int_scb;
cmd = scb->cmd;
list_del_init(&scb->list);
scb->state = SCB_FREE;
adapter->int_status = status;
complete(&adapter->int_waitq);
} else {
scb = &adapter->scb_list[cmdid];
/*
* Make sure f/w has completed a valid command
*/
if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
dev_crit(&adapter->dev->dev, "invalid command "
"Id %d, scb->state:%x, scsi cmd:%p\n",
cmdid, scb->state, scb->cmd);
continue;
}
/*
 * Was an abort issued for this command?
*/
if( scb->state & SCB_ABORT ) {
dev_warn(&adapter->dev->dev,
"aborted cmd [%x] complete\n",
scb->idx);
scb->cmd->result = (DID_ABORT << 16);
list_add_tail(SCSI_LIST(scb->cmd),
&adapter->completed_list);
mega_free_scb(adapter, scb);
continue;
}
/*
 * Was a reset issued for this command?
*/
if( scb->state & SCB_RESET ) {
dev_warn(&adapter->dev->dev,
"reset cmd [%x] complete\n",
scb->idx);
scb->cmd->result = (DID_RESET << 16);
list_add_tail(SCSI_LIST(scb->cmd),
&adapter->completed_list);
mega_free_scb (adapter, scb);
continue;
}
cmd = scb->cmd;
pthru = scb->pthru;
epthru = scb->epthru;
mbox = (mbox_t *)scb->raw_mbox;
#if MEGA_HAVE_STATS
{
int logdrv = mbox->m_out.logdrv;
islogical = adapter->logdrv_chan[cmd->channel];
/*
* Maintain an error counter for the logical drive.
* Some application like SNMP agent need such
* statistics
*/
if( status && islogical && (cmd->cmnd[0] == READ_6 ||
cmd->cmnd[0] == READ_10 ||
cmd->cmnd[0] == READ_12)) {
/*
* Logical drive number increases by 0x80 when
* a logical drive is deleted
*/
adapter->rd_errors[logdrv%0x80]++;
}
if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
cmd->cmnd[0] == WRITE_10 ||
cmd->cmnd[0] == WRITE_12)) {
/*
* Logical drive number increases by 0x80 when
* a logical drive is deleted
*/
adapter->wr_errors[logdrv%0x80]++;
}
}
#endif
}
/*
 * Do not expose physical disks on a RAID channel: if an INQUIRY
 * sent to a non-logical device returns data identifying a hard
 * disk or removable hard disk, the request should return
 * failure! - PJ
*/
islogical = adapter->logdrv_chan[cmd->device->channel];
if( cmd->cmnd[0] == INQUIRY && !islogical ) {
sgl = scsi_sglist(cmd);
if( sg_page(sgl) ) {
c = *(unsigned char *) sg_virt(&sgl[0]);
} else {
dev_warn(&adapter->dev->dev, "invalid sg\n");
c = 0;
}
if(IS_RAID_CH(adapter, cmd->device->channel) &&
((c & 0x1F ) == TYPE_DISK)) {
status = 0xF0;
}
}
/* clear result; otherwise, success returns corrupt value */
cmd->result = 0;
/* Convert MegaRAID status to Linux error code */
switch (status) {
case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
cmd->result |= (DID_OK << 16);
break;
case 0x02: /* ERROR_ABORTED, i.e.
SCSI_STATUS_CHECK_CONDITION */
/* set sense_buffer and result fields */
if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {
memcpy(cmd->sense_buffer, pthru->reqsensearea,
14);
cmd->result = SAM_STAT_CHECK_CONDITION;
}
else {
if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
memcpy(cmd->sense_buffer,
epthru->reqsensearea, 14);
cmd->result = SAM_STAT_CHECK_CONDITION;
} else
scsi_build_sense(cmd, 0,
ABORTED_COMMAND, 0, 0);
}
break;
case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e.
SCSI_STATUS_BUSY */
cmd->result |= (DID_BUS_BUSY << 16) | status;
break;
default:
#if MEGA_HAVE_CLUSTERING
/*
* If TEST_UNIT_READY fails, we know
* MEGA_RESERVATION_STATUS failed
*/
if( cmd->cmnd[0] == TEST_UNIT_READY ) {
cmd->result |= (DID_ERROR << 16) |
SAM_STAT_RESERVATION_CONFLICT;
}
else
/*
* Error code returned is 1 if Reserve or Release
* failed or the input parameter is invalid
*/
if( status == 1 &&
(cmd->cmnd[0] == RESERVE ||
cmd->cmnd[0] == RELEASE) ) {
cmd->result |= (DID_ERROR << 16) |
SAM_STAT_RESERVATION_CONFLICT;
}
else
#endif
cmd->result |= (DID_BAD_TARGET << 16)|status;
}
mega_free_scb(adapter, scb);
/* Add Scsi_Command to end of completed queue */
list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
}
}
/*
 * mega_rundoneq()
 *
 * Run through the list of completed requests and finish them
*/
static void
mega_rundoneq (adapter_t *adapter)
{
struct megaraid_cmd_priv *cmd_priv;
list_for_each_entry(cmd_priv, &adapter->completed_list, entry)
scsi_done(megaraid_to_scsi_cmd(cmd_priv));
INIT_LIST_HEAD(&adapter->completed_list);
}
/*
* Free a SCB structure
 * Note: We assume the scsi command associated with this scb has not been freed yet.
*/
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
switch( scb->dma_type ) {
case MEGA_DMA_TYPE_NONE:
break;
case MEGA_SGLIST:
scsi_dma_unmap(scb->cmd);
break;
default:
break;
}
/*
* Remove from the pending list
*/
list_del_init(&scb->list);
/* Link the scb back into free list */
scb->state = SCB_FREE;
scb->cmd = NULL;
list_add(&scb->list, &adapter->free_list);
}
static int
__mega_busywait_mbox (adapter_t *adapter)
{
volatile mbox_t *mbox = adapter->mbox;
long counter;
for (counter = 0; counter < 10000; counter++) {
if (!mbox->m_in.busy)
return 0;
udelay(100);
cond_resched();
}
return -1; /* give up after 1 second */
}
/*
 * Builds the scatter-gather list for a command
* Note: For 64 bit cards, we need a minimum of one SG element for read/write
*/
static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
struct scatterlist *sg;
struct scsi_cmnd *cmd;
int sgcnt;
int idx;
cmd = scb->cmd;
/*
* Copy Scatter-Gather list info into controller structure.
*
* The number of sg elements returned must not exceed our limit
*/
sgcnt = scsi_dma_map(cmd);
scb->dma_type = MEGA_SGLIST;
BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
*len = 0;
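/*
 * Fast path: on 32-bit cards a single SG element is handed to the
 * firmware directly, without building an SG list.
 */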
if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
sg = scsi_sglist(cmd);
scb->dma_h_bulkdata = sg_dma_address(sg);
*buf = (u32)scb->dma_h_bulkdata;
*len = sg_dma_len(sg);
return 0;
}
scsi_for_each_sg(cmd, sg, sgcnt, idx) {
if (adapter->has_64bit_addr) {
scb->sgl64[idx].address = sg_dma_address(sg);
*len += scb->sgl64[idx].length = sg_dma_len(sg);
} else {
scb->sgl[idx].address = sg_dma_address(sg);
*len += scb->sgl[idx].length = sg_dma_len(sg);
}
}
/* Reset pointer and length fields */
*buf = scb->sgl_dma_addr;
/* Return count of SG requests */
return sgcnt;
}
/*
* mega_8_to_40ld()
*
* takes all info in AdapterInquiry structure and puts it into ProductInfo and
* Enquiry3 structures for later use
*/
static void
mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
mega_product_info *product_info)
{
int i;
product_info->max_commands = inquiry->adapter_info.max_commands;
enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
product_info->nchannels = inquiry->adapter_info.nchannels;
for (i = 0; i < 4; i++) {
product_info->fw_version[i] =
inquiry->adapter_info.fw_version[i];
product_info->bios_version[i] =
inquiry->adapter_info.bios_version[i];
}
enquiry3->cache_flush_interval =
inquiry->adapter_info.cache_flush_interval;
product_info->dram_size = inquiry->adapter_info.dram_size;
enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;
for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
}
for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
}
static inline void
mega_free_sgl(adapter_t *adapter)
{
scb_t *scb;
int i;
for(i = 0; i < adapter->max_cmds; i++) {
scb = &adapter->scb_list[i];
if( scb->sgl64 ) {
dma_free_coherent(&adapter->dev->dev,
sizeof(mega_sgl64) * adapter->sglen,
scb->sgl64, scb->sgl_dma_addr);
scb->sgl64 = NULL;
}
if( scb->pthru ) {
dma_free_coherent(&adapter->dev->dev,
sizeof(mega_passthru), scb->pthru,
scb->pthru_dma_addr);
scb->pthru = NULL;
}
if( scb->epthru ) {
dma_free_coherent(&adapter->dev->dev,
sizeof(mega_ext_passthru),
scb->epthru, scb->epthru_dma_addr);
scb->epthru = NULL;
}
}
}
/*
* Get information about the card/driver
*/
const char *
megaraid_info(struct Scsi_Host *host)
{
static char buffer[512];
adapter_t *adapter;
adapter = (adapter_t *)host->hostdata;
sprintf (buffer,
"LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
adapter->fw_version, adapter->product_info.max_commands,
adapter->host->max_id, adapter->host->max_channel,
(u32)adapter->host->max_lun);
return buffer;
}
/*
* Abort a previous SCSI request. Only commands on the pending list can be
* aborted. All the commands issued to the F/W must complete.
*/
static int
megaraid_abort(struct scsi_cmnd *cmd)
{
adapter_t *adapter;
int rval;
adapter = (adapter_t *)cmd->device->host->hostdata;
rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);
/*
 * This is required here so that any completed requests are
 * communicated over to the mid layer.
*/
mega_rundoneq(adapter);
return rval;
}
static int
megaraid_reset(struct scsi_cmnd *cmd)
{
adapter_t *adapter;
megacmd_t mc;
int rval;
adapter = (adapter_t *)cmd->device->host->hostdata;
#if MEGA_HAVE_CLUSTERING
mc.cmd = MEGA_CLUSTER_CMD;
mc.opcode = MEGA_RESET_RESERVATIONS;
if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
dev_warn(&adapter->dev->dev, "reservation reset failed\n");
}
else {
dev_info(&adapter->dev->dev, "reservation reset\n");
}
#endif
spin_lock_irq(&adapter->lock);
rval = megaraid_abort_and_reset(adapter, NULL, SCB_RESET);
/*
 * This is required here so that any completed requests are
 * communicated over to the mid layer.
*/
mega_rundoneq(adapter);
spin_unlock_irq(&adapter->lock);
return rval;
}
/**
* megaraid_abort_and_reset()
* @adapter: megaraid soft state
* @cmd: scsi command to be aborted or reset
* @aor: abort or reset flag
*
 * Try to locate the scsi command in the pending queue. If found and not
 * yet issued to the controller, abort/reset it. Otherwise return failure.
*/
static int
megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
{
struct list_head *pos, *next;
scb_t *scb;
if (aor == SCB_ABORT)
dev_warn(&adapter->dev->dev,
"ABORTING cmd=%x <c=%d t=%d l=%d>\n",
cmd->cmnd[0], cmd->device->channel,
cmd->device->id, (u32)cmd->device->lun);
else
dev_warn(&adapter->dev->dev, "RESETTING\n");
if(list_empty(&adapter->pending_list))
return FAILED;
list_for_each_safe(pos, next, &adapter->pending_list) {
scb = list_entry(pos, scb_t, list);
if (!cmd || scb->cmd == cmd) { /* Found command */
scb->state |= aor;
/*
* Check if this command has firmware ownership. If
* yes, we cannot reset this command. Whenever f/w
* completes this command, we will return appropriate
* status from ISR.
*/
if( scb->state & SCB_ISSUED ) {
dev_warn(&adapter->dev->dev,
"%s[%x], fw owner\n",
(aor==SCB_ABORT) ? "ABORTING":"RESET",
scb->idx);
return FAILED;
}
/*
* Not yet issued! Remove from the pending
* list
*/
dev_warn(&adapter->dev->dev,
"%s-[%x], driver owner\n",
(cmd) ? "ABORTING":"RESET",
scb->idx);
mega_free_scb(adapter, scb);
if (cmd) {
cmd->result = (DID_ABORT << 16);
list_add_tail(SCSI_LIST(cmd),
&adapter->completed_list);
}
return SUCCESS;
}
}
return FAILED;
}
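/*
 * Clone the controller's pci_dev with a 32-bit DMA mask so that buffers
 * for internal commands are allocated below the 4GB boundary.
 */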
static inline int
make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
{
*pdev = pci_alloc_dev(NULL);
if( *pdev == NULL ) return -1;
memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
kfree(*pdev);
return -1;
}
return 0;
}
static inline void
free_local_pdev(struct pci_dev *pdev)
{
kfree(pdev);
}
/**
* mega_allocate_inquiry()
* @dma_handle: handle returned for dma address
* @pdev: handle to pci device
*
* allocates memory for inquiry structure
*/
static inline void *
mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
{
return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
dma_handle, GFP_KERNEL);
}
static inline void
mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
{
dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
dma_handle);
}
#ifdef CONFIG_PROC_FS
/* Following code handles /proc fs */
/**
* proc_show_config()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display configuration information about the controller.
*/
static int
proc_show_config(struct seq_file *m, void *v)
{
adapter_t *adapter = m->private;
seq_puts(m, MEGARAID_VERSION);
if(adapter->product_info.product_name[0])
seq_printf(m, "%s\n", adapter->product_info.product_name);
seq_puts(m, "Controller Type: ");
if( adapter->flag & BOARD_MEMMAP )
seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
else
seq_puts(m, "418/428/434\n");
if(adapter->flag & BOARD_40LD)
seq_puts(m, "Controller Supports 40 Logical Drives\n");
if(adapter->flag & BOARD_64BIT)
seq_puts(m, "Controller capable of 64-bit memory addressing\n");
if( adapter->has_64bit_addr )
seq_puts(m, "Controller using 64-bit memory addressing\n");
else
seq_puts(m, "Controller is not using 64-bit memory addressing\n");
seq_printf(m, "Base = %08lx, Irq = %d, ",
adapter->base, adapter->host->irq);
seq_printf(m, "Logical Drives = %d, Channels = %d\n",
adapter->numldrv, adapter->product_info.nchannels);
seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
adapter->fw_version, adapter->bios_version,
adapter->product_info.dram_size);
seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
adapter->product_info.max_commands, adapter->max_cmds);
seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
seq_printf(m, "quiescent = %d\n",
atomic_read(&adapter->quiescent));
seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
seq_puts(m, "\nModule Parameters:\n");
seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
return 0;
}
/**
* proc_show_stat()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display statistical information about the I/O activity.
*/
static int
proc_show_stat(struct seq_file *m, void *v)
{
adapter_t *adapter = m->private;
#if MEGA_HAVE_STATS
int i;
#endif
seq_puts(m, "Statistical Information for this controller\n");
seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
#if MEGA_HAVE_STATS
for(i = 0; i < adapter->numldrv; i++) {
seq_printf(m, "Logical Drive %d:\n", i);
seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
adapter->nreads[i], adapter->nwrites[i]);
seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
adapter->nreadblocks[i], adapter->nwriteblocks[i]);
seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
adapter->rd_errors[i], adapter->wr_errors[i]);
}
#else
seq_puts(m, "IO and error counters not compiled in driver.\n");
#endif
return 0;
}
/**
* proc_show_mbox()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display mailbox information for the last command issued. This information
* is good for debugging.
*/
static int
proc_show_mbox(struct seq_file *m, void *v)
{
adapter_t *adapter = m->private;
volatile mbox_t *mbox = adapter->mbox;
seq_puts(m, "Contents of Mail Box Structure\n");
seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
return 0;
}
/**
* proc_show_rebuild_rate()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display current rebuild rate
*/
static int
proc_show_rebuild_rate(struct seq_file *m, void *v)
{
adapter_t *adapter = m->private;
dma_addr_t dma_handle;
caddr_t inquiry;
struct pci_dev *pdev;
if( make_local_pdev(adapter, &pdev) != 0 )
return 0;
if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
goto free_pdev;
if( mega_adapinq(adapter, dma_handle) != 0 ) {
seq_puts(m, "Adapter inquiry failed.\n");
dev_warn(&adapter->dev->dev, "inquiry failed\n");
goto free_inquiry;
}
if( adapter->flag & BOARD_40LD )
seq_printf(m, "Rebuild Rate: [%d%%]\n",
((mega_inquiry3 *)inquiry)->rebuild_rate);
else
seq_printf(m, "Rebuild Rate: [%d%%]\n",
((mraid_ext_inquiry *)
inquiry)->raid_inq.adapter_info.rebuild_rate);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
free_local_pdev(pdev);
return 0;
}
/**
* proc_show_battery()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display information about the battery module on the controller.
*/
static int
proc_show_battery(struct seq_file *m, void *v)
{
adapter_t *adapter = m->private;
dma_addr_t dma_handle;
caddr_t inquiry;
struct pci_dev *pdev;
u8 battery_status;
if( make_local_pdev(adapter, &pdev) != 0 )
return 0;
if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
goto free_pdev;
if( mega_adapinq(adapter, dma_handle) != 0 ) {
seq_puts(m, "Adapter inquiry failed.\n");
dev_warn(&adapter->dev->dev, "inquiry failed\n");
goto free_inquiry;
}
if( adapter->flag & BOARD_40LD ) {
battery_status = ((mega_inquiry3 *)inquiry)->battery_status;
}
else {
battery_status = ((mraid_ext_inquiry *)inquiry)->
raid_inq.adapter_info.battery_status;
}
/*
* Decode the battery status
*/
seq_printf(m, "Battery Status:[%d]", battery_status);
if(battery_status == MEGA_BATT_CHARGE_DONE)
seq_puts(m, " Charge Done");
if(battery_status & MEGA_BATT_MODULE_MISSING)
seq_puts(m, " Module Missing");
if(battery_status & MEGA_BATT_LOW_VOLTAGE)
seq_puts(m, " Low Voltage");
if(battery_status & MEGA_BATT_TEMP_HIGH)
seq_puts(m, " Temperature High");
if(battery_status & MEGA_BATT_PACK_MISSING)
seq_puts(m, " Pack Missing");
if(battery_status & MEGA_BATT_CHARGE_INPROG)
seq_puts(m, " Charge In-progress");
if(battery_status & MEGA_BATT_CHARGE_FAIL)
seq_puts(m, " Charge Fail");
if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
seq_puts(m, " Cycles Exceeded");
seq_putc(m, '\n');
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
free_local_pdev(pdev);
return 0;
}
/*
* Display scsi inquiry
*/
static void
mega_print_inquiry(struct seq_file *m, char *scsi_inq)
{
int i;
seq_puts(m, " Vendor: ");
seq_write(m, scsi_inq + 8, 8);
seq_puts(m, " Model: ");
seq_write(m, scsi_inq + 16, 16);
seq_puts(m, " Rev: ");
seq_write(m, scsi_inq + 32, 4);
seq_putc(m, '\n');
i = scsi_inq[0] & 0x1f;
seq_printf(m, " Type: %s ", scsi_device_type(i));
seq_printf(m, " ANSI SCSI revision: %02x",
scsi_inq[2] & 0x07);
if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
seq_puts(m, " CCS\n");
else
seq_putc(m, '\n');
}
/**
* proc_show_pdrv()
* @m: Synthetic file construction data
* @adapter: pointer to our soft state
* @channel: channel
*
* Display information about the physical drives.
*/
static int
proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
{
dma_addr_t dma_handle;
char *scsi_inq;
dma_addr_t scsi_inq_dma_handle;
caddr_t inquiry;
struct pci_dev *pdev;
u8 *pdrv_state;
u8 state;
int tgt;
int max_channels;
int i;
if( make_local_pdev(adapter, &pdev) != 0 )
return 0;
if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
goto free_pdev;
if( mega_adapinq(adapter, dma_handle) != 0 ) {
seq_puts(m, "Adapter inquiry failed.\n");
dev_warn(&adapter->dev->dev, "inquiry failed\n");
goto free_inquiry;
}
scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
GFP_KERNEL);
if( scsi_inq == NULL ) {
seq_puts(m, "memory not available for scsi inq.\n");
goto free_inquiry;
}
if( adapter->flag & BOARD_40LD ) {
pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state;
}
else {
pdrv_state = ((mraid_ext_inquiry *)inquiry)->
raid_inq.pdrv_info.pdrv_state;
}
max_channels = adapter->product_info.nchannels;
if( channel >= max_channels ) {
goto free_pci;
}
for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
i = channel*16 + tgt;
state = *(pdrv_state + i);
switch( state & 0x0F ) {
case PDRV_ONLINE:
seq_printf(m, "Channel:%2d Id:%2d State: Online",
channel, tgt);
break;
case PDRV_FAILED:
seq_printf(m, "Channel:%2d Id:%2d State: Failed",
channel, tgt);
break;
case PDRV_RBLD:
seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
channel, tgt);
break;
case PDRV_HOTSPARE:
seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
channel, tgt);
break;
default:
seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
channel, tgt);
break;
}
/*
* This interface displays inquiries for disk drives
 * only. Inquiries for logical drives and non-disk
* devices are available through /proc/scsi/scsi
*/
memset(scsi_inq, 0, 256);
if( mega_internal_dev_inquiry(adapter, channel, tgt,
scsi_inq_dma_handle) ||
(scsi_inq[0] & 0x1F) != TYPE_DISK ) {
continue;
}
/*
* Check for overflow. We print less than 240
* characters for inquiry
*/
seq_puts(m, ".\n");
mega_print_inquiry(m, scsi_inq);
}
free_pci:
dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
free_local_pdev(pdev);
return 0;
}
/**
* proc_show_pdrv_ch0()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display information about the physical drives on physical channel 0.
*/
static int
proc_show_pdrv_ch0(struct seq_file *m, void *v)
{
return proc_show_pdrv(m, m->private, 0);
}
/**
* proc_show_pdrv_ch1()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display information about the physical drives on physical channel 1.
*/
static int
proc_show_pdrv_ch1(struct seq_file *m, void *v)
{
return proc_show_pdrv(m, m->private, 1);
}
/**
* proc_show_pdrv_ch2()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display information about the physical drives on physical channel 2.
*/
static int
proc_show_pdrv_ch2(struct seq_file *m, void *v)
{
return proc_show_pdrv(m, m->private, 2);
}
/**
* proc_show_pdrv_ch3()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display information about the physical drives on physical channel 3.
*/
static int
proc_show_pdrv_ch3(struct seq_file *m, void *v)
{
return proc_show_pdrv(m, m->private, 3);
}
/**
* proc_show_rdrv()
* @m: Synthetic file construction data
* @adapter: pointer to our soft state
* @start: starting logical drive to display
* @end: ending logical drive to display
*
 * We do not print the inquiry information since it's already available
 * through the /proc/scsi/scsi interface
*/
static int
proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
{
dma_addr_t dma_handle;
logdrv_param *lparam;
megacmd_t mc;
char *disk_array;
dma_addr_t disk_array_dma_handle;
caddr_t inquiry;
struct pci_dev *pdev;
u8 *rdrv_state;
int num_ldrv;
u32 array_sz;
int i;
if( make_local_pdev(adapter, &pdev) != 0 )
return 0;
if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
goto free_pdev;
if( mega_adapinq(adapter, dma_handle) != 0 ) {
seq_puts(m, "Adapter inquiry failed.\n");
dev_warn(&adapter->dev->dev, "inquiry failed\n");
goto free_inquiry;
}
memset(&mc, 0, sizeof(megacmd_t));
if( adapter->flag & BOARD_40LD ) {
array_sz = sizeof(disk_array_40ld);
rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
}
else {
array_sz = sizeof(disk_array_8ld);
rdrv_state = ((mraid_ext_inquiry *)inquiry)->
raid_inq.logdrv_info.ldrv_state;
num_ldrv = ((mraid_ext_inquiry *)inquiry)->
raid_inq.logdrv_info.num_ldrv;
}
disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
&disk_array_dma_handle, GFP_KERNEL);
if( disk_array == NULL ) {
seq_puts(m, "memory not available.\n");
goto free_inquiry;
}
mc.xferaddr = (u32)disk_array_dma_handle;
if( adapter->flag & BOARD_40LD ) {
mc.cmd = FC_NEW_CONFIG;
mc.opcode = OP_DCMD_READ_CONFIG;
if( mega_internal_command(adapter, &mc, NULL) ) {
seq_puts(m, "40LD read config failed.\n");
goto free_pci;
}
}
else {
mc.cmd = NEW_READ_CONFIG_8LD;
if( mega_internal_command(adapter, &mc, NULL) ) {
mc.cmd = READ_CONFIG_8LD;
if( mega_internal_command(adapter, &mc, NULL) ) {
seq_puts(m, "8LD read config failed.\n");
goto free_pci;
}
}
}
for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) {
if( adapter->flag & BOARD_40LD ) {
lparam =
&((disk_array_40ld *)disk_array)->ldrv[i].lparam;
}
else {
lparam =
&((disk_array_8ld *)disk_array)->ldrv[i].lparam;
}
/*
* Check for overflow. We print less than 240 characters for
* information about each logical drive.
*/
seq_printf(m, "Logical drive:%2d:, ", i);
switch( rdrv_state[i] & 0x0F ) {
case RDRV_OFFLINE:
seq_puts(m, "state: offline");
break;
case RDRV_DEGRADED:
seq_puts(m, "state: degraded");
break;
case RDRV_OPTIMAL:
seq_puts(m, "state: optimal");
break;
case RDRV_DELETED:
seq_puts(m, "state: deleted");
break;
default:
seq_puts(m, "state: unknown");
break;
}
/*
* Check if check consistency or initialization is going on
* for this logical drive.
*/
if( (rdrv_state[i] & 0xF0) == 0x20 )
seq_puts(m, ", check-consistency in progress");
else if( (rdrv_state[i] & 0xF0) == 0x10 )
seq_puts(m, ", initialization in progress");
seq_putc(m, '\n');
seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
seq_printf(m, "RAID level:%3d, ", lparam->level);
seq_printf(m, "Stripe size:%3d, ",
lparam->stripe_sz ? lparam->stripe_sz/2: 128);
seq_printf(m, "Row size:%3d\n", lparam->row_size);
seq_puts(m, "Read Policy: ");
switch(lparam->read_ahead) {
case NO_READ_AHEAD:
seq_puts(m, "No read ahead, ");
break;
case READ_AHEAD:
seq_puts(m, "Read ahead, ");
break;
case ADAP_READ_AHEAD:
seq_puts(m, "Adaptive, ");
break;
}
seq_puts(m, "Write Policy: ");
switch(lparam->write_mode) {
case WRMODE_WRITE_THRU:
seq_puts(m, "Write thru, ");
break;
case WRMODE_WRITE_BACK:
seq_puts(m, "Write back, ");
break;
}
seq_puts(m, "Cache Policy: ");
switch(lparam->direct_io) {
case CACHED_IO:
seq_puts(m, "Cached IO\n\n");
break;
case DIRECT_IO:
seq_puts(m, "Direct IO\n\n");
break;
}
}
free_pci:
dma_free_coherent(&pdev->dev, array_sz, disk_array,
disk_array_dma_handle);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
free_local_pdev(pdev);
return 0;
}
/**
* proc_show_rdrv_10()
* @m: Synthetic file construction data
* @v: File iterator
*
* Display real time information about the logical drives 0 through 9.
*/
static int
proc_show_rdrv_10(struct seq_file *m, void *v)
{
return proc_show_rdrv(m, m->private, 0, 9);
}
/**
* proc_show_rdrv_20()
* @m: Synthetic file construction data
* @v: File iterator
*
 * Display real time information about the logical drives 10 through 19.
*/
static int
proc_show_rdrv_20(struct seq_file *m, void *v)
{
return proc_show_rdrv(m, m->private, 10, 19);
}
/**
* proc_show_rdrv_30()
* @m: Synthetic file construction data
* @v: File iterator
*
 * Display real time information about the logical drives 20 through 29.
*/
static int
proc_show_rdrv_30(struct seq_file *m, void *v)
{
return proc_show_rdrv(m, m->private, 20, 29);
}
/**
* proc_show_rdrv_40()
* @m: Synthetic file construction data
* @v: File iterator
*
 * Display real time information about the logical drives 30 through 39.
*/
static int
proc_show_rdrv_40(struct seq_file *m, void *v)
{
return proc_show_rdrv(m, m->private, 30, 39);
}
/**
* mega_create_proc_entry()
* @index: index in soft state array
* @parent: parent node for this /proc entry
*
* Creates /proc entries for our controllers.
*/
static void
mega_create_proc_entry(int index, struct proc_dir_entry *parent)
{
adapter_t *adapter = hba_soft_state[index];
struct proc_dir_entry *dir;
u8 string[16];
sprintf(string, "hba%d", adapter->host->host_no);
dir = proc_mkdir_data(string, 0, parent, adapter);
if (!dir) {
dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
return;
}
proc_create_single_data("config", S_IRUSR, dir,
proc_show_config, adapter);
proc_create_single_data("stat", S_IRUSR, dir,
proc_show_stat, adapter);
proc_create_single_data("mailbox", S_IRUSR, dir,
proc_show_mbox, adapter);
#if MEGA_HAVE_ENH_PROC
proc_create_single_data("rebuild-rate", S_IRUSR, dir,
proc_show_rebuild_rate, adapter);
proc_create_single_data("battery-status", S_IRUSR, dir,
proc_show_battery, adapter);
proc_create_single_data("diskdrives-ch0", S_IRUSR, dir,
proc_show_pdrv_ch0, adapter);
proc_create_single_data("diskdrives-ch1", S_IRUSR, dir,
proc_show_pdrv_ch1, adapter);
proc_create_single_data("diskdrives-ch2", S_IRUSR, dir,
proc_show_pdrv_ch2, adapter);
proc_create_single_data("diskdrives-ch3", S_IRUSR, dir,
proc_show_pdrv_ch3, adapter);
proc_create_single_data("raiddrives-0-9", S_IRUSR, dir,
proc_show_rdrv_10, adapter);
proc_create_single_data("raiddrives-10-19", S_IRUSR, dir,
proc_show_rdrv_20, adapter);
proc_create_single_data("raiddrives-20-29", S_IRUSR, dir,
proc_show_rdrv_30, adapter);
proc_create_single_data("raiddrives-30-39", S_IRUSR, dir,
proc_show_rdrv_40, adapter);
#endif
}
#else
static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
{
}
#endif
/*
* megaraid_biosparam()
*
* Return the disk geometry for a particular disk
*/
static int
megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[])
{
adapter_t *adapter;
int heads;
int sectors;
int cylinders;
/* Get pointer to host config structure */
adapter = (adapter_t *)sdev->host->hostdata;
if (IS_RAID_CH(adapter, sdev->channel)) {
/* Default heads (64) & sectors (32) */
heads = 64;
sectors = 32;
cylinders = (ulong)capacity / (heads * sectors);
/*
* Handle extended translation size for logical drives
* > 1Gb
*/
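/* 0x200000 sectors x 512 bytes/sector = 1GB */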
if ((ulong)capacity >= 0x200000) {
heads = 255;
sectors = 63;
cylinders = (ulong)capacity / (heads * sectors);
}
/* return result */
geom[0] = heads;
geom[1] = sectors;
geom[2] = cylinders;
}
else {
if (scsi_partsize(bdev, capacity, geom))
return 0;
dev_info(&adapter->dev->dev,
"invalid partition on this disk on channel %d\n",
sdev->channel);
/* Default heads (64) & sectors (32) */
heads = 64;
sectors = 32;
cylinders = (ulong)capacity / (heads * sectors);
/* Handle extended translation size for logical drives > 1Gb */
if ((ulong)capacity >= 0x200000) {
heads = 255;
sectors = 63;
cylinders = (ulong)capacity / (heads * sectors);
}
/* return result */
geom[0] = heads;
geom[1] = sectors;
geom[2] = cylinders;
}
return 0;
}
/**
* mega_init_scb()
* @adapter: pointer to our soft state
*
* Allocate memory for the various pointers in the scb structures:
* scatter-gather list pointer, passthru and extended passthru structure
* pointers.
*/
static int
mega_init_scb(adapter_t *adapter)
{
scb_t *scb;
int i;
for( i = 0; i < adapter->max_cmds; i++ ) {
scb = &adapter->scb_list[i];
scb->sgl64 = NULL;
scb->sgl = NULL;
scb->pthru = NULL;
scb->epthru = NULL;
}
for( i = 0; i < adapter->max_cmds; i++ ) {
scb = &adapter->scb_list[i];
scb->idx = i;
scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
sizeof(mega_sgl64) * adapter->sglen,
&scb->sgl_dma_addr, GFP_KERNEL);
scb->sgl = (mega_sglist *)scb->sgl64;
if( !scb->sgl ) {
dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
mega_free_sgl(adapter);
return -1;
}
scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
sizeof(mega_passthru),
&scb->pthru_dma_addr, GFP_KERNEL);
if( !scb->pthru ) {
dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
mega_free_sgl(adapter);
return -1;
}
scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
sizeof(mega_ext_passthru),
&scb->epthru_dma_addr, GFP_KERNEL);
if( !scb->epthru ) {
dev_warn(&adapter->dev->dev,
"Can't allocate extended passthru\n");
mega_free_sgl(adapter);
return -1;
}
scb->dma_type = MEGA_DMA_TYPE_NONE;
/*
* Link to free list
* lock not required since we are loading the driver, so no
* commands possible right now.
*/
scb->state = SCB_FREE;
scb->cmd = NULL;
list_add(&scb->list, &adapter->free_list);
}
return 0;
}
/**
* megadev_open()
* @inode: unused
* @filep: unused
*
* Routines for the character/ioctl interface to the driver. Find out if this
* is a valid open.
*/
static int
megadev_open (struct inode *inode, struct file *filep)
{
/*
* Only allow superuser to access private ioctl interface
*/
if( !capable(CAP_SYS_ADMIN) ) return -EACCES;
return 0;
}
/**
* megadev_ioctl()
* @filep: Our device file
* @cmd: ioctl command
* @arg: user buffer
*
* ioctl entry point for our private ioctl interface. We move the data in from
* the user space, prepare the command (if necessary, convert the old MIMD
* ioctl to new ioctl command), and issue a synchronous command to the
* controller.
*/
static int
megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
adapter_t *adapter;
nitioctl_t uioc;
int adapno;
int rval;
mega_passthru __user *upthru; /* user address for passthru */
mega_passthru *pthru; /* copy user passthru here */
dma_addr_t pthru_dma_hndl;
void *data = NULL; /* data to be transferred */
dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
megacmd_t mc;
#if MEGA_HAVE_STATS
megastat_t __user *ustats = NULL;
int num_ldrv = 0;
#endif
u32 uxferaddr = 0;
struct pci_dev *pdev;
/*
 * Make sure only USCSICMD commands are issued through this interface.
 * MIMD applications may still fire different commands.
*/
if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
return -EINVAL;
}
/*
* Check and convert a possible MIMD command to NIT command.
* mega_m_to_n() copies the data from the user space, so we do not
* have to do it here.
 * NOTE: We will need some user address to copy out the data, therefore
 * the interface layer will also provide us with the required user
 * addresses.
*/
memset(&uioc, 0, sizeof(nitioctl_t));
if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
return rval;
switch( uioc.opcode ) {
case GET_DRIVER_VER:
if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
return (-EFAULT);
break;
case GET_N_ADAP:
if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
return (-EFAULT);
/*
 * Shucks. The MIMD interface returns a positive value for the number
 * of adapters. TODO: Change it to return 0 when there is no
 * application using the mimd interface.
*/
return hba_count;
case GET_ADAP_INFO:
/*
* Which adapter
*/
if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
return (-ENODEV);
if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
sizeof(struct mcontroller)) )
return (-EFAULT);
break;
#if MEGA_HAVE_STATS
case GET_STATS:
/*
* Which adapter
*/
if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
return (-ENODEV);
adapter = hba_soft_state[adapno];
ustats = uioc.uioc_uaddr;
if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
return (-EFAULT);
/*
* Check for the validity of the logical drive number
*/
if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
if( copy_to_user(ustats->nreads, adapter->nreads,
num_ldrv*sizeof(u32)) )
return -EFAULT;
if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
num_ldrv*sizeof(u32)) )
return -EFAULT;
if( copy_to_user(ustats->nwrites, adapter->nwrites,
num_ldrv*sizeof(u32)) )
return -EFAULT;
if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
num_ldrv*sizeof(u32)) )
return -EFAULT;
if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
num_ldrv*sizeof(u32)) )
return -EFAULT;
if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
num_ldrv*sizeof(u32)) )
return -EFAULT;
return 0;
#endif
case MBOX_CMD:
/*
* Which adapter
*/
if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
return (-ENODEV);
adapter = hba_soft_state[adapno];
/*
* Deletion of logical drive is a special case. The adapter
* should be quiescent before this command is issued.
*/
if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
/*
* Do we support this feature
*/
if( !adapter->support_random_del ) {
dev_warn(&adapter->dev->dev, "logdrv "
"delete on non-supporting F/W\n");
return (-EINVAL);
}
rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
if( rval == 0 ) {
memset(&mc, 0, sizeof(megacmd_t));
mc.status = rval;
rval = mega_n_to_m((void __user *)arg, &mc);
}
return rval;
}
/*
 * This interface only supports the regular passthru commands.
* Reject extended passthru and 64-bit passthru
*/
if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
dev_warn(&adapter->dev->dev, "rejected passthru\n");
return (-EINVAL);
}
/*
* For all internal commands, the buffer must be allocated in
* <4GB address range
*/
if( make_local_pdev(adapter, &pdev) != 0 )
return -EIO;
/* Is it a passthru command or a DCMD */
if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
/* Passthru commands */
pthru = dma_alloc_coherent(&pdev->dev,
sizeof(mega_passthru),
&pthru_dma_hndl, GFP_KERNEL);
if( pthru == NULL ) {
free_local_pdev(pdev);
return (-ENOMEM);
}
/*
* The user passthru structure
*/
upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
/*
* Copy in the user passthru here.
*/
if( copy_from_user(pthru, upthru,
sizeof(mega_passthru)) ) {
dma_free_coherent(&pdev->dev,
sizeof(mega_passthru),
pthru, pthru_dma_hndl);
free_local_pdev(pdev);
return (-EFAULT);
}
/*
* Is there a data transfer
*/
if( pthru->dataxferlen ) {
data = dma_alloc_coherent(&pdev->dev,
pthru->dataxferlen,
&data_dma_hndl,
GFP_KERNEL);
if( data == NULL ) {
dma_free_coherent(&pdev->dev,
sizeof(mega_passthru),
pthru,
pthru_dma_hndl);
free_local_pdev(pdev);
return (-ENOMEM);
}
/*
* Save the user address and point the kernel
* address at just allocated memory
*/
uxferaddr = pthru->dataxferaddr;
pthru->dataxferaddr = data_dma_hndl;
}
/*
* Is data coming down-stream
*/
if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) {
/*
* Get the user data
*/
if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
pthru->dataxferlen) ) {
rval = (-EFAULT);
goto freemem_and_return;
}
}
memset(&mc, 0, sizeof(megacmd_t));
mc.cmd = MEGA_MBOXCMD_PASSTHRU;
mc.xferaddr = (u32)pthru_dma_hndl;
/*
* Issue the command
*/
mega_internal_command(adapter, &mc, pthru);
rval = mega_n_to_m((void __user *)arg, &mc);
if( rval ) goto freemem_and_return;
/*
* Is data going up-stream
*/
if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
pthru->dataxferlen) ) {
rval = (-EFAULT);
}
}
/*
* Send the request sense data also, irrespective of
* whether the user has asked for it or not.
*/
if (copy_to_user(upthru->reqsensearea,
pthru->reqsensearea, 14))
rval = -EFAULT;
freemem_and_return:
if( pthru->dataxferlen ) {
dma_free_coherent(&pdev->dev,
pthru->dataxferlen, data,
data_dma_hndl);
}
dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
pthru, pthru_dma_hndl);
free_local_pdev(pdev);
return rval;
}
else {
/* DCMD commands */
/*
* Is there a data transfer
*/
if( uioc.xferlen ) {
data = dma_alloc_coherent(&pdev->dev,
uioc.xferlen,
&data_dma_hndl,
GFP_KERNEL);
if( data == NULL ) {
free_local_pdev(pdev);
return (-ENOMEM);
}
uxferaddr = MBOX(uioc)->xferaddr;
}
/*
* Is data coming down-stream
*/
if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
/*
* Get the user data
*/
if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
uioc.xferlen) ) {
dma_free_coherent(&pdev->dev,
uioc.xferlen, data,
data_dma_hndl);
free_local_pdev(pdev);
return (-EFAULT);
}
}
memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
mc.xferaddr = (u32)data_dma_hndl;
/*
* Issue the command
*/
mega_internal_command(adapter, &mc, NULL);
rval = mega_n_to_m((void __user *)arg, &mc);
if( rval ) {
if( uioc.xferlen ) {
dma_free_coherent(&pdev->dev,
uioc.xferlen, data,
data_dma_hndl);
}
free_local_pdev(pdev);
return rval;
}
/*
* Is data going up-stream
*/
if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
uioc.xferlen) ) {
rval = (-EFAULT);
}
}
if( uioc.xferlen ) {
dma_free_coherent(&pdev->dev, uioc.xferlen,
data, data_dma_hndl);
}
free_local_pdev(pdev);
return rval;
}
default:
return (-EINVAL);
}
return 0;
}
static long
megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
int ret;
mutex_lock(&megadev_mutex);
ret = megadev_ioctl(filep, cmd, arg);
mutex_unlock(&megadev_mutex);
return ret;
}
/**
* mega_m_to_n()
* @arg: user address
* @uioc: new ioctl structure
*
* A thin layer to convert older mimd interface ioctl structure to NIT ioctl
* structure
*
* Converts the older mimd ioctl structure to newer NIT structure
*/
static int
mega_m_to_n(void __user *arg, nitioctl_t *uioc)
{
struct uioctl_t uioc_mimd;
char signature[8] = {0};
u8 opcode;
u8 subopcode;
/*
 * Check if the application conforms to NIT. We do not have to do much
* in that case.
* We exploit the fact that the signature is stored in the very
* beginning of the structure.
*/
if( copy_from_user(signature, arg, 7) )
return (-EFAULT);
if( memcmp(signature, "MEGANIT", 7) == 0 ) {
/*
 * NOTE: The NIT ioctl is still in flux because of changes to the
 * mailbox definition, in HPE. No applications use this interface
 * yet, and let's not have applications use it till the new
 * specifications are in place.
*/
return -EINVAL;
#if 0
if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) )
return (-EFAULT);
return 0;
#endif
}
/*
* Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t
*
* Get the user ioctl structure
*/
if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) )
return (-EFAULT);
/*
* Get the opcode and subopcode for the commands
*/
opcode = uioc_mimd.ui.fcs.opcode;
subopcode = uioc_mimd.ui.fcs.subopcode;
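/*
 * MIMD opcodes: 0x82 carries driver/adapter queries; 0x81 and 0x80
 * carry mailbox commands and differ only in how the transfer length
 * is specified.
 */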
switch (opcode) {
case 0x82:
switch (subopcode) {
case MEGAIOC_QDRVRVER: /* Query driver version */
uioc->opcode = GET_DRIVER_VER;
uioc->uioc_uaddr = uioc_mimd.data;
break;
case MEGAIOC_QNADAP: /* Get # of adapters */
uioc->opcode = GET_N_ADAP;
uioc->uioc_uaddr = uioc_mimd.data;
break;
case MEGAIOC_QADAPINFO: /* Get adapter information */
uioc->opcode = GET_ADAP_INFO;
uioc->adapno = uioc_mimd.ui.fcs.adapno;
uioc->uioc_uaddr = uioc_mimd.data;
break;
default:
return(-EINVAL);
}
break;
case 0x81:
uioc->opcode = MBOX_CMD;
uioc->adapno = uioc_mimd.ui.fcs.adapno;
memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
uioc->xferlen = uioc_mimd.ui.fcs.length;
if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
break;
case 0x80:
uioc->opcode = MBOX_CMD;
uioc->adapno = uioc_mimd.ui.fcs.adapno;
memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
/*
 * Choose the bigger of the input and output lengths as xferlen
*/
uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
uioc_mimd.outlen : uioc_mimd.inlen;
if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
break;
default:
return (-EINVAL);
}
return 0;
}
/*
* mega_n_to_m()
* @arg: user address
* @mc: mailbox command
*
 * Updates the status information for the application, depending on whether
 * the application conforms to the older mimd ioctl interface or the newer
 * NIT ioctl interface
*/
static int
mega_n_to_m(void __user *arg, megacmd_t *mc)
{
nitioctl_t __user *uiocp;
megacmd_t __user *umc;
mega_passthru __user *upthru;
struct uioctl_t __user *uioc_mimd;
char signature[8] = {0};
/*
 * Check if the application conforms to NIT.
*/
if( copy_from_user(signature, arg, 7) )
return -EFAULT;
if( memcmp(signature, "MEGANIT", 7) == 0 ) {
uiocp = arg;
if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) )
return (-EFAULT);
if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
umc = MBOX_P(uiocp);
if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
return -EFAULT;
if( put_user(mc->status, (u8 __user *)&upthru->scsistatus))
return (-EFAULT);
}
}
else {
uioc_mimd = arg;
if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) )
return (-EFAULT);
if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
umc = (megacmd_t __user *)uioc_mimd->mbox;
if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
return (-EFAULT);
if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) )
return (-EFAULT);
}
}
return 0;
}
/*
* MEGARAID 'FW' commands.
*/
/**
* mega_is_bios_enabled()
* @adapter: pointer to our soft state
*
* issue command to find out if the BIOS is enabled for this controller
*/
static int
mega_is_bios_enabled(adapter_t *adapter)
{
struct mbox_out mbox;
unsigned char *raw_mbox = (u8 *)&mbox;
memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = IS_BIOS_ENABLED;
raw_mbox[2] = GET_BIOS;
issue_scb_block(adapter, raw_mbox);
return *(char *)adapter->mega_buffer;
}
/**
* mega_enum_raid_scsi()
* @adapter: pointer to our soft state
*
* Find out what channels are RAID/SCSI. This information is used to
* differentiate the virtual channels and physical channels and to support
* ROMB feature and non-disk devices.
*/
static void
mega_enum_raid_scsi(adapter_t *adapter)
{
struct mbox_out mbox;
unsigned char *raw_mbox = (u8 *)&mbox;
int i;
memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out what channels are raid/scsi
*/
raw_mbox[0] = CHNL_CLASS;
raw_mbox[2] = GET_CHNL_CLASS;
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
 * Non-ROMB firmware fails this command, so all channels
 * must be shown as RAID
*/
adapter->mega_ch_class = 0xFF;
if(!issue_scb_block(adapter, raw_mbox)) {
adapter->mega_ch_class = *((char *)adapter->mega_buffer);
}
for( i = 0; i < adapter->product_info.nchannels; i++ ) {
if( (adapter->mega_ch_class >> i) & 0x01 ) {
dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
i);
}
else {
dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
i);
}
}
return;
}
/**
* mega_get_boot_drv()
* @adapter: pointer to our soft state
*
* Find out which device is the boot device. Note, any logical drive or any
 * physical device (e.g., a CDROM) can be designated as a boot device.
*/
static void
mega_get_boot_drv(adapter_t *adapter)
{
struct private_bios_data *prv_bios_data;
struct mbox_out mbox;
unsigned char *raw_mbox = (u8 *)&mbox;
u16 cksum = 0;
u8 *cksum_p;
u8 boot_pdrv;
int i;
memset(&mbox, 0, sizeof(mbox));
raw_mbox[0] = BIOS_PVT_DATA;
raw_mbox[2] = GET_BIOS_PVT_DATA;
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
mbox.xferaddr = (u32)adapter->buf_dma_handle;
adapter->boot_ldrv_enabled = 0;
adapter->boot_ldrv = 0;
adapter->boot_pdrv_enabled = 0;
adapter->boot_pdrv_ch = 0;
adapter->boot_pdrv_tgt = 0;
if(issue_scb_block(adapter, raw_mbox) == 0) {
prv_bios_data =
(struct private_bios_data *)adapter->mega_buffer;
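/*
 * The private BIOS data carries a two's-complement checksum over
 * its first 14 bytes; validate it before trusting the contents.
 */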
cksum = 0;
cksum_p = (char *)prv_bios_data;
for (i = 0; i < 14; i++ ) {
cksum += (u16)(*cksum_p++);
}
if (prv_bios_data->cksum == (u16)(0-cksum) ) {
/*
* If MSB is set, a physical drive is set as boot
* device
*/
if( prv_bios_data->boot_drv & 0x80 ) {
adapter->boot_pdrv_enabled = 1;
boot_pdrv = prv_bios_data->boot_drv & 0x7F;
adapter->boot_pdrv_ch = boot_pdrv / 16;
adapter->boot_pdrv_tgt = boot_pdrv % 16;
}
else {
adapter->boot_ldrv_enabled = 1;
adapter->boot_ldrv = prv_bios_data->boot_drv;
}
}
}
}
/**
* mega_support_random_del()
* @adapter: pointer to our soft state
*
* Find out if this controller supports random deletion and addition of
* logical drives
*/
static int
mega_support_random_del(adapter_t *adapter)
{
struct mbox_out mbox;
unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
memset(&mbox, 0, sizeof(mbox));
/*
* issue command
*/
raw_mbox[0] = FC_DEL_LOGDRV;
raw_mbox[2] = OP_SUP_DEL_LOGDRV;
rval = issue_scb_block(adapter, raw_mbox);
return !rval;
}
/**
* mega_support_ext_cdb()
* @adapter: pointer to our soft state
*
* Find out if this firmware supports cdblen > 10
*/
static int
mega_support_ext_cdb(adapter_t *adapter)
{
struct mbox_out mbox;
unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out if controller supports extended CDBs.
*/
raw_mbox[0] = 0xA4;
raw_mbox[2] = 0x16;
rval = issue_scb_block(adapter, raw_mbox);
return !rval;
}
/**
* mega_del_logdrv()
* @adapter: pointer to our soft state
* @logdrv: logical drive to be deleted
*
* Delete the specified logical drive. It is the responsibility of the user
* app to let the OS know about this operation.
*/
static int
mega_del_logdrv(adapter_t *adapter, int logdrv)
{
unsigned long flags;
scb_t *scb;
int rval;
/*
* Stop sending commands to the controller, queue them internally.
* When deletion is complete, ISR will flush the queue.
*/
atomic_set(&adapter->quiescent, 1);
/*
* Wait till all the issued commands are complete and there are no
* commands in the pending queue
*/
while (atomic_read(&adapter->pend_cmds) > 0 ||
!list_empty(&adapter->pending_list))
msleep(1000); /* sleep for 1s */
rval = mega_do_del_logdrv(adapter, logdrv);
spin_lock_irqsave(&adapter->lock, flags);
/*
* If delete operation was successful, add 0x80 to the logical drive
* ids for commands in the pending queue.
*/
if (adapter->read_ldidmap) {
struct list_head *pos;
list_for_each(pos, &adapter->pending_list) {
scb = list_entry(pos, scb_t, list);
if (scb->pthru->logdrv < 0x80 )
scb->pthru->logdrv += 0x80;
}
}
atomic_set(&adapter->quiescent, 0);
mega_runpendq(adapter);
spin_unlock_irqrestore(&adapter->lock, flags);
return rval;
}
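/**
 * mega_do_del_logdrv()
 * @adapter: pointer to our soft state
 * @logdrv: logical drive to be deleted
 *
 * Issue the firmware command that deletes the logical drive and note that
 * subsequent commands must remap logical drive ids by adding 0x80.
 */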
static int
mega_do_del_logdrv(adapter_t *adapter, int logdrv)
{
megacmd_t mc;
int rval;
memset( &mc, 0, sizeof(megacmd_t));
mc.cmd = FC_DEL_LOGDRV;
mc.opcode = OP_DEL_LOGDRV;
mc.subopcode = logdrv;
rval = mega_internal_command(adapter, &mc, NULL);
/* log this event */
if(rval) {
dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
return rval;
}
/*
* After deleting the first logical drive, the logical drives must be
* addressed by adding 0x80 to the logical drive id.
*/
adapter->read_ldidmap = 1;
return rval;
}
/**
* mega_get_max_sgl()
* @adapter: pointer to our soft state
*
* Find out the maximum number of scatter-gather elements supported by this
* version of the firmware
*/
static void
mega_get_max_sgl(adapter_t *adapter)
{
struct mbox_out mbox;
unsigned char *raw_mbox = (u8 *)&mbox;
memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = MAIN_MISC_OPCODE;
raw_mbox[2] = GET_MAX_SG_SUPPORT;
if( issue_scb_block(adapter, raw_mbox) ) {
/*
* f/w does not support this command. Choose the default value
*/
adapter->sglen = MIN_SGLIST;
}
else {
adapter->sglen = *((char *)adapter->mega_buffer);
/*
* Make sure this is not more than the resources we are
* planning to allocate
*/
if ( adapter->sglen > MAX_SGLIST )
adapter->sglen = MAX_SGLIST;
}
return;
}
/**
* mega_support_cluster()
* @adapter: pointer to our soft state
*
* Find out if this firmware supports cluster calls.
*/
static int
mega_support_cluster(adapter_t *adapter)
{
struct mbox_out mbox;
unsigned char *raw_mbox = (u8 *)&mbox;
memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Try to get the initiator id. This command succeeds only if
* clustering is available on this HBA.
*/
raw_mbox[0] = MEGA_GET_TARGET_ID;
if( issue_scb_block(adapter, raw_mbox) == 0 ) {
/*
* Cluster support available. Get the initiator target id.
* Tell our id to mid-layer too.
*/
adapter->this_id = *(u32 *)adapter->mega_buffer;
adapter->host->this_id = adapter->this_id;
return 1;
}
return 0;
}
#ifdef CONFIG_PROC_FS
/**
* mega_adapinq()
* @adapter: pointer to our soft state
* @dma_handle: DMA address of the buffer
*
* Issue internal commands while interrupts are available.
* We only issue direct mailbox commands from within the driver; the
* ioctl() interface uses these routines to issue passthru commands.
*/
static int
mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
{
megacmd_t mc;
memset(&mc, 0, sizeof(megacmd_t));
if( adapter->flag & BOARD_40LD ) {
mc.cmd = FC_NEW_CONFIG;
mc.opcode = NC_SUBOP_ENQUIRY3;
mc.subopcode = ENQ3_GET_SOLICITED_FULL;
}
else {
mc.cmd = MEGA_MBOXCMD_ADPEXTINQ;
}
mc.xferaddr = (u32)dma_handle;
if ( mega_internal_command(adapter, &mc, NULL) != 0 ) {
return -1;
}
return 0;
}
/**
* mega_internal_dev_inquiry()
* @adapter: pointer to our soft state
* @ch: channel for this device
* @tgt: ID of this device
* @buf_dma_handle: DMA address of the buffer
*
* Issue the scsi inquiry for the specified device.
*/
static int
mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
dma_addr_t buf_dma_handle)
{
mega_passthru *pthru;
dma_addr_t pthru_dma_handle;
megacmd_t mc;
int rval;
struct pci_dev *pdev;
/*
* For all internal commands, the buffer must be allocated in <4GB
* address range
*/
if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
&pthru_dma_handle, GFP_KERNEL);
if( pthru == NULL ) {
free_local_pdev(pdev);
return -1;
}
pthru->timeout = 2;
pthru->ars = 1;
pthru->reqsenselen = 14;
pthru->islogical = 0;
pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch;
pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
pthru->cdblen = 6;
pthru->cdb[0] = INQUIRY;
pthru->cdb[1] = 0;
pthru->cdb[2] = 0;
pthru->cdb[3] = 0;
pthru->cdb[4] = 255;
pthru->cdb[5] = 0;
pthru->dataxferaddr = (u32)buf_dma_handle;
pthru->dataxferlen = 256;
memset(&mc, 0, sizeof(megacmd_t));
mc.cmd = MEGA_MBOXCMD_PASSTHRU;
mc.xferaddr = (u32)pthru_dma_handle;
rval = mega_internal_command(adapter, &mc, pthru);
dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
pthru_dma_handle);
free_local_pdev(pdev);
return rval;
}
#endif
/**
* mega_internal_command()
* @adapter: pointer to our soft state
* @mc: the mailbox command
* @pthru: Passthru structure for DCDB commands
*
* Issue the internal commands in interrupt mode.
* The last argument is the address of the passthru structure if the command
* to be fired is a passthru command.
*
* Note: parameter 'pthru' is null for non-passthru commands.
*/
static int
mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
{
unsigned long flags;
scb_t *scb;
int rval;
/*
* The internal commands share one command id and hence are
* serialized. This is so because we want to reserve the maximum
* number of available command ids for the I/O commands.
*/
mutex_lock(&adapter->int_mtx);
scb = &adapter->int_scb;
memset(scb, 0, sizeof(scb_t));
scb->idx = CMDID_INT_CMDS;
scb->state |= SCB_ACTIVE | SCB_PENDQ;
memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
/*
* Is it a passthru command
*/
if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
scb->pthru = pthru;
spin_lock_irqsave(&adapter->lock, flags);
list_add_tail(&scb->list, &adapter->pending_list);
/*
* Check if the HBA is in quiescent state, e.g., during a
* delete logical drive operation. If it is, don't run
* the pending_list.
*/
if (atomic_read(&adapter->quiescent) == 0)
mega_runpendq(adapter);
spin_unlock_irqrestore(&adapter->lock, flags);
wait_for_completion(&adapter->int_waitq);
mc->status = rval = adapter->int_status;
/*
* Print a debug message for all failed commands. Applications can use
* this information.
*/
if (rval && trace_level) {
dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
mc->cmd, mc->opcode, mc->subopcode, rval);
}
mutex_unlock(&adapter->int_mtx);
return rval;
}
static const struct scsi_host_template megaraid_template = {
.module = THIS_MODULE,
.name = "MegaRAID",
.proc_name = "megaraid_legacy",
.info = megaraid_info,
.queuecommand = megaraid_queue,
.bios_param = megaraid_biosparam,
.max_sectors = MAX_SECTORS_PER_IO,
.can_queue = MAX_COMMANDS,
.this_id = DEFAULT_INITIATOR_ID,
.sg_tablesize = MAX_SGLIST,
.cmd_per_lun = DEF_CMD_PER_LUN,
.eh_abort_handler = megaraid_abort,
.eh_host_reset_handler = megaraid_reset,
.no_write_same = 1,
.cmd_size = sizeof(struct megaraid_cmd_priv),
};
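/**
 * megaraid_probe_one()
 * @pdev: PCI device to be probed
 * @id: entry in megaraid_pci_tbl that matched this device
 *
 * Detect and initialize a controller: map its registers, set up the
 * mailbox and internal command buffers, query the firmware and register
 * the host with the SCSI mid-layer.
 */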
static int
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
adapter_t *adapter;
unsigned long mega_baseport, tbase, flag = 0;
u16 subsysid, subsysvid;
u8 pci_bus, pci_dev_func;
int irq, i, j;
int error = -ENODEV;
if (hba_count >= MAX_CONTROLLERS)
goto out;
if (pci_enable_device(pdev))
goto out;
pci_set_master(pdev);
pci_bus = pdev->bus->number;
pci_dev_func = pdev->devfn;
/*
* The megaraid3 stuff reports the ID of the Intel part, which is not
* remotely specific to the megaraid.
*/
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
u16 magic;
/*
* Don't fall over the Compaq management cards using the same
* PCI identifier
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
pdev->subsystem_device == 0xC000)
goto out_disable_device;
/* Now check the magic signature byte */
pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
goto out_disable_device;
/* Ok it is probably a megaraid */
}
/*
* For these vendor and device ids, signature offsets are not
* valid and 64 bit is implicit
*/
if (id->driver_data & BOARD_64BIT)
flag |= BOARD_64BIT;
else {
u32 magic64;
pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);
if (magic64 == HBA_SIGNATURE_64BIT)
flag |= BOARD_64BIT;
}
subsysvid = pdev->subsystem_vendor;
subsysid = pdev->subsystem_device;
dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
id->vendor, id->device);
/* Read the base port and IRQ from PCI */
mega_baseport = pci_resource_start(pdev, 0);
irq = pdev->irq;
tbase = mega_baseport;
if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
flag |= BOARD_MEMMAP;
if (!request_mem_region(mega_baseport, 128, "megaraid")) {
dev_warn(&pdev->dev, "mem region busy!\n");
goto out_disable_device;
}
mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
if (!mega_baseport) {
dev_warn(&pdev->dev, "could not map hba memory\n");
goto out_release_region;
}
} else {
flag |= BOARD_IOMAP;
mega_baseport += 0x10;
if (!request_region(mega_baseport, 16, "megaraid"))
goto out_disable_device;
}
/* Initialize SCSI Host structure */
host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t));
if (!host)
goto out_iounmap;
adapter = (adapter_t *)host->hostdata;
memset(adapter, 0, sizeof(adapter_t));
dev_notice(&pdev->dev,
"scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
host->host_no, mega_baseport, irq);
adapter->base = mega_baseport;
if (flag & BOARD_MEMMAP)
adapter->mmio_base = (void __iomem *) mega_baseport;
INIT_LIST_HEAD(&adapter->free_list);
INIT_LIST_HEAD(&adapter->pending_list);
INIT_LIST_HEAD(&adapter->completed_list);
adapter->flag = flag;
spin_lock_init(&adapter->lock);
host->cmd_per_lun = max_cmd_per_lun;
host->max_sectors = max_sectors_per_io;
adapter->dev = pdev;
adapter->host = host;
adapter->host->irq = irq;
if (flag & BOARD_MEMMAP)
adapter->host->base = tbase;
else {
adapter->host->io_port = tbase;
adapter->host->n_io_port = 16;
}
adapter->host->unique_id = (pci_bus << 8) | pci_dev_func;
/*
* Allocate buffer to issue internal commands.
*/
adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
MEGA_BUFFER_SIZE,
&adapter->buf_dma_handle,
GFP_KERNEL);
if (!adapter->mega_buffer) {
dev_warn(&pdev->dev, "out of RAM\n");
goto out_host_put;
}
adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t),
GFP_KERNEL);
if (!adapter->scb_list) {
dev_warn(&pdev->dev, "out of RAM\n");
goto out_free_cmd_buffer;
}
if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
megaraid_isr_memmapped : megaraid_isr_iomapped,
IRQF_SHARED, "megaraid", adapter)) {
dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
goto out_free_scb_list;
}
if (mega_setup_mailbox(adapter))
goto out_free_irq;
if (mega_query_adapter(adapter))
goto out_free_mbox;
/*
* Have checks for some buggy f/w
*/
if ((subsysid == 0x1111) && (subsysvid == 0x1111)) {
/*
* Which firmware
*/
if (!strcmp(adapter->fw_version, "3.00") ||
!strcmp(adapter->fw_version, "3.01")) {
dev_warn(&pdev->dev,
"Your card is a Dell PERC "
"2/SC RAID controller with "
"firmware\nmegaraid: 3.00 or 3.01. "
"This driver is known to have "
"corruption issues\nmegaraid: with "
"those firmware versions on this "
"specific card. In order\nmegaraid: "
"to protect your data, please upgrade "
"your firmware to version\nmegaraid: "
"3.10 or later, available from the "
"Dell Technical Support web\n"
"megaraid: site at\nhttp://support."
"dell.com/us/en/filelib/download/"
"index.asp?fileid=2940\n"
);
}
}
/*
* If we have a HP 1M(0x60E7)/2M(0x60E8) controller with
* firmware H.01.07, H.01.08, and H.01.09 disable 64 bit
* support, since this firmware cannot handle 64 bit
* addressing
*/
if ((subsysvid == PCI_VENDOR_ID_HP) &&
((subsysid == 0x60E7) || (subsysid == 0x60E8))) {
/*
* which firmware
*/
if (!strcmp(adapter->fw_version, "H01.07") ||
!strcmp(adapter->fw_version, "H01.08") ||
!strcmp(adapter->fw_version, "H01.09") ) {
dev_warn(&pdev->dev,
"Firmware H.01.07, "
"H.01.08, and H.01.09 on 1M/2M "
"controllers\n"
"do not support 64 bit "
"addressing.\nDISABLING "
"64 bit support.\n");
adapter->flag &= ~BOARD_64BIT;
}
}
if (mega_is_bios_enabled(adapter))
mega_hbas[hba_count].is_bios_enabled = 1;
mega_hbas[hba_count].hostdata_addr = adapter;
/*
* Find out which channel is raid and which is scsi. This is
* for ROMB support.
*/
mega_enum_raid_scsi(adapter);
/*
* Find out if a logical drive is set as the boot drive. If
* there is one, make it the first logical drive.
* ROMB: if we have to boot from a physical drive, then all
* the physical drives appear before the logical drives.
* Otherwise, all the physical drives are exported to the
* mid-layer after the logical drives.
*/
mega_get_boot_drv(adapter);
if (adapter->boot_pdrv_enabled) {
j = adapter->product_info.nchannels;
for( i = 0; i < j; i++ )
adapter->logdrv_chan[i] = 0;
for( i = j; i < NVIRT_CHAN + j; i++ )
adapter->logdrv_chan[i] = 1;
} else {
for (i = 0; i < NVIRT_CHAN; i++)
adapter->logdrv_chan[i] = 1;
for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++)
adapter->logdrv_chan[i] = 0;
adapter->mega_ch_class <<= NVIRT_CHAN;
}
/*
* Do we support random deletion and addition of logical
* drives
*/
adapter->read_ldidmap = 0; /* set it after first logdrv
delete cmd */
adapter->support_random_del = mega_support_random_del(adapter);
/* Initialize SCBs */
if (mega_init_scb(adapter))
goto out_free_mbox;
/*
* Reset the pending commands counter
*/
atomic_set(&adapter->pend_cmds, 0);
/*
* Reset the adapter quiescent flag
*/
atomic_set(&adapter->quiescent, 0);
hba_soft_state[hba_count] = adapter;
/*
* Fill in the structure which needs to be passed back to the
* application when it does an ioctl() for controller related
* information.
*/
i = hba_count;
mcontroller[i].base = mega_baseport;
mcontroller[i].irq = irq;
mcontroller[i].numldrv = adapter->numldrv;
mcontroller[i].pcibus = pci_bus;
mcontroller[i].pcidev = id->device;
mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
mcontroller[i].pciid = -1;
mcontroller[i].pcivendor = id->vendor;
mcontroller[i].pcislot = PCI_SLOT(pci_dev_func);
mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
/* Set the Mode of addressing to 64 bit if we can */
if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
adapter->has_64bit_addr = 1;
} else {
dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
adapter->has_64bit_addr = 0;
}
mutex_init(&adapter->int_mtx);
init_completion(&adapter->int_waitq);
adapter->this_id = DEFAULT_INITIATOR_ID;
adapter->host->this_id = DEFAULT_INITIATOR_ID;
#if MEGA_HAVE_CLUSTERING
/*
* Is cluster support enabled on this controller?
* Note: in a cluster the HBAs (the initiators) will have
* different target IDs and we cannot assume it to be 7. Call
* to mega_support_cluster() will get the target ids also if
* the cluster support is available
*/
adapter->has_cluster = mega_support_cluster(adapter);
if (adapter->has_cluster) {
dev_notice(&pdev->dev,
"Cluster driver, initiator id:%d\n",
adapter->this_id);
}
#endif
pci_set_drvdata(pdev, host);
mega_create_proc_entry(hba_count, mega_proc_dir_entry);
error = scsi_add_host(host, &pdev->dev);
if (error)
goto out_free_mbox;
scsi_scan_host(host);
hba_count++;
return 0;
out_free_mbox:
dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
adapter->una_mbox64, adapter->una_mbox64_dma);
out_free_irq:
free_irq(adapter->host->irq, adapter);
out_free_scb_list:
kfree(adapter->scb_list);
out_free_cmd_buffer:
dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
adapter->mega_buffer, adapter->buf_dma_handle);
out_host_put:
scsi_host_put(host);
out_iounmap:
if (flag & BOARD_MEMMAP)
iounmap((void *)mega_baseport);
out_release_region:
if (flag & BOARD_MEMMAP)
release_mem_region(tbase, 128);
else
release_region(mega_baseport, 16);
out_disable_device:
pci_disable_device(pdev);
out:
return error;
}
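/**
 * __megaraid_shutdown()
 * @adapter: pointer to our soft state
 *
 * Flush the adapter and disk caches with blocking commands and wait long
 * enough for the flushes to actually complete.
 */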
static void
__megaraid_shutdown(adapter_t *adapter)
{
u_char raw_mbox[sizeof(struct mbox_out)];
mbox_t *mbox = (mbox_t *)raw_mbox;
int i;
/* Flush adapter cache */
memset(&mbox->m_out, 0, sizeof(raw_mbox));
raw_mbox[0] = FLUSH_ADAPTER;
free_irq(adapter->host->irq, adapter);
/* Issue a blocking (interrupts disabled) command to the card */
issue_scb_block(adapter, raw_mbox);
/* Flush disks cache */
memset(&mbox->m_out, 0, sizeof(raw_mbox));
raw_mbox[0] = FLUSH_SYSTEM;
/* Issue a blocking (interrupts disabled) command to the card */
issue_scb_block(adapter, raw_mbox);
if (atomic_read(&adapter->pend_cmds) > 0)
dev_warn(&adapter->dev->dev, "pending commands!!\n");
/*
* Have a deliberate delay to make sure all the caches are
* actually flushed.
*/
for (i = 0; i <= 10; i++)
mdelay(1000);
}
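/**
 * megaraid_remove_one()
 * @pdev: PCI device being removed
 *
 * Detach the host from the SCSI mid-layer and release every resource
 * acquired in megaraid_probe_one().
 */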
static void
megaraid_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
adapter_t *adapter = (adapter_t *)host->hostdata;
char buf[12] = { 0 };
scsi_remove_host(host);
__megaraid_shutdown(adapter);
/* Free our resources */
if (adapter->flag & BOARD_MEMMAP) {
iounmap((void *)adapter->base);
release_mem_region(adapter->host->base, 128);
} else
release_region(adapter->base, 16);
mega_free_sgl(adapter);
sprintf(buf, "hba%d", adapter->host->host_no);
remove_proc_subtree(buf, mega_proc_dir_entry);
dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
adapter->mega_buffer, adapter->buf_dma_handle);
kfree(adapter->scb_list);
dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
adapter->una_mbox64, adapter->una_mbox64_dma);
scsi_host_put(host);
pci_disable_device(pdev);
hba_count--;
}
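/**
 * megaraid_shutdown()
 * @pdev: PCI device being shut down
 *
 * Flush the adapter and disk caches on system shutdown or reboot.
 */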
static void
megaraid_shutdown(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
adapter_t *adapter = (adapter_t *)host->hostdata;
__megaraid_shutdown(adapter);
}
static struct pci_device_id megaraid_pci_tbl[] = {
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
static struct pci_driver megaraid_pci_driver = {
.name = "megaraid_legacy",
.id_table = megaraid_pci_tbl,
.probe = megaraid_probe_one,
.remove = megaraid_remove_one,
.shutdown = megaraid_shutdown,
};
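/**
 * megaraid_init()
 *
 * Module entry point: sanitize module parameters, create the /proc root,
 * then register the PCI driver and the megadev character device.
 */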
static int __init megaraid_init(void)
{
int error;
if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
max_cmd_per_lun = MAX_CMD_PER_LUN;
if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
max_mbox_busy_wait = MBOX_BUSY_WAIT;
#ifdef CONFIG_PROC_FS
mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
if (!mega_proc_dir_entry) {
printk(KERN_WARNING
"megaraid: failed to create megaraid root\n");
}
#endif
error = pci_register_driver(&megaraid_pci_driver);
if (error) {
#ifdef CONFIG_PROC_FS
remove_proc_entry("megaraid", NULL);
#endif
return error;
}
/*
* Register the driver as a character device, for applications
* to access it for ioctls.
* Passing 0 as the first argument (major) to register_chrdev
* requests dynamic major number allocation.
*/
major = register_chrdev(0, "megadev_legacy", &megadev_fops);
if (major < 0) {
printk(KERN_WARNING
"megaraid: failed to register char device\n");
}
return 0;
}
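/**
 * megaraid_exit()
 *
 * Module exit point: unregister the character device and the PCI driver,
 * then remove the /proc root.
 */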
static void __exit megaraid_exit(void)
{
/*
* Unregister the character device interface to the driver.
*/
unregister_chrdev(major, "megadev_legacy");
pci_unregister_driver(&megaraid_pci_driver);
#ifdef CONFIG_PROC_FS
remove_proc_entry("megaraid", NULL);
#endif
}
module_init(megaraid_init);
module_exit(megaraid_exit);
/* vi: set ts=8 sw=8 tw=78: */
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*/
#ifndef __ISYS_IRQ_LOCAL_H__
#define __ISYS_IRQ_LOCAL_H__
#include <type_support.h>
typedef struct isys_irqc_state_s isys_irqc_state_t;
struct isys_irqc_state_s {
hrt_data edge;
hrt_data mask;
hrt_data status;
hrt_data enable;
hrt_data level_no;
/*hrt_data clear; */ /* write-only register */
};
#endif /* __ISYS_IRQ_LOCAL_H__ */
|
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef SMU13_DRIVER_IF_SMU_13_0_7_H
#define SMU13_DRIVER_IF_SMU_13_0_7_H
// *** IMPORTANT ***
// PMFW TEAM: Always increment the interface version on any change to this file
#define SMU13_0_7_DRIVER_IF_VERSION 0x35
//Increment this version if SkuTable_t or BoardTable_t change
#define PPTABLE_VERSION 0x27
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
#define NUM_MP0CLK_DPM_LEVELS 2
#define NUM_DCLK_DPM_LEVELS 8
#define NUM_VCLK_DPM_LEVELS 8
#define NUM_DISPCLK_DPM_LEVELS 8
#define NUM_DPPCLK_DPM_LEVELS 8
#define NUM_DPREFCLK_DPM_LEVELS 8
#define NUM_DCFCLK_DPM_LEVELS 8
#define NUM_DTBCLK_DPM_LEVELS 8
#define NUM_UCLK_DPM_LEVELS 4
#define NUM_LINK_LEVELS 3
#define NUM_FCLK_DPM_LEVELS 8
#define NUM_OD_FAN_MAX_POINTS 6
// Feature Control Defines
#define FEATURE_FW_DATA_READ_BIT 0
#define FEATURE_DPM_GFXCLK_BIT 1
#define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2
#define FEATURE_DPM_UCLK_BIT 3
#define FEATURE_DPM_FCLK_BIT 4
#define FEATURE_DPM_SOCCLK_BIT 5
#define FEATURE_DPM_MP0CLK_BIT 6
#define FEATURE_DPM_LINK_BIT 7
#define FEATURE_DPM_DCN_BIT 8
#define FEATURE_VMEMP_SCALING_BIT 9
#define FEATURE_VDDIO_MEM_SCALING_BIT 10
#define FEATURE_DS_GFXCLK_BIT 11
#define FEATURE_DS_SOCCLK_BIT 12
#define FEATURE_DS_FCLK_BIT 13
#define FEATURE_DS_LCLK_BIT 14
#define FEATURE_DS_DCFCLK_BIT 15
#define FEATURE_DS_UCLK_BIT 16
#define FEATURE_GFX_ULV_BIT 17
#define FEATURE_FW_DSTATE_BIT 18
#define FEATURE_GFXOFF_BIT 19
#define FEATURE_BACO_BIT 20
#define FEATURE_MM_DPM_BIT 21
#define FEATURE_SOC_MPCLK_DS_BIT 22
#define FEATURE_BACO_MPCLK_DS_BIT 23
#define FEATURE_THROTTLERS_BIT 24
#define FEATURE_SMARTSHIFT_BIT 25
#define FEATURE_GTHR_BIT 26
#define FEATURE_ACDC_BIT 27
#define FEATURE_VR0HOT_BIT 28
#define FEATURE_FW_CTF_BIT 29
#define FEATURE_FAN_CONTROL_BIT 30
#define FEATURE_GFX_DCS_BIT 31
#define FEATURE_GFX_READ_MARGIN_BIT 32
#define FEATURE_LED_DISPLAY_BIT 33
#define FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 34
#define FEATURE_OUT_OF_BAND_MONITOR_BIT 35
#define FEATURE_OPTIMIZED_VMIN_BIT 36
#define FEATURE_GFX_IMU_BIT 37
#define FEATURE_BOOT_TIME_CAL_BIT 38
#define FEATURE_GFX_PCC_DFLL_BIT 39
#define FEATURE_SOC_CG_BIT 40
#define FEATURE_DF_CSTATE_BIT 41
#define FEATURE_GFX_EDC_BIT 42
#define FEATURE_BOOT_POWER_OPT_BIT 43
#define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 44
#define FEATURE_DS_VCN_BIT 45
#define FEATURE_BACO_CG_BIT 46
#define FEATURE_MEM_TEMP_READ_BIT 47
#define FEATURE_ATHUB_MMHUB_PG_BIT 48
#define FEATURE_SOC_PCC_BIT 49
#define FEATURE_EDC_PWRBRK_BIT 50
#define FEATURE_SPARE_51_BIT 51
#define FEATURE_SPARE_52_BIT 52
#define FEATURE_SPARE_53_BIT 53
#define FEATURE_SPARE_54_BIT 54
#define FEATURE_SPARE_55_BIT 55
#define FEATURE_SPARE_56_BIT 56
#define FEATURE_SPARE_57_BIT 57
#define FEATURE_SPARE_58_BIT 58
#define FEATURE_SPARE_59_BIT 59
#define FEATURE_SPARE_60_BIT 60
#define FEATURE_SPARE_61_BIT 61
#define FEATURE_SPARE_62_BIT 62
#define FEATURE_SPARE_63_BIT 63
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
(1 << FEATURE_DPM_UCLK_BIT) | \
(1 << FEATURE_DPM_FCLK_BIT) | \
(1 << FEATURE_DPM_SOCCLK_BIT) | \
(1 << FEATURE_DPM_MP0CLK_BIT) | \
(1 << FEATURE_DPM_LINK_BIT) | \
(1 << FEATURE_DPM_DCN_BIT) | \
(1 << FEATURE_DS_GFXCLK_BIT) | \
(1 << FEATURE_DS_SOCCLK_BIT) | \
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
(1 << FEATURE_DS_UCLK_BIT) | \
(1ULL << FEATURE_DS_VCN_BIT))
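/*
 * Illustrative sketch only (not part of this interface): testing a single
 * FEATURE_*_BIT in a 64-bit mask such as ALLOWED_FEATURE_CTRL_DEFAULT.
 * The helper name is hypothetical.
 */
static inline int example_feature_allowed(uint64_t mask, uint32_t bit)
{
return (int)((mask >> bit) & 1ULL);
}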
//For use with feature control messages
typedef enum {
FEATURE_PWR_ALL,
FEATURE_PWR_S5,
FEATURE_PWR_BACO,
FEATURE_PWR_SOC,
FEATURE_PWR_GFX,
FEATURE_PWR_DOMAIN_COUNT,
} FEATURE_PWR_DOMAIN_e;
// Debug Overrides Bitmask
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
#define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010
#define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020
#define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040
#define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080
#define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100
#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200
#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400
#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800
#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
#define VR_MAPPING_VR_SELECT_SHIFT 0x00
#define VR_MAPPING_PLANE_SELECT_MASK 0x02
#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01
// PSI Bit Defines
#define PSI_SEL_VR0_PLANE0_PSI0 0x01
#define PSI_SEL_VR0_PLANE0_PSI1 0x02
#define PSI_SEL_VR0_PLANE1_PSI0 0x04
#define PSI_SEL_VR0_PLANE1_PSI1 0x08
#define PSI_SEL_VR1_PLANE0_PSI0 0x10
#define PSI_SEL_VR1_PLANE0_PSI1 0x20
#define PSI_SEL_VR1_PLANE1_PSI0 0x40
#define PSI_SEL_VR1_PLANE1_PSI1 0x80
typedef enum {
SVI_PSI_0, // Full phase count (default)
SVI_PSI_1, // Phase count 1st level
SVI_PSI_2, // Phase count 2nd level
SVI_PSI_3, // Single phase operation + active diode emulation
SVI_PSI_4, // Single phase operation + passive diode emulation *optional*
SVI_PSI_5, // Reserved
SVI_PSI_6, // Power down to 0V (voltage regulation disabled)
SVI_PSI_7, // Automated phase shedding and diode emulation
} SVI_PSI_e;
// Throttler Control/Status Bits
#define THROTTLER_TEMP_EDGE_BIT 0
#define THROTTLER_TEMP_HOTSPOT_BIT 1
#define THROTTLER_TEMP_HOTSPOT_G_BIT 2
#define THROTTLER_TEMP_HOTSPOT_M_BIT 3
#define THROTTLER_TEMP_MEM_BIT 4
#define THROTTLER_TEMP_VR_GFX_BIT 5
#define THROTTLER_TEMP_VR_MEM0_BIT 6
#define THROTTLER_TEMP_VR_MEM1_BIT 7
#define THROTTLER_TEMP_VR_SOC_BIT 8
#define THROTTLER_TEMP_VR_U_BIT 9
#define THROTTLER_TEMP_LIQUID0_BIT 10
#define THROTTLER_TEMP_LIQUID1_BIT 11
#define THROTTLER_TEMP_PLX_BIT 12
#define THROTTLER_TDC_GFX_BIT 13
#define THROTTLER_TDC_SOC_BIT 14
#define THROTTLER_TDC_U_BIT 15
#define THROTTLER_PPT0_BIT 16
#define THROTTLER_PPT1_BIT 17
#define THROTTLER_PPT2_BIT 18
#define THROTTLER_PPT3_BIT 19
#define THROTTLER_FIT_BIT 20
#define THROTTLER_GFX_APCC_PLUS_BIT 21
#define THROTTLER_COUNT 22
// FW DState Features Control Bits
#define FW_DSTATE_SOC_ULV_BIT 0
#define FW_DSTATE_G6_HSR_BIT 1
#define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2
#define FW_DSTATE_SMN_DS_BIT 3
#define FW_DSTATE_MP1_WHISPER_MODE_BIT 4
#define FW_DSTATE_SOC_LIV_MIN_BIT 5
#define FW_DSTATE_SOC_PLL_PWRDN_BIT 6
#define FW_DSTATE_MEM_PLL_PWRDN_BIT 7
#define FW_DSTATE_MALL_ALLOC_BIT 8
#define FW_DSTATE_MEM_PSI_BIT 9
#define FW_DSTATE_HSR_NON_STROBE_BIT 10
#define FW_DSTATE_MP0_ENTER_WFI_BIT 11
#define FW_DSTATE_U_ULV_BIT 12
#define FW_DSTATE_MALL_FLUSH_BIT 13
#define FW_DSTATE_SOC_PSI_BIT 14
#define FW_DSTATE_U_PSI_BIT 15
#define FW_DSTATE_UCP_DS_BIT 16
#define FW_DSTATE_CSRCLK_DS_BIT 17
#define FW_DSTATE_MMHUB_INTERLOCK_BIT 18
#define FW_DSTATE_D0i3_2_QUIET_FW_BIT 19
#define FW_DSTATE_CLDO_PRG_BIT 20
#define FW_DSTATE_DF_PLL_PWRDN_BIT 21
#define FW_DSTATE_U_LOW_PWR_MODE_EN_BIT 22
#define FW_DSTATE_GFX_PSI6_BIT 23
#define FW_DSTATE_GFX_VR_PWR_STAGE_BIT 24
//LED Display Mask & Control Bits
#define LED_DISPLAY_GFX_DPM_BIT 0
#define LED_DISPLAY_PCIE_BIT 1
#define LED_DISPLAY_ERROR_BIT 2
#define MEM_TEMP_READ_OUT_OF_BAND_BIT 0
#define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1
#define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2
typedef enum {
SMARTSHIFT_VERSION_1,
SMARTSHIFT_VERSION_2,
SMARTSHIFT_VERSION_3,
} SMARTSHIFT_VERSION_e;
typedef enum {
FOPT_CALC_AC_CALC_DC,
FOPT_PPTABLE_AC_CALC_DC,
FOPT_CALC_AC_PPTABLE_DC,
FOPT_PPTABLE_AC_PPTABLE_DC,
} FOPT_CALC_e;
typedef enum {
DRAM_BIT_WIDTH_DISABLED = 0,
DRAM_BIT_WIDTH_X_8 = 8,
DRAM_BIT_WIDTH_X_16 = 16,
DRAM_BIT_WIDTH_X_32 = 32,
DRAM_BIT_WIDTH_X_64 = 64,
DRAM_BIT_WIDTH_X_128 = 128,
DRAM_BIT_WIDTH_COUNT,
} DRAM_BIT_WIDTH_TYPE_e;
//I2C Interface
#define NUM_I2C_CONTROLLERS 8
#define I2C_CONTROLLER_ENABLED 1
#define I2C_CONTROLLER_DISABLED 0
#define MAX_SW_I2C_COMMANDS 24
typedef enum {
I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0
I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1
I2C_CONTROLLER_PORT_COUNT,
} I2cControllerPort_e;
typedef enum {
I2C_CONTROLLER_NAME_VR_GFX = 0,
I2C_CONTROLLER_NAME_VR_SOC,
I2C_CONTROLLER_NAME_VR_VMEMP,
I2C_CONTROLLER_NAME_VR_VDDIO,
I2C_CONTROLLER_NAME_LIQUID0,
I2C_CONTROLLER_NAME_LIQUID1,
I2C_CONTROLLER_NAME_PLX,
I2C_CONTROLLER_NAME_FAN_INTAKE,
I2C_CONTROLLER_NAME_COUNT,
} I2cControllerName_e;
typedef enum {
I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0,
I2C_CONTROLLER_THROTTLER_VR_GFX,
I2C_CONTROLLER_THROTTLER_VR_SOC,
I2C_CONTROLLER_THROTTLER_VR_VMEMP,
I2C_CONTROLLER_THROTTLER_VR_VDDIO,
I2C_CONTROLLER_THROTTLER_LIQUID0,
I2C_CONTROLLER_THROTTLER_LIQUID1,
I2C_CONTROLLER_THROTTLER_PLX,
I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
I2C_CONTROLLER_THROTTLER_INA3221,
I2C_CONTROLLER_THROTTLER_COUNT,
} I2cControllerThrottler_e;
typedef enum {
I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
I2C_CONTROLLER_PROTOCOL_VR_IR35217,
I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
I2C_CONTROLLER_PROTOCOL_INA3221,
I2C_CONTROLLER_PROTOCOL_TMP_MAX6604,
I2C_CONTROLLER_PROTOCOL_COUNT,
} I2cControllerProtocol_e;
typedef struct {
uint8_t Enabled;
uint8_t Speed;
uint8_t SlaveAddress;
uint8_t ControllerPort;
uint8_t ControllerName;
uint8_t ThermalThrotter;
uint8_t I2cProtocol;
uint8_t PaddingConfig;
} I2cControllerConfig_t;
typedef enum {
I2C_PORT_SVD_SCL = 0,
I2C_PORT_GPIO,
} I2cPort_e;
typedef enum {
I2C_SPEED_FAST_50K = 0, //50 Kbits/s
I2C_SPEED_FAST_100K, //100 Kbits/s
I2C_SPEED_FAST_400K, //400 Kbits/s
I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode)
I2C_SPEED_HIGH_2M, //2.3 Mbits/s
I2C_SPEED_COUNT,
} I2cSpeed_e;
typedef enum {
I2C_CMD_READ = 0,
I2C_CMD_WRITE,
I2C_CMD_COUNT,
} I2cCmdType_e;
#define CMDCONFIG_STOP_BIT 0
#define CMDCONFIG_RESTART_BIT 1
#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write
#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
typedef struct {
uint8_t ReadWriteData; //Return data for read. Data to send for write
uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write
} SwI2cCmd_t; //SW I2C Command Table
typedef struct {
uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1)
uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select
uint8_t SlaveAddress; //Slave address of device
uint8_t NumCmds; //Number of commands
SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
} SwI2cRequest_t; // SW I2C Request Table
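/*
 * Illustrative sketch only (not part of the PMFW interface): one plausible
 * way a caller could encode a single-byte register read as a two-command
 * request. The helper name and its parameters are hypothetical; per the
 * CMDCONFIG_READWRITE_BIT comment above, the read/write bit is 0 for read
 * and 1 for write.
 */
static inline void example_build_swi2c_read(SwI2cRequest_t *req,
uint8_t port, uint8_t slave, uint8_t reg)
{
*req = (SwI2cRequest_t){ 0 };
req->I2CcontrollerPort = port; /* I2C_CONTROLLER_PORT_0 or _1 */
req->I2CSpeed = I2C_SPEED_FAST_100K; /* see I2cSpeed_e */
req->SlaveAddress = slave;
req->NumCmds = 2;
/* Command 0: write the register index (write bit set) */
req->SwI2cCmds[0].ReadWriteData = reg;
req->SwI2cCmds[0].CmdConfig = CMDCONFIG_READWRITE_MASK;
/* Command 1: repeated start, read one byte back, then stop */
req->SwI2cCmds[1].CmdConfig = CMDCONFIG_RESTART_MASK | CMDCONFIG_STOP_MASK;
}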
typedef struct {
SwI2cRequest_t SwI2cRequest;
uint32_t Spare[8];
uint32_t MmHubPadding[8]; // SMU internal use
} SwI2cRequestExternal_t;
typedef struct {
uint64_t mca_umc_status;
uint64_t mca_umc_addr;
uint16_t ce_count_lo_chip;
uint16_t ce_count_hi_chip;
uint32_t eccPadding;
} EccInfo_t;
typedef struct {
EccInfo_t EccInfo[24];
} EccInfoTable_t;
//D3HOT sequences
typedef enum {
BACO_SEQUENCE,
MSR_SEQUENCE,
BAMACO_SEQUENCE,
ULPS_SEQUENCE,
D3HOT_SEQUENCE_COUNT,
} D3HOTSequence_e;
//This is aligned with RSMU PGFSM Register Mapping
typedef enum {
PG_DYNAMIC_MODE = 0,
PG_STATIC_MODE,
} PowerGatingMode_e;
//This is aligned with RSMU PGFSM Register Mapping
typedef enum {
PG_POWER_DOWN = 0,
PG_POWER_UP,
} PowerGatingSettings_e;
typedef struct {
uint32_t a; // store in IEEE float format in this variable
uint32_t b; // store in IEEE float format in this variable
uint32_t c; // store in IEEE float format in this variable
} QuadraticInt_t;
typedef struct {
uint32_t m; // store in IEEE float format in this variable
uint32_t b; // store in IEEE float format in this variable
} LinearInt_t;
typedef struct {
uint32_t a; // store in IEEE float format in this variable
uint32_t b; // store in IEEE float format in this variable
uint32_t c; // store in IEEE float format in this variable
} DroopInt_t;
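/*
 * Illustrative sketch only: the a/b/c and m/b coefficients above carry raw
 * IEEE-754 single-precision bit patterns in integer fields. A hedged
 * encoding helper (the name is hypothetical):
 */
static inline uint32_t example_ieee_float_bits(float f)
{
union { float f; uint32_t u; } cvt = { .f = f };
return cvt.u;
}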
typedef enum {
DCS_ARCH_DISABLED,
DCS_ARCH_FADCS,
DCS_ARCH_ASYNC,
} DCS_ARCH_e;
//Only Clks that have DPM descriptors are listed here
typedef enum {
PPCLK_GFXCLK = 0,
PPCLK_SOCCLK,
PPCLK_UCLK,
PPCLK_FCLK,
PPCLK_DCLK_0,
PPCLK_VCLK_0,
PPCLK_DCLK_1,
PPCLK_VCLK_1,
PPCLK_DISPCLK,
PPCLK_DPPCLK,
PPCLK_DPREFCLK,
PPCLK_DCFCLK,
PPCLK_DTBCLK,
PPCLK_COUNT,
} PPCLK_e;
typedef enum {
VOLTAGE_MODE_PPTABLE = 0,
VOLTAGE_MODE_FUSES,
VOLTAGE_MODE_COUNT,
} VOLTAGE_MODE_e;
typedef enum {
AVFS_VOLTAGE_GFX = 0,
AVFS_VOLTAGE_SOC,
AVFS_VOLTAGE_COUNT,
} AVFS_VOLTAGE_TYPE_e;
typedef enum {
AVFS_TEMP_COLD = 0,
AVFS_TEMP_HOT,
AVFS_TEMP_COUNT,
} AVFS_TEMP_e;
typedef enum {
AVFS_D_G,
AVFS_D_M_B,
AVFS_D_M_S,
AVFS_D_COUNT,
} AVFS_D_e;
typedef enum {
UCLK_DIV_BY_1 = 0,
UCLK_DIV_BY_2,
UCLK_DIV_BY_4,
UCLK_DIV_BY_8,
} UCLK_DIV_e;
typedef enum {
GPIO_INT_POLARITY_ACTIVE_LOW = 0,
GPIO_INT_POLARITY_ACTIVE_HIGH,
} GpioIntPolarity_e;
typedef enum {
PWR_CONFIG_TDP = 0,
PWR_CONFIG_TGP,
PWR_CONFIG_TCP_ESTIMATED,
PWR_CONFIG_TCP_MEASURED,
} PwrConfig_e;
typedef struct {
uint8_t Padding;
uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM
uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used
uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. Reference FOPT_CALC_e
LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz)
uint32_t Padding3[3];
uint16_t Padding4;
uint16_t FoptimalDc; //Foptimal frequency in DC power mode.
uint16_t FoptimalAc; //Foptimal frequency in AC power mode.
uint16_t Padding2;
} DpmDescriptor_t;
typedef enum {
PPT_THROTTLER_PPT0,
PPT_THROTTLER_PPT1,
PPT_THROTTLER_PPT2,
PPT_THROTTLER_PPT3,
PPT_THROTTLER_COUNT
} PPT_THROTTLER_e;
typedef enum {
TEMP_EDGE,
TEMP_HOTSPOT,
TEMP_HOTSPOT_G,
TEMP_HOTSPOT_M,
TEMP_MEM,
TEMP_VR_GFX,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
TEMP_VR_SOC,
TEMP_VR_U,
TEMP_LIQUID0,
TEMP_LIQUID1,
TEMP_PLX,
TEMP_COUNT,
} TEMP_e;
typedef enum {
TDC_THROTTLER_GFX,
TDC_THROTTLER_SOC,
TDC_THROTTLER_U,
TDC_THROTTLER_COUNT
} TDC_THROTTLER_e;
typedef enum {
SVI_PLANE_GFX,
SVI_PLANE_SOC,
SVI_PLANE_VMEMP,
SVI_PLANE_VDDIO_MEM,
SVI_PLANE_U,
SVI_PLANE_COUNT,
} SVI_PLANE_e;
typedef enum {
PMFW_VOLT_PLANE_GFX,
PMFW_VOLT_PLANE_SOC,
PMFW_VOLT_PLANE_COUNT
} PMFW_VOLT_PLANE_e;
typedef enum {
CUSTOMER_VARIANT_ROW,
CUSTOMER_VARIANT_FALCON,
CUSTOMER_VARIANT_COUNT,
} CUSTOMER_VARIANT_e;
typedef enum {
POWER_SOURCE_AC,
POWER_SOURCE_DC,
POWER_SOURCE_COUNT,
} POWER_SOURCE_e;
typedef enum {
MEM_VENDOR_SAMSUNG,
MEM_VENDOR_INFINEON,
MEM_VENDOR_ELPIDA,
MEM_VENDOR_ETRON,
MEM_VENDOR_NANYA,
MEM_VENDOR_HYNIX,
MEM_VENDOR_MOSEL,
MEM_VENDOR_WINBOND,
MEM_VENDOR_ESMT,
MEM_VENDOR_PLACEHOLDER0,
MEM_VENDOR_PLACEHOLDER1,
MEM_VENDOR_PLACEHOLDER2,
MEM_VENDOR_PLACEHOLDER3,
MEM_VENDOR_PLACEHOLDER4,
MEM_VENDOR_PLACEHOLDER5,
MEM_VENDOR_MICRON,
MEM_VENDOR_COUNT,
} MEM_VENDOR_e;
typedef enum {
PP_GRTAVFS_HW_CPO_CTL_ZONE0,
PP_GRTAVFS_HW_CPO_CTL_ZONE1,
PP_GRTAVFS_HW_CPO_CTL_ZONE2,
PP_GRTAVFS_HW_CPO_CTL_ZONE3,
PP_GRTAVFS_HW_CPO_CTL_ZONE4,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4,
PP_GRTAVFS_HW_ZONE0_VF,
PP_GRTAVFS_HW_ZONE1_VF1,
PP_GRTAVFS_HW_ZONE2_VF2,
PP_GRTAVFS_HW_ZONE3_VF3,
PP_GRTAVFS_HW_VOLTAGE_GB,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4,
PP_GRTAVFS_HW_RESERVED_0,
PP_GRTAVFS_HW_RESERVED_1,
PP_GRTAVFS_HW_RESERVED_2,
PP_GRTAVFS_HW_RESERVED_3,
PP_GRTAVFS_HW_RESERVED_4,
PP_GRTAVFS_HW_RESERVED_5,
PP_GRTAVFS_HW_RESERVED_6,
PP_GRTAVFS_HW_FUSE_COUNT,
} PP_GRTAVFS_HW_FUSE_e;
typedef enum {
PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4,
PP_GRTAVFS_FW_COMMON_FUSE_COUNT,
} PP_GRTAVFS_FW_COMMON_FUSE_e;
typedef enum {
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4,
PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY,
PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4,
PP_GRTAVFS_FW_SEP_FUSE_COUNT,
} PP_GRTAVFS_FW_SEP_FUSE_e;
#define PP_NUM_RTAVFS_PWL_ZONES 5
// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
typedef struct {
int8_t Offset; // in Amps
uint8_t Padding;
uint16_t MaxCurrent; // in Amps
} SviTelemetryScale_t;
#define PP_NUM_OD_VF_CURVE_POINTS (PP_NUM_RTAVFS_PWL_ZONES + 1)
#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0
#define PP_OD_FEATURE_PPT_BIT 2
#define PP_OD_FEATURE_FAN_CURVE_BIT 3
#define PP_OD_FEATURE_GFXCLK_BIT 7
#define PP_OD_FEATURE_UCLK_BIT 8
#define PP_OD_FEATURE_ZERO_FAN_BIT 9
#define PP_OD_FEATURE_TEMPERATURE_BIT 10
#define PP_OD_FEATURE_COUNT 13
typedef enum {
PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING,
PP_OD_POWER_FEATURE_ALWAYS_DISABLED,
} PP_OD_POWER_FEATURE_e;
typedef enum {
FAN_MODE_AUTO = 0,
FAN_MODE_MANUAL_LINEAR,
} FanMode_e;
typedef struct {
uint32_t FeatureCtrlMask;
//Voltage control
int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
uint32_t Reserved;
//Frequency changes
int16_t GfxclkFmin; // MHz
int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
//PPT
int16_t Ppt; // %
int16_t Tdc;
//Fan control
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
uint16_t FanMinimumPwm;
uint16_t AcousticTargetRpmThreshold;
uint16_t AcousticLimitRpmThreshold;
uint16_t FanTargetTemperature; // Degrees Celsius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
uint8_t MaxOpTemp;
uint8_t Padding[4];
uint32_t Spare[12];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
typedef struct {
OverDriveTable_t OverDriveTable;
} OverDriveTableExternal_t;
typedef struct {
uint32_t FeatureCtrlMask;
int16_t VoltageOffsetPerZoneBoundary;
uint16_t Reserved1;
uint16_t Reserved2;
int16_t GfxclkFmin; // MHz
int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
//PPT
int16_t Ppt; // %
int16_t Tdc;
uint8_t FanLinearPwmPoints;
uint8_t FanLinearTempPoints;
uint16_t FanMinimumPwm;
uint16_t AcousticTargetRpmThreshold;
uint16_t AcousticLimitRpmThreshold;
uint16_t FanTargetTemperature; // Degrees Celsius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
uint8_t MaxOpTemp;
uint8_t Padding[4];
uint32_t Spare[12];
} OverDriveLimits_t;
typedef enum {
BOARD_GPIO_SMUIO_0,
BOARD_GPIO_SMUIO_1,
BOARD_GPIO_SMUIO_2,
BOARD_GPIO_SMUIO_3,
BOARD_GPIO_SMUIO_4,
BOARD_GPIO_SMUIO_5,
BOARD_GPIO_SMUIO_6,
BOARD_GPIO_SMUIO_7,
BOARD_GPIO_SMUIO_8,
BOARD_GPIO_SMUIO_9,
BOARD_GPIO_SMUIO_10,
BOARD_GPIO_SMUIO_11,
BOARD_GPIO_SMUIO_12,
BOARD_GPIO_SMUIO_13,
BOARD_GPIO_SMUIO_14,
BOARD_GPIO_SMUIO_15,
BOARD_GPIO_SMUIO_16,
BOARD_GPIO_SMUIO_17,
BOARD_GPIO_SMUIO_18,
BOARD_GPIO_SMUIO_19,
BOARD_GPIO_SMUIO_20,
BOARD_GPIO_SMUIO_21,
BOARD_GPIO_SMUIO_22,
BOARD_GPIO_SMUIO_23,
BOARD_GPIO_SMUIO_24,
BOARD_GPIO_SMUIO_25,
BOARD_GPIO_SMUIO_26,
BOARD_GPIO_SMUIO_27,
BOARD_GPIO_SMUIO_28,
BOARD_GPIO_SMUIO_29,
BOARD_GPIO_SMUIO_30,
BOARD_GPIO_SMUIO_31,
MAX_BOARD_GPIO_SMUIO_NUM,
BOARD_GPIO_DC_GEN_A,
BOARD_GPIO_DC_GEN_B,
BOARD_GPIO_DC_GEN_C,
BOARD_GPIO_DC_GEN_D,
BOARD_GPIO_DC_GEN_E,
BOARD_GPIO_DC_GEN_F,
BOARD_GPIO_DC_GEN_G,
BOARD_GPIO_DC_GENLK_CLK,
BOARD_GPIO_DC_GENLK_VSYNC,
BOARD_GPIO_DC_SWAPLOCK_A,
BOARD_GPIO_DC_SWAPLOCK_B,
} BOARD_GPIO_TYPE_e;
#define INVALID_BOARD_GPIO 0xFF
typedef struct {
//PLL 0
uint16_t InitGfxclk_bypass;
uint16_t InitSocclk;
uint16_t InitMp0clk;
uint16_t InitMpioclk;
uint16_t InitSmnclk;
uint16_t InitUcpclk;
uint16_t InitCsrclk;
//PLL 1
uint16_t InitDprefclk;
uint16_t InitDcfclk;
uint16_t InitDtbclk;
//PLL 2
uint16_t InitDclk; //assume same DCLK/VCLK for both instances
uint16_t InitVclk;
// PLL 3
uint16_t InitUsbdfsclk;
uint16_t InitMp1clk;
uint16_t InitLclk;
uint16_t InitBaco400clk_bypass;
uint16_t InitBaco1200clk_bypass;
uint16_t InitBaco700clk_bypass;
// PLL 4
uint16_t InitFclk;
// PLL 5
uint16_t InitGfxclk_clkb;
//PLL 6
uint8_t InitUclkDPMState; // =0,1,2,3, frequency from FreqTableUclk
uint8_t Padding[3];
uint32_t InitVcoFreqPll0;
uint32_t InitVcoFreqPll1;
uint32_t InitVcoFreqPll2;
uint32_t InitVcoFreqPll3;
uint32_t InitVcoFreqPll4;
uint32_t InitVcoFreqPll5;
uint32_t InitVcoFreqPll6;
//encoding will change depending on SVI2/SVI3
uint16_t InitGfx; // In mV(Q2) , should be 0?
uint16_t InitSoc; // In mV(Q2)
uint16_t InitU; // In mV(Q2), not applicable
uint16_t Padding2;
uint32_t Spare[8];
} BootValues_t;
typedef struct {
uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts
uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps
uint16_t Temperature[TEMP_COUNT]; // Celsius
uint8_t PwmLimitMin;
uint8_t PwmLimitMax;
uint8_t FanTargetTemperature;
uint8_t Spare1[1];
uint16_t AcousticTargetRpmThresholdMin;
uint16_t AcousticTargetRpmThresholdMax;
uint16_t AcousticLimitRpmThresholdMin;
uint16_t AcousticLimitRpmThresholdMax;
uint16_t PccLimitMin;
uint16_t PccLimitMax;
uint16_t FanStopTempMin;
uint16_t FanStopTempMax;
uint16_t FanStartTempMin;
uint16_t FanStartTempMax;
uint16_t PowerMinPpt0[POWER_SOURCE_COUNT];
uint32_t Spare[11];
} MsgLimits_t;
typedef struct {
uint16_t BaseClockAc;
uint16_t GameClockAc;
uint16_t BoostClockAc;
uint16_t BaseClockDc;
uint16_t GameClockDc;
uint16_t BoostClockDc;
uint32_t Reserved[4];
} DriverReportedClocks_t;
typedef struct {
uint8_t DcBtcEnabled;
uint8_t Padding[3];
uint16_t DcTol; // mV Q2
uint16_t DcBtcGb; // mV Q2
uint16_t DcBtcMin; // mV Q2
uint16_t DcBtcMax; // mV Q2
LinearInt_t DcBtcGbScalar;
} AvfsDcBtcParams_t;
typedef struct {
uint16_t AvfsTemp[AVFS_TEMP_COUNT]; //in degrees C
uint16_t VftFMin; // in MHz
uint16_t VInversion; // in mV Q2
QuadraticInt_t qVft[AVFS_TEMP_COUNT];
QuadraticInt_t qAvfsGb;
QuadraticInt_t qAvfsGb2;
} AvfsFuseOverride_t;
typedef struct {
// SECTION: Version
uint32_t Version; // should be unique to each SKU(i.e if any value changes in below structure then this value must be different)
// SECTION: Feature Control
uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping
// SECTION: Miscellaneous Configuration
uint8_t TotalPowerConfig; // Determines how PMFW calculates the power. Use defines from PwrConfig_e
uint8_t CustomerVariant; //To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e
uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. Use defines from MEM_TEMP_*BIT
uint8_t SmartShiftVersion; // Determine what SmartShift feature version is supported Use defines from SMARTSHIFT_VERSION_e
// SECTION: Infrastructure Limits
uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported
uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. Multiple limits supported
uint16_t SocketPowerLimitSmartShift2; // In Watts. Power limit used SmartShift
//if set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programs (i.e. absolute power). If 0, all except index 0 will be scalars
//relative index 0
uint8_t EnableLegacyPptLimit;
uint8_t UseInputTelemetry; //applicable to SVI3 only and only to be set if VRs support
uint8_t SmartShiftMinReportedPptinDcs; //minimum possible active power consumption for this SKU. Used for SmartShift power reporting
uint8_t PaddingPpt[1];
uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature
uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with platform maximum temperature per VR current rail
uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input
uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only
uint16_t PaddingInfra;
// Per year normalized Vmax state failure rates (sum of the two domains divided by life time in years)
uint32_t FitControllerFailureRateLimit; //in IEEE float
//Expected GFX Duty Cycle at Vmax.
uint32_t FitControllerGfxDutyCycle; // in IEEE float
//Expected SOC Duty Cycle at Vmax.
uint32_t FitControllerSocDutyCycle; // in IEEE float
//This offset will be deducted from the controller output before it goes through the SOC Vset limiter block.
uint32_t FitControllerSocOffset; //in IEEE float
uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value
// SECTION: Throttler settings
uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping
// SECTION: FW DSTATE Settings
uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping
// SECTION: Voltage Control Parameters
uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV(part of FW_DSTATE)
uint16_t UlvVoltageOffsetU; // In mV(Q2). ULV offset used in either U_ULV(part of FW_DSTATE)
uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE
// Voltage Limits
uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage without FIT controller enabled
uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage with FIT controller enabled
//Vmin Optimizations
int16_t VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature hysteresis for switching between low/high temperature values for Vmin
int16_t VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature threshold for switching between low/high temperature values for Vmin
uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at hot.
uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at cold.
uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot.
uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold.
uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin
uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot
uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold
//This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for.
uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT];
//Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts.
uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT];
//Scalar coefficient of the PSM aging degradation function
uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM
//Exponential coefficient of the PSM aging degradation function
uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM
//Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN
//Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN
uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT];
uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT];
uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
QuadraticInt_t Vmin_droop;
uint32_t SpareVmin[9];
//SECTION: DPM Configuration 1
DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];
uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
// SECTION: DPM Configuration 2
uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz
uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2)
uint8_t GfxclkSpare[2];
uint16_t GfxclkFreqCap;
//GFX Idle Power Settings
uint16_t GfxclkFgfxoffEntry; // in MHz
uint16_t GfxclkFgfxoffExitImu; // in MHz
uint16_t GfxclkFgfxoffExitRlc; // in MHz
uint16_t GfxclkThrottleClock; //Used primarily in DCS
uint8_t EnableGfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages
uint8_t GfxIdlePadding;
uint8_t SmsRepairWRCKClkDivEn;
uint8_t SmsRepairWRCKClkDivVal;
uint8_t GfxOffEntryEarlyMGCGEn;
uint8_t GfxOffEntryForceCGCGEn;
uint8_t GfxOffEntryForceCGCGDelayEn;
uint8_t GfxOffEntryForceCGCGDelayVal; // in microseconds
uint16_t GfxclkFreqGfxUlv; // in MHz
uint8_t GfxIdlePadding2[2];
uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before it triggers GFXOFF entry
uint32_t GfxoffSpare[15];
// GFX GPO
uint32_t DfllBtcMasterScalerM;
int32_t DfllBtcMasterScalerB;
uint32_t DfllBtcSlaveScalerM;
int32_t DfllBtcSlaveScalerB;
uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
uint32_t GfxGpoSpare[10];
// GFX DCS
uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
uint16_t PaddingDcs;
uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS.
uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase.
uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.
uint32_t DcsSpare[14];
// UCLK section
uint16_t ShadowFreqTableUclk[NUM_UCLK_DPM_LEVELS]; // In MHz
// UCLK section
uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
uint8_t PaddingMem[3];
uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 4 DPM states, 0-P0, 1-P1, 2-P2, 3-P3.
uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
uint16_t MemVmempVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
uint16_t MemVddioVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
//FCLK Section
uint8_t FclkDpmUPstates[NUM_FCLK_DPM_LEVELS]; // U P-state ID associated with each FCLK DPM state.
uint16_t FclkDpmVddU[NUM_FCLK_DPM_LEVELS]; // mV(Q2) Vset U voltage associated with each FCLK DPM state.
uint16_t FclkDpmUSpeed[NUM_FCLK_DPM_LEVELS]; //U speed associated with each FCLK DPM state
uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 to use FW calculated value
uint16_t PaddingFclk;
// Link DPM Settings
uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4
uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
uint16_t LclkFreq[NUM_LINK_LEVELS];
// SECTION: Fan Control
uint16_t FanStopTemp[TEMP_COUNT]; //Celsius
uint16_t FanStartTemp[TEMP_COUNT]; //Celsius
uint16_t FanGain[TEMP_COUNT];
uint16_t FanGainPadding;
uint16_t FanPwmMin;
uint16_t AcousticTargetRpmThreshold;
uint16_t AcousticLimitRpmThreshold;
uint16_t FanMaximumRpm;
uint16_t MGpuAcousticLimitRpmThreshold;
uint16_t FanTargetGfxclk;
uint32_t TempInputSelectMask;
uint8_t FanZeroRpmEnable;
uint8_t FanTachEdgePerRev;
uint16_t FanTargetTemperature[TEMP_COUNT];
// The following are AFC override parameters. Leave at 0 to use FW defaults.
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
uint16_t FuzzyFan_Reserved;
uint16_t FwCtfLimit[TEMP_COUNT];
uint16_t IntakeTempEnableRPM;
int16_t IntakeTempOffsetTemp;
uint16_t IntakeTempReleaseTemp;
uint16_t IntakeTempHighIntakeAcousticLimit;
uint16_t IntakeTempAcouticLimitReleaseRate;
int16_t FanAbnormalTempLimitOffset;
uint16_t FanStalledTriggerRpm;
uint16_t FanAbnormalTriggerRpmCoeff;
uint16_t FanAbnormalDetectionEnable;
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding[3];
uint32_t FanSpare[13];
// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
uint8_t GfxAvfsPadding[3];
uint32_t L2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
uint32_t SeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
uint32_t CommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t L2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
uint32_t SeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
uint32_t Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;
uint32_t V2F_vmin_range_low;
uint32_t V2F_vmin_range_high;
uint32_t V2F_vmax_range_low;
uint32_t V2F_vmax_range_high;
AvfsDcBtcParams_t DcBtcGfxParams;
uint32_t GfxAvfsSpare[32];
//SECTION: VDD_SOC AVFS
uint8_t OverrideSocAvfsFuses;
uint8_t MinSocAvfsRevision;
uint8_t SocAvfsPadding[2];
AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT];
DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb
LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V
QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V
AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT];
uint32_t SocAvfsSpare[32];
//SECTION: Boot clock and voltage values
BootValues_t BootValues;
//SECTION: Driver Reported Clocks
DriverReportedClocks_t DriverReportedClocks;
//SECTION: Message Limits
MsgLimits_t MsgLimits;
//SECTION: OverDrive Limits
OverDriveLimits_t OverDriveLimitsMin;
OverDriveLimits_t OverDriveLimitsBasicMax;
OverDriveLimits_t OverDriveLimitsAdvancedMax;
// SECTION: Advanced Options
uint32_t DebugOverrides;
// SECTION: Total Board Power idle vs active coefficients
uint8_t TotalBoardPowerSupport;
uint8_t TotalBoardPowerPadding[3];
int16_t TotalIdleBoardPowerM;
int16_t TotalIdleBoardPowerB;
int16_t TotalBoardPowerM;
int16_t TotalBoardPowerB;
QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
// SECTION: Sku Reserved
uint32_t Spare[43];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
} SkuTable_t;
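// Note on the mV(Q2) annotations used throughout this table: these appear to
// be fixed-point values with 2 fractional bits, i.e. millivolts scaled by 4
// (an assumption based on the usual SMU Qn convention). Illustrative
// conversion helpers, not part of this interface:
//   #define MV_TO_Q2(mv) ((uint16_t)((mv) * 4)) // e.g. 750 mV -> 3000
//   #define Q2_TO_MV(q)  ((q) / 4)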
typedef struct {
// SECTION: Version
uint32_t Version; //should be unique to each board type
// SECTION: I2C Control
I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS];
// SECTION: SVI2 Board Parameters
uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields
uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields
uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields
uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields
uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
uint8_t VmempUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
uint8_t VddioUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
//SECTION: SVI3 Board Parameters
uint8_t SlaveAddrMapping[SVI_PLANE_COUNT];
uint8_t VrPsiSupport[SVI_PLANE_COUNT];
uint8_t PaddingPsi[SVI_PLANE_COUNT];
uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3
// SECTION: Voltage Regulator Settings
SviTelemetryScale_t SviTelemetryScale[SVI_PLANE_COUNT];
uint32_t VoltageTelemetryRatio[SVI_PLANE_COUNT]; // This is used for VDDIO Svi2 Div Ratio workaround. It has 16 fractional bits (Q16.16)
uint8_t DownSlewRateVr[SVI_PLANE_COUNT];
// SECTION: GPIO Settings
uint8_t LedOffGpio;
uint8_t FanOffGpio;
uint8_t GfxVrPowerStageOffGpio;
uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching
uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching
uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
uint8_t GthrGpio; // GPIO pin configured for GTHR Event
uint8_t GthrPolarity; // GPIO polarity for GTHR
// LED Display Settings
uint8_t LedPin0; // GPIO number for LedPin[0]
uint8_t LedPin1; // GPIO number for LedPin[1]
uint8_t LedPin2; // GPIO number for LedPin[2]
uint8_t LedEnableMask;
uint8_t LedPcie; // GPIO number for PCIE results
uint8_t LedError; // GPIO number for Error Cases
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
uint8_t UclkTrainingModeSpreadPercent; // Q4.4
uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT];
// FCLK Spread Spectrum
uint8_t FclkSpreadPadding;
uint8_t FclkSpreadPercent; // Q4.4
uint16_t FclkSpreadFreq; // kHz
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
uint8_t VddqOffEnabled;
uint8_t PaddingUmcFlags[2];
uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
uint8_t FuseWritePowerMuxPresent;
uint8_t FuseWritePadding[3];
// SECTION: Board Reserved
uint32_t BoardSpare[63];
// SECTION: Structure Padding
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
} BoardTable_t;
#pragma pack(push, 1)
typedef struct {
SkuTable_t SkuTable;
BoardTable_t BoardTable;
} PPTable_t;
#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
uint16_t GfxclkAverageLpfTau;
uint16_t FclkAverageLpfTau;
uint16_t UclkAverageLpfTau;
uint16_t GfxActivityLpfTau;
uint16_t UclkActivityLpfTau;
uint16_t SocketPowerLpfTau;
uint16_t VcnClkAverageLpfTau;
uint16_t VcnUsageAverageLpfTau;
} DriverSmuConfig_t;
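// Illustration (an assumption, not necessarily the PMFW implementation): a
// first-order low-pass filter with time constant Tau in ms, sampled every
// Dt ms, would update a running average roughly as:
//   avg += (Dt * (sample - avg)) / Tau;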
typedef struct {
DriverSmuConfig_t DriverSmuConfig;
uint32_t Spare[8];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
} DriverSmuConfigExternal_t;
typedef struct {
uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
uint16_t Padding;
uint32_t Spare[32];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
} DriverInfoTable_t;
typedef struct {
uint32_t CurrClock[PPCLK_COUNT];
uint16_t AverageGfxclkFrequencyTarget;
uint16_t AverageGfxclkFrequencyPreDs;
uint16_t AverageGfxclkFrequencyPostDs;
uint16_t AverageFclkFrequencyPreDs;
uint16_t AverageFclkFrequencyPostDs;
uint16_t AverageMemclkFrequencyPreDs ; // this is scaled to actual memory clock
uint16_t AverageMemclkFrequencyPostDs ; // this is scaled to actual memory clock
uint16_t AverageVclk0Frequency ;
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
uint16_t PCIeBusy ;
uint16_t dGPU_W_MAX ;
uint16_t padding ;
uint32_t MetricsCounter ;
uint16_t AvgVoltage[SVI_PLANE_COUNT];
uint16_t AvgCurrent[SVI_PLANE_COUNT];
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
uint16_t Vcn0ActivityPercentage ;
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
uint16_t AverageSocketPower;
uint16_t AverageTotalBoardPower;
uint16_t AvgTemperature[TEMP_COUNT];
uint16_t AvgTemperatureFanIntake;
uint8_t PcieRate ;
uint8_t PcieWidth ;
uint8_t AvgFanPwm;
uint8_t Padding[1];
uint16_t AvgFanRpm;
uint8_t ThrottlingPercentage[THROTTLER_COUNT];
//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
uint16_t ApuSTAPMSmartShiftLimit;
uint16_t ApuSTAPMLimit;
uint16_t AvgApuSocketPower;
uint16_t AverageUclkActivity_MAX;
uint32_t PublicSerialNumberLower;
uint32_t PublicSerialNumberUpper;
} SmuMetrics_t;
typedef struct {
SmuMetrics_t SmuMetrics;
uint32_t Spare[30];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
} SmuMetricsExternal_t;
typedef struct {
uint8_t WmSetting;
uint8_t Flags;
uint8_t Padding[2];
} WatermarkRowGeneric_t;
#define NUM_WM_RANGES 4
typedef enum {
WATERMARKS_CLOCK_RANGE = 0,
WATERMARKS_DUMMY_PSTATE,
WATERMARKS_MALL,
WATERMARKS_COUNT,
} WATERMARKS_FLAGS_e;
typedef struct {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES];
} Watermarks_t;
typedef struct {
Watermarks_t Watermarks;
uint32_t Spare[16];
uint32_t MmHubPadding[8]; // SMU internal use
} WatermarksExternal_t;
typedef struct {
uint16_t avgPsmCount[36];
uint16_t minPsmCount[36];
float avgPsmVoltage[36];
float minPsmVoltage[36];
} AvfsDebugTable_t;
typedef struct {
AvfsDebugTable_t AvfsDebugTable;
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsDebugTableExternal_t;
typedef struct {
uint8_t Gfx_ActiveHystLimit;
uint8_t Gfx_IdleHystLimit;
uint8_t Gfx_FPS;
uint8_t Gfx_MinActiveFreqType;
uint8_t Gfx_BoosterFreqType;
uint8_t PaddingGfx;
uint16_t Gfx_MinActiveFreq; // MHz
uint16_t Gfx_BoosterFreq; // MHz
uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms
uint32_t Gfx_PD_Data_limit_a; // Q16
uint32_t Gfx_PD_Data_limit_b; // Q16
uint32_t Gfx_PD_Data_limit_c; // Q16
uint32_t Gfx_PD_Data_error_coeff; // Q16
uint32_t Gfx_PD_Data_error_rate_coeff; // Q16
uint8_t Fclk_ActiveHystLimit;
uint8_t Fclk_IdleHystLimit;
uint8_t Fclk_FPS;
uint8_t Fclk_MinActiveFreqType;
uint8_t Fclk_BoosterFreqType;
uint8_t PaddingFclk;
uint16_t Fclk_MinActiveFreq; // MHz
uint16_t Fclk_BoosterFreq; // MHz
uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms
uint32_t Fclk_PD_Data_limit_a; // Q16
uint32_t Fclk_PD_Data_limit_b; // Q16
uint32_t Fclk_PD_Data_limit_c; // Q16
uint32_t Fclk_PD_Data_error_coeff; // Q16
uint32_t Fclk_PD_Data_error_rate_coeff; // Q16
uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16
uint8_t Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS];
uint8_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS];
uint16_t Mem_Fps;
uint8_t padding[2];
} DpmActivityMonitorCoeffInt_t;
typedef struct {
DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt;
uint32_t MmHubPadding[8]; // SMU internal use
} DpmActivityMonitorCoeffIntExternal_t;
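// The fields marked Q16 above are fixed point with 16 fractional bits. A
// minimal conversion sketch (illustrative helper, not part of this interface):
//   static inline uint32_t to_q16(double x)
//   {
//       return (uint32_t)(x * 65536.0); // e.g. 1.5 -> 0x18000
//   }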
// Workload bits
#define WORKLOAD_PPLIB_DEFAULT_BIT 0
#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
#define WORKLOAD_PPLIB_VIDEO_BIT 3
#define WORKLOAD_PPLIB_VR_BIT 4
#define WORKLOAD_PPLIB_COMPUTE_BIT 5
#define WORKLOAD_PPLIB_CUSTOM_BIT 6
#define WORKLOAD_PPLIB_WINDOW_3D_BIT 7
#define WORKLOAD_PPLIB_COUNT 8
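// These WORKLOAD_* values are bit positions, not masks. A driver-side mask
// selecting e.g. the compute workload could be built as follows (illustrative
// sketch, not from this header):
//   uint32_t workload_mask = 1u << WORKLOAD_PPLIB_COMPUTE_BIT;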
// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
// SMC_MSG_TransferTableSmu2Dram
// Table transfer status
#define TABLE_TRANSFER_OK 0x0
#define TABLE_TRANSFER_FAILED 0xFF
#define TABLE_TRANSFER_PENDING 0xAB
// Table types
#define TABLE_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
#define TABLE_WATERMARKS 2
#define TABLE_AVFS_PSM_DEBUG 3
#define TABLE_PMSTATUSLOG 4
#define TABLE_SMU_METRICS 5
#define TABLE_DRIVER_SMU_CONFIG 6
#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 8
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
#define TABLE_WIFIBAND 12
#define TABLE_COUNT 13
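// Sketch of polling a transfer status word against the codes above (where the
// status word comes from is hypothetical; only the codes are from this header):
//   do {
//       status = read_transfer_status(); // hypothetical status source
//   } while (status == TABLE_TRANSFER_PENDING);
//   if (status != TABLE_TRANSFER_OK)
//       return -EIO; // TABLE_TRANSFER_FAILED or unknown code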
// IH Interrupt ID
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
#define IH_INTERRUPT_CONTEXT_ID_BACO 0x2
#define IH_INTERRUPT_CONTEXT_ID_AC 0x3
#define IH_INTERRUPT_CONTEXT_ID_DC 0x4
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
//
// aw88081.c -- AW88081 ALSA SoC Audio driver
//
// Copyright (c) 2024 awinic Technology CO., LTD
//
// Author: Weidong Wang <[email protected]>
//
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include "aw88081.h"
#include "aw88395/aw88395_device.h"
struct aw88081 {
struct aw_device *aw_pa;
struct mutex lock;
struct delayed_work start_work;
struct regmap *regmap;
struct aw_container *aw_cfg;
bool phase_sync;
};
static const struct regmap_config aw88081_regmap_config = {
.val_bits = 16,
.reg_bits = 8,
.max_register = AW88081_REG_MAX,
.reg_format_endian = REGMAP_ENDIAN_LITTLE,
.val_format_endian = REGMAP_ENDIAN_BIG,
};
static int aw88081_dev_get_iis_status(struct aw_device *aw_dev)
{
unsigned int reg_val;
int ret;
ret = regmap_read(aw_dev->regmap, AW88081_SYSST_REG, &reg_val);
if (ret)
return ret;
if ((reg_val & AW88081_BIT_PLL_CHECK) != AW88081_BIT_PLL_CHECK) {
dev_err(aw_dev->dev, "check pll lock fail,reg_val:0x%04x", reg_val);
return -EINVAL;
}
return 0;
}
static int aw88081_dev_check_mode1_pll(struct aw_device *aw_dev)
{
int ret, i;
for (i = 0; i < AW88081_DEV_SYSST_CHECK_MAX; i++) {
ret = aw88081_dev_get_iis_status(aw_dev);
if (ret) {
dev_err(aw_dev->dev, "mode1 iis signal check error");
usleep_range(AW88081_2000_US, AW88081_2000_US + 10);
} else {
return 0;
}
}
return -EPERM;
}
static int aw88081_dev_check_mode2_pll(struct aw_device *aw_dev)
{
unsigned int reg_val;
int ret, i;
ret = regmap_read(aw_dev->regmap, AW88081_PLLCTRL1_REG, &reg_val);
if (ret)
return ret;
reg_val &= (~AW88081_CCO_MUX_MASK);
if (reg_val == AW88081_CCO_MUX_DIVIDED_VALUE) {
dev_dbg(aw_dev->dev, "CCO_MUX is already divider");
return -EPERM;
}
/* switch to mode2 */
ret = regmap_update_bits(aw_dev->regmap, AW88081_PLLCTRL1_REG,
~AW88081_CCO_MUX_MASK, AW88081_CCO_MUX_DIVIDED_VALUE);
if (ret)
return ret;
for (i = 0; i < AW88081_DEV_SYSST_CHECK_MAX; i++) {
ret = aw88081_dev_get_iis_status(aw_dev);
if (ret) {
dev_err(aw_dev->dev, "mode2 iis check error");
usleep_range(AW88081_2000_US, AW88081_2000_US + 10);
} else {
break;
}
}
/* switch back to mode1 */
ret = regmap_update_bits(aw_dev->regmap, AW88081_PLLCTRL1_REG,
~AW88081_CCO_MUX_MASK, AW88081_CCO_MUX_BYPASS_VALUE);
if (ret == 0) {
usleep_range(AW88081_2000_US, AW88081_2000_US + 10);
for (i = 0; i < AW88081_DEV_SYSST_CHECK_MAX; i++) {
ret = aw88081_dev_check_mode1_pll(aw_dev);
if (ret) {
dev_err(aw_dev->dev, "mode2 switch to mode1, iis check error");
usleep_range(AW88081_2000_US, AW88081_2000_US + 10);
} else {
break;
}
}
}
return ret;
}
static int aw88081_dev_check_syspll(struct aw_device *aw_dev)
{
int ret;
ret = aw88081_dev_check_mode1_pll(aw_dev);
if (ret) {
dev_dbg(aw_dev->dev, "mode1 check iis failed try switch to mode2 check");
ret = aw88081_dev_check_mode2_pll(aw_dev);
if (ret) {
dev_err(aw_dev->dev, "mode2 check iis failed");
return ret;
}
}
return 0;
}
static int aw88081_dev_check_sysst(struct aw_device *aw_dev)
{
unsigned int check_val;
unsigned int reg_val;
unsigned int value;
int ret, i;
ret = regmap_read(aw_dev->regmap, AW88081_PWMCTRL4_REG, &reg_val);
if (ret)
return ret;
if (reg_val & (~AW88081_NOISE_GATE_EN_MASK))
check_val = AW88081_NO_SWS_SYSST_CHECK;
else
check_val = AW88081_SWS_SYSST_CHECK;
for (i = 0; i < AW88081_DEV_SYSST_CHECK_MAX; i++) {
ret = regmap_read(aw_dev->regmap, AW88081_SYSST_REG, &reg_val);
if (ret)
return ret;
value = reg_val & (~AW88081_BIT_SYSST_CHECK_MASK) & check_val;
if (value != check_val) {
dev_err(aw_dev->dev, "check sysst fail, reg_val=0x%04x, check:0x%x",
reg_val, check_val);
usleep_range(AW88081_2000_US, AW88081_2000_US + 10);
} else {
return 0;
}
}
return -EPERM;
}
static void aw88081_dev_i2s_tx_enable(struct aw_device *aw_dev, bool flag)
{
if (flag)
regmap_update_bits(aw_dev->regmap, AW88081_I2SCTRL3_REG,
~AW88081_I2STXEN_MASK, AW88081_I2STXEN_ENABLE_VALUE);
else
regmap_update_bits(aw_dev->regmap, AW88081_I2SCTRL3_REG,
~AW88081_I2STXEN_MASK, AW88081_I2STXEN_DISABLE_VALUE);
}
static void aw88081_dev_pwd(struct aw_device *aw_dev, bool pwd)
{
if (pwd)
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
~AW88081_PWDN_MASK, AW88081_PWDN_POWER_DOWN_VALUE);
else
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
~AW88081_PWDN_MASK, AW88081_PWDN_WORKING_VALUE);
}
static void aw88081_dev_amppd(struct aw_device *aw_dev, bool amppd)
{
if (amppd)
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
~AW88081_EN_PA_MASK, AW88081_EN_PA_POWER_DOWN_VALUE);
else
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
~AW88081_EN_PA_MASK, AW88081_EN_PA_WORKING_VALUE);
}
static void aw88081_dev_clear_int_status(struct aw_device *aw_dev)
{
unsigned int int_status;
/* read int status and clear */
regmap_read(aw_dev->regmap, AW88081_SYSINT_REG, &int_status);
/* make sure int status is clear */
regmap_read(aw_dev->regmap, AW88081_SYSINT_REG, &int_status);
dev_dbg(aw_dev->dev, "read interrupt reg = 0x%04x", int_status);
}
static void aw88081_dev_set_volume(struct aw_device *aw_dev, unsigned int value)
{
struct aw_volume_desc *vol_desc = &aw_dev->volume_desc;
unsigned int volume;
volume = min((value + vol_desc->init_volume), (unsigned int)AW88081_MUTE_VOL);
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL2_REG, ~AW88081_VOL_MASK, volume);
}
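/*
 * Illustrative note: judging from the fade helpers below, the volume field
 * is an attenuation where larger values are quieter and AW88081_MUTE_VOL is
 * full mute, so the min() above saturates at mute.
 */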
static void aw88081_dev_fade_in(struct aw_device *aw_dev)
{
struct aw_volume_desc *desc = &aw_dev->volume_desc;
int fade_in_vol = desc->ctl_volume;
int fade_step = aw_dev->fade_step;
int i;
if (fade_step == 0 || aw_dev->fade_in_time == 0) {
aw88081_dev_set_volume(aw_dev, fade_in_vol);
return;
}
for (i = AW88081_MUTE_VOL; i >= fade_in_vol; i -= fade_step) {
aw88081_dev_set_volume(aw_dev, i);
usleep_range(aw_dev->fade_in_time, aw_dev->fade_in_time + 10);
}
if (i != fade_in_vol)
aw88081_dev_set_volume(aw_dev, fade_in_vol);
}
static void aw88081_dev_fade_out(struct aw_device *aw_dev)
{
struct aw_volume_desc *desc = &aw_dev->volume_desc;
int fade_step = aw_dev->fade_step;
int i;
if (fade_step == 0 || aw_dev->fade_out_time == 0) {
aw88081_dev_set_volume(aw_dev, AW88081_MUTE_VOL);
return;
}
for (i = desc->ctl_volume; i <= AW88081_MUTE_VOL; i += fade_step) {
aw88081_dev_set_volume(aw_dev, i);
usleep_range(aw_dev->fade_out_time, aw_dev->fade_out_time + 10);
}
if (i != AW88081_MUTE_VOL)
aw88081_dev_set_volume(aw_dev, AW88081_MUTE_VOL);
}
static void aw88081_dev_mute(struct aw_device *aw_dev, bool is_mute)
{
if (is_mute) {
aw88081_dev_fade_out(aw_dev);
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
~AW88081_HMUTE_MASK, AW88081_HMUTE_ENABLE_VALUE);
} else {
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
~AW88081_HMUTE_MASK, AW88081_HMUTE_DISABLE_VALUE);
aw88081_dev_fade_in(aw_dev);
}
}
static void aw88081_dev_uls_hmute(struct aw_device *aw_dev, bool uls_hmute)
{
if (uls_hmute)
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
~AW88081_ULS_HMUTE_MASK,
AW88081_ULS_HMUTE_ENABLE_VALUE);
else
regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
~AW88081_ULS_HMUTE_MASK,
AW88081_ULS_HMUTE_DISABLE_VALUE);
}
static int aw88081_dev_reg_update(struct aw88081 *aw88081,
unsigned char *data, unsigned int len)
{
struct aw_device *aw_dev = aw88081->aw_pa;
struct aw_volume_desc *vol_desc = &aw_dev->volume_desc;
unsigned int read_vol;
int data_len, i, ret;
int16_t *reg_data;
u16 reg_val;
u8 reg_addr;
if (!len || !data) {
dev_err(aw_dev->dev, "reg data is null or len is 0");
return -EINVAL;
}
reg_data = (int16_t *)data;
data_len = len >> 1;
if (data_len & 0x1) {
dev_err(aw_dev->dev, "data len:%d unsupported", data_len);
return -EINVAL;
}
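/* The payload is a flat array of (reg_addr, reg_val) 16-bit pairs. */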
for (i = 0; i < data_len; i += 2) {
reg_addr = reg_data[i];
reg_val = reg_data[i + 1];
if (reg_addr == AW88081_SYSCTRL_REG) {
reg_val &= ~(~AW88081_EN_PA_MASK |
~AW88081_PWDN_MASK |
~AW88081_HMUTE_MASK |
~AW88081_ULS_HMUTE_MASK);
reg_val |= AW88081_EN_PA_POWER_DOWN_VALUE |
AW88081_PWDN_POWER_DOWN_VALUE |
AW88081_HMUTE_ENABLE_VALUE |
AW88081_ULS_HMUTE_ENABLE_VALUE;
}
if (reg_addr == AW88081_SYSCTRL2_REG) {
read_vol = (reg_val & (~AW88081_VOL_MASK)) >>
AW88081_VOL_START_BIT;
aw_dev->volume_desc.init_volume = read_vol;
}
/* i2stxen */
if (reg_addr == AW88081_I2SCTRL3_REG) {
/* disable tx */
reg_val &= AW88081_I2STXEN_MASK;
reg_val |= AW88081_I2STXEN_DISABLE_VALUE;
}
ret = regmap_write(aw_dev->regmap, reg_addr, reg_val);
if (ret)
return ret;
}
if (aw_dev->prof_cur != aw_dev->prof_index)
vol_desc->ctl_volume = 0;
/* keep min volume */
aw88081_dev_set_volume(aw_dev, vol_desc->mute_volume);
return 0;
}
static int aw88081_dev_get_prof_name(struct aw_device *aw_dev, int index, char **prof_name)
{
struct aw_prof_info *prof_info = &aw_dev->prof_info;
struct aw_prof_desc *prof_desc;
if ((index >= aw_dev->prof_info.count) || (index < 0)) {
dev_err(aw_dev->dev, "index[%d] overflow count[%d]",
index, aw_dev->prof_info.count);
return -EINVAL;
}
prof_desc = &aw_dev->prof_info.prof_desc[index];
*prof_name = prof_info->prof_name_list[prof_desc->id];
return 0;
}
static int aw88081_dev_get_prof_data(struct aw_device *aw_dev, int index,
struct aw_prof_desc **prof_desc)
{
if ((index >= aw_dev->prof_info.count) || (index < 0)) {
dev_err(aw_dev->dev, "%s: index[%d] overflow count[%d]\n",
__func__, index, aw_dev->prof_info.count);
return -EINVAL;
}
*prof_desc = &aw_dev->prof_info.prof_desc[index];
return 0;
}
static int aw88081_dev_fw_update(struct aw88081 *aw88081)
{
struct aw_device *aw_dev = aw88081->aw_pa;
struct aw_prof_desc *prof_index_desc;
struct aw_sec_data_desc *sec_desc;
char *prof_name;
int ret;
ret = aw88081_dev_get_prof_name(aw_dev, aw_dev->prof_index, &prof_name);
if (ret) {
dev_err(aw_dev->dev, "get prof name failed");
return -EINVAL;
}
dev_dbg(aw_dev->dev, "start update %s", prof_name);
ret = aw88081_dev_get_prof_data(aw_dev, aw_dev->prof_index, &prof_index_desc);
if (ret)
return ret;
/* update reg */
sec_desc = prof_index_desc->sec_desc;
ret = aw88081_dev_reg_update(aw88081, sec_desc[AW88395_DATA_TYPE_REG].data,
sec_desc[AW88395_DATA_TYPE_REG].len);
if (ret) {
dev_err(aw_dev->dev, "update reg failed");
return ret;
}
aw_dev->prof_cur = aw_dev->prof_index;
return 0;
}
static int aw88081_dev_start(struct aw88081 *aw88081)
{
struct aw_device *aw_dev = aw88081->aw_pa;
int ret;
if (aw_dev->status == AW88081_DEV_PW_ON) {
dev_dbg(aw_dev->dev, "already power on");
return 0;
}
/* power on */
aw88081_dev_pwd(aw_dev, false);
usleep_range(AW88081_2000_US, AW88081_2000_US + 10);
ret = aw88081_dev_check_syspll(aw_dev);
if (ret) {
dev_err(aw_dev->dev, "pll check failed cannot start");
goto pll_check_fail;
}
/* release amp power-down */
aw88081_dev_amppd(aw_dev, false);
usleep_range(AW88081_1000_US, AW88081_1000_US + 50);
/* check i2s status */
ret = aw88081_dev_check_sysst(aw_dev);
if (ret) {
dev_err(aw_dev->dev, "sysst check failed");
goto sysst_check_fail;
}
/* enable tx feedback */
aw88081_dev_i2s_tx_enable(aw_dev, true);
/* disable ULS hmute */
aw88081_dev_uls_hmute(aw_dev, false);
/* unmute */
aw88081_dev_mute(aw_dev, false);
/* clear interrupt status */
aw88081_dev_clear_int_status(aw_dev);
aw_dev->status = AW88081_DEV_PW_ON;
return 0;
sysst_check_fail:
aw88081_dev_i2s_tx_enable(aw_dev, false);
aw88081_dev_clear_int_status(aw_dev);
aw88081_dev_amppd(aw_dev, true);
pll_check_fail:
aw88081_dev_pwd(aw_dev, true);
aw_dev->status = AW88081_DEV_PW_OFF;
return ret;
}
static int aw88081_dev_stop(struct aw_device *aw_dev)
{
if (aw_dev->status == AW88081_DEV_PW_OFF) {
dev_dbg(aw_dev->dev, "already power off");
return 0;
}
aw_dev->status = AW88081_DEV_PW_OFF;
/* clear interrupt status */
aw88081_dev_clear_int_status(aw_dev);
aw88081_dev_uls_hmute(aw_dev, true);
/* set mute */
aw88081_dev_mute(aw_dev, true);
/* disable tx feedback */
aw88081_dev_i2s_tx_enable(aw_dev, false);
usleep_range(AW88081_1000_US, AW88081_1000_US + 100);
/* enable amppd */
aw88081_dev_amppd(aw_dev, true);
/* set power down */
aw88081_dev_pwd(aw_dev, true);
return 0;
}
static int aw88081_reg_update(struct aw88081 *aw88081, bool force)
{
struct aw_device *aw_dev = aw88081->aw_pa;
int ret;
if (force) {
ret = regmap_write(aw_dev->regmap,
AW88081_ID_REG, AW88081_SOFT_RESET_VALUE);
if (ret)
return ret;
ret = aw88081_dev_fw_update(aw88081);
if (ret)
return ret;
} else {
if (aw_dev->prof_cur != aw_dev->prof_index) {
ret = aw88081_dev_fw_update(aw88081);
if (ret)
return ret;
}
}
aw_dev->prof_cur = aw_dev->prof_index;
return 0;
}
static void aw88081_start_pa(struct aw88081 *aw88081)
{
int ret, i;
for (i = 0; i < AW88081_START_RETRIES; i++) {
ret = aw88081_reg_update(aw88081, aw88081->phase_sync);
if (ret) {
dev_err(aw88081->aw_pa->dev, "fw update failed, cnt:%d\n", i);
continue;
}
ret = aw88081_dev_start(aw88081);
if (ret) {
dev_err(aw88081->aw_pa->dev, "aw88081 device start failed. retry = %d", i);
continue;
} else {
dev_dbg(aw88081->aw_pa->dev, "start success\n");
break;
}
}
}
static void aw88081_startup_work(struct work_struct *work)
{
struct aw88081 *aw88081 =
container_of(work, struct aw88081, start_work.work);
mutex_lock(&aw88081->lock);
aw88081_start_pa(aw88081);
mutex_unlock(&aw88081->lock);
}
static void aw88081_start(struct aw88081 *aw88081, bool sync_start)
{
if (aw88081->aw_pa->fw_status != AW88081_DEV_FW_OK)
return;
if (aw88081->aw_pa->status == AW88081_DEV_PW_ON)
return;
if (sync_start == AW88081_SYNC_START)
aw88081_start_pa(aw88081);
else
queue_delayed_work(system_wq,
&aw88081->start_work,
AW88081_START_WORK_DELAY_MS);
}
static struct snd_soc_dai_driver aw88081_dai[] = {
{
.name = "aw88081-aif",
.id = 1,
.playback = {
.stream_name = "Speaker_Playback",
.channels_min = 1,
.channels_max = 2,
.rates = AW88081_RATES,
.formats = AW88081_FORMATS,
},
.capture = {
.stream_name = "Speaker_Capture",
.channels_min = 1,
.channels_max = 2,
.rates = AW88081_RATES,
.formats = AW88081_FORMATS,
},
},
};
static int aw88081_get_fade_in_time(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(component);
struct aw_device *aw_dev = aw88081->aw_pa;
ucontrol->value.integer.value[0] = aw_dev->fade_in_time;
return 0;
}
static int aw88081_set_fade_in_time(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(component);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct aw_device *aw_dev = aw88081->aw_pa;
int time;
time = ucontrol->value.integer.value[0];
if (time < mc->min || time > mc->max)
return -EINVAL;
if (time != aw_dev->fade_in_time) {
aw_dev->fade_in_time = time;
return 1;
}
return 0;
}
static int aw88081_get_fade_out_time(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(component);
struct aw_device *aw_dev = aw88081->aw_pa;
ucontrol->value.integer.value[0] = aw_dev->fade_out_time;
return 0;
}
static int aw88081_set_fade_out_time(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(component);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct aw_device *aw_dev = aw88081->aw_pa;
int time;
time = ucontrol->value.integer.value[0];
if (time < mc->min || time > mc->max)
return -EINVAL;
if (time != aw_dev->fade_out_time) {
aw_dev->fade_out_time = time;
return 1;
}
return 0;
}
static int aw88081_dev_set_profile_index(struct aw_device *aw_dev, int index)
{
/* check whether the index is valid */
if ((index >= aw_dev->prof_info.count) || (index < 0))
return -EINVAL;
/* check whether the index has changed */
if (aw_dev->prof_index == index)
return -EPERM;
aw_dev->prof_index = index;
return 0;
}
static int aw88081_profile_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(codec);
char *prof_name;
int count, ret;
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
count = aw88081->aw_pa->prof_info.count;
if (count <= 0) {
uinfo->value.enumerated.items = 0;
return 0;
}
uinfo->value.enumerated.items = count;
if (uinfo->value.enumerated.item >= count)
uinfo->value.enumerated.item = count - 1;
count = uinfo->value.enumerated.item;
ret = aw88081_dev_get_prof_name(aw88081->aw_pa, count, &prof_name);
if (ret) {
strscpy(uinfo->value.enumerated.name, "null",
sizeof(uinfo->value.enumerated.name));
return 0;
}
strscpy(uinfo->value.enumerated.name, prof_name, sizeof(uinfo->value.enumerated.name));
return 0;
}
static int aw88081_profile_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(codec);
ucontrol->value.integer.value[0] = aw88081->aw_pa->prof_index;
return 0;
}
static int aw88081_profile_set(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(codec);
int ret;
/* if the PA is stopped or stopping, just set the profile */
mutex_lock(&aw88081->lock);
ret = aw88081_dev_set_profile_index(aw88081->aw_pa, ucontrol->value.integer.value[0]);
if (ret) {
dev_dbg(codec->dev, "profile index does not change");
mutex_unlock(&aw88081->lock);
return 0;
}
if (aw88081->aw_pa->status) {
aw88081_dev_stop(aw88081->aw_pa);
aw88081_start(aw88081, AW88081_SYNC_START);
}
mutex_unlock(&aw88081->lock);
return 1;
}
static int aw88081_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(codec);
struct aw_volume_desc *vol_desc = &aw88081->aw_pa->volume_desc;
ucontrol->value.integer.value[0] = vol_desc->ctl_volume;
return 0;
}
static int aw88081_volume_set(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(codec);
struct aw_volume_desc *vol_desc = &aw88081->aw_pa->volume_desc;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
int value;
value = ucontrol->value.integer.value[0];
if (value < mc->min || value > mc->max)
return -EINVAL;
if (vol_desc->ctl_volume != value) {
vol_desc->ctl_volume = value;
aw88081_dev_set_volume(aw88081->aw_pa, vol_desc->ctl_volume);
return 1;
}
return 0;
}
static int aw88081_get_fade_step(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(codec);
ucontrol->value.integer.value[0] = aw88081->aw_pa->fade_step;
return 0;
}
static int aw88081_set_fade_step(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(codec);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
int value;
value = ucontrol->value.integer.value[0];
if (value < mc->min || value > mc->max)
return -EINVAL;
if (aw88081->aw_pa->fade_step != value) {
aw88081->aw_pa->fade_step = value;
return 1;
}
return 0;
}
static const struct snd_kcontrol_new aw88081_controls[] = {
SOC_SINGLE_EXT("PCM Playback Volume", AW88081_SYSCTRL2_REG,
0, AW88081_MUTE_VOL, 0, aw88081_volume_get,
aw88081_volume_set),
SOC_SINGLE_EXT("Fade Step", 0, 0, AW88081_MUTE_VOL, 0,
aw88081_get_fade_step, aw88081_set_fade_step),
SOC_SINGLE_EXT("Volume Ramp Up Step", 0, 0, FADE_TIME_MAX, 0,
aw88081_get_fade_in_time, aw88081_set_fade_in_time),
SOC_SINGLE_EXT("Volume Ramp Down Step", 0, 0, FADE_TIME_MAX, 0,
aw88081_get_fade_out_time, aw88081_set_fade_out_time),
AW88081_PROFILE_EXT("Profile Set", aw88081_profile_info,
aw88081_profile_get, aw88081_profile_set),
};
static void aw88081_parse_channel_dt(struct aw88081 *aw88081)
{
struct aw_device *aw_dev = aw88081->aw_pa;
struct device_node *np = aw_dev->dev->of_node;
u32 channel_value = AW88081_DEV_DEFAULT_CH;
of_property_read_u32(np, "awinic,audio-channel", &channel_value);
aw88081->phase_sync = of_property_read_bool(np, "awinic,sync-flag");
aw_dev->channel = channel_value;
}
static int aw88081_init(struct aw88081 *aw88081, struct i2c_client *i2c, struct regmap *regmap)
{
struct aw_device *aw_dev;
unsigned int chip_id;
int ret;
/* read chip id */
ret = regmap_read(regmap, AW88081_ID_REG, &chip_id);
if (ret) {
dev_err(&i2c->dev, "%s read chipid error. ret = %d", __func__, ret);
return ret;
}
if (chip_id != AW88081_CHIP_ID) {
dev_err(&i2c->dev, "unsupported device");
return -ENXIO;
}
dev_dbg(&i2c->dev, "chip id = %x\n", chip_id);
aw_dev = devm_kzalloc(&i2c->dev, sizeof(*aw_dev), GFP_KERNEL);
if (!aw_dev)
return -ENOMEM;
aw88081->aw_pa = aw_dev;
aw_dev->i2c = i2c;
aw_dev->regmap = regmap;
aw_dev->dev = &i2c->dev;
aw_dev->chip_id = AW88081_CHIP_ID;
aw_dev->acf = NULL;
aw_dev->prof_info.prof_desc = NULL;
aw_dev->prof_info.prof_type = AW88395_DEV_NONE_TYPE_ID;
aw_dev->fade_step = AW88081_VOLUME_STEP_DB;
aw_dev->volume_desc.mute_volume = AW88081_MUTE_VOL;
aw88081_parse_channel_dt(aw88081);
return 0;
}
static int aw88081_dev_init(struct aw88081 *aw88081, struct aw_container *aw_cfg)
{
struct aw_device *aw_dev = aw88081->aw_pa;
int ret;
ret = aw88395_dev_cfg_load(aw_dev, aw_cfg);
if (ret) {
dev_err(aw_dev->dev, "aw_dev acf parse failed");
return -EINVAL;
}
ret = regmap_write(aw_dev->regmap, AW88081_ID_REG, AW88081_SOFT_RESET_VALUE);
if (ret)
return ret;
aw_dev->fade_in_time = AW88081_500_US;
aw_dev->fade_out_time = AW88081_500_US;
aw_dev->prof_cur = AW88081_INIT_PROFILE;
aw_dev->prof_index = AW88081_INIT_PROFILE;
ret = aw88081_dev_fw_update(aw88081);
if (ret) {
dev_err(aw_dev->dev, "fw update failed ret = %d\n", ret);
return ret;
}
aw88081_dev_clear_int_status(aw_dev);
aw88081_dev_uls_hmute(aw_dev, true);
aw88081_dev_mute(aw_dev, true);
usleep_range(AW88081_5000_US, AW88081_5000_US + 10);
aw88081_dev_i2s_tx_enable(aw_dev, false);
usleep_range(AW88081_1000_US, AW88081_1000_US + 100);
aw88081_dev_amppd(aw_dev, true);
aw88081_dev_pwd(aw_dev, true);
return 0;
}
static int aw88081_request_firmware_file(struct aw88081 *aw88081)
{
const struct firmware *cont = NULL;
int ret;
aw88081->aw_pa->fw_status = AW88081_DEV_FW_FAILED;
ret = request_firmware(&cont, AW88081_ACF_FILE, aw88081->aw_pa->dev);
if (ret)
return ret;
dev_dbg(aw88081->aw_pa->dev, "loaded %s - size: %zu\n",
AW88081_ACF_FILE, cont ? cont->size : 0);
aw88081->aw_cfg = devm_kzalloc(aw88081->aw_pa->dev, cont->size + sizeof(int), GFP_KERNEL);
if (!aw88081->aw_cfg) {
release_firmware(cont);
return -ENOMEM;
}
aw88081->aw_cfg->len = (int)cont->size;
memcpy(aw88081->aw_cfg->data, cont->data, cont->size);
release_firmware(cont);
ret = aw88395_dev_load_acf_check(aw88081->aw_pa, aw88081->aw_cfg);
if (ret)
return ret;
mutex_lock(&aw88081->lock);
ret = aw88081_dev_init(aw88081, aw88081->aw_cfg);
mutex_unlock(&aw88081->lock);
return ret;
}
static int aw88081_playback_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(component);
mutex_lock(&aw88081->lock);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
aw88081_start(aw88081, AW88081_ASYNC_START);
break;
case SND_SOC_DAPM_POST_PMD:
aw88081_dev_stop(aw88081->aw_pa);
break;
default:
break;
}
mutex_unlock(&aw88081->lock);
return 0;
}
static const struct snd_soc_dapm_widget aw88081_dapm_widgets[] = {
/* playback */
SND_SOC_DAPM_AIF_IN_E("AIF_RX", "Speaker_Playback", 0, SND_SOC_NOPM, 0, 0,
aw88081_playback_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_OUTPUT("DAC Output"),
/* capture */
SND_SOC_DAPM_AIF_OUT("AIF_TX", "Speaker_Capture", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_INPUT("ADC Input"),
};
static const struct snd_soc_dapm_route aw88081_audio_map[] = {
{"DAC Output", NULL, "AIF_RX"},
{"AIF_TX", NULL, "ADC Input"},
};
static int aw88081_codec_probe(struct snd_soc_component *component)
{
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(component);
int ret;
INIT_DELAYED_WORK(&aw88081->start_work, aw88081_startup_work);
ret = aw88081_request_firmware_file(aw88081);
if (ret)
dev_err(aw88081->aw_pa->dev, "%s: request firmware failed\n", __func__);
return ret;
}
static void aw88081_codec_remove(struct snd_soc_component *aw_codec)
{
struct aw88081 *aw88081 = snd_soc_component_get_drvdata(aw_codec);
cancel_delayed_work_sync(&aw88081->start_work);
}
static const struct snd_soc_component_driver soc_codec_dev_aw88081 = {
.probe = aw88081_codec_probe,
.remove = aw88081_codec_remove,
.dapm_widgets = aw88081_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(aw88081_dapm_widgets),
.dapm_routes = aw88081_audio_map,
.num_dapm_routes = ARRAY_SIZE(aw88081_audio_map),
.controls = aw88081_controls,
.num_controls = ARRAY_SIZE(aw88081_controls),
};
static int aw88081_i2c_probe(struct i2c_client *i2c)
{
struct aw88081 *aw88081;
int ret;
ret = i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C);
if (!ret)
return dev_err_probe(&i2c->dev, -ENXIO, "check_functionality failed");
aw88081 = devm_kzalloc(&i2c->dev, sizeof(*aw88081), GFP_KERNEL);
if (!aw88081)
return -ENOMEM;
mutex_init(&aw88081->lock);
i2c_set_clientdata(i2c, aw88081);
aw88081->regmap = devm_regmap_init_i2c(i2c, &aw88081_regmap_config);
if (IS_ERR(aw88081->regmap))
return dev_err_probe(&i2c->dev, PTR_ERR(aw88081->regmap),
"failed to init regmap\n");
/* aw pa init */
ret = aw88081_init(aw88081, i2c, aw88081->regmap);
if (ret)
return ret;
return devm_snd_soc_register_component(&i2c->dev,
&soc_codec_dev_aw88081,
aw88081_dai, ARRAY_SIZE(aw88081_dai));
}
static const struct i2c_device_id aw88081_i2c_id[] = {
{ AW88081_I2C_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, aw88081_i2c_id);
static struct i2c_driver aw88081_i2c_driver = {
.driver = {
.name = AW88081_I2C_NAME,
},
.probe = aw88081_i2c_probe,
.id_table = aw88081_i2c_id,
};
module_i2c_driver(aw88081_i2c_driver);
MODULE_DESCRIPTION("ASoC AW88081 Smart PA Driver");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0
/*
* This contains functions for filename crypto management
*
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility
*
* Written by Uday Savagaonkar, 2014.
* Modified by Jaegeuk Kim, 2015.
*
* This has not yet undergone a rigorous security audit.
*/
#include <linux/namei.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
/*
* The minimum message length (input and output length), in bytes, for all
* filenames encryption modes. Filenames shorter than this will be zero-padded
* before being encrypted.
*/
#define FSCRYPT_FNAME_MIN_MSG_LEN 16
/*
* struct fscrypt_nokey_name - identifier for directory entry when key is absent
*
* When userspace lists an encrypted directory without access to the key, the
* filesystem must present a unique "no-key name" for each filename that allows
* it to find the directory entry again if requested. Naively, that would just
* mean using the ciphertext filenames. However, since the ciphertext filenames
* can contain illegal characters ('\0' and '/'), they must be encoded in some
* way. We use base64url. But that can cause names to exceed NAME_MAX (255
* bytes), so we also need to use a strong hash to abbreviate long names.
*
* The filesystem may also need another kind of hash, the "dirhash", to quickly
* find the directory entry. Since filesystems normally compute the dirhash
* over the on-disk filename (i.e. the ciphertext), it's not computable from
* no-key names that abbreviate the ciphertext using the strong hash to fit in
* NAME_MAX. It's also not computable if it's a keyed hash taken over the
* plaintext (but it may still be available in the on-disk directory entry);
* casefolded directories use this type of dirhash. At least in these cases,
* each no-key name must include the name's dirhash too.
*
* To meet all these requirements, we base64url-encode the following
* variable-length structure. It contains the dirhash, or 0's if the filesystem
* didn't provide one; up to 149 bytes of the ciphertext name; and for
* ciphertexts longer than 149 bytes, also the SHA-256 of the remaining bytes.
*
* This ensures that each no-key name contains everything needed to find the
* directory entry again, contains only legal characters, doesn't exceed
* NAME_MAX, is unambiguous unless there's a SHA-256 collision, and that we only
* take the performance hit of SHA-256 on very long filenames (which are rare).
*/
struct fscrypt_nokey_name {
u32 dirhash[2];
u8 bytes[149];
u8 sha256[SHA256_DIGEST_SIZE];
}; /* 189 bytes => 252 bytes base64url-encoded, which is <= NAME_MAX (255) */
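/*
 * Worked size check (for illustration): the fields above total
 * 8 + 149 + 32 = 189 bytes, and FSCRYPT_BASE64URL_CHARS(189) =
 * DIV_ROUND_UP(189 * 4, 3) = 252, which indeed stays under NAME_MAX (255).
 */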
/*
* Decoded size of max-size no-key name, i.e. a name that was abbreviated using
* the strong hash and thus includes the 'sha256' field. This isn't simply
* sizeof(struct fscrypt_nokey_name), as the padding at the end isn't included.
*/
#define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256)
/* Encoded size of max-size no-key name */
#define FSCRYPT_NOKEY_NAME_MAX_ENCODED \
FSCRYPT_BASE64URL_CHARS(FSCRYPT_NOKEY_NAME_MAX)
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
return is_dot_dotdot(str->name, str->len);
}
/**
* fscrypt_fname_encrypt() - encrypt a filename
* @inode: inode of the parent directory (for regular filenames)
* or of the symlink (for symlink targets). Key must already be
* set up.
* @iname: the filename to encrypt
* @out: (output) the encrypted filename
* @olen: size of the encrypted filename. It must be at least @iname->len.
* Any extra space is filled with NUL padding before encryption.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
u8 *out, unsigned int olen)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
const struct fscrypt_inode_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
union fscrypt_iv iv;
struct scatterlist sg;
int res;
/*
* Copy the filename to the output buffer for encrypting in-place and
* pad it with the needed number of NUL bytes.
*/
if (WARN_ON_ONCE(olen < iname->len))
return -ENOBUFS;
memcpy(out, iname->name, iname->len);
memset(out + iname->len, 0, olen - iname->len);
/* Initialize the IV */
fscrypt_generate_iv(&iv, 0, ci);
/* Set up the encryption request */
req = skcipher_request_alloc(tfm, GFP_NOFS);
if (!req)
return -ENOMEM;
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &wait);
sg_init_one(&sg, out, olen);
skcipher_request_set_crypt(req, &sg, &sg, olen, &iv);
/* Do the encryption */
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
fscrypt_err(inode, "Filename encryption failed: %d", res);
return res;
}
return 0;
}
EXPORT_SYMBOL_GPL(fscrypt_fname_encrypt);
/**
* fname_decrypt() - decrypt a filename
* @inode: inode of the parent directory (for regular filenames)
* or of the symlink (for symlink targets)
* @iname: the encrypted filename to decrypt
* @oname: (output) the decrypted filename. The caller must have allocated
* enough space for this, e.g. using fscrypt_fname_alloc_buffer().
*
* Return: 0 on success, -errno on failure
*/
static int fname_decrypt(const struct inode *inode,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
const struct fscrypt_inode_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
union fscrypt_iv iv;
int res;
/* Allocate request */
req = skcipher_request_alloc(tfm, GFP_NOFS);
if (!req)
return -ENOMEM;
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &wait);
/* Initialize IV */
fscrypt_generate_iv(&iv, 0, ci);
/* Create decryption request */
sg_init_one(&src_sg, iname->name, iname->len);
sg_init_one(&dst_sg, oname->name, oname->len);
skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv);
res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
fscrypt_err(inode, "Filename decryption failed: %d", res);
return res;
}
oname->len = strnlen(oname->name, iname->len);
return 0;
}
static const char base64url_table[65] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
#define FSCRYPT_BASE64URL_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
/**
* fscrypt_base64url_encode() - base64url-encode some binary data
* @src: the binary data to encode
* @srclen: the length of @src in bytes
* @dst: (output) the base64url-encoded string. Not NUL-terminated.
*
* Encodes data using base64url encoding, i.e. the "Base 64 Encoding with URL
* and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't used,
* as it's unneeded and not required by the RFC. base64url is used instead of
* base64 to avoid the '/' character, which isn't allowed in filenames.
*
* Return: the length of the resulting base64url-encoded string in bytes.
* This will be equal to FSCRYPT_BASE64URL_CHARS(srclen).
*/
static int fscrypt_base64url_encode(const u8 *src, int srclen, char *dst)
{
u32 ac = 0;
int bits = 0;
int i;
char *cp = dst;
for (i = 0; i < srclen; i++) {
ac = (ac << 8) | src[i];
bits += 8;
do {
bits -= 6;
*cp++ = base64url_table[(ac >> bits) & 0x3f];
} while (bits >= 6);
}
if (bits)
*cp++ = base64url_table[(ac << (6 - bits)) & 0x3f];
return cp - dst;
}
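/*
 * Minimal round-trip sketch (illustrative; not used by this file): encoding
 * 4 bytes yields FSCRYPT_BASE64URL_CHARS(4) = DIV_ROUND_UP(16, 3) = 6
 * characters, which fscrypt_base64url_decode() below inverts:
 *
 *	const u8 src[4] = { 0xde, 0xad, 0xbe, 0xef };
 *	char enc[FSCRYPT_BASE64URL_CHARS(sizeof(src))];
 *	u8 dec[sizeof(src)];
 *	int enc_len = fscrypt_base64url_encode(src, sizeof(src), enc);
 *	int dec_len = fscrypt_base64url_decode(enc, enc_len, dec);
 *
 * Here enc_len == 6, dec_len == 4, and dec matches src.
 */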
/**
* fscrypt_base64url_decode() - base64url-decode a string
* @src: the string to decode. Doesn't need to be NUL-terminated.
* @srclen: the length of @src in bytes
* @dst: (output) the decoded binary data
*
* Decodes a string using base64url encoding, i.e. the "Base 64 Encoding with
* URL and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't
* accepted, nor are non-encoding characters such as whitespace.
*
* This implementation hasn't been optimized for performance.
*
* Return: the length of the resulting decoded binary data in bytes,
* or -1 if the string isn't a valid base64url string.
*/
static int fscrypt_base64url_decode(const char *src, int srclen, u8 *dst)
{
u32 ac = 0;
int bits = 0;
int i;
u8 *bp = dst;
for (i = 0; i < srclen; i++) {
const char *p = strchr(base64url_table, src[i]);
if (p == NULL || src[i] == 0)
return -1;
ac = (ac << 6) | (p - base64url_table);
bits += 6;
if (bits >= 8) {
bits -= 8;
*bp++ = (u8)(ac >> bits);
}
}
if (ac & ((1 << bits) - 1))
return -1;
return bp - dst;
}
bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
u32 orig_len, u32 max_len,
u32 *encrypted_len_ret)
{
int padding = 4 << (fscrypt_policy_flags(policy) &
FSCRYPT_POLICY_FLAGS_PAD_MASK);
u32 encrypted_len;
if (orig_len > max_len)
return false;
encrypted_len = max_t(u32, orig_len, FSCRYPT_FNAME_MIN_MSG_LEN);
encrypted_len = round_up(encrypted_len, padding);
*encrypted_len_ret = min(encrypted_len, max_len);
return true;
}
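/*
 * Worked example (for illustration): with the maximum padding flag (3) the
 * padding is 4 << 3 = 32, so a 5-byte name is first raised to the 16-byte
 * minimum and then rounded up: *encrypted_len_ret = round_up(16, 32) = 32,
 * capped at @max_len.
 */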
/**
* fscrypt_fname_encrypted_size() - calculate length of encrypted filename
* @inode: parent inode of dentry name being encrypted. Key must
* already be set up.
* @orig_len: length of the original filename
* @max_len: maximum length to return
* @encrypted_len_ret: where calculated length should be returned (on success)
*
* Filenames that are shorter than the maximum length may have their lengths
* increased slightly by encryption, due to padding that is applied.
*
* Return: false if the orig_len is greater than max_len. Otherwise, true and
* fill out encrypted_len_ret with the length (up to max_len).
*/
bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
u32 max_len, u32 *encrypted_len_ret)
{
return __fscrypt_fname_encrypted_size(&inode->i_crypt_info->ci_policy,
orig_len, max_len,
encrypted_len_ret);
}
EXPORT_SYMBOL_GPL(fscrypt_fname_encrypted_size);
/**
* fscrypt_fname_alloc_buffer() - allocate a buffer for presented filenames
* @max_encrypted_len: maximum length of encrypted filenames the buffer will be
* used to present
* @crypto_str: (output) buffer to allocate
*
* Allocate a buffer that is large enough to hold any decrypted or encoded
* filename (null-terminated), for the given maximum encrypted filename length.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
u32 max_presented_len = max_t(u32, FSCRYPT_NOKEY_NAME_MAX_ENCODED,
max_encrypted_len);
crypto_str->name = kmalloc(max_presented_len + 1, GFP_NOFS);
if (!crypto_str->name)
return -ENOMEM;
crypto_str->len = max_presented_len;
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);
/**
* fscrypt_fname_free_buffer() - free a buffer for presented filenames
* @crypto_str: the buffer to free
*
* Free a buffer that was allocated by fscrypt_fname_alloc_buffer().
*/
void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
{
if (!crypto_str)
return;
kfree(crypto_str->name);
crypto_str->name = NULL;
}
EXPORT_SYMBOL(fscrypt_fname_free_buffer);
/**
* fscrypt_fname_disk_to_usr() - convert an encrypted filename to
* user-presentable form
* @inode: inode of the parent directory (for regular filenames)
* or of the symlink (for symlink targets)
* @hash: first part of the name's dirhash, if applicable. This only needs to
* be provided if the filename is located in an indexed directory whose
* encryption key may be unavailable. Not needed for symlink targets.
* @minor_hash: second part of the name's dirhash, if applicable
* @iname: encrypted filename to convert. May also be "." or "..", which
* aren't actually encrypted.
* @oname: output buffer for the user-presentable filename. The caller must
* have allocated enough space for this, e.g. using
* fscrypt_fname_alloc_buffer().
*
* If the key is available, we'll decrypt the disk name. Otherwise, we'll
* encode it for presentation in fscrypt_nokey_name format.
* See struct fscrypt_nokey_name for details.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_fname_disk_to_usr(const struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
{
const struct qstr qname = FSTR_TO_QSTR(iname);
struct fscrypt_nokey_name nokey_name;
u32 size; /* size of the unencoded no-key name */
if (fscrypt_is_dot_dotdot(&qname)) {
oname->name[0] = '.';
oname->name[iname->len - 1] = '.';
oname->len = iname->len;
return 0;
}
if (iname->len < FSCRYPT_FNAME_MIN_MSG_LEN)
return -EUCLEAN;
if (fscrypt_has_encryption_key(inode))
return fname_decrypt(inode, iname, oname);
/*
* Sanity check that struct fscrypt_nokey_name doesn't have padding
* between fields and that its encoded size never exceeds NAME_MAX.
*/
BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, dirhash) !=
offsetof(struct fscrypt_nokey_name, bytes));
BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, bytes) !=
offsetof(struct fscrypt_nokey_name, sha256));
BUILD_BUG_ON(FSCRYPT_NOKEY_NAME_MAX_ENCODED > NAME_MAX);
nokey_name.dirhash[0] = hash;
nokey_name.dirhash[1] = minor_hash;
if (iname->len <= sizeof(nokey_name.bytes)) {
memcpy(nokey_name.bytes, iname->name, iname->len);
size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]);
} else {
memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes));
/* Compute strong hash of remaining part of name. */
sha256(&iname->name[sizeof(nokey_name.bytes)],
iname->len - sizeof(nokey_name.bytes),
nokey_name.sha256);
size = FSCRYPT_NOKEY_NAME_MAX;
}
oname->len = fscrypt_base64url_encode((const u8 *)&nokey_name, size,
oname->name);
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
/**
* fscrypt_setup_filename() - prepare to search a possibly encrypted directory
* @dir: the directory that will be searched
* @iname: the user-provided filename being searched for
* @lookup: 1 if we're allowed to proceed without the key because it's
* ->lookup() or we're finding the dir_entry for deletion; 0 if we cannot
* proceed without the key because we're going to create the dir_entry.
* @fname: the filename information to be filled in
*
* Given a user-provided filename @iname, this function sets @fname->disk_name
* to the name that would be stored in the on-disk directory entry, if possible.
* If the directory is unencrypted this is simply @iname. Else, if we have the
* directory's encryption key, then @iname is the plaintext, so we encrypt it to
* get the disk_name.
*
* Else, for keyless @lookup operations, @iname should be a no-key name, so we
* decode it to get the struct fscrypt_nokey_name. Non-@lookup operations will
* be impossible in this case, so we fail them with ENOKEY.
*
* If successful, fscrypt_free_filename() must be called later to clean up.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
int lookup, struct fscrypt_name *fname)
{
struct fscrypt_nokey_name *nokey_name;
int ret;
memset(fname, 0, sizeof(struct fscrypt_name));
fname->usr_fname = iname;
if (!IS_ENCRYPTED(dir) || fscrypt_is_dot_dotdot(iname)) {
fname->disk_name.name = (unsigned char *)iname->name;
fname->disk_name.len = iname->len;
return 0;
}
ret = fscrypt_get_encryption_info(dir, lookup);
if (ret)
return ret;
if (fscrypt_has_encryption_key(dir)) {
if (!fscrypt_fname_encrypted_size(dir, iname->len, NAME_MAX,
&fname->crypto_buf.len))
return -ENAMETOOLONG;
fname->crypto_buf.name = kmalloc(fname->crypto_buf.len,
GFP_NOFS);
if (!fname->crypto_buf.name)
return -ENOMEM;
ret = fscrypt_fname_encrypt(dir, iname, fname->crypto_buf.name,
fname->crypto_buf.len);
if (ret)
goto errout;
fname->disk_name.name = fname->crypto_buf.name;
fname->disk_name.len = fname->crypto_buf.len;
return 0;
}
if (!lookup)
return -ENOKEY;
fname->is_nokey_name = true;
/*
* We don't have the key and we are doing a lookup; decode the
* user-supplied name
*/
if (iname->len > FSCRYPT_NOKEY_NAME_MAX_ENCODED)
return -ENOENT;
fname->crypto_buf.name = kmalloc(FSCRYPT_NOKEY_NAME_MAX, GFP_KERNEL);
if (fname->crypto_buf.name == NULL)
return -ENOMEM;
ret = fscrypt_base64url_decode(iname->name, iname->len,
fname->crypto_buf.name);
if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) ||
(ret > offsetof(struct fscrypt_nokey_name, sha256) &&
ret != FSCRYPT_NOKEY_NAME_MAX)) {
ret = -ENOENT;
goto errout;
}
fname->crypto_buf.len = ret;
nokey_name = (void *)fname->crypto_buf.name;
fname->hash = nokey_name->dirhash[0];
fname->minor_hash = nokey_name->dirhash[1];
if (ret != FSCRYPT_NOKEY_NAME_MAX) {
/* The full ciphertext filename is available. */
fname->disk_name.name = nokey_name->bytes;
fname->disk_name.len =
ret - offsetof(struct fscrypt_nokey_name, bytes);
}
return 0;
errout:
kfree(fname->crypto_buf.name);
return ret;
}
EXPORT_SYMBOL(fscrypt_setup_filename);
/**
* fscrypt_match_name() - test whether the given name matches a directory entry
* @fname: the name being searched for
* @de_name: the name from the directory entry
* @de_name_len: the length of @de_name in bytes
*
* Normally @fname->disk_name will be set, and in that case we simply compare
* that to the name stored in the directory entry. The only exception is that
* if we don't have the key for an encrypted directory and the name we're
* looking for is very long, then we won't have the full disk_name and instead
* we'll need to match against a fscrypt_nokey_name that includes a strong hash.
*
* Return: %true if the name matches, otherwise %false.
*/
bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len)
{
const struct fscrypt_nokey_name *nokey_name =
(const void *)fname->crypto_buf.name;
u8 digest[SHA256_DIGEST_SIZE];
if (likely(fname->disk_name.name)) {
if (de_name_len != fname->disk_name.len)
return false;
return !memcmp(de_name, fname->disk_name.name, de_name_len);
}
if (de_name_len <= sizeof(nokey_name->bytes))
return false;
if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes)))
return false;
sha256(&de_name[sizeof(nokey_name->bytes)],
de_name_len - sizeof(nokey_name->bytes), digest);
return !memcmp(digest, nokey_name->sha256, sizeof(digest));
}
EXPORT_SYMBOL_GPL(fscrypt_match_name);
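/*
 * Editor's note on the no-key case above: the base64url-decoded name is a
 * struct fscrypt_nokey_name. When the ciphertext name was too long to be
 * encoded in full, only the leading sizeof(nokey_name->bytes) bytes of
 * ciphertext are stored verbatim and the remainder is represented by its
 * SHA-256, so a match requires both the verbatim prefix and the hash of
 * the tail to agree.
 */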
/**
* fscrypt_fname_siphash() - calculate the SipHash of a filename
* @dir: the parent directory
* @name: the filename to calculate the SipHash of
*
* Given a plaintext filename @name and a directory @dir which uses SipHash as
* its dirhash method and has had its fscrypt key set up, this function
* calculates the SipHash of that name using the directory's secret dirhash key.
*
* Return: the SipHash of @name using the hash key of @dir
*/
u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name)
{
const struct fscrypt_inode_info *ci = dir->i_crypt_info;
WARN_ON_ONCE(!ci->ci_dirhash_key_initialized);
return siphash(name->name, name->len, &ci->ci_dirhash_key);
}
EXPORT_SYMBOL_GPL(fscrypt_fname_siphash);
/*
* Validate dentries in encrypted directories to make sure we aren't potentially
* caching stale dentries after a key has been added.
*/
int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *dir;
int err;
int valid;
/*
* Plaintext names are always valid, since fscrypt doesn't support
* reverting to no-key names without evicting the directory's inode
* -- which implies eviction of the dentries in the directory.
*/
if (!(dentry->d_flags & DCACHE_NOKEY_NAME))
return 1;
/*
* No-key name; valid if the directory's key is still unavailable.
*
* Although fscrypt forbids rename() on no-key names, we still must use
* dget_parent() here rather than use ->d_parent directly. That's
* because a corrupted fs image may contain directory hard links, which
* the VFS handles by moving the directory's dentry tree in the dcache
* each time ->lookup() finds the directory and it already has a dentry
* elsewhere. Thus ->d_parent can be changing, and we must safely grab
* a reference to some ->d_parent to prevent it from being freed.
*/
if (flags & LOOKUP_RCU)
return -ECHILD;
dir = dget_parent(dentry);
/*
* Pass allow_unsupported=true, so that files with an unsupported
* encryption policy can be deleted.
*/
err = fscrypt_get_encryption_info(d_inode(dir), true);
valid = !fscrypt_has_encryption_key(d_inode(dir));
dput(dir);
if (err < 0)
return err;
return valid;
}
EXPORT_SYMBOL_GPL(fscrypt_d_revalidate);
|
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright 2017-2022 Toradex
*/
/ {
chosen {
stdout-path = "serial0:115200n8";
};
/* fixed crystal dedicated to mcp2515 */
clk16m: clk16m {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <16000000>;
};
reg_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "3.3V";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
reg_5v0: regulator-5v0 {
compatible = "regulator-fixed";
regulator-name = "5V";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
reg_usbh_vbus: regulator-usbh-vbus {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbh_reg>;
regulator-name = "VCC_USB[1-4]";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio1 2 GPIO_ACTIVE_LOW>;
		vin-supply = <&reg_5v0>;
};
};
&adc1 {
status = "okay";
};
&ecspi1 {
status = "okay";
mcp2515: can@0 {
compatible = "microchip,mcp2515";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_can_int>;
reg = <0>;
clocks = <&clk16m>;
interrupt-parent = <&gpio2>;
interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
spi-max-frequency = <10000000>;
		vdd-supply = <&reg_3v3>;
		xceiver-supply = <&reg_5v0>;
status = "okay";
};
};
&i2c1 {
status = "okay";
/* M41T0M6 real time clock on carrier board */
m41t0m6: rtc@68 {
compatible = "st,m41t0";
reg = <0x68>;
};
};
/* PWM <A> */
&pwm4 {
status = "okay";
};
/* PWM <B> */
&pwm5 {
status = "okay";
};
/* PWM <C> */
&pwm6 {
status = "okay";
};
/* PWM <D> */
&pwm7 {
status = "okay";
};
&uart1 {
status = "okay";
};
&uart2 {
status = "okay";
};
&uart5 {
status = "okay";
};
&usbotg1 {
disable-over-current;
	vbus-supply = <&reg_usbh_vbus>;
status = "okay";
};
&usbotg2 {
disable-over-current;
	vbus-supply = <&reg_usbh_vbus>;
status = "okay";
};
&usdhc1 {
	vmmc-supply = <&reg_3v3>;
status = "okay";
};
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Hynitron cstxxx Touchscreen
*
* Copyright (c) 2022 Chris Morgan <[email protected]>
*
* This code is based on hynitron_core.c authored by Hynitron.
 * Note that no datasheet was available, so many of these registers
 * are undocumented. This is essentially a cleaned-up version of the
 * vendor driver with support removed for hardware I cannot test and
 * device-specific functions replaced with generic functions wherever
 * possible.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/unaligned.h>
/* Per chip data */
struct hynitron_ts_chip_data {
unsigned int max_touch_num;
u32 ic_chkcode;
int (*firmware_info)(struct i2c_client *client);
int (*bootloader_enter)(struct i2c_client *client);
int (*init_input)(struct i2c_client *client);
void (*report_touch)(struct i2c_client *client);
};
/* Data generic to all (supported and non-supported) controllers. */
struct hynitron_ts_data {
const struct hynitron_ts_chip_data *chip;
struct i2c_client *client;
struct input_dev *input_dev;
struct touchscreen_properties prop;
struct gpio_desc *reset_gpio;
};
/*
* Since I have no datasheet, these values are guessed and/or assumed
* based on observation and testing.
*/
#define CST3XX_FIRMWARE_INFO_START_CMD 0x01d1
#define CST3XX_FIRMWARE_INFO_END_CMD 0x09d1
#define CST3XX_FIRMWARE_CHK_CODE_REG 0xfcd1
#define CST3XX_FIRMWARE_VERSION_REG 0x08d2
#define CST3XX_FIRMWARE_VER_INVALID_VAL 0xa5a5a5a5
#define CST3XX_BOOTLDR_PROG_CMD 0xaa01a0
#define CST3XX_BOOTLDR_PROG_CHK_REG 0x02a0
#define CST3XX_BOOTLDR_CHK_VAL 0xac
#define CST3XX_TOUCH_DATA_PART_REG 0x00d0
#define CST3XX_TOUCH_DATA_FULL_REG 0x07d0
#define CST3XX_TOUCH_DATA_CHK_VAL 0xab
#define CST3XX_TOUCH_DATA_TOUCH_VAL 0x03
#define CST3XX_TOUCH_DATA_STOP_CMD 0xab00d0
#define CST3XX_TOUCH_COUNT_MASK GENMASK(6, 0)
/*
 * The vendor driver hard-codes a 20 ms reset pulse that is not IC
 * dependent; only the post-reset delay varies per caller.
*/
static void hyn_reset_proc(struct i2c_client *client, int delay)
{
struct hynitron_ts_data *ts_data = i2c_get_clientdata(client);
gpiod_set_value_cansleep(ts_data->reset_gpio, 1);
msleep(20);
gpiod_set_value_cansleep(ts_data->reset_gpio, 0);
if (delay)
fsleep(delay * 1000);
}
static irqreturn_t hyn_interrupt_handler(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct hynitron_ts_data *ts_data = i2c_get_clientdata(client);
ts_data->chip->report_touch(client);
return IRQ_HANDLED;
}
/*
* The vendor driver would retry twice before failing to read or write
* to the i2c device.
*/
static int cst3xx_i2c_write(struct i2c_client *client,
unsigned char *buf, int len)
{
int ret;
int retries = 0;
while (retries < 2) {
ret = i2c_master_send(client, buf, len);
if (ret == len)
return 0;
if (ret <= 0)
retries++;
else
break;
}
return ret < 0 ? ret : -EIO;
}
static int cst3xx_i2c_read_register(struct i2c_client *client, u16 reg,
u8 *val, u16 len)
{
__le16 buf = cpu_to_le16(reg);
struct i2c_msg msgs[] = {
{
.addr = client->addr,
.flags = 0,
.len = 2,
.buf = (u8 *)&buf,
},
{
.addr = client->addr,
.flags = I2C_M_RD,
.len = len,
.buf = val,
}
};
int err;
int ret;
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (ret == ARRAY_SIZE(msgs))
return 0;
err = ret < 0 ? ret : -EIO;
dev_err(&client->dev, "Error reading %d bytes from 0x%04x: %d (%d)\n",
len, reg, err, ret);
return err;
}
static int cst3xx_firmware_info(struct i2c_client *client)
{
struct hynitron_ts_data *ts_data = i2c_get_clientdata(client);
int err;
u32 tmp;
unsigned char buf[4];
/*
	 * Tests suggest this command is needed before reading the firmware regs.
*/
put_unaligned_le16(CST3XX_FIRMWARE_INFO_START_CMD, buf);
err = cst3xx_i2c_write(client, buf, 2);
if (err)
return err;
usleep_range(10000, 11000);
/*
* Read register for check-code to determine if device detected
* correctly.
*/
err = cst3xx_i2c_read_register(client, CST3XX_FIRMWARE_CHK_CODE_REG,
buf, 4);
if (err)
return err;
tmp = get_unaligned_le32(buf);
if ((tmp & 0xffff0000) != ts_data->chip->ic_chkcode) {
dev_err(&client->dev, "%s ic mismatch, chkcode is %u\n",
__func__, tmp);
return -ENODEV;
}
usleep_range(10000, 11000);
/* Read firmware version and test if firmware missing. */
err = cst3xx_i2c_read_register(client, CST3XX_FIRMWARE_VERSION_REG,
buf, 4);
if (err)
return err;
tmp = get_unaligned_le32(buf);
if (tmp == CST3XX_FIRMWARE_VER_INVALID_VAL) {
dev_err(&client->dev, "Device firmware missing\n");
return -ENODEV;
}
/*
	 * Tests suggest this command is required to stop reading the firmware regs.
*/
put_unaligned_le16(CST3XX_FIRMWARE_INFO_END_CMD, buf);
err = cst3xx_i2c_write(client, buf, 2);
if (err)
return err;
usleep_range(5000, 6000);
return 0;
}
static int cst3xx_bootloader_enter(struct i2c_client *client)
{
int err;
u8 retry;
u32 tmp = 0;
unsigned char buf[3];
for (retry = 0; retry < 5; retry++) {
hyn_reset_proc(client, (7 + retry));
/* set cmd to enter program mode */
put_unaligned_le24(CST3XX_BOOTLDR_PROG_CMD, buf);
err = cst3xx_i2c_write(client, buf, 3);
if (err)
continue;
usleep_range(2000, 2500);
/* check whether in program mode */
err = cst3xx_i2c_read_register(client,
CST3XX_BOOTLDR_PROG_CHK_REG,
buf, 1);
if (err)
continue;
tmp = get_unaligned(buf);
if (tmp == CST3XX_BOOTLDR_CHK_VAL)
break;
}
if (tmp != CST3XX_BOOTLDR_CHK_VAL) {
dev_err(&client->dev, "%s unable to enter bootloader mode\n",
__func__);
return -ENODEV;
}
hyn_reset_proc(client, 40);
return 0;
}
static void cst3xx_report_contact(struct hynitron_ts_data *ts_data,
u8 id, unsigned int x, unsigned int y, u8 w)
{
input_mt_slot(ts_data->input_dev, id);
input_mt_report_slot_state(ts_data->input_dev, MT_TOOL_FINGER, 1);
touchscreen_report_pos(ts_data->input_dev, &ts_data->prop, x, y, true);
input_report_abs(ts_data->input_dev, ABS_MT_TOUCH_MAJOR, w);
}
static int cst3xx_finish_touch_read(struct i2c_client *client)
{
unsigned char buf[3];
int err;
put_unaligned_le24(CST3XX_TOUCH_DATA_STOP_CMD, buf);
err = cst3xx_i2c_write(client, buf, 3);
if (err) {
dev_err(&client->dev,
"send read touch info ending failed: %d\n", err);
return err;
}
return 0;
}
/*
 * Handle events from the IRQ. Note that for the cst3xx the IRQ appears
 * to fire continuously while the panel is touched, and otherwise about
 * once every 1500ms when not touched (presumably the touchscreen waking
 * up periodically). Note the buffer is sized for 5 fingers; if more are
 * needed the buffer must be increased. The buffer contains 5 bytes for
 * each touch point, a touch count byte, a check byte, and then a second
 * check byte after all other touch points.
 *
 * For example, 1 touch would look like this:
 * touch1[5]:touch_count[1]:chk_byte[1]
 *
 * 3 touches would look like this:
 * touch1[5]:touch_count[1]:chk_byte[1]:touch2[5]:touch3[5]:chk_byte[1]
 */
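/*
 * Editor's worked example for the layout above: for N >= 2 touches the
 * trailing check byte lands at offset N * 5 + 2 (5 bytes per point, plus
 * the count and first check bytes following point 1), e.g. offset 17 for
 * 3 touches -- which is what the end_byte computation below verifies.
 */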
static void cst3xx_touch_report(struct i2c_client *client)
{
struct hynitron_ts_data *ts_data = i2c_get_clientdata(client);
u8 buf[28];
u8 finger_id, sw, w;
unsigned int x, y;
unsigned int touch_cnt, end_byte;
unsigned int idx = 0;
unsigned int i;
int err;
/* Read and validate the first bits of input data. */
err = cst3xx_i2c_read_register(client, CST3XX_TOUCH_DATA_PART_REG,
buf, 28);
if (err ||
buf[6] != CST3XX_TOUCH_DATA_CHK_VAL ||
buf[0] == CST3XX_TOUCH_DATA_CHK_VAL) {
dev_err(&client->dev, "cst3xx touch read failure\n");
return;
}
/* Report to the device we're done reading the touch data. */
err = cst3xx_finish_touch_read(client);
if (err)
return;
touch_cnt = buf[5] & CST3XX_TOUCH_COUNT_MASK;
/*
* Check the check bit of the last touch slot. The check bit is
* always present after touch point 1 for valid data, and then
* appears as the last byte after all other touch data.
*/
if (touch_cnt > 1) {
end_byte = touch_cnt * 5 + 2;
if (buf[end_byte] != CST3XX_TOUCH_DATA_CHK_VAL) {
dev_err(&client->dev, "cst3xx touch read failure\n");
return;
}
}
/* Parse through the buffer to capture touch data. */
for (i = 0; i < touch_cnt; i++) {
x = ((buf[idx + 1] << 4) | ((buf[idx + 3] >> 4) & 0x0f));
y = ((buf[idx + 2] << 4) | (buf[idx + 3] & 0x0f));
w = (buf[idx + 4] >> 3);
sw = (buf[idx] & 0x0f) >> 1;
finger_id = (buf[idx] >> 4) & 0x0f;
/* Sanity check we don't have more fingers than we expect */
if (ts_data->chip->max_touch_num < finger_id) {
dev_err(&client->dev, "cst3xx touch read failure\n");
break;
}
/* sw value of 0 means no touch, 0x03 means touch */
if (sw == CST3XX_TOUCH_DATA_TOUCH_VAL)
cst3xx_report_contact(ts_data, finger_id, x, y, w);
idx += 5;
/* Skip the 2 bytes between point 1 and point 2 */
if (i == 0)
idx += 2;
}
input_mt_sync_frame(ts_data->input_dev);
input_sync(ts_data->input_dev);
}
static int cst3xx_input_dev_int(struct i2c_client *client)
{
struct hynitron_ts_data *ts_data = i2c_get_clientdata(client);
int err;
ts_data->input_dev = devm_input_allocate_device(&client->dev);
if (!ts_data->input_dev) {
dev_err(&client->dev, "Failed to allocate input device\n");
return -ENOMEM;
}
ts_data->input_dev->name = "Hynitron cst3xx Touchscreen";
ts_data->input_dev->phys = "input/ts";
ts_data->input_dev->id.bustype = BUS_I2C;
input_set_drvdata(ts_data->input_dev, ts_data);
input_set_capability(ts_data->input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(ts_data->input_dev, EV_ABS, ABS_MT_POSITION_Y);
input_set_abs_params(ts_data->input_dev, ABS_MT_TOUCH_MAJOR,
0, 255, 0, 0);
touchscreen_parse_properties(ts_data->input_dev, true, &ts_data->prop);
if (!ts_data->prop.max_x || !ts_data->prop.max_y) {
dev_err(&client->dev,
"Invalid x/y (%d, %d), using defaults\n",
ts_data->prop.max_x, ts_data->prop.max_y);
ts_data->prop.max_x = 1152;
ts_data->prop.max_y = 1920;
input_abs_set_max(ts_data->input_dev,
ABS_MT_POSITION_X, ts_data->prop.max_x);
input_abs_set_max(ts_data->input_dev,
ABS_MT_POSITION_Y, ts_data->prop.max_y);
}
err = input_mt_init_slots(ts_data->input_dev,
ts_data->chip->max_touch_num,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (err) {
dev_err(&client->dev,
"Failed to initialize input slots: %d\n", err);
return err;
}
err = input_register_device(ts_data->input_dev);
if (err) {
dev_err(&client->dev,
"Input device registration failed: %d\n", err);
return err;
}
return 0;
}
static int hyn_probe(struct i2c_client *client)
{
struct hynitron_ts_data *ts_data;
int err;
ts_data = devm_kzalloc(&client->dev, sizeof(*ts_data), GFP_KERNEL);
if (!ts_data)
return -ENOMEM;
ts_data->client = client;
i2c_set_clientdata(client, ts_data);
ts_data->chip = device_get_match_data(&client->dev);
if (!ts_data->chip)
return -EINVAL;
ts_data->reset_gpio = devm_gpiod_get(&client->dev,
"reset", GPIOD_OUT_LOW);
err = PTR_ERR_OR_ZERO(ts_data->reset_gpio);
if (err) {
dev_err(&client->dev, "request reset gpio failed: %d\n", err);
return err;
}
hyn_reset_proc(client, 60);
err = ts_data->chip->bootloader_enter(client);
if (err < 0)
return err;
err = ts_data->chip->init_input(client);
if (err < 0)
return err;
err = ts_data->chip->firmware_info(client);
if (err < 0)
return err;
err = devm_request_threaded_irq(&client->dev, client->irq,
NULL, hyn_interrupt_handler,
IRQF_ONESHOT,
"Hynitron Touch Int", client);
if (err) {
dev_err(&client->dev, "failed to request IRQ: %d\n", err);
return err;
}
return 0;
}
static const struct hynitron_ts_chip_data cst3xx_data = {
.max_touch_num = 5,
.ic_chkcode = 0xcaca0000,
.firmware_info = &cst3xx_firmware_info,
.bootloader_enter = &cst3xx_bootloader_enter,
.init_input = &cst3xx_input_dev_int,
.report_touch = &cst3xx_touch_report,
};
static const struct i2c_device_id hyn_tpd_id[] = {
{ .name = "hynitron_ts" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(i2c, hyn_tpd_id);
static const struct of_device_id hyn_dt_match[] = {
{ .compatible = "hynitron,cst340", .data = &cst3xx_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, hyn_dt_match);
static struct i2c_driver hynitron_i2c_driver = {
.driver = {
.name = "Hynitron-TS",
.of_match_table = hyn_dt_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.id_table = hyn_tpd_id,
.probe = hyn_probe,
};
module_i2c_driver(hynitron_i2c_driver);
MODULE_AUTHOR("Chris Morgan");
MODULE_DESCRIPTION("Hynitron Touchscreen Driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Maxim Integrated DS1803 and similar digital potentiometer driver
* Copyright (c) 2016 Slawomir Stepien
* Copyright (c) 2022 Jagath Jog J
*
* Datasheet: https://datasheets.maximintegrated.com/en/ds/DS1803.pdf
* Datasheet: https://datasheets.maximintegrated.com/en/ds/DS3502.pdf
*
* DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
* ds1803 2 256 10, 50, 100 0101xxx
* ds3502 1 128 10 01010xx
*/
#include <linux/err.h>
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#define DS1803_WIPER_0 0xA9
#define DS1803_WIPER_1 0xAA
#define DS3502_WR_IVR 0x00
enum ds1803_type {
DS1803_010,
DS1803_050,
DS1803_100,
DS3502,
};
struct ds1803_cfg {
int wipers;
int avail[3];
int kohms;
const struct iio_chan_spec *channels;
u8 num_channels;
int (*read)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val);
};
struct ds1803_data {
struct i2c_client *client;
const struct ds1803_cfg *cfg;
};
#define DS1803_CHANNEL(ch, addr) { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = (ch), \
.address = (addr), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
}
static const struct iio_chan_spec ds1803_channels[] = {
DS1803_CHANNEL(0, DS1803_WIPER_0),
DS1803_CHANNEL(1, DS1803_WIPER_1),
};
static const struct iio_chan_spec ds3502_channels[] = {
DS1803_CHANNEL(0, DS3502_WR_IVR),
};
static int ds1803_read(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
struct ds1803_data *data = iio_priv(indio_dev);
int ret;
u8 result[ARRAY_SIZE(ds1803_channels)];
ret = i2c_master_recv(data->client, result, indio_dev->num_channels);
if (ret < 0)
return ret;
*val = result[chan->channel];
return ret;
}
static int ds3502_read(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
struct ds1803_data *data = iio_priv(indio_dev);
int ret;
ret = i2c_smbus_read_byte_data(data->client, chan->address);
if (ret < 0)
return ret;
*val = ret;
return ret;
}
static const struct ds1803_cfg ds1803_cfg[] = {
[DS1803_010] = {
.wipers = 2,
.avail = { 0, 1, 255 },
.kohms = 10,
.channels = ds1803_channels,
.num_channels = ARRAY_SIZE(ds1803_channels),
.read = ds1803_read,
},
[DS1803_050] = {
.wipers = 2,
.avail = { 0, 1, 255 },
.kohms = 50,
.channels = ds1803_channels,
.num_channels = ARRAY_SIZE(ds1803_channels),
.read = ds1803_read,
},
[DS1803_100] = {
.wipers = 2,
.avail = { 0, 1, 255 },
.kohms = 100,
.channels = ds1803_channels,
.num_channels = ARRAY_SIZE(ds1803_channels),
.read = ds1803_read,
},
[DS3502] = {
.wipers = 1,
.avail = { 0, 1, 127 },
.kohms = 10,
.channels = ds3502_channels,
.num_channels = ARRAY_SIZE(ds3502_channels),
.read = ds3502_read,
},
};
static int ds1803_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct ds1803_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = data->cfg->read(indio_dev, chan, val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
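	/*
	 * Editor's worked example: scale is reported as a fraction, so a
	 * 10 kohm ds1803 with max position 255 yields 10000 / 255, i.e.
	 * about 39.2 ohms per wiper step.
	 */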
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = data->cfg->avail[2]; /* Max wiper position */
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int ds1803_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct ds1803_data *data = iio_priv(indio_dev);
u8 addr = chan->address;
int max_pos = data->cfg->avail[2];
if (val2 != 0)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val > max_pos || val < 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
return i2c_smbus_write_byte_data(data->client, addr, val);
}
static int ds1803_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type,
int *length, long mask)
{
struct ds1803_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
*vals = data->cfg->avail;
*length = ARRAY_SIZE(data->cfg->avail);
*type = IIO_VAL_INT;
return IIO_AVAIL_RANGE;
}
return -EINVAL;
}
static const struct iio_info ds1803_info = {
.read_raw = ds1803_read_raw,
.write_raw = ds1803_write_raw,
.read_avail = ds1803_read_avail,
};
static int ds1803_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ds1803_data *data;
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
i2c_set_clientdata(client, indio_dev);
data = iio_priv(indio_dev);
data->client = client;
data->cfg = i2c_get_match_data(client);
indio_dev->info = &ds1803_info;
indio_dev->channels = data->cfg->channels;
indio_dev->num_channels = data->cfg->num_channels;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static const struct of_device_id ds1803_dt_ids[] = {
{ .compatible = "maxim,ds1803-010", .data = &ds1803_cfg[DS1803_010] },
{ .compatible = "maxim,ds1803-050", .data = &ds1803_cfg[DS1803_050] },
{ .compatible = "maxim,ds1803-100", .data = &ds1803_cfg[DS1803_100] },
{ .compatible = "maxim,ds3502", .data = &ds1803_cfg[DS3502] },
{}
};
MODULE_DEVICE_TABLE(of, ds1803_dt_ids);
static const struct i2c_device_id ds1803_id[] = {
{ "ds1803-010", (kernel_ulong_t)&ds1803_cfg[DS1803_010] },
{ "ds1803-050", (kernel_ulong_t)&ds1803_cfg[DS1803_050] },
{ "ds1803-100", (kernel_ulong_t)&ds1803_cfg[DS1803_100] },
{ "ds3502", (kernel_ulong_t)&ds1803_cfg[DS3502] },
{}
};
MODULE_DEVICE_TABLE(i2c, ds1803_id);
static struct i2c_driver ds1803_driver = {
.driver = {
.name = "ds1803",
.of_match_table = ds1803_dt_ids,
},
.probe = ds1803_probe,
.id_table = ds1803_id,
};
module_i2c_driver(ds1803_driver);
MODULE_AUTHOR("Slawomir Stepien <[email protected]>");
MODULE_AUTHOR("Jagath Jog J <[email protected]>");
MODULE_DESCRIPTION("DS1803 digital potentiometer");
MODULE_LICENSE("GPL v2");
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 Qualcomm Atheros, Inc
*
* Based on net/sched/sch_fq_codel.c
*/
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H
#include <net/fq.h>
/* functions that are embedded into the including file */
static void
__fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets,
unsigned int bytes, unsigned int truesize)
{
struct fq_tin *tin = flow->tin;
int idx;
tin->backlog_bytes -= bytes;
tin->backlog_packets -= packets;
flow->backlog -= bytes;
fq->backlog -= packets;
fq->memory_usage -= truesize;
if (flow->backlog)
return;
if (flow == &tin->default_flow) {
list_del_init(&tin->tin_list);
return;
}
idx = flow - fq->flows;
__clear_bit(idx, fq->flows_bitmap);
}
static void fq_adjust_removal(struct fq *fq,
struct fq_flow *flow,
struct sk_buff *skb)
{
__fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
}
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
struct fq_flow *flow)
{
struct sk_buff *skb;
lockdep_assert_held(&fq->lock);
skb = __skb_dequeue(&flow->queue);
if (!skb)
return NULL;
fq_adjust_removal(fq, flow, skb);
return skb;
}
static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
fq_skb_free_t free_func)
{
unsigned int packets = 0, bytes = 0, truesize = 0;
struct fq_tin *tin = flow->tin;
struct sk_buff *skb;
int pending;
lockdep_assert_held(&fq->lock);
pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2);
do {
skb = __skb_dequeue(&flow->queue);
if (!skb)
break;
packets++;
bytes += skb->len;
truesize += skb->truesize;
free_func(fq, tin, flow, skb);
} while (packets < pending);
__fq_adjust_removal(fq, flow, packets, bytes, truesize);
return packets;
}
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
struct fq_tin *tin,
fq_tin_dequeue_t dequeue_func)
{
struct fq_flow *flow;
struct list_head *head;
struct sk_buff *skb;
lockdep_assert_held(&fq->lock);
begin:
head = &tin->new_flows;
if (list_empty(head)) {
head = &tin->old_flows;
if (list_empty(head))
return NULL;
}
flow = list_first_entry(head, struct fq_flow, flowchain);
if (flow->deficit <= 0) {
flow->deficit += fq->quantum;
list_move_tail(&flow->flowchain,
&tin->old_flows);
goto begin;
}
skb = dequeue_func(fq, tin, flow);
if (!skb) {
/* force a pass through old_flows to prevent starvation */
if ((head == &tin->new_flows) &&
!list_empty(&tin->old_flows)) {
list_move_tail(&flow->flowchain, &tin->old_flows);
} else {
list_del_init(&flow->flowchain);
flow->tin = NULL;
}
goto begin;
}
flow->deficit -= skb->len;
tin->tx_bytes += skb->len;
tin->tx_packets++;
return skb;
}
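/*
 * Editor's note: the loop above is deficit round-robin as in fq_codel. A
 * flow may transmit while its deficit is positive; once exhausted, it is
 * recharged by one quantum and rotated to the tail of old_flows, bounding
 * per-round unfairness by roughly one quantum plus one max-size packet.
 */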
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
u32 hash = skb_get_hash(skb);
return reciprocal_scale(hash, fq->flows_cnt);
}
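/*
 * Editor's note: reciprocal_scale() maps the 32-bit hash onto
 * [0, flows_cnt) using a multiply-and-shift instead of a modulo.
 */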
static struct fq_flow *fq_flow_classify(struct fq *fq,
struct fq_tin *tin, u32 idx,
struct sk_buff *skb)
{
struct fq_flow *flow;
lockdep_assert_held(&fq->lock);
flow = &fq->flows[idx];
if (flow->tin && flow->tin != tin) {
flow = &tin->default_flow;
tin->collisions++;
fq->collisions++;
}
if (!flow->tin)
tin->flows++;
return flow;
}
static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
{
struct fq_tin *tin;
struct fq_flow *flow = NULL;
u32 len = 0;
int i;
for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
struct fq_flow *cur = &fq->flows[i];
unsigned int cur_len;
cur_len = cur->backlog;
if (cur_len <= len)
continue;
flow = cur;
len = cur_len;
}
list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
unsigned int cur_len = tin->default_flow.backlog;
if (cur_len <= len)
continue;
flow = &tin->default_flow;
len = cur_len;
}
return flow;
}
static void fq_tin_enqueue(struct fq *fq,
struct fq_tin *tin, u32 idx,
struct sk_buff *skb,
fq_skb_free_t free_func)
{
struct fq_flow *flow;
struct sk_buff *next;
bool oom;
lockdep_assert_held(&fq->lock);
flow = fq_flow_classify(fq, tin, idx, skb);
if (!flow->backlog) {
if (flow != &tin->default_flow)
__set_bit(idx, fq->flows_bitmap);
else if (list_empty(&tin->tin_list))
list_add(&tin->tin_list, &fq->tin_backlog);
}
flow->tin = tin;
skb_list_walk_safe(skb, skb, next) {
skb_mark_not_on_list(skb);
flow->backlog += skb->len;
tin->backlog_bytes += skb->len;
tin->backlog_packets++;
fq->memory_usage += skb->truesize;
fq->backlog++;
__skb_queue_tail(&flow->queue, skb);
}
if (list_empty(&flow->flowchain)) {
flow->deficit = fq->quantum;
list_add_tail(&flow->flowchain,
&tin->new_flows);
}
oom = (fq->memory_usage > fq->memory_limit);
while (fq->backlog > fq->limit || oom) {
flow = fq_find_fattest_flow(fq);
if (!flow)
return;
if (!fq_flow_drop(fq, flow, free_func))
return;
flow->tin->overlimit++;
fq->overlimit++;
if (oom) {
fq->overmemory++;
oom = (fq->memory_usage > fq->memory_limit);
}
}
}
static void fq_flow_filter(struct fq *fq,
struct fq_flow *flow,
fq_skb_filter_t filter_func,
void *filter_data,
fq_skb_free_t free_func)
{
struct fq_tin *tin = flow->tin;
struct sk_buff *skb, *tmp;
lockdep_assert_held(&fq->lock);
skb_queue_walk_safe(&flow->queue, skb, tmp) {
if (!filter_func(fq, tin, flow, skb, filter_data))
continue;
__skb_unlink(skb, &flow->queue);
fq_adjust_removal(fq, flow, skb);
free_func(fq, tin, flow, skb);
}
}
static void fq_tin_filter(struct fq *fq,
struct fq_tin *tin,
fq_skb_filter_t filter_func,
void *filter_data,
fq_skb_free_t free_func)
{
struct fq_flow *flow;
lockdep_assert_held(&fq->lock);
list_for_each_entry(flow, &tin->new_flows, flowchain)
fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
list_for_each_entry(flow, &tin->old_flows, flowchain)
fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}
static void fq_flow_reset(struct fq *fq,
struct fq_flow *flow,
fq_skb_free_t free_func)
{
struct fq_tin *tin = flow->tin;
struct sk_buff *skb;
while ((skb = fq_flow_dequeue(fq, flow)))
free_func(fq, tin, flow, skb);
if (!list_empty(&flow->flowchain)) {
list_del_init(&flow->flowchain);
if (list_empty(&tin->new_flows) &&
list_empty(&tin->old_flows))
list_del_init(&tin->tin_list);
}
flow->tin = NULL;
WARN_ON_ONCE(flow->backlog);
}
static void fq_tin_reset(struct fq *fq,
struct fq_tin *tin,
fq_skb_free_t free_func)
{
struct list_head *head;
struct fq_flow *flow;
for (;;) {
head = &tin->new_flows;
if (list_empty(head)) {
head = &tin->old_flows;
if (list_empty(head))
break;
}
flow = list_first_entry(head, struct fq_flow, flowchain);
fq_flow_reset(fq, flow, free_func);
}
WARN_ON_ONCE(!list_empty(&tin->tin_list));
WARN_ON_ONCE(tin->backlog_bytes);
WARN_ON_ONCE(tin->backlog_packets);
}
static void fq_flow_init(struct fq_flow *flow)
{
INIT_LIST_HEAD(&flow->flowchain);
__skb_queue_head_init(&flow->queue);
}
static void fq_tin_init(struct fq_tin *tin)
{
INIT_LIST_HEAD(&tin->new_flows);
INIT_LIST_HEAD(&tin->old_flows);
INIT_LIST_HEAD(&tin->tin_list);
fq_flow_init(&tin->default_flow);
}
static int fq_init(struct fq *fq, int flows_cnt)
{
int i;
memset(fq, 0, sizeof(fq[0]));
spin_lock_init(&fq->lock);
INIT_LIST_HEAD(&fq->tin_backlog);
fq->flows_cnt = max_t(u32, flows_cnt, 1);
fq->quantum = 300;
fq->limit = 8192;
fq->memory_limit = 16 << 20; /* 16 MBytes */
fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
if (!fq->flows)
return -ENOMEM;
fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL);
if (!fq->flows_bitmap) {
kvfree(fq->flows);
fq->flows = NULL;
return -ENOMEM;
}
for (i = 0; i < fq->flows_cnt; i++)
fq_flow_init(&fq->flows[i]);
return 0;
}
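/*
 * Minimal usage sketch (editor's illustration, not part of the original
 * header): an includer such as a wireless driver typically drives these
 * helpers as below, holding fq->lock around enqueue/dequeue. The callback
 * names are assumptions for the example only.
 *
 *	struct fq fq;
 *	struct fq_tin tin;
 *
 *	fq_init(&fq, 4096);
 *	fq_tin_init(&tin);
 *
 *	spin_lock_bh(&fq.lock);
 *	fq_tin_enqueue(&fq, &tin, fq_flow_idx(&fq, skb), skb, free_fn);
 *	skb = fq_tin_dequeue(&fq, &tin, dequeue_fn);
 *	spin_unlock_bh(&fq.lock);
 *
 *	fq_reset(&fq, free_fn);
 */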
static void fq_reset(struct fq *fq,
fq_skb_free_t free_func)
{
int i;
for (i = 0; i < fq->flows_cnt; i++)
fq_flow_reset(fq, &fq->flows[i], free_func);
kvfree(fq->flows);
fq->flows = NULL;
bitmap_free(fq->flows_bitmap);
fq->flows_bitmap = NULL;
}
#endif
|
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "soc/intel_dram.h"
static u32 read_reference_ts_freq(struct intel_uncore *uncore)
{
u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
u32 base_freq, frac_freq;
base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
base_freq *= 1000000;
frac_freq = ((ts_override &
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
frac_freq = 1000000 / (frac_freq + 1);
return base_freq + frac_freq;
}
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
u32 rpm_config_reg)
{
u32 f19_2_mhz = 19200000;
u32 f24_mhz = 24000000;
u32 f25_mhz = 25000000;
u32 f38_4_mhz = 38400000;
u32 crystal_clock =
(rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
switch (crystal_clock) {
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
return f24_mhz;
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
return f19_2_mhz;
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
return f38_4_mhz;
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
return f25_mhz;
default:
MISSING_CASE(crystal_clock);
return 0;
}
}
static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
{
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
/*
* Note that on gen11+, the clock frequency may be reconfigured.
* We do not, and we assume nobody else does.
*
* First figure out the reference frequency. There are 2 ways
* we can compute the frequency, either through the
* TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
* tells us which one we should use.
*/
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(uncore);
} else {
u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
freq = gen11_get_crystal_clock_freq(uncore, c0);
/*
* Now figure out how the command stream's timestamp
* register increments from this frequency (it might
* increment only every few clock cycle).
*/
freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
}
return freq;
}
static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
{
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(uncore);
} else {
freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
/*
* Now figure out how the command stream's timestamp
* register increments from this frequency (it might
* increment only every few clock cycle).
*/
freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
CTC_SHIFT_PARAMETER_SHIFT);
}
return freq;
}
static u32 gen6_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* PRMs say:
*
* "The PCU TSC counts 10ns increments; this timestamp
* reflects bits 38:3 of the TSC (i.e. 80ns granularity,
	 * rolling over every 1.5 hours)."
*/
return 12500000;
}
static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* 63:32 increments every 1000 ns
* 31:0 mbz
*/
return 1000000000 / 1000;
}
static u32 g4x_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* 63:20 increments every 1/4 ns
* 19:0 mbz
*
* -> 63:32 increments every 1024 ns
*/
return 1000000000 / 1024;
}
static u32 gen4_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* PRMs say:
*
* "The value in this register increments once every 16
* hclks." (through the “Clocking Configuration”
* (“CLKCFG”) MCHBAR register)
*
* Testing on actual hardware has shown there is no /16.
*/
return DIV_ROUND_CLOSEST(i9xx_fsb_freq(uncore->i915), 4) * 1000;
}
static u32 read_clock_frequency(struct intel_uncore *uncore)
{
if (GRAPHICS_VER(uncore->i915) >= 11)
return gen11_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) >= 9)
return gen9_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) >= 6)
return gen6_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) == 5)
return gen5_read_clock_frequency(uncore);
else if (IS_G4X(uncore->i915))
return g4x_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) == 4)
return gen4_read_clock_frequency(uncore);
else
return 0;
}
void intel_gt_init_clock_frequency(struct intel_gt *gt)
{
gt->clock_frequency = read_clock_frequency(gt->uncore);
/* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
if (GRAPHICS_VER(gt->i915) == 11)
gt->clock_period_ns = NSEC_PER_SEC / 13750000;
else if (gt->clock_frequency)
gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
GT_TRACE(gt,
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
gt->clock_frequency / 1000,
gt->clock_period_ns,
div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
USEC_PER_SEC));
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
gt_err(gt, "GT clock frequency changed, was %uHz, now %uHz!\n",
gt->clock_frequency,
read_clock_frequency(gt->uncore));
}
}
#endif
static u64 div_u64_roundup(u64 nom, u32 den)
{
return div_u64(nom + den - 1, den);
}
u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
}
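/*
 * Editor's worked example: with a 19.2 MHz GT clock,
 * intel_gt_clock_interval_to_ns(gt, 1) is
 * div_u64_roundup(NSEC_PER_SEC, 19200000) = 53 ns (52.08 ns rounded up).
 */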
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
return intel_gt_clock_interval_to_ns(gt, 16 * count);
}
u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
}
u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
u64 val;
/*
* Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
* 8300) freezing up around GPU hangs. Looks as if even
* scheduling/timer interrupts start misbehaving if the RPS
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
if (GRAPHICS_VER(gt->i915) == 6)
val = div_u64_roundup(val, 25) * 25;
return val;
}
|
// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
*
* Author: Roy Luo <[email protected]>
* Ryder Lee <[email protected]>
* Felix Fietkau <[email protected]>
* Lorenzo Bianconi <[email protected]>
*/
#include <linux/etherdevice.h>
#include "mt7615.h"
#include "mac.h"
#include "eeprom.h"
static void mt7615_pci_init_work(struct work_struct *work)
{
struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
mcu_work);
int i, ret;
ret = mt7615_mcu_init(dev);
for (i = 0; (ret == -EAGAIN) && (i < 10); i++) {
msleep(200);
ret = mt7615_mcu_init(dev);
}
if (ret)
return;
mt7615_init_work(dev);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
{
u32 addr = mt7615_reg_map(dev, MT_EFUSE_BASE);
int ret, idx;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->mcu_work, mt7615_pci_init_work);
ret = mt7615_eeprom_init(dev, addr);
if (ret < 0)
return ret;
if (is_mt7663(&dev->mt76)) {
/* Reset RGU */
mt76_clear(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1));
mt76_set(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1));
}
ret = mt7615_dma_init(dev);
if (ret)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
if (idx)
return -ENOSPC;
dev->mt76.global_wcid.idx = idx;
dev->mt76.global_wcid.hw_key_idx = -1;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
return 0;
}
int mt7615_register_device(struct mt7615_dev *dev)
{
int ret;
mt7615_init_device(dev);
INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
dev->mphy.leds.cdev.brightness_set = mt7615_led_set_brightness;
dev->mphy.leds.cdev.blink_set = mt7615_led_set_blink;
}
ret = mt7622_wmac_init(dev);
if (ret)
return ret;
ret = mt7615_init_hardware(dev);
if (ret)
return ret;
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
return ret;
ret = mt7615_thermal_init(dev);
if (ret)
return ret;
ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
if (dev->dbdc_support) {
ret = mt7615_register_ext_phy(dev);
if (ret)
return ret;
}
return mt7615_init_debugfs(dev);
}
void mt7615_unregister_device(struct mt7615_dev *dev)
{
bool mcu_running;
mcu_running = mt7615_wait_for_mcu_init(dev);
mt7615_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
if (mcu_running)
mt7615_mcu_exit(dev);
mt7615_tx_token_put(dev);
mt7615_dma_cleanup(dev);
tasklet_disable(&dev->mt76.irq_tasklet);
mt76_free_device(&dev->mt76);
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4/clock-shx3.c
*
* SH-X3 support for the clock framework
*
* Copyright (C) 2006-2007 Renesas Technology Corp.
* Copyright (C) 2006-2007 Renesas Solutions Corp.
* Copyright (C) 2006-2010 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 16666666,
};
static unsigned long pll_recalc(struct clk *clk)
{
/* PLL1 has a fixed x72 multiplier. */
return clk->parent->rate * 72;
}
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk *clks[] = {
&extal_clk,
&pll_clk,
};
static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
24, 32, 36, 48 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_SHA, DIV4_P, DIV4_NR };
#define DIV4(_bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
[DIV4_P] = DIV4(0, 0x0f80, 0),
[DIV4_SHA] = DIV4(4, 0x0ff0, 0),
[DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
[DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
[DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
};
#define MSTPCR0 0xffc00030
#define MSTPCR1 0xffc00034
enum { MSTP027, MSTP026, MSTP025, MSTP024,
MSTP009, MSTP008, MSTP003, MSTP002,
MSTP001, MSTP000, MSTP119, MSTP105,
MSTP104, MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
/* MSTPCR0 */
[MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
[MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
[MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
[MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
[MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
[MSTP001] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
[MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
/* MSTPCR1 */
[MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
[MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
[MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
CLKDEV_CON_ID("shywaya_clk", &div4_clks[DIV4_SHA]),
CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
/* MSTP32 clocks */
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP027]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP026]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]),
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP024]),
CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]),
CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]),
CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
};
int __init arch_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Google Veyron Speedy Rev 1+ board device tree source
*
* Copyright 2015 Google, Inc
*/
/dts-v1/;
#include "rk3288-veyron-chromebook.dtsi"
#include "rk3288-veyron-broadcom-bluetooth.dtsi"
#include "../cros-ec-sbs.dtsi"
/ {
model = "Google Speedy";
compatible = "google,veyron-speedy-rev9", "google,veyron-speedy-rev8",
"google,veyron-speedy-rev7", "google,veyron-speedy-rev6",
"google,veyron-speedy-rev5", "google,veyron-speedy-rev4",
"google,veyron-speedy-rev3", "google,veyron-speedy-rev2",
"google,veyron-speedy", "google,veyron", "rockchip,rk3288";
};
&cpu_alert0 {
temperature = <65000>;
};
&cpu_alert1 {
temperature = <70000>;
};
&cpu_crit {
temperature = <90000>;
};
&edp {
/delete-property/pinctrl-names;
/delete-property/pinctrl-0;
force-hpd;
};
&gpu_alert0 {
temperature = <80000>;
};
&gpu_crit {
temperature = <90000>;
};
&rk808 {
pinctrl-names = "default";
pinctrl-0 = <&pmic_int_l>;
};
&sdmmc {
disable-wp;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd_disabled &sdmmc_cd_pin
&sdmmc_bus4>;
};
&vcc_5v {
enable-active-high;
gpio = <&gpio7 RK_PC5 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&drv_5v>;
};
&vcc50_hdmi {
enable-active-high;
gpio = <&gpio5 RK_PC3 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&vcc50_hdmi_en>;
};
&gpio0 {
gpio-line-names = "PMIC_SLEEP_AP",
"DDRIO_PWROFF",
"DDRIO_RETEN",
"TS3A227E_INT_L",
"PMIC_INT_L",
"PWR_KEY_L",
"AP_LID_INT_L",
"EC_IN_RW",
"AC_PRESENT_AP",
/*
* RECOVERY_SW_L is Chrome OS ABI. Schematics call
* it REC_MODE_L.
*/
"RECOVERY_SW_L",
"OTP_OUT",
"HOST1_PWR_EN",
"USBOTG_PWREN_H",
"AP_WARM_RESET_H",
"nFALUT2",
"I2C0_SDA_PMIC",
"I2C0_SCL_PMIC",
"SUSPEND_L",
"USB_INT";
};
&gpio2 {
gpio-line-names = "CONFIG0",
"CONFIG1",
"CONFIG2",
"",
"",
"",
"",
"CONFIG3",
"PWRLIMIT#_CPU",
"EMMC_RST_L",
"",
"",
"BL_PWR_EN",
"AVDD_1V8_DISP_EN";
};
&gpio3 {
gpio-line-names = "FLASH0_D0",
"FLASH0_D1",
"FLASH0_D2",
"FLASH0_D3",
"FLASH0_D4",
"FLASH0_D5",
"FLASH0_D6",
"FLASH0_D7",
"",
"",
"",
"",
"",
"",
"",
"",
"FLASH0_CS2/EMMC_CMD",
"",
"FLASH0_DQS/EMMC_CLKO";
};
&gpio4 {
gpio-line-names = "",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"UART0_RXD",
"UART0_TXD",
"UART0_CTS",
"UART0_RTS",
"SDIO0_D0",
"SDIO0_D1",
"SDIO0_D2",
"SDIO0_D3",
"SDIO0_CMD",
"SDIO0_CLK",
"BT_DEV_WAKE",
"",
"WIFI_ENABLE_H",
"BT_ENABLE_L",
"WIFI_HOST_WAKE",
"BT_HOST_WAKE";
};
&gpio5 {
gpio-line-names = "",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"SPI0_CLK",
"SPI0_CS0",
"SPI0_TXD",
"SPI0_RXD",
"",
"",
"",
"VCC50_HDMI_EN";
};
&gpio6 {
gpio-line-names = "I2S0_SCLK",
"I2S0_LRCK_RX",
"I2S0_LRCK_TX",
"I2S0_SDI",
"I2S0_SDO0",
"HP_DET_H",
"ALS_INT", /* not connected */
"INT_CODEC",
"I2S0_CLK",
"I2C2_SDA",
"I2C2_SCL",
"MICDET",
"",
"",
"",
"",
"SDMMC_D0",
"SDMMC_D1",
"SDMMC_D2",
"SDMMC_D3",
"SDMMC_CLK",
"SDMMC_CMD";
};
&gpio7 {
gpio-line-names = "LCDC_BL",
"PWM_LOG",
"BL_EN",
"TRACKPAD_INT",
"TPM_INT_H",
"SDMMC_DET_L",
/*
* AP_FLASH_WP_L is Chrome OS ABI. Schematics call
* it FW_WP_AP.
*/
"AP_FLASH_WP_L",
"EC_INT",
"CPU_NMI",
"DVS_OK",
"",
"EDP_HOTPLUG",
"DVS1",
"nFALUT1",
"LCD_EN",
"DVS2",
"VCC5V_GOOD_H",
"I2C4_SDA_TP",
"I2C4_SCL_TP",
"I2C5_SDA_HDMI",
"I2C5_SCL_HDMI",
"5V_DRV",
"UART2_RXD",
"UART2_TXD";
};
&gpio8 {
gpio-line-names = "RAM_ID0",
"RAM_ID1",
"RAM_ID2",
"RAM_ID3",
"I2C1_SDA_TPM",
"I2C1_SCL_TPM",
"SPI2_CLK",
"SPI2_CS0",
"SPI2_RXD",
"SPI2_TXD";
};
&pinctrl {
pinctrl-names = "default", "sleep";
pinctrl-0 = <
/* Common for sleep and wake, but no owners */
&ddr0_retention
&ddrio_pwroff
&global_pwroff
/* Wake only */
&suspend_l_wake
>;
pinctrl-1 = <
/* Common for sleep and wake, but no owners */
&ddr0_retention
&ddrio_pwroff
&global_pwroff
/* Sleep only */
&suspend_l_sleep
>;
buck-5v {
drv_5v: drv-5v {
rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
hdmi {
vcc50_hdmi_en: vcc50-hdmi-en {
rockchip,pins = <5 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
dvs_1: dvs-1 {
rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
};
dvs_2: dvs-2 {
rockchip,pins = <7 RK_PB7 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
};
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/align.h>
#include <linux/dma-mapping.h>
#include <linux/hisi_acc_qm.h>
#include <linux/module.h>
#include <linux/slab.h>
#define HISI_ACC_SGL_SGE_NR_MIN 1
#define HISI_ACC_SGL_NR_MAX 256
#define HISI_ACC_SGL_ALIGN_SIZE 64
#define HISI_ACC_MEM_BLOCK_NR 5
struct acc_hw_sge {
dma_addr_t buf;
void *page_ctrl;
__le32 len;
__le32 pad;
__le32 pad0;
__le32 pad1;
};
/* use default sgl head size 64B */
struct hisi_acc_hw_sgl {
dma_addr_t next_dma;
__le16 entry_sum_in_chain;
__le16 entry_sum_in_sgl;
__le16 entry_length_in_sgl;
__le16 pad0;
__le64 pad1[5];
struct hisi_acc_hw_sgl *next;
struct acc_hw_sge sge_entries[];
} __aligned(1);
struct hisi_acc_sgl_pool {
struct mem_block {
struct hisi_acc_hw_sgl *sgl;
dma_addr_t sgl_dma;
size_t size;
} mem_block[HISI_ACC_MEM_BLOCK_NR];
u32 sgl_num_per_block;
u32 block_num;
u32 count;
u32 sge_nr;
size_t sgl_size;
};
/**
* hisi_acc_create_sgl_pool() - Create a hw sgl pool.
* @dev: The device which hw sgl pool belongs to.
* @count: Count of hisi_acc_hw_sgl in pool.
* @sge_nr: The count of sge in hw_sgl
*
 * This function creates a hw sgl pool; afterwards the user can get hw sgl
 * memory from it.
*/
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr)
{
u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
struct hisi_acc_sgl_pool *pool;
struct mem_block *block;
u32 i, j;
if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
return ERR_PTR(-EINVAL);
sgl_size = ALIGN(sizeof(struct acc_hw_sge) * sge_nr +
sizeof(struct hisi_acc_hw_sgl),
HISI_ACC_SGL_ALIGN_SIZE);
/*
	 * The pool may allocate a block of memory of size
	 * PAGE_SIZE * 2^MAX_PAGE_ORDER. The block size may exceed 2^31 on
	 * ia64, so cap the block size at 2^31.
*/
block_size = 1 << (PAGE_SHIFT + MAX_PAGE_ORDER < 32 ?
PAGE_SHIFT + MAX_PAGE_ORDER : 31);
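	/* e.g. with 4 KiB pages and MAX_PAGE_ORDER == 10, this is 2^22 = 4 MiB */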
sgl_num_per_block = block_size / sgl_size;
block_num = count / sgl_num_per_block;
remain_sgl = count % sgl_num_per_block;
if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) ||
(remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1))
return ERR_PTR(-EINVAL);
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return ERR_PTR(-ENOMEM);
block = pool->mem_block;
for (i = 0; i < block_num; i++) {
block[i].sgl = dma_alloc_coherent(dev, block_size,
&block[i].sgl_dma,
GFP_KERNEL);
if (!block[i].sgl) {
dev_err(dev, "Fail to allocate hw SG buffer!\n");
goto err_free_mem;
}
block[i].size = block_size;
}
if (remain_sgl > 0) {
block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
&block[i].sgl_dma,
GFP_KERNEL);
if (!block[i].sgl) {
dev_err(dev, "Fail to allocate remained hw SG buffer!\n");
goto err_free_mem;
}
block[i].size = remain_sgl * sgl_size;
}
pool->sgl_num_per_block = sgl_num_per_block;
pool->block_num = remain_sgl ? block_num + 1 : block_num;
pool->count = count;
pool->sgl_size = sgl_size;
pool->sge_nr = sge_nr;
return pool;
err_free_mem:
for (j = 0; j < i; j++)
dma_free_coherent(dev, block_size, block[j].sgl,
block[j].sgl_dma);
kfree_sensitive(pool);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
/**
* hisi_acc_free_sgl_pool() - Free a hw sgl pool.
* @dev: The device which hw sgl pool belongs to.
* @pool: Pointer of pool.
*
* This function frees memory of a hw sgl pool.
*/
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
struct mem_block *block;
u32 i;
if (!dev || !pool)
return;
block = pool->mem_block;
for (i = 0; i < pool->block_num; i++)
dma_free_coherent(dev, block[i].size, block[i].sgl,
block[i].sgl_dma);
kfree(pool);
}
EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);
static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
u32 index, dma_addr_t *hw_sgl_dma)
{
struct mem_block *block;
u32 block_index, offset;
block = pool->mem_block;
block_index = index / pool->sgl_num_per_block;
offset = index % pool->sgl_num_per_block;
*hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
return (void *)block[block_index].sgl + pool->sgl_size * offset;
}
static void sg_map_to_hw_sg(struct scatterlist *sgl,
struct acc_hw_sge *hw_sge)
{
hw_sge->buf = sg_dma_address(sgl);
hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
hw_sge->page_ctrl = sg_virt(sgl);
}
static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl);
var++;
hw_sgl->entry_sum_in_sgl = cpu_to_le16(var);
}
static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
{
hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}
static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
u16 entry_sum = le16_to_cpu(hw_sgl->entry_sum_in_sgl);
int i;
for (i = 0; i < entry_sum; i++) {
hw_sge[i].page_ctrl = NULL;
hw_sge[i].buf = 0;
hw_sge[i].len = 0;
}
}
/**
* hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl.
* @dev: The device which hw sgl belongs to.
* @sgl: Scatterlist which will be mapped to hw sgl.
* @pool: Pool which hw sgl memory will be allocated in.
* @index: Index of hisi_acc_hw_sgl in pool.
* @hw_sgl_dma: The dma address of allocated hw sgl.
*
 * This function builds a hw sgl according to the input sgl; the user can use
 * hw_sgl_dma as src/dst in its BD. Only a single hw sgl is currently
 * supported.
*/
struct hisi_acc_hw_sgl *
hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
struct scatterlist *sgl,
struct hisi_acc_sgl_pool *pool,
u32 index, dma_addr_t *hw_sgl_dma)
{
struct hisi_acc_hw_sgl *curr_hw_sgl;
unsigned int i, sg_n_mapped;
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
int sg_n, ret;
if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
if (!sg_n_mapped) {
dev_err(dev, "DMA mapping for SG error!\n");
return ERR_PTR(-EINVAL);
}
if (sg_n_mapped > pool->sge_nr) {
dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
ret = -EINVAL;
goto err_unmap;
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
dev_err(dev, "Get SGL error!\n");
ret = -ENOMEM;
goto err_unmap;
}
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
for_each_sg(sgl, sg, sg_n_mapped, i) {
sg_map_to_hw_sg(sg, curr_hw_sge);
inc_hw_sgl_sge(curr_hw_sgl);
curr_hw_sge++;
}
update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr);
*hw_sgl_dma = curr_sgl_dma;
return curr_hw_sgl;
err_unmap:
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
/**
* hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl.
* @dev: The device which hw sgl belongs to.
* @sgl: Related scatterlist.
* @hw_sgl: Virtual address of hw sgl.
*
* This function unmaps allocated hw sgl.
*/
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
struct hisi_acc_hw_sgl *hw_sgl)
{
if (!dev || !sgl || !hw_sgl)
return;
dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
clear_hw_sgl_sge(hw_sgl);
hw_sgl->entry_sum_in_chain = 0;
hw_sgl->entry_sum_in_sgl = 0;
hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
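/*
 * Minimal usage sketch (editor's illustration, not part of the original
 * file): one scatterlist is mapped per outstanding request; the pool
 * sizing below is an assumption for the example only, and IS_ERR/PTR_ERR
 * are assumed available via the existing includes.
 */
static inline int example_sgl_pool_usage(struct device *dev,
					 struct scatterlist *sgl)
{
	struct hisi_acc_sgl_pool *pool;
	struct hisi_acc_hw_sgl *hw_sgl;
	dma_addr_t hw_sgl_dma;

	pool = hisi_acc_create_sgl_pool(dev, 16, 64);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, pool, 0, &hw_sgl_dma);
	if (IS_ERR(hw_sgl)) {
		hisi_acc_free_sgl_pool(dev, pool);
		return PTR_ERR(hw_sgl);
	}

	/* ... program hw_sgl_dma as src/dst into the BD and run the job ... */

	hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl);
	hisi_acc_free_sgl_pool(dev, pool);
	return 0;
}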
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*/
#include <dt-bindings/clock/imx6sx-clock.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/types.h>
#include "clk.h"
static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
static const char *periph2_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll4_audio_div", };
static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", };
static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "osc", };
static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
static const char *ocram_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
static const char *audio_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
static const char *gpu_axi_sels[] = { "pll2_pfd2_396m", "pll3_pfd0_720m", "pll3_pfd1_540m", "pll2_bus", };
static const char *gpu_core_sels[] = { "pll3_pfd1_540m", "pll3_pfd0_720m", "pll2_bus", "pll2_pfd2_396m", };
static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
static const char *ldb_di0_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_pfd3_594m", "pll2_pfd1_594m", "pll3_pfd3_454m", };
static const char *ldb_di1_sels[] = { "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_bus", "pll3_pfd3_454m", "pll3_pfd2_508m", };
static const char *pcie_axi_sels[] = { "axi", "ahb", };
static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll5_video_div", "pll4_audio_div", };
static const char *qspi1_sels[] = { "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_bus", "pll3_pfd3_454m", "pll3_pfd2_508m", };
static const char *perclk_sels[] = { "ipg", "osc", };
static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *vid_sels[] = { "pll3_pfd1_540m", "pll3_usb_otg", "pll3_pfd3_454m", "pll4_audio_div", "pll5_video_div", };
static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", "dummy", };
static const char *uart_sels[] = { "pll3_80m", "osc", };
static const char *qspi2_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd3_454m", "dummy", "dummy", "dummy", };
static const char *enet_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
static const char *enet_sels[] = { "enet_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static const char *m4_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "osc", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd3_454m", };
static const char *m4_sels[] = { "m4_pre_sel", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static const char *eim_slow_sels[] = { "ocram", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *ecspi_sels[] = { "pll3_60m", "osc", };
static const char *lcdif1_pre_sels[] = { "pll2_bus", "pll3_pfd3_454m", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd1_594m", "pll3_pfd1_540m", };
static const char *lcdif1_sels[] = { "lcdif1_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static const char *lcdif2_pre_sels[] = { "pll2_bus", "pll3_pfd3_454m", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd3_594m", "pll3_pfd1_540m", };
static const char *lcdif2_sels[] = { "lcdif2_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
static const char *display_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll3_usb_otg", "pll3_pfd1_540m", };
static const char *csi_sels[] = { "osc", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
static const char *cko1_sels[] = {
"dummy", "dummy", "dummy", "dummy",
"vadc", "ocram", "qspi2", "m4", "enet_ahb", "lcdif2_pix",
"lcdif1_pix", "ahb", "ipg", "perclk", "ckil", "pll4_audio_div",
};
static const char *cko2_sels[] = {
"dummy", "mmdc_p0_fast", "usdhc4", "usdhc1", "dummy", "wrck",
"ecspi_root", "dummy", "usdhc3", "pcie", "arm", "csi_core",
"display_axi", "dummy", "osc", "dummy", "dummy",
"usdhc2", "ssi1", "ssi2", "ssi3", "gpu_axi_podf", "dummy",
"can_podf", "lvds1_out", "qspi1", "esai_extal", "eim_slow",
"uart_serial", "spdif", "audio", "dummy",
};
static const char *cko_sels[] = { "cko1", "cko2", };
static const char *lvds_sels[] = {
"arm", "pll1_sys", "dummy", "dummy", "dummy", "dummy", "dummy", "pll5_video_div",
"dummy", "dummy", "pcie_ref_125m", "dummy", "usbphy1", "usbphy2",
};
static const char *pll_bypass_src_sels[] = { "osc", "lvds1_in", "lvds2_in", "dummy", };
static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
{ .val = 2, .div = 5, },
{ .val = 3, .div = 4, },
{ }
};
static const struct clk_div_table post_div_table[] = {
{ .val = 2, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 0, .div = 4, },
{ }
};
static const struct clk_div_table video_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 1, },
{ .val = 3, .div = 4, },
{ }
};
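/*
 * Each table entry above maps the raw register field value (.val) to the
 * divider ratio (.div) applied by clk_hw_register_divider_table(); e.g.
 * val 0 in clk_enet_ref_table selects divide-by-20. Note the PLL
 * post-divider encoding is reversed (val 2 -> /1, val 0 -> /4).
 */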
static u32 share_count_asrc;
static u32 share_count_audio;
static u32 share_count_esai;
static u32 share_count_ssi1;
static u32 share_count_ssi2;
static u32 share_count_ssi3;
static u32 share_count_sai1;
static u32 share_count_sai2;
static void __init imx6sx_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
bool lcdif1_assigned_clk;
clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
IMX6SX_CLK_CLK_END), GFP_KERNEL);
if (WARN_ON(!clk_hw_data))
return;
clk_hw_data->num = IMX6SX_CLK_CLK_END;
hws = clk_hw_data->hws;
hws[IMX6SX_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
hws[IMX6SX_CLK_CKIL] = imx_get_clk_hw_by_name(ccm_node, "ckil");
hws[IMX6SX_CLK_OSC] = imx_get_clk_hw_by_name(ccm_node, "osc");
/* ipp_di clock is external input */
hws[IMX6SX_CLK_IPP_DI0] = imx_get_clk_hw_by_name(ccm_node, "ipp_di0");
hws[IMX6SX_CLK_IPP_DI1] = imx_get_clk_hw_by_name(ccm_node, "ipp_di1");
/* Clock source from external clock via CLK1/2 PAD */
hws[IMX6SX_CLK_ANACLK1] = imx_get_clk_hw_by_name(ccm_node, "anaclk1");
hws[IMX6SX_CLK_ANACLK2] = imx_get_clk_hw_by_name(ccm_node, "anaclk2");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
of_node_put(np);
hws[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_hw_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SX_PLL3_BYPASS_SRC] = imx_clk_hw_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SX_PLL4_BYPASS_SRC] = imx_clk_hw_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SX_PLL5_BYPASS_SRC] = imx_clk_hw_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SX_PLL6_BYPASS_SRC] = imx_clk_hw_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
hws[IMX6SX_PLL7_BYPASS_SRC] = imx_clk_hw_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
/* type name parent_name base div_mask */
hws[IMX6SX_CLK_PLL1] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f);
hws[IMX6SX_CLK_PLL2] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1);
hws[IMX6SX_CLK_PLL3] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3);
hws[IMX6SX_CLK_PLL4] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f);
hws[IMX6SX_CLK_PLL5] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f);
hws[IMX6SX_CLK_PLL6] = imx_clk_hw_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3);
hws[IMX6SX_CLK_PLL7] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3);
hws[IMX6SX_PLL1_BYPASS] = imx_clk_hw_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SX_PLL2_BYPASS] = imx_clk_hw_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SX_PLL3_BYPASS] = imx_clk_hw_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SX_PLL4_BYPASS] = imx_clk_hw_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SX_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SX_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6SX_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
/* Do not bypass PLLs initially */
clk_set_parent(hws[IMX6SX_PLL1_BYPASS]->clk, hws[IMX6SX_CLK_PLL1]->clk);
clk_set_parent(hws[IMX6SX_PLL2_BYPASS]->clk, hws[IMX6SX_CLK_PLL2]->clk);
clk_set_parent(hws[IMX6SX_PLL3_BYPASS]->clk, hws[IMX6SX_CLK_PLL3]->clk);
clk_set_parent(hws[IMX6SX_PLL4_BYPASS]->clk, hws[IMX6SX_CLK_PLL4]->clk);
clk_set_parent(hws[IMX6SX_PLL5_BYPASS]->clk, hws[IMX6SX_CLK_PLL5]->clk);
clk_set_parent(hws[IMX6SX_PLL6_BYPASS]->clk, hws[IMX6SX_CLK_PLL6]->clk);
clk_set_parent(hws[IMX6SX_PLL7_BYPASS]->clk, hws[IMX6SX_CLK_PLL7]->clk);
hws[IMX6SX_CLK_PLL1_SYS] = imx_clk_hw_gate("pll1_sys", "pll1_bypass", base + 0x00, 13);
hws[IMX6SX_CLK_PLL2_BUS] = imx_clk_hw_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
hws[IMX6SX_CLK_PLL3_USB_OTG] = imx_clk_hw_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
hws[IMX6SX_CLK_PLL4_AUDIO] = imx_clk_hw_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
hws[IMX6SX_CLK_PLL5_VIDEO] = imx_clk_hw_gate("pll5_video", "pll5_bypass", base + 0xa0, 13);
hws[IMX6SX_CLK_PLL6_ENET] = imx_clk_hw_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13);
hws[IMX6SX_CLK_PLL7_USB_HOST] = imx_clk_hw_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13);
/*
 * Bit 20 is reserved and read-only; gating on it is intentional so that:
 * - usbphy clk_enable/disable are effectively no-ops
 * - the refcount is still kept on usbphy clk_enable/disable, so the clk
 *   framework can enable/disable usbphy's parent when needed
*/
hws[IMX6SX_CLK_USBPHY1] = imx_clk_hw_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20);
hws[IMX6SX_CLK_USBPHY2] = imx_clk_hw_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
/*
 * usbphy*_gate needs to be on after the system boots up, and software
 * never needs to control it again.
*/
hws[IMX6SX_CLK_USBPHY1_GATE] = imx_clk_hw_gate("usbphy1_gate", "dummy", base + 0x10, 6);
hws[IMX6SX_CLK_USBPHY2_GATE] = imx_clk_hw_gate("usbphy2_gate", "dummy", base + 0x20, 6);
/* FIXME: 100MHz is used as the pcie ref clock for all imx6 variants except imx6q */
hws[IMX6SX_CLK_PCIE_REF] = imx_clk_hw_fixed_factor("pcie_ref", "pll6_enet", 1, 5);
hws[IMX6SX_CLK_PCIE_REF_125M] = imx_clk_hw_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19);
hws[IMX6SX_CLK_LVDS1_OUT] = imx_clk_hw_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x160, 10, BIT(12));
hws[IMX6SX_CLK_LVDS2_OUT] = imx_clk_hw_gate_exclusive("lvds2_out", "lvds2_sel", base + 0x160, 11, BIT(13));
hws[IMX6SX_CLK_LVDS1_IN] = imx_clk_hw_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10));
hws[IMX6SX_CLK_LVDS2_IN] = imx_clk_hw_gate_exclusive("lvds2_in", "anaclk2", base + 0x160, 13, BIT(11));
hws[IMX6SX_CLK_ENET_REF] = clk_hw_register_divider_table(NULL, "enet_ref", "pll6_enet", 0,
base + 0xe0, 0, 2, 0, clk_enet_ref_table,
&imx_ccm_lock);
hws[IMX6SX_CLK_ENET2_REF] = clk_hw_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0,
base + 0xe0, 2, 2, 0, clk_enet_ref_table,
&imx_ccm_lock);
hws[IMX6SX_CLK_ENET2_REF_125M] = imx_clk_hw_gate("enet2_ref_125m", "enet2_ref", base + 0xe0, 20);
hws[IMX6SX_CLK_ENET_PTP_REF] = imx_clk_hw_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20);
hws[IMX6SX_CLK_ENET_PTP] = imx_clk_hw_gate("enet_ptp_25m", "enet_ptp_ref", base + 0xe0, 21);
/* name parent_name reg idx */
hws[IMX6SX_CLK_PLL2_PFD0] = imx_clk_hw_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
hws[IMX6SX_CLK_PLL2_PFD1] = imx_clk_hw_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
hws[IMX6SX_CLK_PLL2_PFD2] = imx_clk_hw_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
hws[IMX6SX_CLK_PLL2_PFD3] = imx_clk_hw_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3);
hws[IMX6SX_CLK_PLL3_PFD0] = imx_clk_hw_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
hws[IMX6SX_CLK_PLL3_PFD1] = imx_clk_hw_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
hws[IMX6SX_CLK_PLL3_PFD2] = imx_clk_hw_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
hws[IMX6SX_CLK_PLL3_PFD3] = imx_clk_hw_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
/* name parent_name mult div */
hws[IMX6SX_CLK_PLL2_198M] = imx_clk_hw_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
hws[IMX6SX_CLK_PLL3_120M] = imx_clk_hw_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
hws[IMX6SX_CLK_PLL3_80M] = imx_clk_hw_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
hws[IMX6SX_CLK_PLL3_60M] = imx_clk_hw_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
hws[IMX6SX_CLK_TWD] = imx_clk_hw_fixed_factor("twd", "arm", 1, 2);
hws[IMX6SX_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc", 1, 8);
hws[IMX6SX_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio",
CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
hws[IMX6SX_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div",
CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
hws[IMX6SX_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video",
CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
hws[IMX6SX_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div",
CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
/* name reg shift width parent_names num_parents */
hws[IMX6SX_CLK_LVDS1_SEL] = imx_clk_hw_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
hws[IMX6SX_CLK_LVDS2_SEL] = imx_clk_hw_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
np = ccm_node;
base = of_iomap(np, 0);
WARN_ON(!base);
/* name reg shift width parent_names num_parents */
hws[IMX6SX_CLK_STEP] = imx_clk_hw_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels));
hws[IMX6SX_CLK_PLL1_SW] = imx_clk_hw_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels));
hws[IMX6SX_CLK_OCRAM_SEL] = imx_clk_hw_mux("ocram_sel", base + 0x14, 6, 2, ocram_sels, ARRAY_SIZE(ocram_sels));
hws[IMX6SX_CLK_PERIPH_PRE] = imx_clk_hw_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
hws[IMX6SX_CLK_PERIPH2_PRE] = imx_clk_hw_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
hws[IMX6SX_CLK_PERIPH_CLK2_SEL] = imx_clk_hw_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
hws[IMX6SX_CLK_PERIPH2_CLK2_SEL] = imx_clk_hw_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
hws[IMX6SX_CLK_PCIE_AXI_SEL] = imx_clk_hw_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
hws[IMX6SX_CLK_GPU_AXI_SEL] = imx_clk_hw_mux("gpu_axi_sel", base + 0x18, 8, 2, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
hws[IMX6SX_CLK_GPU_CORE_SEL] = imx_clk_hw_mux("gpu_core_sel", base + 0x18, 4, 2, gpu_core_sels, ARRAY_SIZE(gpu_core_sels));
hws[IMX6SX_CLK_EIM_SLOW_SEL] = imx_clk_hw_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
hws[IMX6SX_CLK_USDHC1_SEL] = imx_clk_hw_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
hws[IMX6SX_CLK_USDHC2_SEL] = imx_clk_hw_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
hws[IMX6SX_CLK_USDHC3_SEL] = imx_clk_hw_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
hws[IMX6SX_CLK_USDHC4_SEL] = imx_clk_hw_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels));
hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels));
hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels));
hws[IMX6SX_CLK_ENET_SEL] = imx_clk_hw_mux("enet_sel", base + 0x34, 9, 3, enet_sels, ARRAY_SIZE(enet_sels));
hws[IMX6SX_CLK_M4_PRE_SEL] = imx_clk_hw_mux("m4_pre_sel", base + 0x34, 6, 3, m4_pre_sels, ARRAY_SIZE(m4_pre_sels));
hws[IMX6SX_CLK_M4_SEL] = imx_clk_hw_mux("m4_sel", base + 0x34, 0, 3, m4_sels, ARRAY_SIZE(m4_sels));
hws[IMX6SX_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
hws[IMX6SX_CLK_LCDIF2_PRE_SEL] = imx_clk_hw_mux("lcdif2_pre_sel", base + 0x38, 6, 3, lcdif2_pre_sels, ARRAY_SIZE(lcdif2_pre_sels));
hws[IMX6SX_CLK_LCDIF2_SEL] = imx_clk_hw_mux("lcdif2_sel", base + 0x38, 0, 3, lcdif2_sels, ARRAY_SIZE(lcdif2_sels));
hws[IMX6SX_CLK_DISPLAY_SEL] = imx_clk_hw_mux("display_sel", base + 0x3c, 14, 2, display_sels, ARRAY_SIZE(display_sels));
hws[IMX6SX_CLK_CSI_SEL] = imx_clk_hw_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels));
hws[IMX6SX_CLK_CKO1_SEL] = imx_clk_hw_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
hws[IMX6SX_CLK_CKO2_SEL] = imx_clk_hw_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels));
hws[IMX6SX_CLK_CKO] = imx_clk_hw_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels));
hws[IMX6SX_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux("ldb_di1_div_sel", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
hws[IMX6SX_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
hws[IMX6SX_CLK_LDB_DI1_SEL] = imx_clk_hw_mux("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels));
hws[IMX6SX_CLK_LDB_DI0_SEL] = imx_clk_hw_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
hws[IMX6SX_CLK_LCDIF1_PRE_SEL] = imx_clk_hw_mux_flags("lcdif1_pre_sel", base + 0x38, 15, 3, lcdif1_pre_sels, ARRAY_SIZE(lcdif1_pre_sels), CLK_SET_RATE_PARENT);
hws[IMX6SX_CLK_LCDIF1_SEL] = imx_clk_hw_mux_flags("lcdif1_sel", base + 0x38, 9, 3, lcdif1_sels, ARRAY_SIZE(lcdif1_sels), CLK_SET_RATE_PARENT);
/* name parent_name reg shift width */
hws[IMX6SX_CLK_PERIPH_CLK2] = imx_clk_hw_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
hws[IMX6SX_CLK_PERIPH2_CLK2] = imx_clk_hw_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
hws[IMX6SX_CLK_IPG] = imx_clk_hw_divider("ipg", "ahb", base + 0x14, 8, 2);
hws[IMX6SX_CLK_GPU_CORE_PODF] = imx_clk_hw_divider("gpu_core_podf", "gpu_core_sel", base + 0x18, 29, 3);
hws[IMX6SX_CLK_GPU_AXI_PODF] = imx_clk_hw_divider("gpu_axi_podf", "gpu_axi_sel", base + 0x18, 26, 3);
hws[IMX6SX_CLK_LCDIF1_PODF] = imx_clk_hw_divider("lcdif1_podf", "lcdif1_pred", base + 0x18, 23, 3);
hws[IMX6SX_CLK_QSPI1_PODF] = imx_clk_hw_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
hws[IMX6SX_CLK_EIM_SLOW_PODF] = imx_clk_hw_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
hws[IMX6SX_CLK_LCDIF2_PODF] = imx_clk_hw_divider("lcdif2_podf", "lcdif2_pred", base + 0x1c, 20, 3);
hws[IMX6SX_CLK_PERCLK] = imx_clk_hw_divider_flags("perclk", "perclk_sel", base + 0x1c, 0, 6, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_VID_PODF] = imx_clk_hw_divider("vid_podf", "vid_sel", base + 0x20, 24, 2);
hws[IMX6SX_CLK_CAN_PODF] = imx_clk_hw_divider("can_podf", "can_sel", base + 0x20, 2, 6);
hws[IMX6SX_CLK_USDHC4_PODF] = imx_clk_hw_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
hws[IMX6SX_CLK_USDHC3_PODF] = imx_clk_hw_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
hws[IMX6SX_CLK_USDHC2_PODF] = imx_clk_hw_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
hws[IMX6SX_CLK_USDHC1_PODF] = imx_clk_hw_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
hws[IMX6SX_CLK_UART_PODF] = imx_clk_hw_divider("uart_podf", "uart_sel", base + 0x24, 0, 6);
hws[IMX6SX_CLK_ESAI_PRED] = imx_clk_hw_divider("esai_pred", "esai_sel", base + 0x28, 9, 3);
hws[IMX6SX_CLK_ESAI_PODF] = imx_clk_hw_divider("esai_podf", "esai_pred", base + 0x28, 25, 3);
hws[IMX6SX_CLK_SSI3_PRED] = imx_clk_hw_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
hws[IMX6SX_CLK_SSI3_PODF] = imx_clk_hw_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
hws[IMX6SX_CLK_SSI1_PRED] = imx_clk_hw_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3);
hws[IMX6SX_CLK_SSI1_PODF] = imx_clk_hw_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6);
hws[IMX6SX_CLK_QSPI2_PRED] = imx_clk_hw_divider("qspi2_pred", "qspi2_sel", base + 0x2c, 18, 3);
hws[IMX6SX_CLK_QSPI2_PODF] = imx_clk_hw_divider("qspi2_podf", "qspi2_pred", base + 0x2c, 21, 6);
hws[IMX6SX_CLK_SSI2_PRED] = imx_clk_hw_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3);
hws[IMX6SX_CLK_SSI2_PODF] = imx_clk_hw_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
hws[IMX6SX_CLK_SPDIF_PRED] = imx_clk_hw_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
hws[IMX6SX_CLK_SPDIF_PODF] = imx_clk_hw_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
hws[IMX6SX_CLK_AUDIO_PRED] = imx_clk_hw_divider("audio_pred", "audio_sel", base + 0x30, 12, 3);
hws[IMX6SX_CLK_AUDIO_PODF] = imx_clk_hw_divider("audio_podf", "audio_pred", base + 0x30, 9, 3);
hws[IMX6SX_CLK_ENET_PODF] = imx_clk_hw_divider("enet_podf", "enet_pre_sel", base + 0x34, 12, 3);
hws[IMX6SX_CLK_M4_PODF] = imx_clk_hw_divider("m4_podf", "m4_sel", base + 0x34, 3, 3);
hws[IMX6SX_CLK_ECSPI_PODF] = imx_clk_hw_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6);
hws[IMX6SX_CLK_LCDIF1_PRED] = imx_clk_hw_divider("lcdif1_pred", "lcdif1_pre_sel", base + 0x38, 12, 3);
hws[IMX6SX_CLK_LCDIF2_PRED] = imx_clk_hw_divider("lcdif2_pred", "lcdif2_pre_sel", base + 0x38, 3, 3);
hws[IMX6SX_CLK_DISPLAY_PODF] = imx_clk_hw_divider("display_podf", "display_sel", base + 0x3c, 16, 3);
hws[IMX6SX_CLK_CSI_PODF] = imx_clk_hw_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
hws[IMX6SX_CLK_CKO1_PODF] = imx_clk_hw_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3);
hws[IMX6SX_CLK_CKO2_PODF] = imx_clk_hw_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3);
hws[IMX6SX_CLK_LDB_DI0_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
hws[IMX6SX_CLK_LDB_DI0_DIV_7] = imx_clk_hw_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
hws[IMX6SX_CLK_LDB_DI1_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
hws[IMX6SX_CLK_LDB_DI1_DIV_7] = imx_clk_hw_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7);
/* name reg shift width busy: reg, shift parent_names num_parents */
hws[IMX6SX_CLK_PERIPH] = imx_clk_hw_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
hws[IMX6SX_CLK_PERIPH2] = imx_clk_hw_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
/* name parent_name reg shift width busy: reg, shift */
hws[IMX6SX_CLK_OCRAM_PODF] = imx_clk_hw_busy_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3, base + 0x48, 0);
hws[IMX6SX_CLK_AHB] = imx_clk_hw_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
hws[IMX6SX_CLK_MMDC_PODF] = imx_clk_hw_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
hws[IMX6SX_CLK_ARM] = imx_clk_hw_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
/* name parent_name reg shift */
/* CCGR0 */
hws[IMX6SX_CLK_AIPS_TZ1] = imx_clk_hw_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_AIPS_TZ2] = imx_clk_hw_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_APBH_DMA] = imx_clk_hw_gate2("apbh_dma", "usdhc3", base + 0x68, 4);
hws[IMX6SX_CLK_ASRC_MEM] = imx_clk_hw_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
hws[IMX6SX_CLK_ASRC_IPG] = imx_clk_hw_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
hws[IMX6SX_CLK_CAAM_MEM] = imx_clk_hw_gate2("caam_mem", "ahb", base + 0x68, 8);
hws[IMX6SX_CLK_CAAM_ACLK] = imx_clk_hw_gate2("caam_aclk", "ahb", base + 0x68, 10);
hws[IMX6SX_CLK_CAAM_IPG] = imx_clk_hw_gate2("caam_ipg", "ipg", base + 0x68, 12);
hws[IMX6SX_CLK_CAN1_IPG] = imx_clk_hw_gate2("can1_ipg", "ipg", base + 0x68, 14);
hws[IMX6SX_CLK_CAN1_SERIAL] = imx_clk_hw_gate2("can1_serial", "can_podf", base + 0x68, 16);
hws[IMX6SX_CLK_CAN2_IPG] = imx_clk_hw_gate2("can2_ipg", "ipg", base + 0x68, 18);
hws[IMX6SX_CLK_CAN2_SERIAL] = imx_clk_hw_gate2("can2_serial", "can_podf", base + 0x68, 20);
hws[IMX6SX_CLK_DCIC1] = imx_clk_hw_gate2("dcic1", "display_podf", base + 0x68, 24);
hws[IMX6SX_CLK_DCIC2] = imx_clk_hw_gate2("dcic2", "display_podf", base + 0x68, 26);
hws[IMX6SX_CLK_AIPS_TZ3] = imx_clk_hw_gate2_flags("aips_tz3", "ahb", base + 0x68, 30, CLK_IS_CRITICAL);
/* CCGR1 */
hws[IMX6SX_CLK_ECSPI1] = imx_clk_hw_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
hws[IMX6SX_CLK_ECSPI2] = imx_clk_hw_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2);
hws[IMX6SX_CLK_ECSPI3] = imx_clk_hw_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4);
hws[IMX6SX_CLK_ECSPI4] = imx_clk_hw_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6);
hws[IMX6SX_CLK_ECSPI5] = imx_clk_hw_gate2("ecspi5", "ecspi_podf", base + 0x6c, 8);
hws[IMX6SX_CLK_EPIT1] = imx_clk_hw_gate2("epit1", "perclk", base + 0x6c, 12);
hws[IMX6SX_CLK_EPIT2] = imx_clk_hw_gate2("epit2", "perclk", base + 0x6c, 14);
hws[IMX6SX_CLK_ESAI_EXTAL] = imx_clk_hw_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai);
hws[IMX6SX_CLK_ESAI_IPG] = imx_clk_hw_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai);
hws[IMX6SX_CLK_ESAI_MEM] = imx_clk_hw_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai);
hws[IMX6SX_CLK_WAKEUP] = imx_clk_hw_gate2_flags("wakeup", "ipg", base + 0x6c, 18, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_GPT_BUS] = imx_clk_hw_gate2("gpt_bus", "perclk", base + 0x6c, 20);
hws[IMX6SX_CLK_GPT_SERIAL] = imx_clk_hw_gate2("gpt_serial", "perclk", base + 0x6c, 22);
hws[IMX6SX_CLK_GPU] = imx_clk_hw_gate2("gpu", "gpu_core_podf", base + 0x6c, 26);
hws[IMX6SX_CLK_OCRAM_S] = imx_clk_hw_gate2("ocram_s", "ahb", base + 0x6c, 28);
hws[IMX6SX_CLK_CANFD] = imx_clk_hw_gate2("canfd", "can_podf", base + 0x6c, 30);
/* CCGR2 */
hws[IMX6SX_CLK_CSI] = imx_clk_hw_gate2("csi", "csi_podf", base + 0x70, 2);
hws[IMX6SX_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "perclk", base + 0x70, 6);
hws[IMX6SX_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "perclk", base + 0x70, 8);
hws[IMX6SX_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "perclk", base + 0x70, 10);
hws[IMX6SX_CLK_OCOTP] = imx_clk_hw_gate2("ocotp", "ipg", base + 0x70, 12);
hws[IMX6SX_CLK_IOMUXC] = imx_clk_hw_gate2("iomuxc", "lcdif1_podf", base + 0x70, 14);
hws[IMX6SX_CLK_IPMUX1] = imx_clk_hw_gate2_flags("ipmux1", "ahb", base + 0x70, 16, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_IPMUX2] = imx_clk_hw_gate2_flags("ipmux2", "ahb", base + 0x70, 18, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_IPMUX3] = imx_clk_hw_gate2_flags("ipmux3", "ahb", base + 0x70, 20, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_TZASC1] = imx_clk_hw_gate2_flags("tzasc1", "mmdc_podf", base + 0x70, 22, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif_apb", "display_podf", base + 0x70, 28);
hws[IMX6SX_CLK_PXP_AXI] = imx_clk_hw_gate2("pxp_axi", "display_podf", base + 0x70, 30);
/* CCGR3 */
hws[IMX6SX_CLK_M4] = imx_clk_hw_gate2("m4", "m4_podf", base + 0x74, 2);
hws[IMX6SX_CLK_ENET] = imx_clk_hw_gate2("enet", "ipg", base + 0x74, 4);
hws[IMX6SX_CLK_ENET_AHB] = imx_clk_hw_gate2("enet_ahb", "enet_sel", base + 0x74, 4);
hws[IMX6SX_CLK_DISPLAY_AXI] = imx_clk_hw_gate2("display_axi", "display_podf", base + 0x74, 6);
hws[IMX6SX_CLK_LCDIF2_PIX] = imx_clk_hw_gate2("lcdif2_pix", "lcdif2_sel", base + 0x74, 8);
hws[IMX6SX_CLK_LCDIF1_PIX] = imx_clk_hw_gate2("lcdif1_pix", "lcdif1_sel", base + 0x74, 10);
hws[IMX6SX_CLK_LDB_DI0] = imx_clk_hw_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12);
hws[IMX6SX_CLK_QSPI1] = imx_clk_hw_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
hws[IMX6SX_CLK_MLB] = imx_clk_hw_gate2("mlb", "ahb", base + 0x74, 18);
hws[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_hw_gate2_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_hw_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_MMDC_P1_IPG] = imx_clk_hw_gate2_flags("mmdc_p1_ipg", "ipg", base + 0x74, 26, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_OCRAM] = imx_clk_hw_gate2_flags("ocram", "ocram_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
hws[IMX6SX_CLK_PCIE_AXI] = imx_clk_hw_gate2("pcie_axi", "display_podf", base + 0x78, 0);
hws[IMX6SX_CLK_QSPI2] = imx_clk_hw_gate2("qspi2", "qspi2_podf", base + 0x78, 10);
hws[IMX6SX_CLK_PER1_BCH] = imx_clk_hw_gate2("per1_bch", "usdhc3", base + 0x78, 12);
hws[IMX6SX_CLK_PER2_MAIN] = imx_clk_hw_gate2_flags("per2_main", "ahb", base + 0x78, 14, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_PWM1] = imx_clk_hw_gate2("pwm1", "perclk", base + 0x78, 16);
hws[IMX6SX_CLK_PWM2] = imx_clk_hw_gate2("pwm2", "perclk", base + 0x78, 18);
hws[IMX6SX_CLK_PWM3] = imx_clk_hw_gate2("pwm3", "perclk", base + 0x78, 20);
hws[IMX6SX_CLK_PWM4] = imx_clk_hw_gate2("pwm4", "perclk", base + 0x78, 22);
hws[IMX6SX_CLK_GPMI_BCH_APB] = imx_clk_hw_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24);
hws[IMX6SX_CLK_GPMI_BCH] = imx_clk_hw_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
hws[IMX6SX_CLK_GPMI_IO] = imx_clk_hw_gate2("gpmi_io", "qspi2_podf", base + 0x78, 28);
hws[IMX6SX_CLK_GPMI_APB] = imx_clk_hw_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
/* CCGR5 */
hws[IMX6SX_CLK_ROM] = imx_clk_hw_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
hws[IMX6SX_CLK_SDMA] = imx_clk_hw_gate2("sdma", "ahb", base + 0x7c, 6);
hws[IMX6SX_CLK_SPBA] = imx_clk_hw_gate2("spba", "ipg", base + 0x7c, 12);
hws[IMX6SX_CLK_AUDIO] = imx_clk_hw_gate2_shared("audio", "audio_podf", base + 0x7c, 14, &share_count_audio);
hws[IMX6SX_CLK_SPDIF] = imx_clk_hw_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio);
hws[IMX6SX_CLK_SPDIF_GCLK] = imx_clk_hw_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio);
hws[IMX6SX_CLK_SSI1_IPG] = imx_clk_hw_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1);
hws[IMX6SX_CLK_SSI2_IPG] = imx_clk_hw_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2);
hws[IMX6SX_CLK_SSI3_IPG] = imx_clk_hw_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3);
hws[IMX6SX_CLK_SSI1] = imx_clk_hw_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1);
hws[IMX6SX_CLK_SSI2] = imx_clk_hw_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2);
hws[IMX6SX_CLK_SSI3] = imx_clk_hw_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3);
hws[IMX6SX_CLK_UART_IPG] = imx_clk_hw_gate2("uart_ipg", "ipg", base + 0x7c, 24);
hws[IMX6SX_CLK_UART_SERIAL] = imx_clk_hw_gate2("uart_serial", "uart_podf", base + 0x7c, 26);
hws[IMX6SX_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared("sai1_ipg", "ipg", base + 0x7c, 28, &share_count_sai1);
hws[IMX6SX_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared("sai2_ipg", "ipg", base + 0x7c, 30, &share_count_sai2);
hws[IMX6SX_CLK_SAI1] = imx_clk_hw_gate2_shared("sai1", "ssi1_podf", base + 0x7c, 28, &share_count_sai1);
hws[IMX6SX_CLK_SAI2] = imx_clk_hw_gate2_shared("sai2", "ssi2_podf", base + 0x7c, 30, &share_count_sai2);
/* CCGR6 */
hws[IMX6SX_CLK_USBOH3] = imx_clk_hw_gate2("usboh3", "ipg", base + 0x80, 0);
hws[IMX6SX_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
hws[IMX6SX_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
hws[IMX6SX_CLK_USDHC3] = imx_clk_hw_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
hws[IMX6SX_CLK_USDHC4] = imx_clk_hw_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
hws[IMX6SX_CLK_EIM_SLOW] = imx_clk_hw_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10);
hws[IMX6SX_CLK_PWM8] = imx_clk_hw_gate2("pwm8", "perclk", base + 0x80, 16);
hws[IMX6SX_CLK_VADC] = imx_clk_hw_gate2("vadc", "vid_podf", base + 0x80, 20);
hws[IMX6SX_CLK_GIS] = imx_clk_hw_gate2("gis", "display_podf", base + 0x80, 22);
hws[IMX6SX_CLK_I2C4] = imx_clk_hw_gate2("i2c4", "perclk", base + 0x80, 24);
hws[IMX6SX_CLK_PWM5] = imx_clk_hw_gate2("pwm5", "perclk", base + 0x80, 26);
hws[IMX6SX_CLK_PWM6] = imx_clk_hw_gate2("pwm6", "perclk", base + 0x80, 28);
hws[IMX6SX_CLK_PWM7] = imx_clk_hw_gate2("pwm7", "perclk", base + 0x80, 30);
hws[IMX6SX_CLK_CKO1] = imx_clk_hw_gate("cko1", "cko1_podf", base + 0x60, 7);
hws[IMX6SX_CLK_CKO2] = imx_clk_hw_gate("cko2", "cko2_podf", base + 0x60, 24);
/* Mask the MMDC handshake */
imx_mmdc_mask_handshake(base, 0);
imx_check_clk_hws(hws, IMX6SX_CLK_CLK_END);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
clk_prepare_enable(hws[IMX6SX_CLK_USBPHY1_GATE]->clk);
clk_prepare_enable(hws[IMX6SX_CLK_USBPHY2_GATE]->clk);
}
/* Set the default 132MHz rate for the EIM module */
clk_set_parent(hws[IMX6SX_CLK_EIM_SLOW_SEL]->clk, hws[IMX6SX_CLK_PLL2_PFD2]->clk);
clk_set_rate(hws[IMX6SX_CLK_EIM_SLOW]->clk, 132000000);
np = of_find_node_by_path("/soc/bus@2200000/spba-bus@2240000/lcdif@2220000");
lcdif1_assigned_clk = of_find_property(np, "assigned-clock-parents", NULL);
/* Set parent clock for LCDIF1 pixel clock if not done via devicetree */
if (!lcdif1_assigned_clk) {
clk_set_parent(hws[IMX6SX_CLK_LCDIF1_PRE_SEL]->clk,
hws[IMX6SX_CLK_PLL5_VIDEO_DIV]->clk);
clk_set_parent(hws[IMX6SX_CLK_LCDIF1_SEL]->clk,
hws[IMX6SX_CLK_LCDIF1_PODF]->clk);
}
/* Set the parent clk of the PCIe lvds1 clock to pcie_ref_125m */
if (clk_set_parent(hws[IMX6SX_CLK_LVDS1_SEL]->clk, hws[IMX6SX_CLK_PCIE_REF_125M]->clk))
pr_err("Failed to set pcie bus parent clk.\n");
/*
 * Initialize the enet system AHB clock and set it to 200MHz:
 * pll2_pfd2_396m -> ENET_PODF -> ENET_AHB
*/
clk_set_parent(hws[IMX6SX_CLK_ENET_PRE_SEL]->clk, hws[IMX6SX_CLK_PLL2_PFD2]->clk);
clk_set_parent(hws[IMX6SX_CLK_ENET_SEL]->clk, hws[IMX6SX_CLK_ENET_PODF]->clk);
clk_set_rate(hws[IMX6SX_CLK_ENET_PODF]->clk, 200000000);
clk_set_rate(hws[IMX6SX_CLK_ENET_REF]->clk, 125000000);
clk_set_rate(hws[IMX6SX_CLK_ENET2_REF]->clk, 125000000);
/* Audio clocks */
clk_set_rate(hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk, 393216000);
clk_set_parent(hws[IMX6SX_CLK_SPDIF_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk);
clk_set_rate(hws[IMX6SX_CLK_SPDIF_PODF]->clk, 98304000);
clk_set_parent(hws[IMX6SX_CLK_AUDIO_SEL]->clk, hws[IMX6SX_CLK_PLL3_USB_OTG]->clk);
clk_set_rate(hws[IMX6SX_CLK_AUDIO_PODF]->clk, 24000000);
clk_set_parent(hws[IMX6SX_CLK_SSI1_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk);
clk_set_parent(hws[IMX6SX_CLK_SSI2_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk);
clk_set_parent(hws[IMX6SX_CLK_SSI3_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk);
clk_set_rate(hws[IMX6SX_CLK_SSI1_PODF]->clk, 24576000);
clk_set_rate(hws[IMX6SX_CLK_SSI2_PODF]->clk, 24576000);
clk_set_rate(hws[IMX6SX_CLK_SSI3_PODF]->clk, 24576000);
clk_set_parent(hws[IMX6SX_CLK_ESAI_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk);
clk_set_rate(hws[IMX6SX_CLK_ESAI_PODF]->clk, 24576000);
/* Set parent clock for vadc */
clk_set_parent(hws[IMX6SX_CLK_VID_SEL]->clk, hws[IMX6SX_CLK_PLL3_USB_OTG]->clk);
/* The default parent of the can_sel clock is invalid, so set it manually here */
clk_set_parent(hws[IMX6SX_CLK_CAN_SEL]->clk, hws[IMX6SX_CLK_PLL3_60M]->clk);
/* Update the gpu clocks from the default 528MHz to 720MHz */
clk_set_parent(hws[IMX6SX_CLK_GPU_CORE_SEL]->clk, hws[IMX6SX_CLK_PLL3_PFD0]->clk);
clk_set_parent(hws[IMX6SX_CLK_GPU_AXI_SEL]->clk, hws[IMX6SX_CLK_PLL3_PFD0]->clk);
clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
imx_register_uart_clocks();
}
CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
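/*
 * Consumer-side sketch (illustrative, not part of this file): with the
 * provider registered through of_clk_add_hw_provider(np,
 * of_clk_hw_onecell_get, clk_hw_data), a peripheral node references a
 * clock by its dt-bindings index, e.g. (the "ipg"/"per" names are an
 * assumption for illustration):
 *
 *	clocks = <&clks IMX6SX_CLK_UART_IPG>, <&clks IMX6SX_CLK_UART_SERIAL>;
 *	clock-names = "ipg", "per";
 *
 * and the peripheral driver then looks a clock up by name and enables it:
 *
 *	struct clk *clk = devm_clk_get(dev, "per");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	clk_prepare_enable(clk);
 */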
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2021 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*/
#if !defined(__EFCT_XPORT_H__)
#define __EFCT_XPORT_H__
enum efct_xport_ctrl {
EFCT_XPORT_PORT_ONLINE = 1,
EFCT_XPORT_PORT_OFFLINE,
EFCT_XPORT_SHUTDOWN,
EFCT_XPORT_POST_NODE_EVENT,
EFCT_XPORT_WWNN_SET,
EFCT_XPORT_WWPN_SET,
};
enum efct_xport_status {
EFCT_XPORT_PORT_STATUS,
EFCT_XPORT_CONFIG_PORT_STATUS,
EFCT_XPORT_LINK_SPEED,
EFCT_XPORT_IS_SUPPORTED_LINK_SPEED,
EFCT_XPORT_LINK_STATISTICS,
EFCT_XPORT_LINK_STAT_RESET,
EFCT_XPORT_IS_QUIESCED
};
struct efct_xport_link_stats {
bool rec;
bool gec;
bool w02of;
bool w03of;
bool w04of;
bool w05of;
bool w06of;
bool w07of;
bool w08of;
bool w09of;
bool w10of;
bool w11of;
bool w12of;
bool w13of;
bool w14of;
bool w15of;
bool w16of;
bool w17of;
bool w18of;
bool w19of;
bool w20of;
bool w21of;
bool clrc;
bool clof1;
u32 link_failure_error_count;
u32 loss_of_sync_error_count;
u32 loss_of_signal_error_count;
u32 primitive_sequence_error_count;
u32 invalid_transmission_word_error_count;
u32 crc_error_count;
u32 primitive_sequence_event_timeout_count;
u32 elastic_buffer_overrun_error_count;
u32 arbitration_fc_al_timeout_count;
u32 advertised_receive_buffer_to_buffer_credit;
u32 current_receive_buffer_to_buffer_credit;
u32 advertised_transmit_buffer_to_buffer_credit;
u32 current_transmit_buffer_to_buffer_credit;
u32 received_eofa_count;
u32 received_eofdti_count;
u32 received_eofni_count;
u32 received_soff_count;
u32 received_dropped_no_aer_count;
u32 received_dropped_no_available_rpi_resources_count;
u32 received_dropped_no_available_xri_resources_count;
};
struct efct_xport_host_stats {
bool cc;
u32 transmit_kbyte_count;
u32 receive_kbyte_count;
u32 transmit_frame_count;
u32 receive_frame_count;
u32 transmit_sequence_count;
u32 receive_sequence_count;
u32 total_exchanges_originator;
u32 total_exchanges_responder;
u32 receive_p_bsy_count;
u32 receive_f_bsy_count;
u32 dropped_frames_due_to_no_rq_buffer_count;
u32 empty_rq_timeout_count;
u32 dropped_frames_due_to_no_xri_count;
u32 empty_xri_pool_count;
};
struct efct_xport_host_statistics {
struct completion done;
struct efct_xport_link_stats link_stats;
struct efct_xport_host_stats host_stats;
};
union efct_xport_stats_u {
u32 value;
struct efct_xport_host_statistics stats;
};
struct efct_xport_fcp_stats {
u64 input_bytes;
u64 output_bytes;
u64 input_requests;
u64 output_requests;
u64 control_requests;
};
struct efct_xport {
struct efct *efct;
/* wwpn requested by user for primary nport */
u64 req_wwpn;
/* wwnn requested by user for primary nport */
u64 req_wwnn;
/* Nodes */
/* number of allocated nodes */
u32 nodes_count;
/* used to track how often IO pool is empty */
atomic_t io_alloc_failed_count;
/* array of pointers to nodes */
struct efc_node **nodes;
/* IO pool and counts */
/* pointer to IO pool */
struct efct_io_pool *io_pool;
/* lock for io_pending_list */
spinlock_t io_pending_lock;
/* list of IOs waiting for HW resources
* lock: xport->io_pending_lock
* link: efct_io_s->io_pending_link
*/
struct list_head io_pending_list;
/* count of total IOs allocated */
atomic_t io_total_alloc;
/* count of total IOs freed */
atomic_t io_total_free;
/* count of total IOs that were pended */
atomic_t io_total_pending;
/* count of active IOs */
atomic_t io_active_count;
/* count of pending IOs */
atomic_t io_pending_count;
/* non-zero if efct_scsi_check_pending is executing */
atomic_t io_pending_recursing;
/* Port */
/* requested link state */
u32 configured_link_state;
/* Timer for Statistics */
struct timer_list stats_timer;
union efct_xport_stats_u fc_xport_stats;
struct efct_xport_fcp_stats fcp_stats;
};
struct efct_rport_data {
struct efc_node *node;
};
struct efct_xport *
efct_xport_alloc(struct efct *efct);
int
efct_xport_attach(struct efct_xport *xport);
int
efct_xport_initialize(struct efct_xport *xport);
void
efct_xport_detach(struct efct_xport *xport);
int
efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...);
int
efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
union efct_xport_stats_u *result);
void
efct_xport_free(struct efct_xport *xport);
struct scsi_transport_template *efct_attach_fc_transport(void);
struct scsi_transport_template *efct_attach_vport_fc_transport(void);
void
efct_release_fc_transport(struct scsi_transport_template *transport_template);
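/*
 * Typical lifecycle, as a sketch inferred from the prototypes above
 * (error handling omitted):
 *
 *	xport = efct_xport_alloc(efct);
 *	efct_xport_attach(xport);
 *	efct_xport_initialize(xport);
 *	efct_xport_control(xport, EFCT_XPORT_PORT_ONLINE);
 *	...
 *	efct_xport_control(xport, EFCT_XPORT_PORT_OFFLINE);
 *	efct_xport_detach(xport);
 *	efct_xport_free(xport);
 */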
#endif /* __EFCT_XPORT_H__ */
|
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __SYS_SIGCONTEXT_X86_H
#define __SYS_SIGCONTEXT_X86_H
extern void get_regs_from_mc(struct uml_pt_regs *, mcontext_t *);
#ifdef __i386__
#define GET_FAULTINFO_FROM_MC(fi, mc) \
{ \
(fi).cr2 = (mc)->cr2; \
(fi).error_code = (mc)->gregs[REG_ERR]; \
(fi).trap_no = (mc)->gregs[REG_TRAPNO]; \
}
#else
#define GET_FAULTINFO_FROM_MC(fi, mc) \
{ \
(fi).cr2 = (mc)->gregs[REG_CR2]; \
(fi).error_code = (mc)->gregs[REG_ERR]; \
(fi).trap_no = (mc)->gregs[REG_TRAPNO]; \
}
#endif
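/*
 * Illustrative use (a sketch, not part of this header): extracting fault
 * information from the mcontext of an interrupted context, assuming a
 * struct faultinfo with the cr2/error_code/trap_no fields the macro fills:
 *
 *	struct faultinfo fi;
 *
 *	GET_FAULTINFO_FROM_MC(fi, mc);
 *	// fi.cr2 holds the faulting address; fi.error_code and
 *	// fi.trap_no hold the hardware error details
 */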
#endif
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* FM Driver for Connectivity chip of Texas Instruments.
*
* Common header for all FM driver sub-modules.
*
* Copyright (C) 2011 Texas Instruments
*/
#ifndef _FM_DRV_H
#define _FM_DRV_H
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#define FM_DRV_VERSION "0.1.1"
#define FM_DRV_NAME "ti_fmdrv"
#define FM_DRV_CARD_SHORT_NAME "TI FM Radio"
#define FM_DRV_CARD_LONG_NAME "Texas Instruments FM Radio"
/* Flag info */
#define FM_INTTASK_RUNNING 0
#define FM_INTTASK_SCHEDULE_PENDING 1
#define FM_FW_DW_INPROGRESS 2
#define FM_CORE_READY 3
#define FM_CORE_TRANSPORT_READY 4
#define FM_AF_SWITCH_INPROGRESS 5
#define FM_CORE_TX_XMITING 6
#define FM_TUNE_COMPLETE 0x1
#define FM_BAND_LIMIT 0x2
#define FM_DRV_TX_TIMEOUT (5*HZ) /* 5 seconds */
#define FM_DRV_RX_SEEK_TIMEOUT (20*HZ) /* 20 seconds */
#define fmerr(format, ...) \
printk(KERN_ERR "fmdrv: " format, ## __VA_ARGS__)
#define fmwarn(format, ...) \
printk(KERN_WARNING "fmdrv: " format, ## __VA_ARGS__)
#ifdef DEBUG
#define fmdbg(format, ...) \
printk(KERN_DEBUG "fmdrv: " format, ## __VA_ARGS__)
#else /* DEBUG */
#define fmdbg(format, ...) do {} while (0)
#endif
enum {
FM_MODE_OFF,
FM_MODE_TX,
FM_MODE_RX,
FM_MODE_ENTRY_MAX
};
#define FM_RX_RDS_INFO_FIELD_MAX 8 /* 4 blocks * 2 bytes = one RDS group */
/* RX RDS data format */
struct fm_rdsdata_format {
union {
struct {
u8 buff[FM_RX_RDS_INFO_FIELD_MAX];
} groupdatabuff;
struct {
u16 pidata;
u8 blk_b[2];
u8 blk_c[2];
u8 blk_d[2];
} groupgeneral;
struct {
u16 pidata;
u8 blk_b[2];
u8 af[2];
u8 ps[2];
} group0A;
struct {
u16 pi[2];
u8 blk_b[2];
u8 ps[2];
} group0B;
} data;
};
/* FM region (Europe/US, Japan) info */
struct region_info {
u32 chanl_space;
u32 bot_freq;
u32 top_freq;
u8 fm_band;
};
struct fmdev;
typedef void (*int_handler_prototype) (struct fmdev *);
/* FM Interrupt processing related info */
struct fm_irq {
u8 stage;
u16 flag; /* FM interrupt flag */
u16 mask; /* FM interrupt mask */
/* Interrupt process timeout handler */
struct timer_list timer;
u8 retry;
int_handler_prototype *handlers;
};
/* RDS info */
struct fm_rds {
u8 flag; /* RX RDS on/off status */
u8 last_blk_idx; /* Last received RDS block */
/* RDS buffer */
wait_queue_head_t read_queue;
u32 buf_size; /* Always a multiple of 3: one RDS block record is 3 bytes */
u32 wr_idx;
u32 rd_idx;
u8 *buff;
};
#define FM_RDS_MAX_AF_LIST 25
/*
* Current RX channel Alternate Frequency cache.
 * This info is used to switch to an alternate frequency (AF)
 * when the current channel's signal strength drops below the RSSI threshold.
*/
struct tuned_station_info {
u16 picode;
u32 af_cache[FM_RDS_MAX_AF_LIST];
u8 afcache_size;
u8 af_list_max;
};
/* FM RX mode info */
struct fm_rx {
struct region_info region; /* Current selected band */
u32 freq; /* Current RX frequency */
u8 mute_mode; /* Current mute mode */
u8 deemphasis_mode; /* Current deemphasis mode */
/* RF dependent soft mute mode */
u8 rf_depend_mute;
u16 volume; /* Current volume level */
u16 rssi_threshold; /* Current RSSI threshold level */
/* Holds the index of the current AF jump */
u8 afjump_idx;
/* Will hold the frequency before the jump */
u32 freq_before_jump;
u8 rds_mode; /* RDS operation mode (RDS/RBDS) */
u8 af_mode; /* Alternate frequency on/off */
struct tuned_station_info stat_info;
struct fm_rds rds;
};
#define FMTX_RDS_TXT_STR_SIZE 25
/*
* FM TX RDS data
*
 * @text_type: whether the text that follows is PS or RT
 * @text: radio text string, either PS or RT
 * @af_freq: alternate frequency for TX
* TODO: to be declared in application
*/
struct tx_rds {
u8 text_type;
u8 text[FMTX_RDS_TXT_STR_SIZE];
u8 flag;
u32 af_freq;
};
/*
* FM TX global data
*
 * @pwr_lvl: transmission power level, from the mixer control
 * @xmit_state: transmission state, updated locally upon start/stop
 * @audio_io: I2S/analog
 * @tx_frq: transmission frequency
*/
struct fmtx_data {
u8 pwr_lvl;
u8 xmit_state;
u8 audio_io;
u8 region;
u16 aud_mode;
u32 preemph;
u32 tx_frq;
struct tx_rds rds;
};
/* FM driver operation structure */
struct fmdev {
struct video_device *radio_dev; /* V4L2 video device pointer */
struct v4l2_device v4l2_dev; /* V4L2 top level struct */
struct snd_card *card; /* Card which holds FM mixer controls */
u16 asci_id;
spinlock_t rds_buff_lock; /* To protect access to RDS buffer */
spinlock_t resp_skb_lock; /* To protect access to received SKB */
long flag; /* FM driver state machine info */
int streg_cbdata; /* status of ST registration */
struct sk_buff_head rx_q; /* RX queue */
struct work_struct rx_bh_work; /* RX BH Work */
struct sk_buff_head tx_q; /* TX queue */
struct work_struct tx_bh_work; /* TX BH Work */
unsigned long last_tx_jiffies; /* Timestamp of last pkt sent */
atomic_t tx_cnt; /* Number of packets that can be sent at a time */
struct sk_buff *resp_skb; /* Response from the chip */
/* Main task completion handler */
struct completion maintask_comp;
/* Opcode of last command sent to the chip */
u8 pre_op;
/* Handler used for wakeup when response packet is received */
struct completion *resp_comp;
struct fm_irq irq_info;
u8 curr_fmmode; /* Current FM chip mode (TX, RX, OFF) */
struct fm_rx rx; /* FM receiver info */
struct fmtx_data tx_data;
/* V4L2 ctrl framework handler */
struct v4l2_ctrl_handler ctrl_handler;
/* For core assisted locking */
struct mutex mutex;
};
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
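/*
 * Register map constants for the demodulator. Suffix convention used
 * throughout this header: __A = register address, __M = bit-field mask,
 * __B = bit position, __W = field width, __PRE = reset/preset value;
 * unsuffixed names are field values.
 */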
#define AUD_COMM_EXEC__A 0x1000000
#define AUD_COMM_EXEC_STOP 0x0
#define FEC_COMM_EXEC__A 0x1C00000
#define FEC_COMM_EXEC_STOP 0x0
#define FEC_COMM_EXEC_ACTIVE 0x1
#define FEC_DI_COMM_EXEC__A 0x1C20000
#define FEC_DI_COMM_EXEC_STOP 0x0
#define FEC_DI_INPUT_CTL__A 0x1C20016
#define FEC_RS_COMM_EXEC__A 0x1C30000
#define FEC_RS_COMM_EXEC_STOP 0x0
#define FEC_RS_MEASUREMENT_PERIOD__A 0x1C30012
#define FEC_RS_MEASUREMENT_PRESCALE__A 0x1C30013
#define FEC_RS_NR_BIT_ERRORS__A 0x1C30014
#define FEC_OC_MODE__A 0x1C40011
#define FEC_OC_MODE_PARITY__M 0x1
#define FEC_OC_DTO_MODE__A 0x1C40014
#define FEC_OC_DTO_MODE_DYNAMIC__M 0x1
#define FEC_OC_DTO_MODE_OFFSET_ENABLE__M 0x4
#define FEC_OC_DTO_PERIOD__A 0x1C40015
#define FEC_OC_DTO_BURST_LEN__A 0x1C40018
#define FEC_OC_FCT_MODE__A 0x1C4001A
#define FEC_OC_FCT_MODE__PRE 0x0
#define FEC_OC_FCT_MODE_RAT_ENA__M 0x1
#define FEC_OC_FCT_MODE_VIRT_ENA__M 0x2
#define FEC_OC_TMD_MODE__A 0x1C4001E
#define FEC_OC_TMD_COUNT__A 0x1C4001F
#define FEC_OC_TMD_HI_MARGIN__A 0x1C40020
#define FEC_OC_TMD_LO_MARGIN__A 0x1C40021
#define FEC_OC_TMD_INT_UPD_RATE__A 0x1C40023
#define FEC_OC_AVR_PARM_A__A 0x1C40026
#define FEC_OC_AVR_PARM_B__A 0x1C40027
#define FEC_OC_RCN_GAIN__A 0x1C4002E
#define FEC_OC_RCN_CTL_RATE_LO__A 0x1C40030
#define FEC_OC_RCN_CTL_STEP_LO__A 0x1C40032
#define FEC_OC_RCN_CTL_STEP_HI__A 0x1C40033
#define FEC_OC_SNC_MODE__A 0x1C40040
#define FEC_OC_SNC_MODE_SHUTDOWN__M 0x10
#define FEC_OC_SNC_LWM__A 0x1C40041
#define FEC_OC_SNC_HWM__A 0x1C40042
#define FEC_OC_SNC_UNLOCK__A 0x1C40043
#define FEC_OC_SNC_FAIL_PERIOD__A 0x1C40046
#define FEC_OC_IPR_MODE__A 0x1C40048
#define FEC_OC_IPR_MODE_SERIAL__M 0x1
#define FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M 0x4
#define FEC_OC_IPR_MODE_MVAL_DIS_PAR__M 0x10
#define FEC_OC_IPR_INVERT__A 0x1C40049
#define FEC_OC_IPR_INVERT_MD0__M 0x1
#define FEC_OC_IPR_INVERT_MD1__M 0x2
#define FEC_OC_IPR_INVERT_MD2__M 0x4
#define FEC_OC_IPR_INVERT_MD3__M 0x8
#define FEC_OC_IPR_INVERT_MD4__M 0x10
#define FEC_OC_IPR_INVERT_MD5__M 0x20
#define FEC_OC_IPR_INVERT_MD6__M 0x40
#define FEC_OC_IPR_INVERT_MD7__M 0x80
#define FEC_OC_IPR_INVERT_MERR__M 0x100
#define FEC_OC_IPR_INVERT_MSTRT__M 0x200
#define FEC_OC_IPR_INVERT_MVAL__M 0x400
#define FEC_OC_IPR_INVERT_MCLK__M 0x800
#define FEC_OC_OCR_INVERT__A 0x1C40052
#define IQM_COMM_EXEC__A 0x1800000
#define IQM_COMM_EXEC_B_STOP 0x0
#define IQM_COMM_EXEC_B_ACTIVE 0x1
#define IQM_FS_RATE_OFS_LO__A 0x1820010
#define IQM_FS_ADJ_SEL__A 0x1820014
#define IQM_FS_ADJ_SEL_B_OFF 0x0
#define IQM_FS_ADJ_SEL_B_QAM 0x1
#define IQM_FS_ADJ_SEL_B_VSB 0x2
#define IQM_FD_RATESEL__A 0x1830010
#define IQM_RC_RATE_OFS_LO__A 0x1840010
#define IQM_RC_RATE_OFS_LO__W 16
#define IQM_RC_RATE_OFS_LO__M 0xFFFF
#define IQM_RC_RATE_OFS_HI__M 0xFF
#define IQM_RC_ADJ_SEL__A 0x1840014
#define IQM_RC_ADJ_SEL_B_OFF 0x0
#define IQM_RC_ADJ_SEL_B_QAM 0x1
#define IQM_RC_ADJ_SEL_B_VSB 0x2
#define IQM_RC_STRETCH__A 0x1840016
#define IQM_CF_COMM_INT_MSK__A 0x1860006
#define IQM_CF_SYMMETRIC__A 0x1860010
#define IQM_CF_MIDTAP__A 0x1860011
#define IQM_CF_MIDTAP_RE__B 0
#define IQM_CF_MIDTAP_IM__B 1
#define IQM_CF_OUT_ENA__A 0x1860012
#define IQM_CF_OUT_ENA_QAM__B 1
#define IQM_CF_OUT_ENA_OFDM__M 0x4
#define IQM_CF_ADJ_SEL__A 0x1860013
#define IQM_CF_SCALE__A 0x1860014
#define IQM_CF_SCALE_SH__A 0x1860015
#define IQM_CF_SCALE_SH__PRE 0x0
#define IQM_CF_POW_MEAS_LEN__A 0x1860017
#define IQM_CF_DS_ENA__A 0x1860019
#define IQM_CF_TAP_RE0__A 0x1860020
#define IQM_CF_TAP_IM0__A 0x1860040
#define IQM_CF_CLP_VAL__A 0x1860060
#define IQM_CF_DATATH__A 0x1860061
#define IQM_CF_PKDTH__A 0x1860062
#define IQM_CF_WND_LEN__A 0x1860063
#define IQM_CF_DET_LCT__A 0x1860064
#define IQM_CF_BYPASSDET__A 0x1860067
#define IQM_AF_COMM_EXEC__A 0x1870000
#define IQM_AF_COMM_EXEC_ACTIVE 0x1
#define IQM_AF_CLKNEG__A 0x1870012
#define IQM_AF_CLKNEG_CLKNEGDATA__M 0x2
#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS 0x0
#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG 0x2
#define IQM_AF_START_LOCK__A 0x187001B
#define IQM_AF_PHASE0__A 0x187001C
#define IQM_AF_PHASE1__A 0x187001D
#define IQM_AF_PHASE2__A 0x187001E
#define IQM_AF_CLP_LEN__A 0x1870023
#define IQM_AF_CLP_TH__A 0x1870024
#define IQM_AF_SNS_LEN__A 0x1870026
#define IQM_AF_AGC_IF__A 0x1870028
#define IQM_AF_AGC_RF__A 0x1870029
#define IQM_AF_PDREF__A 0x187002B
#define IQM_AF_PDREF__M 0x1F
#define IQM_AF_STDBY__A 0x187002C
#define IQM_AF_STDBY_STDBY_ADC_STANDBY 0x2
#define IQM_AF_STDBY_STDBY_AMP_STANDBY 0x4
#define IQM_AF_STDBY_STDBY_PD_STANDBY 0x8
#define IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY 0x10
#define IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY 0x20
#define IQM_AF_AMUX__A 0x187002D
#define IQM_AF_AMUX_SIGNAL2ADC 0x1
#define IQM_AF_UPD_SEL__A 0x187002F
#define IQM_AF_INC_LCT__A 0x1870034
#define IQM_AF_INC_BYPASS__A 0x1870036
#define OFDM_CP_COMM_EXEC__A 0x2800000
#define OFDM_CP_COMM_EXEC_STOP 0x0
#define OFDM_EC_SB_PRIOR__A 0x3410013
#define OFDM_EC_SB_PRIOR_HI 0x0
#define OFDM_EC_SB_PRIOR_LO 0x1
#define OFDM_EC_VD_ERR_BIT_CNT__A 0x3420017
#define OFDM_EC_VD_IN_BIT_CNT__A 0x3420018
#define OFDM_EQ_TOP_TD_TPS_CONST__A 0x3010054
#define OFDM_EQ_TOP_TD_TPS_CONST__M 0x3
#define OFDM_EQ_TOP_TD_TPS_CONST_64QAM 0x2
#define OFDM_EQ_TOP_TD_TPS_CODE_HP__A 0x3010056
#define OFDM_EQ_TOP_TD_TPS_CODE_HP__M 0x7
#define OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8 0x4
#define OFDM_EQ_TOP_TD_SQR_ERR_I__A 0x301005E
#define OFDM_EQ_TOP_TD_SQR_ERR_Q__A 0x301005F
#define OFDM_EQ_TOP_TD_SQR_ERR_EXP__A 0x3010060
#define OFDM_EQ_TOP_TD_REQ_SMB_CNT__A 0x3010061
#define OFDM_EQ_TOP_TD_TPS_PWR_OFS__A 0x3010062
#define OFDM_LC_COMM_EXEC__A 0x3800000
#define OFDM_LC_COMM_EXEC_STOP 0x0
#define OFDM_SC_COMM_EXEC__A 0x3C00000
#define OFDM_SC_COMM_EXEC_STOP 0x0
#define OFDM_SC_COMM_STATE__A 0x3C00001
#define OFDM_SC_RA_RAM_PARAM0__A 0x3C20040
#define OFDM_SC_RA_RAM_PARAM1__A 0x3C20041
#define OFDM_SC_RA_RAM_CMD_ADDR__A 0x3C20042
#define OFDM_SC_RA_RAM_CMD__A 0x3C20043
#define OFDM_SC_RA_RAM_CMD_NULL 0x0
#define OFDM_SC_RA_RAM_CMD_PROC_START 0x1
#define OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM 0x3
#define OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM 0x4
#define OFDM_SC_RA_RAM_CMD_GET_OP_PARAM 0x5
#define OFDM_SC_RA_RAM_CMD_USER_IO 0x6
#define OFDM_SC_RA_RAM_CMD_SET_TIMER 0x7
#define OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING 0x8
#define OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M 0x1
#define OFDM_SC_RA_RAM_LOCKTRACK_MIN 0x1
#define OFDM_SC_RA_RAM_OP_PARAM__A 0x3C20048
#define OFDM_SC_RA_RAM_OP_PARAM_MODE__M 0x3
#define OFDM_SC_RA_RAM_OP_PARAM_MODE_2K 0x0
#define OFDM_SC_RA_RAM_OP_PARAM_MODE_8K 0x1
#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_32 0x0
#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_16 0x4
#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_8 0x8
#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_4 0xC
#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK 0x0
#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16 0x10
#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64 0x20
#define OFDM_SC_RA_RAM_OP_PARAM_HIER_NO 0x0
#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A1 0x40
#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A2 0x80
#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A4 0xC0
#define OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2 0x0
#define OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3 0x200
#define OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4 0x400
#define OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6 0x600
#define OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8 0x800
#define OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI 0x0
#define OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO 0x1000
#define OFDM_SC_RA_RAM_OP_AUTO_MODE__M 0x1
#define OFDM_SC_RA_RAM_OP_AUTO_GUARD__M 0x2
#define OFDM_SC_RA_RAM_OP_AUTO_CONST__M 0x4
#define OFDM_SC_RA_RAM_OP_AUTO_HIER__M 0x8
#define OFDM_SC_RA_RAM_OP_AUTO_RATE__M 0x10
#define OFDM_SC_RA_RAM_LOCK__A 0x3C2004B
#define OFDM_SC_RA_RAM_LOCK_DEMOD__M 0x1
#define OFDM_SC_RA_RAM_LOCK_FEC__M 0x2
#define OFDM_SC_RA_RAM_LOCK_MPEG__M 0x4
#define OFDM_SC_RA_RAM_LOCK_NODVBT__M 0x8
#define OFDM_SC_RA_RAM_BE_OPT_DELAY__A 0x3C2004D
#define OFDM_SC_RA_RAM_BE_OPT_INIT_DELAY__A 0x3C2004E
#define OFDM_SC_RA_RAM_ECHO_THRES__A 0x3C2004F
#define OFDM_SC_RA_RAM_ECHO_THRES_8K__B 0
#define OFDM_SC_RA_RAM_ECHO_THRES_8K__M 0xFF
#define OFDM_SC_RA_RAM_ECHO_THRES_2K__B 8
#define OFDM_SC_RA_RAM_ECHO_THRES_2K__M 0xFF00
#define OFDM_SC_RA_RAM_CONFIG__A 0x3C20050
#define OFDM_SC_RA_RAM_CONFIG_NE_FIX_ENABLE__M 0x800
#define OFDM_SC_RA_RAM_FR_THRES_8K__A 0x3C2007D
#define OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A 0x3C200E0
#define OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A 0x3C200E1
#define OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A 0x3C200E3
#define OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A 0x3C200E4
#define OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A 0x3C200F8
#define QAM_COMM_EXEC__A 0x1400000
#define QAM_COMM_EXEC_STOP 0x0
#define QAM_COMM_EXEC_ACTIVE 0x1
#define QAM_TOP_ANNEX_A 0x0
#define QAM_TOP_ANNEX_C 0x2
#define QAM_SL_ERR_POWER__A 0x1430017
#define QAM_DQ_QUAL_FUN0__A 0x1440018
#define QAM_DQ_QUAL_FUN1__A 0x1440019
#define QAM_DQ_QUAL_FUN2__A 0x144001A
#define QAM_DQ_QUAL_FUN3__A 0x144001B
#define QAM_DQ_QUAL_FUN4__A 0x144001C
#define QAM_DQ_QUAL_FUN5__A 0x144001D
#define QAM_LC_MODE__A 0x1450010
#define QAM_LC_QUAL_TAB0__A 0x1450018
#define QAM_LC_QUAL_TAB1__A 0x1450019
#define QAM_LC_QUAL_TAB2__A 0x145001A
#define QAM_LC_QUAL_TAB3__A 0x145001B
#define QAM_LC_QUAL_TAB4__A 0x145001C
#define QAM_LC_QUAL_TAB5__A 0x145001D
#define QAM_LC_QUAL_TAB6__A 0x145001E
#define QAM_LC_QUAL_TAB8__A 0x145001F
#define QAM_LC_QUAL_TAB9__A 0x1450020
#define QAM_LC_QUAL_TAB10__A 0x1450021
#define QAM_LC_QUAL_TAB12__A 0x1450022
#define QAM_LC_QUAL_TAB15__A 0x1450023
#define QAM_LC_QUAL_TAB16__A 0x1450024
#define QAM_LC_QUAL_TAB20__A 0x1450025
#define QAM_LC_QUAL_TAB25__A 0x1450026
#define QAM_LC_LPF_FACTORP__A 0x1450028
#define QAM_LC_LPF_FACTORI__A 0x1450029
#define QAM_LC_RATE_LIMIT__A 0x145002A
#define QAM_LC_SYMBOL_FREQ__A 0x145002B
#define QAM_SY_TIMEOUT__A 0x1470011
#define QAM_SY_TIMEOUT__PRE 0x3A98
#define QAM_SY_SYNC_LWM__A 0x1470012
#define QAM_SY_SYNC_AWM__A 0x1470013
#define QAM_SY_SYNC_HWM__A 0x1470014
#define QAM_SY_SP_INV__A 0x1470017
#define QAM_SY_SP_INV_SPECTRUM_INV_DIS 0x0
#define SCU_COMM_EXEC__A 0x800000
#define SCU_COMM_EXEC_STOP 0x0
#define SCU_COMM_EXEC_ACTIVE 0x1
#define SCU_COMM_EXEC_HOLD 0x2
#define SCU_RAM_DRIVER_DEBUG__A 0x831EBF
#define SCU_RAM_QAM_FSM_STEP_PERIOD__A 0x831EC4
#define SCU_RAM_GPIO__A 0x831EC7
#define SCU_RAM_GPIO_HW_LOCK_IND_DISABLE 0x0
#define SCU_RAM_AGC_CLP_CTRL_MODE__A 0x831EC8
#define SCU_RAM_FEC_ACCUM_PKT_FAILURES__A 0x831ECB
#define SCU_RAM_FEC_PRE_RS_BER_FILTER_SH__A 0x831F05
#define SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A 0x831F15
#define SCU_RAM_AGC_KI_CYCLEN__A 0x831F17
#define SCU_RAM_AGC_SNS_CYCLEN__A 0x831F18
#define SCU_RAM_AGC_RF_SNS_DEV_MAX__A 0x831F19
#define SCU_RAM_AGC_RF_SNS_DEV_MIN__A 0x831F1A
#define SCU_RAM_AGC_RF_MAX__A 0x831F1B
#define SCU_RAM_AGC_CONFIG__A 0x831F24
#define SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M 0x1
#define SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M 0x2
#define SCU_RAM_AGC_CONFIG_INV_IF_POL__M 0x100
#define SCU_RAM_AGC_CONFIG_INV_RF_POL__M 0x200
#define SCU_RAM_AGC_KI__A 0x831F25
#define SCU_RAM_AGC_KI_RF__B 4
#define SCU_RAM_AGC_KI_RF__M 0xF0
#define SCU_RAM_AGC_KI_IF__B 8
#define SCU_RAM_AGC_KI_IF__M 0xF00
#define SCU_RAM_AGC_KI_RED__A 0x831F26
#define SCU_RAM_AGC_KI_RED_RAGC_RED__B 2
#define SCU_RAM_AGC_KI_RED_RAGC_RED__M 0xC
#define SCU_RAM_AGC_KI_RED_IAGC_RED__B 4
#define SCU_RAM_AGC_KI_RED_IAGC_RED__M 0x30
#define SCU_RAM_AGC_KI_INNERGAIN_MIN__A 0x831F27
#define SCU_RAM_AGC_KI_MINGAIN__A 0x831F28
#define SCU_RAM_AGC_KI_MAXGAIN__A 0x831F29
#define SCU_RAM_AGC_KI_MAXMINGAIN_TH__A 0x831F2A
#define SCU_RAM_AGC_KI_MIN__A 0x831F2B
#define SCU_RAM_AGC_KI_MAX__A 0x831F2C
#define SCU_RAM_AGC_CLP_SUM__A 0x831F2D
#define SCU_RAM_AGC_CLP_SUM_MIN__A 0x831F2E
#define SCU_RAM_AGC_CLP_SUM_MAX__A 0x831F2F
#define SCU_RAM_AGC_CLP_CYCLEN__A 0x831F30
#define SCU_RAM_AGC_CLP_CYCCNT__A 0x831F31
#define SCU_RAM_AGC_CLP_DIR_TO__A 0x831F32
#define SCU_RAM_AGC_CLP_DIR_WD__A 0x831F33
#define SCU_RAM_AGC_CLP_DIR_STP__A 0x831F34
#define SCU_RAM_AGC_SNS_SUM__A 0x831F35
#define SCU_RAM_AGC_SNS_SUM_MIN__A 0x831F36
#define SCU_RAM_AGC_SNS_SUM_MAX__A 0x831F37
#define SCU_RAM_AGC_SNS_CYCCNT__A 0x831F38
#define SCU_RAM_AGC_SNS_DIR_TO__A 0x831F39
#define SCU_RAM_AGC_SNS_DIR_WD__A 0x831F3A
#define SCU_RAM_AGC_SNS_DIR_STP__A 0x831F3B
#define SCU_RAM_AGC_INGAIN_TGT__A 0x831F3D
#define SCU_RAM_AGC_INGAIN_TGT_MIN__A 0x831F3E
#define SCU_RAM_AGC_INGAIN_TGT_MAX__A 0x831F3F
#define SCU_RAM_AGC_IF_IACCU_HI__A 0x831F40
#define SCU_RAM_AGC_IF_IACCU_LO__A 0x831F41
#define SCU_RAM_AGC_IF_IACCU_HI_TGT__A 0x831F42
#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A 0x831F43
#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A 0x831F44
#define SCU_RAM_AGC_RF_IACCU_HI__A 0x831F45
#define SCU_RAM_AGC_RF_IACCU_LO__A 0x831F46
#define SCU_RAM_AGC_RF_IACCU_HI_CO__A 0x831F47
#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A 0x831F84
#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A 0x831F85
#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A 0x831F86
#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A 0x831F87
#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A 0x831F88
#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A 0x831F89
#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A 0x831F8A
#define SCU_RAM_QAM_FSM_RTH__A 0x831F8E
#define SCU_RAM_QAM_FSM_FTH__A 0x831F8F
#define SCU_RAM_QAM_FSM_PTH__A 0x831F90
#define SCU_RAM_QAM_FSM_MTH__A 0x831F91
#define SCU_RAM_QAM_FSM_CTH__A 0x831F92
#define SCU_RAM_QAM_FSM_QTH__A 0x831F93
#define SCU_RAM_QAM_FSM_RATE_LIM__A 0x831F94
#define SCU_RAM_QAM_FSM_FREQ_LIM__A 0x831F95
#define SCU_RAM_QAM_FSM_COUNT_LIM__A 0x831F96
#define SCU_RAM_QAM_LC_CA_COARSE__A 0x831F97
#define SCU_RAM_QAM_LC_CA_FINE__A 0x831F99
#define SCU_RAM_QAM_LC_CP_COARSE__A 0x831F9A
#define SCU_RAM_QAM_LC_CP_MEDIUM__A 0x831F9B
#define SCU_RAM_QAM_LC_CP_FINE__A 0x831F9C
#define SCU_RAM_QAM_LC_CI_COARSE__A 0x831F9D
#define SCU_RAM_QAM_LC_CI_MEDIUM__A 0x831F9E
#define SCU_RAM_QAM_LC_CI_FINE__A 0x831F9F
#define SCU_RAM_QAM_LC_EP_COARSE__A 0x831FA0
#define SCU_RAM_QAM_LC_EP_MEDIUM__A 0x831FA1
#define SCU_RAM_QAM_LC_EP_FINE__A 0x831FA2
#define SCU_RAM_QAM_LC_EI_COARSE__A 0x831FA3
#define SCU_RAM_QAM_LC_EI_MEDIUM__A 0x831FA4
#define SCU_RAM_QAM_LC_EI_FINE__A 0x831FA5
#define SCU_RAM_QAM_LC_CF_COARSE__A 0x831FA6
#define SCU_RAM_QAM_LC_CF_MEDIUM__A 0x831FA7
#define SCU_RAM_QAM_LC_CF_FINE__A 0x831FA8
#define SCU_RAM_QAM_LC_CF1_COARSE__A 0x831FA9
#define SCU_RAM_QAM_LC_CF1_MEDIUM__A 0x831FAA
#define SCU_RAM_QAM_LC_CF1_FINE__A 0x831FAB
#define SCU_RAM_QAM_SL_SIG_POWER__A 0x831FAC
#define SCU_RAM_QAM_EQ_CMA_RAD0__A 0x831FAD
#define SCU_RAM_QAM_EQ_CMA_RAD1__A 0x831FAE
#define SCU_RAM_QAM_EQ_CMA_RAD2__A 0x831FAF
#define SCU_RAM_QAM_EQ_CMA_RAD3__A 0x831FB0
#define SCU_RAM_QAM_EQ_CMA_RAD4__A 0x831FB1
#define SCU_RAM_QAM_EQ_CMA_RAD5__A 0x831FB2
#define SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED 0x4000
#define SCU_RAM_QAM_LOCKED_LOCKED_LOCKED 0x8000
#define SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK 0xC000
#define SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A 0x831FEA
#define SCU_RAM_DRIVER_VER_HI__A 0x831FEB
#define SCU_RAM_DRIVER_VER_LO__A 0x831FEC
#define SCU_RAM_PARAM_15__A 0x831FED
#define SCU_RAM_PARAM_0__A 0x831FFC
#define SCU_RAM_COMMAND__A 0x831FFD
#define SCU_RAM_COMMAND_CMD_DEMOD_RESET 0x1
#define SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV 0x2
#define SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM 0x3
#define SCU_RAM_COMMAND_CMD_DEMOD_START 0x4
#define SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK 0x5
#define SCU_RAM_COMMAND_CMD_DEMOD_STOP 0x9
#define SCU_RAM_COMMAND_STANDARD_QAM 0x200
#define SCU_RAM_COMMAND_STANDARD_OFDM 0x400
#define SIO_TOP_COMM_KEY__A 0x41000F
#define SIO_TOP_COMM_KEY_KEY 0xFABA
#define SIO_TOP_JTAGID_LO__A 0x410012
#define SIO_HI_RA_RAM_RES__A 0x420031
#define SIO_HI_RA_RAM_CMD__A 0x420032
#define SIO_HI_RA_RAM_CMD_RESET 0x2
#define SIO_HI_RA_RAM_CMD_CONFIG 0x3
#define SIO_HI_RA_RAM_CMD_BRDCTRL 0x7
#define SIO_HI_RA_RAM_PAR_1__A 0x420033
#define SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY 0x3945
#define SIO_HI_RA_RAM_PAR_2__A 0x420034
#define SIO_HI_RA_RAM_PAR_2_CFG_DIV__M 0x7F
#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN 0x0
#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED 0x4
#define SIO_HI_RA_RAM_PAR_3__A 0x420035
#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M 0x7F
#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B 7
#define SIO_HI_RA_RAM_PAR_3_ACP_RW_READ 0x0
#define SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE 0x8
#define SIO_HI_RA_RAM_PAR_4__A 0x420036
#define SIO_HI_RA_RAM_PAR_5__A 0x420037
#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE 0x1
#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M 0x8
#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ 0x8
#define SIO_HI_RA_RAM_PAR_6__A 0x420038
#define SIO_CC_PLL_LOCK__A 0x450012
#define SIO_CC_PWD_MODE__A 0x450015
#define SIO_CC_PWD_MODE_LEVEL_NONE 0x0
#define SIO_CC_PWD_MODE_LEVEL_OFDM 0x1
#define SIO_CC_PWD_MODE_LEVEL_CLOCK 0x2
#define SIO_CC_PWD_MODE_LEVEL_PLL 0x3
#define SIO_CC_PWD_MODE_LEVEL_OSC 0x4
#define SIO_CC_SOFT_RST__A 0x450016
#define SIO_CC_SOFT_RST_OFDM__M 0x1
#define SIO_CC_SOFT_RST_SYS__M 0x2
#define SIO_CC_SOFT_RST_OSC__M 0x4
#define SIO_CC_UPDATE__A 0x450017
#define SIO_CC_UPDATE_KEY 0xFABA
#define SIO_OFDM_SH_OFDM_RING_ENABLE__A 0x470010
#define SIO_OFDM_SH_OFDM_RING_ENABLE_OFF 0x0
#define SIO_OFDM_SH_OFDM_RING_ENABLE_ON 0x1
#define SIO_OFDM_SH_OFDM_RING_STATUS__A 0x470012
#define SIO_OFDM_SH_OFDM_RING_STATUS_DOWN 0x0
#define SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED 0x1
#define SIO_BL_COMM_EXEC__A 0x480000
#define SIO_BL_COMM_EXEC_ACTIVE 0x1
#define SIO_BL_STATUS__A 0x480010
#define SIO_BL_MODE__A 0x480011
#define SIO_BL_MODE_DIRECT 0x0
#define SIO_BL_MODE_CHAIN 0x1
#define SIO_BL_ENABLE__A 0x480012
#define SIO_BL_ENABLE_ON 0x1
#define SIO_BL_TGT_HDR__A 0x480014
#define SIO_BL_TGT_ADDR__A 0x480015
#define SIO_BL_SRC_ADDR__A 0x480016
#define SIO_BL_SRC_LEN__A 0x480017
#define SIO_BL_CHAIN_ADDR__A 0x480018
#define SIO_BL_CHAIN_LEN__A 0x480019
#define SIO_PDR_MON_CFG__A 0x7F0010
#define SIO_PDR_UIO_IN_HI__A 0x7F0015
#define SIO_PDR_UIO_OUT_LO__A 0x7F0016
#define SIO_PDR_OHW_CFG__A 0x7F001F
#define SIO_PDR_OHW_CFG_FREF_SEL__M 0x3
#define SIO_PDR_GPIO_CFG__A 0x7F0021
#define SIO_PDR_MSTRT_CFG__A 0x7F0025
#define SIO_PDR_MERR_CFG__A 0x7F0026
#define SIO_PDR_MCLK_CFG__A 0x7F0028
#define SIO_PDR_MCLK_CFG_DRIVE__B 3
#define SIO_PDR_MVAL_CFG__A 0x7F0029
#define SIO_PDR_MD0_CFG__A 0x7F002A
#define SIO_PDR_MD0_CFG_DRIVE__B 3
#define SIO_PDR_MD1_CFG__A 0x7F002B
#define SIO_PDR_MD2_CFG__A 0x7F002C
#define SIO_PDR_MD3_CFG__A 0x7F002D
#define SIO_PDR_MD4_CFG__A 0x7F002F
#define SIO_PDR_MD5_CFG__A 0x7F0030
#define SIO_PDR_MD6_CFG__A 0x7F0031
#define SIO_PDR_MD7_CFG__A 0x7F0032
#define SIO_PDR_SMA_RX_CFG__A 0x7F0037
#define SIO_PDR_SMA_TX_CFG__A 0x7F0038
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Siemens SIMATIC IPC driver for GPIO based LEDs
*
* Copyright (c) Siemens AG, 2023
*
* Author:
* Henning Schild <[email protected]>
*/
#ifndef _SIMATIC_IPC_LEDS_GPIO_H
#define _SIMATIC_IPC_LEDS_GPIO_H
int simatic_ipc_leds_gpio_probe(struct platform_device *pdev,
struct gpiod_lookup_table *table,
struct gpiod_lookup_table *table_extra);
void simatic_ipc_leds_gpio_remove(struct platform_device *pdev,
struct gpiod_lookup_table *table,
struct gpiod_lookup_table *table_extra);
#endif /* _SIMATIC_IPC_LEDS_GPIO_H */
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Second generation of pinmux driver for Amlogic Meson-AXG SoC.
*
* Copyright (c) 2017 Baylibre SAS.
* Author: Jerome Brunet <[email protected]>
*
* Copyright (c) 2017 Amlogic, Inc. All rights reserved.
* Author: Xingyu Chen <[email protected]>
*/
/*
 * This new generation of pinctrl IP is mainly adopted by the
 * Meson-AXG SoC and later series, which use a continuous 4-bit
 * register field to select the function of each pin.
 *
 * The value 0 always selects GPIO mode, while the other values
 * (starting from 1) select one of the function modes.
*/
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include "pinctrl-meson.h"
#include "pinctrl-meson-axg-pmx.h"
static int meson_axg_pmx_get_bank(struct meson_pinctrl *pc,
unsigned int pin,
const struct meson_pmx_bank **bank)
{
int i;
const struct meson_axg_pmx_data *pmx = pc->data->pmx_data;
for (i = 0; i < pmx->num_pmx_banks; i++)
if (pin >= pmx->pmx_banks[i].first &&
pin <= pmx->pmx_banks[i].last) {
*bank = &pmx->pmx_banks[i];
return 0;
}
return -EINVAL;
}
static int meson_pmx_calc_reg_and_offset(const struct meson_pmx_bank *bank,
unsigned int pin, unsigned int *reg,
unsigned int *offset)
{
int shift;
shift = pin - bank->first;
*reg = bank->reg + (bank->offset + (shift << 2)) / 32;
*offset = (bank->offset + (shift << 2)) % 32;
return 0;
}
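/*
 * Worked example (hypothetical bank values): with first = 10, reg = 4
 * and offset = 0, pin 19 gives shift = 9, so its 4-bit field spans
 * bits (9 << 2) % 32 = 4..7 of register 4 + (9 << 2) / 32 = register 5.
 * Writing 0 into that field selects GPIO mode; writing 2 selects the
 * second function mode.
 */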
static int meson_axg_pmx_update_function(struct meson_pinctrl *pc,
unsigned int pin, unsigned int func)
{
const struct meson_pmx_bank *bank;
int ret;
int reg;
int offset;
ret = meson_axg_pmx_get_bank(pc, pin, &bank);
if (ret)
return ret;
meson_pmx_calc_reg_and_offset(bank, pin, ®, &offset);
ret = regmap_update_bits(pc->reg_mux, reg << 2,
0xf << offset, (func & 0xf) << offset);
return ret;
}
static int meson_axg_pmx_set_mux(struct pinctrl_dev *pcdev,
unsigned int func_num, unsigned int group_num)
{
int i;
int ret;
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
const struct meson_pmx_func *func = &pc->data->funcs[func_num];
const struct meson_pmx_group *group = &pc->data->groups[group_num];
struct meson_pmx_axg_data *pmx_data =
(struct meson_pmx_axg_data *)group->data;
dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
group->name);
for (i = 0; i < group->num_pins; i++) {
ret = meson_axg_pmx_update_function(pc, group->pins[i],
pmx_data->func);
if (ret)
return ret;
}
return 0;
}
static int meson_axg_pmx_request_gpio(struct pinctrl_dev *pcdev,
struct pinctrl_gpio_range *range, unsigned int offset)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
return meson_axg_pmx_update_function(pc, offset, 0);
}
const struct pinmux_ops meson_axg_pmx_ops = {
.set_mux = meson_axg_pmx_set_mux,
.get_functions_count = meson_pmx_get_funcs_count,
.get_function_name = meson_pmx_get_func_name,
.get_function_groups = meson_pmx_get_groups,
.gpio_request_enable = meson_axg_pmx_request_gpio,
};
EXPORT_SYMBOL_GPL(meson_axg_pmx_ops);
MODULE_DESCRIPTION("Amlogic Meson AXG second generation pinmux driver");
MODULE_LICENSE("Dual BSD/GPL");
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Remote processor elf helpers defines
*
* Copyright (C) 2020 Kalray, Inc.
*/
#ifndef REMOTEPROC_ELF_LOADER_H
#define REMOTEPROC_ELF_LOADER_H
#include <linux/elf.h>
#include <linux/types.h>
/**
* fw_elf_get_class - Get elf class
* @fw: the ELF firmware image
*
* Note that we use elf32_hdr to access the class since the start of the
 * struct is the same for both ELF classes
*
* Return: elf class of the firmware
*/
static inline u8 fw_elf_get_class(const struct firmware *fw)
{
struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
return ehdr->e_ident[EI_CLASS];
}
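/*
 * Usage sketch (illustrative only): the returned class byte drives the
 * class-agnostic accessors generated below, e.g.
 *
 *	u8 class = fw_elf_get_class(fw);
 *	u16 phnum = elf_hdr_get_e_phnum(class, fw->data);
 */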
static inline void elf_hdr_init_ident(struct elf32_hdr *hdr, u8 class)
{
memcpy(hdr->e_ident, ELFMAG, SELFMAG);
hdr->e_ident[EI_CLASS] = class;
hdr->e_ident[EI_DATA] = ELFDATA2LSB;
hdr->e_ident[EI_VERSION] = EV_CURRENT;
hdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
}
/* Generate getter and setter for a specific elf struct/field */
#define ELF_GEN_FIELD_GET_SET(__s, __field, __type) \
static inline __type elf_##__s##_get_##__field(u8 class, const void *arg) \
{ \
if (class == ELFCLASS32) \
return (__type) ((const struct elf32_##__s *) arg)->__field; \
else \
return (__type) ((const struct elf64_##__s *) arg)->__field; \
} \
static inline void elf_##__s##_set_##__field(u8 class, void *arg, \
__type value) \
{ \
if (class == ELFCLASS32) \
((struct elf32_##__s *) arg)->__field = (__type) value; \
else \
((struct elf64_##__s *) arg)->__field = (__type) value; \
}
ELF_GEN_FIELD_GET_SET(hdr, e_entry, u64)
ELF_GEN_FIELD_GET_SET(hdr, e_phnum, u16)
ELF_GEN_FIELD_GET_SET(hdr, e_shnum, u16)
ELF_GEN_FIELD_GET_SET(hdr, e_phoff, u64)
ELF_GEN_FIELD_GET_SET(hdr, e_shoff, u64)
ELF_GEN_FIELD_GET_SET(hdr, e_shstrndx, u16)
ELF_GEN_FIELD_GET_SET(hdr, e_machine, u16)
ELF_GEN_FIELD_GET_SET(hdr, e_type, u16)
ELF_GEN_FIELD_GET_SET(hdr, e_version, u32)
ELF_GEN_FIELD_GET_SET(hdr, e_ehsize, u32)
ELF_GEN_FIELD_GET_SET(hdr, e_phentsize, u16)
ELF_GEN_FIELD_GET_SET(hdr, e_shentsize, u16)
ELF_GEN_FIELD_GET_SET(phdr, p_paddr, u64)
ELF_GEN_FIELD_GET_SET(phdr, p_vaddr, u64)
ELF_GEN_FIELD_GET_SET(phdr, p_filesz, u64)
ELF_GEN_FIELD_GET_SET(phdr, p_memsz, u64)
ELF_GEN_FIELD_GET_SET(phdr, p_type, u32)
ELF_GEN_FIELD_GET_SET(phdr, p_offset, u64)
ELF_GEN_FIELD_GET_SET(phdr, p_flags, u32)
ELF_GEN_FIELD_GET_SET(phdr, p_align, u64)
ELF_GEN_FIELD_GET_SET(shdr, sh_type, u32)
ELF_GEN_FIELD_GET_SET(shdr, sh_flags, u32)
ELF_GEN_FIELD_GET_SET(shdr, sh_entsize, u16)
ELF_GEN_FIELD_GET_SET(shdr, sh_size, u64)
ELF_GEN_FIELD_GET_SET(shdr, sh_offset, u64)
ELF_GEN_FIELD_GET_SET(shdr, sh_name, u32)
ELF_GEN_FIELD_GET_SET(shdr, sh_addr, u64)
#define ELF_STRUCT_SIZE(__s) \
static inline unsigned long elf_size_of_##__s(u8 class) \
{ \
if (class == ELFCLASS32)\
return sizeof(struct elf32_##__s); \
else \
return sizeof(struct elf64_##__s); \
}
ELF_STRUCT_SIZE(shdr)
ELF_STRUCT_SIZE(phdr)
ELF_STRUCT_SIZE(hdr)
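/*
 * A minimal usage sketch (illustrative only, not used by the loader):
 * sum the in-memory size of all PT_LOAD segments of a firmware image,
 * regardless of its ELF class, using the generated accessors above.
 */
static inline u64 elf_example_total_memsz(const struct firmware *fw)
{
	u8 class = fw_elf_get_class(fw);
	const void *ehdr = fw->data;
	const u8 *phdrs = fw->data + elf_hdr_get_e_phoff(class, ehdr);
	u16 i, phnum = elf_hdr_get_e_phnum(class, ehdr);
	u64 total = 0;

	for (i = 0; i < phnum; i++) {
		const void *phdr = phdrs + i * elf_size_of_phdr(class);

		if (elf_phdr_get_p_type(class, phdr) == PT_LOAD)
			total += elf_phdr_get_p_memsz(class, phdr);
	}

	return total;
}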
static inline unsigned int elf_strtbl_add(const char *name, void *ehdr, u8 class, size_t *index)
{
u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
void *shdr;
char *strtab;
size_t idx, ret;
shdr = ehdr + elf_size_of_hdr(class) + shstrndx * elf_size_of_shdr(class);
strtab = ehdr + elf_shdr_get_sh_offset(class, shdr);
idx = index ? *index : 0;
if (!strtab || !name)
return 0;
ret = idx;
strcpy((strtab + idx), name);
idx += strlen(name) + 1;
if (index)
*index = idx;
return ret;
}
#endif /* REMOTEPROC_ELF_LOADER_H */
|
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DC_INC_LINK_DP_DPIA_BW_H_
#define DC_INC_LINK_DP_DPIA_BW_H_
#include "link.h"
/* Number of Host Routers per motherboard is 2 */
#define MAX_HR_NUM 2
/* Number of DPIA per host router is 2 */
#define MAX_DPIA_NUM (MAX_HR_NUM * 2)
/*
* Host Router BW type
*/
enum bw_type {
HOST_ROUTER_BW_ESTIMATED,
HOST_ROUTER_BW_ALLOCATED,
HOST_ROUTER_BW_INVALID,
};
/*
* Enable BW Allocation Mode Support from the DP-Tx side
*
* @link: pointer to the dc_link struct instance
*
* return: SUCCESS or FAILURE
*/
bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
/*
 * Allocate only the BW the stream needs: if stream_req_bw differs from
 * the BW already allocated at HPD (whether higher or lower), deallocate
 * the maximum BW and then allocate only what the stream needs.
*
* @link: pointer to the dc_link struct instance
* @req_bw: Bw requested by the stream
*
* return: true if allocated successfully
*/
bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
/*
* Handle the USB4 BW Allocation related functionality here:
* Plug => Try to allocate max bw from timing parameters supported by the sink
* Unplug => de-allocate bw
*
* @link: pointer to the dc_link struct instance
* @peak_bw: Peak bw used by the link/sink
*
* return: allocated bw else return 0
*/
int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
/*
 * Handler invoked when the response to the request above arrives. It
 * reads the allocation result from the Connection Manager (CM) and
 * updates the driver structs accordingly.
*
* @link: pointer to the dc_link struct instance
* @bw: Allocated or Estimated BW depending on the result
* @result: Response type
*
* return: none
*/
void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result);
/*
* Handle the validation of total BW here and confirm that the bw used by each
* DPIA doesn't exceed available BW for each host router (HR)
*
* @link[]: array of link pointer to all possible DPIA links
* @bw_needed[]: bw needed for each DPIA link based on timing
* @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM
*
* return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
*/
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
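/*
 * Caller-side sketch (illustrative; "candidate_links", "candidate_bw",
 * "num_dpias" and "reject_mode_set()" are hypothetical): reject any
 * configuration whose per-DPIA bandwidth would overflow the host-router
 * budget.
 *
 *	struct dc_link *candidate_links[MAX_DPIA_NUM];
 *	int candidate_bw[MAX_DPIA_NUM];
 *
 *	if (!dpia_validate_usb4_bw(candidate_links, candidate_bw, num_dpias))
 *		reject_mode_set();
 */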
/*
* Obtain all the DP overheads in dp tunneling for the dpia link
*
* @link: pointer to the dc_link struct instance
*
* return: DP overheads in DP tunneling
*/
int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
|
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Authors: Cheng Xu <[email protected]> */
/* Kai Shen <[email protected]> */
/* Copyright (c) 2020-2022, Alibaba Group. */
#include <linux/module.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"
MODULE_AUTHOR("Cheng Xu <[email protected]>");
MODULE_DESCRIPTION("Alibaba elasticRDMA adapter driver");
MODULE_LICENSE("Dual BSD/GPL");
static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
void *arg)
{
struct net_device *netdev = netdev_notifier_info_to_dev(arg);
struct erdma_dev *dev = container_of(nb, struct erdma_dev, netdev_nb);
if (dev->netdev == NULL || dev->netdev != netdev)
goto done;
switch (event) {
case NETDEV_UP:
dev->state = IB_PORT_ACTIVE;
erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_DOWN:
dev->state = IB_PORT_DOWN;
erdma_port_event(dev, IB_EVENT_PORT_ERR);
break;
case NETDEV_CHANGEMTU:
if (dev->mtu != netdev->mtu) {
erdma_set_mtu(dev, netdev->mtu);
dev->mtu = netdev->mtu;
}
break;
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
case NETDEV_CHANGEADDR:
case NETDEV_GOING_DOWN:
case NETDEV_CHANGE:
default:
break;
}
done:
return NOTIFY_OK;
}
static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
{
struct net_device *netdev;
int ret = -EPROBE_DEFER;
	/* Already bound to a net_device, so skip. */
if (dev->netdev)
return 0;
rtnl_lock();
for_each_netdev(&init_net, netdev) {
/*
		 * In erdma, the paired netdev and ibdev should have the same
		 * MAC address. erdma can read that address from its PCIe BAR
		 * registers, but it cannot obtain a reference to the paired
		 * netdev directly, so we traverse all netdevs here to find
		 * the paired one.
*/
if (ether_addr_equal_unaligned(netdev->perm_addr,
dev->attrs.peer_addr)) {
ret = ib_device_set_netdev(&dev->ibdev, netdev, 1);
if (ret) {
rtnl_unlock();
ibdev_warn(&dev->ibdev,
"failed (%d) to link netdev", ret);
return ret;
}
dev->netdev = netdev;
break;
}
}
rtnl_unlock();
return ret;
}
static int erdma_device_register(struct erdma_dev *dev)
{
struct ib_device *ibdev = &dev->ibdev;
int ret;
ret = erdma_enum_and_get_netdev(dev);
if (ret)
return ret;
dev->mtu = dev->netdev->mtu;
addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);
ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
if (ret) {
dev_err(&dev->pdev->dev,
"ib_register_device failed: ret = %d\n", ret);
return ret;
}
dev->netdev_nb.notifier_call = erdma_netdev_event;
ret = register_netdevice_notifier(&dev->netdev_nb);
if (ret) {
ibdev_err(&dev->ibdev, "failed to register notifier.\n");
ib_unregister_device(ibdev);
}
return ret;
}
static irqreturn_t erdma_comm_irq_handler(int irq, void *data)
{
struct erdma_dev *dev = data;
erdma_cmdq_completion_handler(&dev->cmdq);
erdma_aeq_event_handler(dev);
return IRQ_HANDLED;
}
static int erdma_request_vectors(struct erdma_dev *dev)
{
int expect_irq_num = min(num_possible_cpus() + 1, ERDMA_NUM_MSIX_VEC);
int ret;
ret = pci_alloc_irq_vectors(dev->pdev, 1, expect_irq_num, PCI_IRQ_MSIX);
if (ret < 0) {
dev_err(&dev->pdev->dev, "request irq vectors failed(%d)\n",
ret);
return ret;
}
dev->attrs.irq_num = ret;
return 0;
}
static int erdma_comm_irq_init(struct erdma_dev *dev)
{
snprintf(dev->comm_irq.name, ERDMA_IRQNAME_SIZE, "erdma-common@pci:%s",
pci_name(dev->pdev));
dev->comm_irq.msix_vector =
pci_irq_vector(dev->pdev, ERDMA_MSIX_VECTOR_CMDQ);
cpumask_set_cpu(cpumask_first(cpumask_of_pcibus(dev->pdev->bus)),
&dev->comm_irq.affinity_hint_mask);
irq_set_affinity_hint(dev->comm_irq.msix_vector,
&dev->comm_irq.affinity_hint_mask);
return request_irq(dev->comm_irq.msix_vector, erdma_comm_irq_handler, 0,
dev->comm_irq.name, dev);
}
static void erdma_comm_irq_uninit(struct erdma_dev *dev)
{
irq_set_affinity_hint(dev->comm_irq.msix_vector, NULL);
free_irq(dev->comm_irq.msix_vector, dev);
}
static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
int ret;
dev->resp_pool = dma_pool_create("erdma_resp_pool", &pdev->dev,
ERDMA_HW_RESP_SIZE, ERDMA_HW_RESP_SIZE,
0);
if (!dev->resp_pool)
return -ENOMEM;
dev->db_pool = dma_pool_create("erdma_db_pool", &pdev->dev,
ERDMA_DB_SIZE, ERDMA_DB_SIZE, 0);
if (!dev->db_pool) {
ret = -ENOMEM;
goto destroy_resp_pool;
}
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ERDMA_PCI_WIDTH));
if (ret)
goto destroy_db_pool;
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0;
destroy_db_pool:
dma_pool_destroy(dev->db_pool);
destroy_resp_pool:
dma_pool_destroy(dev->resp_pool);
return ret;
}
static void erdma_device_uninit(struct erdma_dev *dev)
{
dma_pool_destroy(dev->db_pool);
dma_pool_destroy(dev->resp_pool);
}
static void erdma_hw_reset(struct erdma_dev *dev)
{
u32 ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_RESET_MASK, 1);
erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
}
static int erdma_wait_hw_init_done(struct erdma_dev *dev)
{
int i;
erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG,
FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1));
for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
if (erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
ERDMA_REG_DEV_ST_INIT_DONE_MASK))
break;
msleep(ERDMA_REG_ACCESS_WAIT_MS);
}
if (i == ERDMA_WAIT_DEV_DONE_CNT) {
dev_err(&dev->pdev->dev, "wait init done failed.\n");
return -ETIMEDOUT;
}
return 0;
}
static const struct pci_device_id erdma_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ALIBABA, 0x107f) },
{}
};
static int erdma_probe_dev(struct pci_dev *pdev)
{
struct erdma_dev *dev;
int bars, err;
u32 version;
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device failed(%d)\n", err);
return err;
}
pci_set_master(pdev);
dev = ib_alloc_device(erdma_dev, ibdev);
if (!dev) {
dev_err(&pdev->dev, "ib_alloc_device failed\n");
err = -ENOMEM;
goto err_disable_device;
}
pci_set_drvdata(pdev, dev);
dev->pdev = pdev;
dev->attrs.numa_node = dev_to_node(&pdev->dev);
bars = pci_select_bars(pdev, IORESOURCE_MEM);
err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
if (bars != ERDMA_BAR_MASK || err) {
err = err ? err : -EINVAL;
goto err_ib_device_release;
}
dev->func_bar_addr = pci_resource_start(pdev, ERDMA_FUNC_BAR);
dev->func_bar_len = pci_resource_len(pdev, ERDMA_FUNC_BAR);
dev->func_bar =
devm_ioremap(&pdev->dev, dev->func_bar_addr, dev->func_bar_len);
if (!dev->func_bar) {
dev_err(&pdev->dev, "devm_ioremap failed.\n");
err = -EFAULT;
goto err_release_bars;
}
version = erdma_reg_read32(dev, ERDMA_REGS_VERSION_REG);
if (version == 0) {
		/* A zero version register means this is a non-functional PCI function. */
err = -ENODEV;
goto err_iounmap_func_bar;
}
err = erdma_device_init(dev, pdev);
if (err)
goto err_iounmap_func_bar;
err = erdma_request_vectors(dev);
if (err)
goto err_uninit_device;
err = erdma_comm_irq_init(dev);
if (err)
goto err_free_vectors;
err = erdma_aeq_init(dev);
if (err)
goto err_uninit_comm_irq;
err = erdma_cmdq_init(dev);
if (err)
goto err_uninit_aeq;
err = erdma_wait_hw_init_done(dev);
if (err)
goto err_uninit_cmdq;
err = erdma_ceqs_init(dev);
if (err)
goto err_reset_hw;
erdma_finish_cmdq_init(dev);
return 0;
err_reset_hw:
erdma_hw_reset(dev);
err_uninit_cmdq:
erdma_cmdq_destroy(dev);
err_uninit_aeq:
erdma_eq_destroy(dev, &dev->aeq);
err_uninit_comm_irq:
erdma_comm_irq_uninit(dev);
err_free_vectors:
pci_free_irq_vectors(dev->pdev);
err_uninit_device:
erdma_device_uninit(dev);
err_iounmap_func_bar:
devm_iounmap(&pdev->dev, dev->func_bar);
err_release_bars:
pci_release_selected_regions(pdev, bars);
err_ib_device_release:
ib_dealloc_device(&dev->ibdev);
err_disable_device:
pci_disable_device(pdev);
return err;
}
static void erdma_remove_dev(struct pci_dev *pdev)
{
struct erdma_dev *dev = pci_get_drvdata(pdev);
erdma_ceqs_uninit(dev);
erdma_hw_reset(dev);
erdma_cmdq_destroy(dev);
erdma_eq_destroy(dev, &dev->aeq);
erdma_comm_irq_uninit(dev);
pci_free_irq_vectors(dev->pdev);
erdma_device_uninit(dev);
devm_iounmap(&pdev->dev, dev->func_bar);
pci_release_selected_regions(pdev, ERDMA_BAR_MASK);
ib_dealloc_device(&dev->ibdev);
pci_disable_device(pdev);
}
#define ERDMA_GET_CAP(name, cap) FIELD_GET(ERDMA_CMD_DEV_CAP_##name##_MASK, cap)
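/*
 * ERDMA_GET_CAP() is a thin FIELD_GET() wrapper: for example,
 * ERDMA_GET_CAP(MAX_CQE, cap0) extracts the bits selected by
 * ERDMA_CMD_DEV_CAP_MAX_CQE_MASK from cap0 and shifts them down to bit 0,
 * so "1 << ERDMA_GET_CAP(MAX_CQE, cap0)" below decodes a log2-encoded
 * capability into its actual count.
 */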
static int erdma_dev_attrs_init(struct erdma_dev *dev)
{
int err;
u64 req_hdr, cap0, cap1;
erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_QUERY_DEVICE);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
&cap1);
if (err)
return err;
dev->attrs.max_cqe = 1 << ERDMA_GET_CAP(MAX_CQE, cap0);
dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
dev->attrs.max_mr = dev->attrs.max_qp << 1;
dev->attrs.max_cq = dev->attrs.max_qp << 1;
dev->attrs.cap_flags = ERDMA_GET_CAP(FLAGS, cap0);
dev->attrs.max_send_wr = ERDMA_MAX_SEND_WR;
dev->attrs.max_ord = ERDMA_MAX_ORD;
dev->attrs.max_ird = ERDMA_MAX_IRD;
dev->attrs.max_send_sge = ERDMA_MAX_SEND_SGE;
dev->attrs.max_recv_sge = ERDMA_MAX_RECV_SGE;
dev->attrs.max_sge_rd = ERDMA_MAX_SGE_RD;
dev->attrs.max_pd = ERDMA_MAX_PD;
dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;
erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_QUERY_FW_INFO);
err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
&cap1);
if (!err)
dev->attrs.fw_version =
FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);
return err;
}
static int erdma_device_config(struct erdma_dev *dev)
{
struct erdma_cmdq_config_device_req req = {};
if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_EXTEND_DB))
return 0;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_CONF_DEVICE);
req.cfg = FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK, PAGE_SHIFT) |
FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK, 1);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_res_cb_init(struct erdma_dev *dev)
{
int i, j;
for (i = 0; i < ERDMA_RES_CNT; i++) {
dev->res_cb[i].next_alloc_idx = 1;
spin_lock_init(&dev->res_cb[i].lock);
dev->res_cb[i].bitmap =
bitmap_zalloc(dev->res_cb[i].max_cap, GFP_KERNEL);
if (!dev->res_cb[i].bitmap)
goto err;
}
return 0;
err:
for (j = 0; j < i; j++)
bitmap_free(dev->res_cb[j].bitmap);
return -ENOMEM;
}
static void erdma_res_cb_free(struct erdma_dev *dev)
{
int i;
for (i = 0; i < ERDMA_RES_CNT; i++)
bitmap_free(dev->res_cb[i].bitmap);
}
static const struct ib_device_ops erdma_device_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_ERDMA,
.uverbs_abi_ver = ERDMA_ABI_VERSION,
.alloc_hw_port_stats = erdma_alloc_hw_port_stats,
.alloc_mr = erdma_ib_alloc_mr,
.alloc_pd = erdma_alloc_pd,
.alloc_ucontext = erdma_alloc_ucontext,
.create_cq = erdma_create_cq,
.create_qp = erdma_create_qp,
.dealloc_pd = erdma_dealloc_pd,
.dealloc_ucontext = erdma_dealloc_ucontext,
.dereg_mr = erdma_dereg_mr,
.destroy_cq = erdma_destroy_cq,
.destroy_qp = erdma_destroy_qp,
.disassociate_ucontext = erdma_disassociate_ucontext,
.get_dma_mr = erdma_get_dma_mr,
.get_hw_stats = erdma_get_hw_stats,
.get_port_immutable = erdma_get_port_immutable,
.iw_accept = erdma_accept,
.iw_add_ref = erdma_qp_get_ref,
.iw_connect = erdma_connect,
.iw_create_listen = erdma_create_listen,
.iw_destroy_listen = erdma_destroy_listen,
.iw_get_qp = erdma_get_ibqp,
.iw_reject = erdma_reject,
.iw_rem_ref = erdma_qp_put_ref,
.map_mr_sg = erdma_map_mr_sg,
.mmap = erdma_mmap,
.mmap_free = erdma_mmap_free,
.modify_qp = erdma_modify_qp,
.post_recv = erdma_post_recv,
.post_send = erdma_post_send,
.poll_cq = erdma_poll_cq,
.query_device = erdma_query_device,
.query_gid = erdma_query_gid,
.query_port = erdma_query_port,
.query_qp = erdma_query_qp,
.req_notify_cq = erdma_req_notify_cq,
.reg_user_mr = erdma_reg_user_mr,
INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, erdma_ucontext, ibucontext),
INIT_RDMA_OBJ_SIZE(ib_qp, erdma_qp, ibqp),
};
static int erdma_ib_device_add(struct pci_dev *pdev)
{
struct erdma_dev *dev = pci_get_drvdata(pdev);
struct ib_device *ibdev = &dev->ibdev;
u64 mac;
int ret;
ret = erdma_dev_attrs_init(dev);
if (ret)
return ret;
ret = erdma_device_config(dev);
if (ret)
return ret;
ibdev->node_type = RDMA_NODE_RNIC;
memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));
/*
* Current model (one-to-one device association):
* One ERDMA device per net_device or, equivalently,
* per physical port.
*/
ibdev->phys_port_cnt = 1;
ibdev->num_comp_vectors = dev->attrs.irq_num - 1;
ib_set_device_ops(ibdev, &erdma_device_ops);
INIT_LIST_HEAD(&dev->cep_list);
spin_lock_init(&dev->lock);
xa_init_flags(&dev->qp_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&dev->cq_xa, XA_FLAGS_ALLOC1);
dev->next_alloc_cqn = 1;
dev->next_alloc_qpn = 1;
ret = erdma_res_cb_init(dev);
if (ret)
return ret;
atomic_set(&dev->num_ctx, 0);
mac = erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_L_REG);
mac |= (u64)erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_H_REG) << 32;
u64_to_ether_addr(mac, dev->attrs.peer_addr);
dev->reflush_wq = alloc_workqueue("erdma-reflush-wq", WQ_UNBOUND,
WQ_UNBOUND_MAX_ACTIVE);
if (!dev->reflush_wq) {
ret = -ENOMEM;
goto err_alloc_workqueue;
}
ret = erdma_device_register(dev);
if (ret)
goto err_register;
return 0;
err_register:
destroy_workqueue(dev->reflush_wq);
err_alloc_workqueue:
xa_destroy(&dev->qp_xa);
xa_destroy(&dev->cq_xa);
erdma_res_cb_free(dev);
return ret;
}
static void erdma_ib_device_remove(struct pci_dev *pdev)
{
struct erdma_dev *dev = pci_get_drvdata(pdev);
unregister_netdevice_notifier(&dev->netdev_nb);
ib_unregister_device(&dev->ibdev);
destroy_workqueue(dev->reflush_wq);
erdma_res_cb_free(dev);
xa_destroy(&dev->qp_xa);
xa_destroy(&dev->cq_xa);
}
static int erdma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret;
ret = erdma_probe_dev(pdev);
if (ret)
return ret;
ret = erdma_ib_device_add(pdev);
if (ret) {
erdma_remove_dev(pdev);
return ret;
}
return 0;
}
static void erdma_remove(struct pci_dev *pdev)
{
erdma_ib_device_remove(pdev);
erdma_remove_dev(pdev);
}
static struct pci_driver erdma_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = erdma_pci_tbl,
.probe = erdma_probe,
.remove = erdma_remove
};
MODULE_DEVICE_TABLE(pci, erdma_pci_tbl);
static __init int erdma_init_module(void)
{
int ret;
ret = erdma_cm_init();
if (ret)
return ret;
ret = pci_register_driver(&erdma_pci_driver);
if (ret)
erdma_cm_exit();
return ret;
}
static void __exit erdma_exit_module(void)
{
pci_unregister_driver(&erdma_pci_driver);
erdma_cm_exit();
}
module_init(erdma_init_module);
module_exit(erdma_exit_module);
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_devmap_val));
__uint(max_entries, 4);
} dm_ports SEC(".maps");
/* Valid program on a DEVMAP entry via its SEC name;
 * has access to the egress and ingress ifindex.
 */
SEC("xdp/devmap")
int xdp_dummy_dm(struct xdp_md *ctx)
{
return XDP_PASS;
}
SEC("xdp.frags/devmap")
int xdp_dummy_dm_frags(struct xdp_md *ctx)
{
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
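/*
 * Loader-side sketch (illustrative; "map_fd", "prog_fd" and "ifindex"
 * are assumed to come from libbpf after loading this object): a DEVMAP
 * entry can carry a program fd alongside the target ifindex, which is
 * what lets the "xdp/devmap" program above run on redirected packets.
 *
 *	__u32 key = 0;
 *	struct bpf_devmap_val val = {
 *		.ifindex = ifindex,
 *		.bpf_prog.fd = prog_fd,
 *	};
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */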
|
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef KFD_PM4_HEADERS_H_
#define KFD_PM4_HEADERS_H_
#ifndef PM4_MES_HEADER_DEFINED
#define PM4_MES_HEADER_DEFINED
union PM4_MES_TYPE_3_HEADER {
struct {
/* reserved */
uint32_t reserved1:8;
/* IT opcode */
uint32_t opcode:8;
/* number of DWORDs - 1 in the information body */
uint32_t count:14;
/* packet identifier. It should be 3 for type 3 packets */
uint32_t type:2;
};
uint32_t u32all;
};
#endif /* PM4_MES_HEADER_DEFINED */
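/*
 * A minimal header-building sketch (illustrative; "it_opcode" and
 * "body_dwords" are assumptions, see the firmware spec for real opcode
 * values): the "count" field carries the number of body DWORDs minus one.
 */
static inline uint32_t pm4_example_type3_header(uint8_t it_opcode,
						uint16_t body_dwords)
{
	union PM4_MES_TYPE_3_HEADER hdr = { .u32all = 0 };

	hdr.type = 3;			/* type-3 packet identifier */
	hdr.opcode = it_opcode;		/* IT opcode */
	hdr.count = body_dwords - 1;	/* DWORDs in body, minus one */

	return hdr.u32all;
}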
/*--------------------MES_MAP_PROCESS-------------------- */
#ifndef PM4_MES_MAP_PROCESS_DEFINED
#define PM4_MES_MAP_PROCESS_DEFINED
struct pm4_map_process {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
};
union {
struct {
uint32_t pasid:16;
uint32_t reserved1:8;
uint32_t diq_enable:1;
uint32_t process_quantum:7;
} bitfields2;
uint32_t ordinal2;
};
union {
struct {
uint32_t page_table_base:28;
uint32_t reserved3:4;
} bitfields3;
uint32_t ordinal3;
};
uint32_t sh_mem_bases;
uint32_t sh_mem_ape1_base;
uint32_t sh_mem_ape1_limit;
uint32_t sh_mem_config;
uint32_t gds_addr_lo;
uint32_t gds_addr_hi;
union {
struct {
uint32_t num_gws:6;
uint32_t reserved4:2;
uint32_t num_oac:4;
uint32_t reserved5:4;
uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields10;
uint32_t ordinal10;
};
};
#endif
#ifndef PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
#define PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
struct pm4_map_process_scratch_kv {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
};
union {
struct {
uint32_t pasid:16;
uint32_t reserved1:8;
uint32_t diq_enable:1;
uint32_t process_quantum:7;
} bitfields2;
uint32_t ordinal2;
};
union {
struct {
uint32_t page_table_base:28;
uint32_t reserved2:4;
} bitfields3;
uint32_t ordinal3;
};
uint32_t reserved3;
uint32_t sh_mem_bases;
uint32_t sh_mem_config;
uint32_t sh_mem_ape1_base;
uint32_t sh_mem_ape1_limit;
uint32_t sh_hidden_private_base_vmid;
uint32_t reserved4;
uint32_t reserved5;
uint32_t gds_addr_lo;
uint32_t gds_addr_hi;
union {
struct {
uint32_t num_gws:6;
uint32_t reserved6:2;
uint32_t num_oac:4;
uint32_t reserved7:4;
uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields14;
uint32_t ordinal14;
};
uint32_t completion_signal_lo32;
uint32_t completion_signal_hi32;
};
#endif
enum {
CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
};
#endif /* KFD_PM4_HEADERS_H_ */
|
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
//
// tegra210_i2s.c - Tegra210 I2S driver
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm_params.h>
#include <sound/simple_card_utils.h>
#include <sound/soc.h>
#include "tegra210_i2s.h"
#include "tegra_cif.h"
static const struct reg_default tegra210_i2s_reg_defaults[] = {
{ TEGRA210_I2S_RX_INT_MASK, 0x00000003 },
{ TEGRA210_I2S_RX_CIF_CTRL, 0x00007700 },
{ TEGRA210_I2S_TX_INT_MASK, 0x00000003 },
{ TEGRA210_I2S_TX_CIF_CTRL, 0x00007700 },
{ TEGRA210_I2S_CG, 0x1 },
{ TEGRA210_I2S_TIMING, 0x0000001f },
{ TEGRA210_I2S_ENABLE, 0x1 },
/*
	 * The update below has no effect on Tegra186 and Tegra194.
	 * On Tegra210, I2S4 has "i2s4a" and "i2s4b" pins, and this
	 * update is required to select i2s4b so that the instance is
	 * functional for I2S operation.
*/
{ TEGRA210_I2S_CYA, 0x1 },
};
static void tegra210_i2s_set_slot_ctrl(struct regmap *regmap,
unsigned int total_slots,
unsigned int tx_slot_mask,
unsigned int rx_slot_mask)
{
regmap_write(regmap, TEGRA210_I2S_SLOT_CTRL, total_slots - 1);
regmap_write(regmap, TEGRA210_I2S_TX_SLOT_CTRL, tx_slot_mask);
regmap_write(regmap, TEGRA210_I2S_RX_SLOT_CTRL, rx_slot_mask);
}
static int tegra210_i2s_set_clock_rate(struct device *dev,
unsigned int clock_rate)
{
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
unsigned int val;
int err;
regmap_read(i2s->regmap, TEGRA210_I2S_CTRL, &val);
	/* No need to set rates if I2S is operated in slave mode */
if (!(val & I2S_CTRL_MASTER_EN))
return 0;
err = clk_set_rate(i2s->clk_i2s, clock_rate);
if (err) {
dev_err(dev, "can't set I2S bit clock rate %u, err: %d\n",
clock_rate, err);
return err;
}
if (!IS_ERR(i2s->clk_sync_input)) {
/*
		 * Other I/O modules in AHUB can use the I2S bclk as their
		 * reference clock. Set the sync input clock rate to match
		 * bclk so it can feed those modules.
*/
err = clk_set_rate(i2s->clk_sync_input, clock_rate);
if (err) {
dev_err(dev,
"can't set I2S sync input rate %u, err = %d\n",
clock_rate, err);
return err;
}
}
return 0;
}
static int tegra210_i2s_sw_reset(struct snd_soc_component *compnt,
int stream)
{
struct device *dev = compnt->dev;
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
unsigned int reset_mask = I2S_SOFT_RESET_MASK;
unsigned int reset_en = I2S_SOFT_RESET_EN;
unsigned int reset_reg, cif_reg, stream_reg;
unsigned int cif_ctrl, stream_ctrl, i2s_ctrl, val;
int err;
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
reset_reg = TEGRA210_I2S_RX_SOFT_RESET;
cif_reg = TEGRA210_I2S_RX_CIF_CTRL;
stream_reg = TEGRA210_I2S_RX_CTRL;
} else {
reset_reg = TEGRA210_I2S_TX_SOFT_RESET;
cif_reg = TEGRA210_I2S_TX_CIF_CTRL;
stream_reg = TEGRA210_I2S_TX_CTRL;
}
/* Store CIF and I2S control values */
regmap_read(i2s->regmap, cif_reg, &cif_ctrl);
regmap_read(i2s->regmap, stream_reg, &stream_ctrl);
regmap_read(i2s->regmap, TEGRA210_I2S_CTRL, &i2s_ctrl);
/* Reset to make sure the previous transactions are clean */
regmap_update_bits(i2s->regmap, reset_reg, reset_mask, reset_en);
err = regmap_read_poll_timeout(i2s->regmap, reset_reg, val,
!(val & reset_mask & reset_en),
10, 10000);
if (err) {
dev_err(dev, "timeout: failed to reset I2S for %s\n",
snd_pcm_direction_name(stream));
return err;
}
/* Restore CIF and I2S control values */
regmap_write(i2s->regmap, cif_reg, cif_ctrl);
regmap_write(i2s->regmap, stream_reg, stream_ctrl);
regmap_write(i2s->regmap, TEGRA210_I2S_CTRL, i2s_ctrl);
return 0;
}
static int tegra210_i2s_init(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *compnt = snd_soc_dapm_to_component(w->dapm);
struct device *dev = compnt->dev;
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
unsigned int val, status_reg;
int stream;
int err;
switch (w->reg) {
case TEGRA210_I2S_RX_ENABLE:
stream = SNDRV_PCM_STREAM_PLAYBACK;
status_reg = TEGRA210_I2S_RX_STATUS;
break;
case TEGRA210_I2S_TX_ENABLE:
stream = SNDRV_PCM_STREAM_CAPTURE;
status_reg = TEGRA210_I2S_TX_STATUS;
break;
default:
return -EINVAL;
}
/* Ensure I2S is in disabled state before new session */
err = regmap_read_poll_timeout(i2s->regmap, status_reg, val,
!(val & I2S_EN_MASK & I2S_EN),
10, 10000);
if (err) {
dev_err(dev, "timeout: previous I2S %s is still active\n",
snd_pcm_direction_name(stream));
return err;
}
return tegra210_i2s_sw_reset(compnt, stream);
}
static int __maybe_unused tegra210_i2s_runtime_suspend(struct device *dev)
{
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
regcache_cache_only(i2s->regmap, true);
regcache_mark_dirty(i2s->regmap);
clk_disable_unprepare(i2s->clk_i2s);
return 0;
}
static int __maybe_unused tegra210_i2s_runtime_resume(struct device *dev)
{
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(i2s->clk_i2s);
if (err) {
dev_err(dev, "failed to enable I2S bit clock, err: %d\n", err);
return err;
}
regcache_cache_only(i2s->regmap, false);
regcache_sync(i2s->regmap);
return 0;
}
static void tegra210_i2s_set_data_offset(struct tegra210_i2s *i2s,
unsigned int data_offset)
{
/* Capture path */
regmap_update_bits(i2s->regmap, TEGRA210_I2S_TX_CTRL,
I2S_CTRL_DATA_OFFSET_MASK,
data_offset << I2S_DATA_SHIFT);
/* Playback path */
regmap_update_bits(i2s->regmap, TEGRA210_I2S_RX_CTRL,
I2S_CTRL_DATA_OFFSET_MASK,
data_offset << I2S_DATA_SHIFT);
}
static int tegra210_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int mask, val;
mask = I2S_CTRL_MASTER_EN_MASK;
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BC_FC:
val = 0;
break;
case SND_SOC_DAIFMT_BP_FP:
val = I2S_CTRL_MASTER_EN;
break;
default:
return -EINVAL;
}
mask |= I2S_CTRL_FRAME_FMT_MASK | I2S_CTRL_LRCK_POL_MASK;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
val |= I2S_CTRL_FRAME_FMT_FSYNC_MODE;
val |= I2S_CTRL_LRCK_POL_HIGH;
tegra210_i2s_set_data_offset(i2s, 1);
break;
case SND_SOC_DAIFMT_DSP_B:
val |= I2S_CTRL_FRAME_FMT_FSYNC_MODE;
val |= I2S_CTRL_LRCK_POL_HIGH;
tegra210_i2s_set_data_offset(i2s, 0);
break;
/* I2S mode has data offset of 1 */
case SND_SOC_DAIFMT_I2S:
val |= I2S_CTRL_FRAME_FMT_LRCK_MODE;
val |= I2S_CTRL_LRCK_POL_LOW;
tegra210_i2s_set_data_offset(i2s, 1);
break;
/*
* For RJ mode data offset is dependent on the sample size
* and the bclk ratio, and so is set when hw_params is called.
*/
case SND_SOC_DAIFMT_RIGHT_J:
val |= I2S_CTRL_FRAME_FMT_LRCK_MODE;
val |= I2S_CTRL_LRCK_POL_HIGH;
break;
case SND_SOC_DAIFMT_LEFT_J:
val |= I2S_CTRL_FRAME_FMT_LRCK_MODE;
val |= I2S_CTRL_LRCK_POL_HIGH;
tegra210_i2s_set_data_offset(i2s, 0);
break;
default:
return -EINVAL;
}
mask |= I2S_CTRL_EDGE_CTRL_MASK;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
val |= I2S_CTRL_EDGE_CTRL_POS_EDGE;
break;
case SND_SOC_DAIFMT_NB_IF:
val |= I2S_CTRL_EDGE_CTRL_POS_EDGE;
val ^= I2S_CTRL_LRCK_POL_MASK;
break;
case SND_SOC_DAIFMT_IB_NF:
val |= I2S_CTRL_EDGE_CTRL_NEG_EDGE;
break;
case SND_SOC_DAIFMT_IB_IF:
val |= I2S_CTRL_EDGE_CTRL_NEG_EDGE;
val ^= I2S_CTRL_LRCK_POL_MASK;
break;
default:
return -EINVAL;
}
regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, mask, val);
i2s->dai_fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
return 0;
}
static int tegra210_i2s_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask,
int slots, int slot_width)
{
struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
/* Copy the required tx and rx mask */
i2s->tx_mask = (tx_mask > DEFAULT_I2S_SLOT_MASK) ?
DEFAULT_I2S_SLOT_MASK : tx_mask;
i2s->rx_mask = (rx_mask > DEFAULT_I2S_SLOT_MASK) ?
DEFAULT_I2S_SLOT_MASK : rx_mask;
return 0;
}
static int tegra210_i2s_get_loopback(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
ucontrol->value.integer.value[0] = i2s->loopback;
return 0;
}
static int tegra210_i2s_put_loopback(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
int value = ucontrol->value.integer.value[0];
if (value == i2s->loopback)
return 0;
i2s->loopback = value;
regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, I2S_CTRL_LPBK_MASK,
i2s->loopback << I2S_CTRL_LPBK_SHIFT);
return 1;
}
static int tegra210_i2s_get_fsync_width(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
ucontrol->value.integer.value[0] = i2s->fsync_width;
return 0;
}
static int tegra210_i2s_put_fsync_width(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
int value = ucontrol->value.integer.value[0];
if (value == i2s->fsync_width)
return 0;
i2s->fsync_width = value;
/*
	 * Frame sync width is used only in FSYNC modes and is not
	 * applicable in LRCK modes. The reset value of this field is
	 * "0", which means the width is one bit clock wide.
	 * The width requirement may depend on the codec, in which case
	 * this mixer control is used to program a custom value. A value
	 * of "N" here means the width is "N + 1" bit clocks wide.
*/
regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
I2S_CTRL_FSYNC_WIDTH_MASK,
i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
return 1;
}
static int tegra210_i2s_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_TX_PATH];
return 0;
}
static int tegra210_i2s_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == i2s->stereo_to_mono[I2S_TX_PATH])
return 0;
i2s->stereo_to_mono[I2S_TX_PATH] = value;
return 1;
}
static int tegra210_i2s_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_TX_PATH];
return 0;
}
static int tegra210_i2s_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == i2s->mono_to_stereo[I2S_TX_PATH])
return 0;
i2s->mono_to_stereo[I2S_TX_PATH] = value;
return 1;
}
static int tegra210_i2s_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_RX_PATH];
return 0;
}
static int tegra210_i2s_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == i2s->stereo_to_mono[I2S_RX_PATH])
return 0;
i2s->stereo_to_mono[I2S_RX_PATH] = value;
return 1;
}
static int tegra210_i2s_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_RX_PATH];
return 0;
}
static int tegra210_i2s_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
unsigned int value = ucontrol->value.enumerated.item[0];
if (value == i2s->mono_to_stereo[I2S_RX_PATH])
return 0;
i2s->mono_to_stereo[I2S_RX_PATH] = value;
return 1;
}
static int tegra210_i2s_pget_fifo_th(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
ucontrol->value.integer.value[0] = i2s->rx_fifo_th;
return 0;
}
static int tegra210_i2s_pput_fifo_th(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
int value = ucontrol->value.integer.value[0];
if (value == i2s->rx_fifo_th)
return 0;
i2s->rx_fifo_th = value;
return 1;
}
static int tegra210_i2s_get_bclk_ratio(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
ucontrol->value.integer.value[0] = i2s->bclk_ratio;
return 0;
}
static int tegra210_i2s_put_bclk_ratio(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
int value = ucontrol->value.integer.value[0];
if (value == i2s->bclk_ratio)
return 0;
i2s->bclk_ratio = value;
return 1;
}
static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
unsigned int ratio)
{
struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
i2s->bclk_ratio = ratio;
return 0;
}
static int tegra210_i2s_set_timing_params(struct device *dev,
unsigned int sample_size,
unsigned int srate,
unsigned int channels)
{
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
unsigned int val, bit_count, bclk_rate, num_bclk = sample_size;
int err;
if (i2s->bclk_ratio)
num_bclk *= i2s->bclk_ratio;
if (i2s->dai_fmt == SND_SOC_DAIFMT_RIGHT_J)
tegra210_i2s_set_data_offset(i2s, num_bclk - sample_size);
/* I2S bit clock rate */
bclk_rate = srate * channels * num_bclk;
err = tegra210_i2s_set_clock_rate(dev, bclk_rate);
if (err) {
dev_err(dev, "can't set I2S bit clock rate %u, err: %d\n",
bclk_rate, err);
return err;
}
regmap_read(i2s->regmap, TEGRA210_I2S_CTRL, &val);
/*
	 * In LRCK mode, the channel bit count depends on the number of bit
	 * clocks on the left channel, whereas in FSYNC mode it depends on
	 * the number of bit clocks in both left and right channels for DSP
	 * mode, or the number of bit clocks in one TDM frame.
*/
switch (val & I2S_CTRL_FRAME_FMT_MASK) {
case I2S_CTRL_FRAME_FMT_LRCK_MODE:
bit_count = (bclk_rate / (srate * 2)) - 1;
break;
case I2S_CTRL_FRAME_FMT_FSYNC_MODE:
bit_count = (bclk_rate / srate) - 1;
tegra210_i2s_set_slot_ctrl(i2s->regmap, channels,
i2s->tx_mask, i2s->rx_mask);
break;
default:
dev_err(dev, "invalid I2S frame format\n");
return -EINVAL;
}
if (bit_count > I2S_TIMING_CH_BIT_CNT_MASK) {
dev_err(dev, "invalid I2S channel bit count %u\n", bit_count);
return -EINVAL;
}
regmap_write(i2s->regmap, TEGRA210_I2S_TIMING,
bit_count << I2S_TIMING_CH_BIT_CNT_SHIFT);
return 0;
}
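/*
 * Worked example (illustrative numbers): 48 kHz stereo with 32 bit
 * clocks per sample gives bclk_rate = 48000 * 2 * 32 = 3.072 MHz.
 * LRCK mode then programs bit_count = 3072000 / (48000 * 2) - 1 = 31
 * (32 bit clocks per channel), while FSYNC mode would program
 * 3072000 / 48000 - 1 = 63 for the whole frame.
 */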
static int tegra210_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct device *dev = dai->dev;
struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int sample_size, channels, srate, val, reg, path;
struct tegra_cif_conf cif_conf;
snd_pcm_format_t sample_format;
memset(&cif_conf, 0, sizeof(struct tegra_cif_conf));
channels = params_channels(params);
if (channels < 1) {
dev_err(dev, "invalid I2S %d channel configuration\n",
channels);
return -EINVAL;
}
cif_conf.audio_ch = channels;
cif_conf.client_ch = channels;
if (i2s->client_channels)
cif_conf.client_ch = i2s->client_channels;
/* AHUB CIF Audio bits configs */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
cif_conf.audio_bits = TEGRA_ACIF_BITS_8;
break;
case SNDRV_PCM_FORMAT_S16_LE:
cif_conf.audio_bits = TEGRA_ACIF_BITS_16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
case SNDRV_PCM_FORMAT_S32_LE:
cif_conf.audio_bits = TEGRA_ACIF_BITS_32;
break;
default:
dev_err(dev, "unsupported params audio bit format!\n");
return -EOPNOTSUPP;
}
sample_format = params_format(params);
if (i2s->client_sample_format >= 0)
sample_format = (snd_pcm_format_t)i2s->client_sample_format;
/*
	 * I2S format used for sending/receiving audio to/from the
	 * external device.
*/
switch (sample_format) {
case SNDRV_PCM_FORMAT_S8:
val = I2S_BITS_8;
sample_size = 8;
cif_conf.client_bits = TEGRA_ACIF_BITS_8;
break;
case SNDRV_PCM_FORMAT_S16_LE:
val = I2S_BITS_16;
sample_size = 16;
cif_conf.client_bits = TEGRA_ACIF_BITS_16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
val = I2S_BITS_24;
sample_size = 32;
cif_conf.client_bits = TEGRA_ACIF_BITS_24;
break;
case SNDRV_PCM_FORMAT_S32_LE:
val = I2S_BITS_32;
sample_size = 32;
cif_conf.client_bits = TEGRA_ACIF_BITS_32;
break;
default:
dev_err(dev, "unsupported client bit format!\n");
return -EOPNOTSUPP;
}
/* Program sample size */
regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
I2S_CTRL_BIT_SIZE_MASK, val);
srate = params_rate(params);
/* For playback I2S RX-CIF and for capture TX-CIF is used */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
path = I2S_RX_PATH;
else
path = I2S_TX_PATH;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
unsigned int max_th;
/* FIFO threshold in terms of frames */
max_th = (I2S_RX_FIFO_DEPTH / cif_conf.audio_ch) - 1;
if (i2s->rx_fifo_th > max_th)
i2s->rx_fifo_th = max_th;
cif_conf.threshold = i2s->rx_fifo_th;
reg = TEGRA210_I2S_RX_CIF_CTRL;
} else {
reg = TEGRA210_I2S_TX_CIF_CTRL;
}
cif_conf.mono_conv = i2s->mono_to_stereo[path];
cif_conf.stereo_conv = i2s->stereo_to_mono[path];
tegra_set_cif(i2s->regmap, reg, &cif_conf);
return tegra210_i2s_set_timing_params(dev, sample_size, srate,
cif_conf.client_ch);
}
static const struct snd_soc_dai_ops tegra210_i2s_dai_ops = {
.set_fmt = tegra210_i2s_set_fmt,
.hw_params = tegra210_i2s_hw_params,
.set_bclk_ratio = tegra210_i2s_set_dai_bclk_ratio,
.set_tdm_slot = tegra210_i2s_set_tdm_slot,
};
static struct snd_soc_dai_driver tegra210_i2s_dais[] = {
{
.name = "I2S-CIF",
.playback = {
.stream_name = "CIF-Playback",
.channels_min = 1,
.channels_max = 16,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
},
.capture = {
.stream_name = "CIF-Capture",
.channels_min = 1,
.channels_max = 16,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
},
},
{
.name = "I2S-DAP",
.playback = {
.stream_name = "DAP-Playback",
.channels_min = 1,
.channels_max = 16,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
},
.capture = {
.stream_name = "DAP-Capture",
.channels_min = 1,
.channels_max = 16,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
SNDRV_PCM_FMTBIT_S32_LE,
},
.ops = &tegra210_i2s_dai_ops,
.symmetric_rate = 1,
},
};
static const char * const tegra210_i2s_stereo_conv_text[] = {
"CH0", "CH1", "AVG",
};
static const char * const tegra210_i2s_mono_conv_text[] = {
"Zero", "Copy",
};
static const struct soc_enum tegra210_i2s_mono_conv_enum =
SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(tegra210_i2s_mono_conv_text),
tegra210_i2s_mono_conv_text);
static const struct soc_enum tegra210_i2s_stereo_conv_enum =
SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(tegra210_i2s_stereo_conv_text),
tegra210_i2s_stereo_conv_text);
static const struct snd_kcontrol_new tegra210_i2s_controls[] = {
SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_loopback,
tegra210_i2s_put_loopback),
SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0,
tegra210_i2s_get_fsync_width,
tegra210_i2s_put_fsync_width),
SOC_ENUM_EXT("Capture Stereo To Mono", tegra210_i2s_stereo_conv_enum,
tegra210_i2s_cget_stereo_to_mono,
tegra210_i2s_cput_stereo_to_mono),
SOC_ENUM_EXT("Capture Mono To Stereo", tegra210_i2s_mono_conv_enum,
tegra210_i2s_cget_mono_to_stereo,
tegra210_i2s_cput_mono_to_stereo),
SOC_ENUM_EXT("Playback Stereo To Mono", tegra210_i2s_stereo_conv_enum,
tegra210_i2s_pget_mono_to_stereo,
tegra210_i2s_pput_mono_to_stereo),
SOC_ENUM_EXT("Playback Mono To Stereo", tegra210_i2s_mono_conv_enum,
tegra210_i2s_pget_stereo_to_mono,
tegra210_i2s_pput_stereo_to_mono),
SOC_SINGLE_EXT("Playback FIFO Threshold", 0, 0, I2S_RX_FIFO_DEPTH - 1,
0, tegra210_i2s_pget_fifo_th, tegra210_i2s_pput_fifo_th),
SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0,
tegra210_i2s_get_bclk_ratio,
tegra210_i2s_put_bclk_ratio),
};
static const struct snd_soc_dapm_widget tegra210_i2s_widgets[] = {
SND_SOC_DAPM_AIF_IN_E("RX", NULL, 0, TEGRA210_I2S_RX_ENABLE,
0, 0, tegra210_i2s_init, SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_AIF_OUT_E("TX", NULL, 0, TEGRA210_I2S_TX_ENABLE,
0, 0, tegra210_i2s_init, SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_MIC("MIC", NULL),
SND_SOC_DAPM_SPK("SPK", NULL),
};
static const struct snd_soc_dapm_route tegra210_i2s_routes[] = {
/* Playback route from XBAR */
{ "XBAR-Playback", NULL, "XBAR-TX" },
{ "CIF-Playback", NULL, "XBAR-Playback" },
{ "RX", NULL, "CIF-Playback" },
{ "DAP-Playback", NULL, "RX" },
{ "SPK", NULL, "DAP-Playback" },
/* Capture route to XBAR */
{ "XBAR-RX", NULL, "XBAR-Capture" },
{ "XBAR-Capture", NULL, "CIF-Capture" },
{ "CIF-Capture", NULL, "TX" },
{ "TX", NULL, "DAP-Capture" },
{ "DAP-Capture", NULL, "MIC" },
};
static const struct snd_soc_component_driver tegra210_i2s_cmpnt = {
.dapm_widgets = tegra210_i2s_widgets,
.num_dapm_widgets = ARRAY_SIZE(tegra210_i2s_widgets),
.dapm_routes = tegra210_i2s_routes,
.num_dapm_routes = ARRAY_SIZE(tegra210_i2s_routes),
.controls = tegra210_i2s_controls,
.num_controls = ARRAY_SIZE(tegra210_i2s_controls),
};
static bool tegra210_i2s_wr_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case TEGRA210_I2S_RX_ENABLE ... TEGRA210_I2S_RX_SOFT_RESET:
case TEGRA210_I2S_RX_INT_MASK ... TEGRA210_I2S_RX_CLK_TRIM:
case TEGRA210_I2S_TX_ENABLE ... TEGRA210_I2S_TX_SOFT_RESET:
case TEGRA210_I2S_TX_INT_MASK ... TEGRA210_I2S_TX_CLK_TRIM:
case TEGRA210_I2S_ENABLE ... TEGRA210_I2S_CG:
case TEGRA210_I2S_CTRL ... TEGRA210_I2S_CYA:
return true;
default:
return false;
}
}
static bool tegra210_i2s_rd_reg(struct device *dev, unsigned int reg)
{
if (tegra210_i2s_wr_reg(dev, reg))
return true;
switch (reg) {
case TEGRA210_I2S_RX_STATUS:
case TEGRA210_I2S_RX_INT_STATUS:
case TEGRA210_I2S_RX_CIF_FIFO_STATUS:
case TEGRA210_I2S_TX_STATUS:
case TEGRA210_I2S_TX_INT_STATUS:
case TEGRA210_I2S_TX_CIF_FIFO_STATUS:
case TEGRA210_I2S_STATUS:
case TEGRA210_I2S_INT_STATUS:
return true;
default:
return false;
}
}
static bool tegra210_i2s_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case TEGRA210_I2S_RX_STATUS:
case TEGRA210_I2S_RX_INT_STATUS:
case TEGRA210_I2S_RX_CIF_FIFO_STATUS:
case TEGRA210_I2S_TX_STATUS:
case TEGRA210_I2S_TX_INT_STATUS:
case TEGRA210_I2S_TX_CIF_FIFO_STATUS:
case TEGRA210_I2S_STATUS:
case TEGRA210_I2S_INT_STATUS:
case TEGRA210_I2S_RX_SOFT_RESET:
case TEGRA210_I2S_TX_SOFT_RESET:
return true;
default:
return false;
}
}
static const struct regmap_config tegra210_i2s_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = TEGRA210_I2S_CYA,
.writeable_reg = tegra210_i2s_wr_reg,
.readable_reg = tegra210_i2s_rd_reg,
.volatile_reg = tegra210_i2s_volatile_reg,
.reg_defaults = tegra210_i2s_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(tegra210_i2s_reg_defaults),
.cache_type = REGCACHE_FLAT,
};
/*
 * The AHUB HW modules are interconnected through CIFs, which are capable of
 * channel and sample bit format conversion. This requires separate CIF
 * audio and client configurations. As one part of the configuration comes
 * from params_channels() or params_format(), the extra configuration is
 * passed via the CIF port of the I2S DT node, which allows this conversion
 * to be performed, e.g.:
 *
 *    4ch audio = 4ch client = 2ch       2ch
 * -----> ADMAIF -----------> CIF -------------> I2S ---->
 */
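/*
 * A minimal sketch of such a DT fragment (hypothetical, not taken from a
 * real board file; the convert-* properties follow the simple-audio-card
 * conversion bindings read by simple_util_parse_convert() below):
 *
 *	i2s {
 *		ports {
 *			port@0 {
 *				endpoint {
 *					convert-channels = <2>;
 *					convert-sample-format = "s16_le";
 *				};
 *			};
 *		};
 *	};
 */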
static void tegra210_parse_client_convert(struct device *dev)
{
struct tegra210_i2s *i2s = dev_get_drvdata(dev);
struct device_node *ports, *ep;
struct simple_util_data data = {};
int cif_port = 0;
ports = of_get_child_by_name(dev->of_node, "ports");
if (ports) {
ep = of_graph_get_endpoint_by_regs(ports, cif_port, -1);
if (ep) {
simple_util_parse_convert(ep, NULL, &data);
of_node_put(ep);
}
of_node_put(ports);
}
if (data.convert_channels)
i2s->client_channels = data.convert_channels;
if (data.convert_sample_format)
i2s->client_sample_format = simple_util_get_sample_fmt(&data);
}
static int tegra210_i2s_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra210_i2s *i2s;
void __iomem *regs;
int err;
i2s = devm_kzalloc(dev, sizeof(*i2s), GFP_KERNEL);
if (!i2s)
return -ENOMEM;
i2s->rx_fifo_th = DEFAULT_I2S_RX_FIFO_THRESHOLD;
i2s->tx_mask = DEFAULT_I2S_SLOT_MASK;
i2s->rx_mask = DEFAULT_I2S_SLOT_MASK;
i2s->loopback = false;
i2s->client_sample_format = -EINVAL;
dev_set_drvdata(dev, i2s);
i2s->clk_i2s = devm_clk_get(dev, "i2s");
if (IS_ERR(i2s->clk_i2s)) {
dev_err(dev, "can't retrieve I2S bit clock\n");
return PTR_ERR(i2s->clk_i2s);
}
/*
* Not an error, as this clock is needed only when some other I/O
 * requires an input clock from the current I2S instance, which is
* configurable from DT.
*/
i2s->clk_sync_input = devm_clk_get(dev, "sync_input");
if (IS_ERR(i2s->clk_sync_input))
dev_dbg(dev, "can't retrieve I2S sync input clock\n");
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
i2s->regmap = devm_regmap_init_mmio(dev, regs,
&tegra210_i2s_regmap_config);
if (IS_ERR(i2s->regmap)) {
dev_err(dev, "regmap init failed\n");
return PTR_ERR(i2s->regmap);
}
tegra210_parse_client_convert(dev);
regcache_cache_only(i2s->regmap, true);
err = devm_snd_soc_register_component(dev, &tegra210_i2s_cmpnt,
tegra210_i2s_dais,
ARRAY_SIZE(tegra210_i2s_dais));
if (err) {
dev_err(dev, "can't register I2S component, err: %d\n", err);
return err;
}
pm_runtime_enable(dev);
return 0;
}
static void tegra210_i2s_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static const struct dev_pm_ops tegra210_i2s_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_i2s_runtime_suspend,
tegra210_i2s_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static const struct of_device_id tegra210_i2s_of_match[] = {
{ .compatible = "nvidia,tegra210-i2s" },
{},
};
MODULE_DEVICE_TABLE(of, tegra210_i2s_of_match);
static struct platform_driver tegra210_i2s_driver = {
.driver = {
.name = "tegra210-i2s",
.of_match_table = tegra210_i2s_of_match,
.pm = &tegra210_i2s_pm_ops,
},
.probe = tegra210_i2s_probe,
.remove = tegra210_i2s_remove,
};
module_platform_driver(tegra210_i2s_driver);
MODULE_AUTHOR("Songhee Baek <[email protected]>");
MODULE_DESCRIPTION("Tegra210 ASoC I2S driver");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Russell King
*/
#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_fb.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
static const struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, 0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
};
DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static const struct drm_driver armada_drm_driver = {
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
ARMADA_FBDEV_DRIVER_OPS,
.major = 1,
.minor = 0,
.name = "armada-drm",
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.num_ioctls = ARRAY_SIZE(armada_ioctls),
.fops = &armada_drm_fops,
};
static const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
.fb_create = armada_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int armada_drm_bind(struct device *dev)
{
struct armada_private *priv;
struct resource *mem = NULL;
int ret, n;
for (n = 0; ; n++) {
struct resource *r = platform_get_resource(to_platform_device(dev),
IORESOURCE_MEM, n);
if (!r)
break;
/* Resources above 64K are graphics memory */
if (resource_size(r) > SZ_64K)
mem = r;
else
return -EINVAL;
}
if (!mem)
return -ENXIO;
if (!devm_request_mem_region(dev, mem->start, resource_size(mem),
"armada-drm"))
return -EBUSY;
priv = devm_drm_dev_alloc(dev, &armada_drm_driver,
struct armada_private, drm);
if (IS_ERR(priv)) {
dev_err(dev, "[" DRM_NAME ":%s] devm_drm_dev_alloc failed: %li\n",
__func__, PTR_ERR(priv));
return PTR_ERR(priv);
}
/* Remove early framebuffers */
ret = aperture_remove_all_conflicting_devices(armada_drm_driver.name);
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
return ret;
}
dev_set_drvdata(dev, &priv->drm);
/* Mode setting support */
drm_mode_config_init(&priv->drm);
priv->drm.mode_config.min_width = 320;
priv->drm.mode_config.min_height = 200;
/*
* With vscale enabled, the maximum width is 1920 due to the
 * 1920-pixel by 3-line RAM
*/
priv->drm.mode_config.max_width = 1920;
priv->drm.mode_config.max_height = 2048;
priv->drm.mode_config.preferred_depth = 24;
priv->drm.mode_config.funcs = &armada_drm_mode_config_funcs;
drm_mm_init(&priv->linear, mem->start, resource_size(mem));
mutex_init(&priv->linear_lock);
ret = component_bind_all(dev, &priv->drm);
if (ret)
goto err_kms;
ret = drm_vblank_init(&priv->drm, priv->drm.mode_config.num_crtc);
if (ret)
goto err_comp;
drm_mode_config_reset(&priv->drm);
drm_kms_helper_poll_init(&priv->drm);
ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_poll;
#ifdef CONFIG_DEBUG_FS
armada_drm_debugfs_init(priv->drm.primary);
#endif
drm_client_setup(&priv->drm, NULL);
return 0;
err_poll:
drm_kms_helper_poll_fini(&priv->drm);
err_comp:
component_unbind_all(dev, &priv->drm);
err_kms:
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
dev_set_drvdata(dev, NULL);
return ret;
}
static void armada_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct armada_private *priv = drm_to_armada_dev(drm);
drm_kms_helper_poll_fini(&priv->drm);
drm_dev_unregister(&priv->drm);
drm_atomic_helper_shutdown(&priv->drm);
component_unbind_all(dev, &priv->drm);
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
dev_set_drvdata(dev, NULL);
}
static void armada_add_endpoints(struct device *dev,
struct component_match **match, struct device_node *dev_node)
{
struct device_node *ep, *remote;
for_each_endpoint_of_node(dev_node, ep) {
remote = of_graph_get_remote_port_parent(ep);
if (remote && of_device_is_available(remote))
drm_of_component_match_add(dev, match, component_compare_of,
remote);
of_node_put(remote);
}
}
static const struct component_master_ops armada_master_ops = {
.bind = armada_drm_bind,
.unbind = armada_drm_unbind,
};
static int armada_drm_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
struct device *dev = &pdev->dev;
int ret;
ret = drm_of_component_probe(dev, component_compare_dev_name, &armada_master_ops);
if (ret != -EINVAL)
return ret;
if (dev->platform_data) {
char **devices = dev->platform_data;
struct device *d;
int i;
for (i = 0; devices[i]; i++)
component_match_add(dev, &match, component_compare_dev_name,
devices[i]);
if (i == 0) {
dev_err(dev, "missing 'ports' property\n");
return -ENODEV;
}
for (i = 0; devices[i]; i++) {
d = bus_find_device_by_name(&platform_bus_type, NULL,
devices[i]);
if (d && d->of_node)
armada_add_endpoints(dev, &match, d->of_node);
put_device(d);
}
}
return component_master_add_with_match(&pdev->dev, &armada_master_ops,
match);
}
static void armada_drm_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &armada_master_ops);
}
static void armada_drm_shutdown(struct platform_device *pdev)
{
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}
static const struct platform_device_id armada_drm_platform_ids[] = {
{
.name = "armada-drm",
}, {
.name = "armada-510-drm",
},
{ },
};
MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
static struct platform_driver armada_drm_platform_driver = {
.probe = armada_drm_probe,
.remove = armada_drm_remove,
.shutdown = armada_drm_shutdown,
.driver = {
.name = "armada-drm",
},
.id_table = armada_drm_platform_ids,
};
static int __init armada_drm_init(void)
{
int ret;
if (drm_firmware_drivers_only())
return -ENODEV;
ret = platform_driver_register(&armada_lcd_platform_driver);
if (ret)
return ret;
ret = platform_driver_register(&armada_drm_platform_driver);
if (ret)
platform_driver_unregister(&armada_lcd_platform_driver);
return ret;
}
module_init(armada_drm_init);
static void __exit armada_drm_exit(void)
{
platform_driver_unregister(&armada_drm_platform_driver);
platform_driver_unregister(&armada_lcd_platform_driver);
}
module_exit(armada_drm_exit);
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("Armada DRM Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:armada-drm");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for SoundBlaster 1.0/2.0/Pro soundcards and compatible
* Copyright (c) by Jaroslav Kysela <[email protected]>
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/isa.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/sb.h>
#include <sound/opl3.h>
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("Sound Blaster 1.0/2.0/Pro");
MODULE_LICENSE("GPL");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x240,0x260 */
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */
static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 1,3 */
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Sound Blaster soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Sound Blaster soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Sound Blaster soundcard.");
module_param_hw_array(port, long, ioport, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for SB8 driver.");
module_param_hw_array(irq, int, irq, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ # for SB8 driver.");
module_param_hw_array(dma8, int, dma, NULL, 0444);
MODULE_PARM_DESC(dma8, "8-bit DMA # for SB8 driver.");
struct snd_sb8 {
struct resource *fm_res; /* used to block FM i/o region for legacy cards */
struct snd_sb *chip;
};
static irqreturn_t snd_sb8_interrupt(int irq, void *dev_id)
{
struct snd_sb *chip = dev_id;
if (chip->open & SB_OPEN_PCM) {
return snd_sb8dsp_interrupt(chip);
} else {
return snd_sb8dsp_midi_interrupt(chip);
}
}
static int snd_sb8_match(struct device *pdev, unsigned int dev)
{
if (!enable[dev])
return 0;
if (irq[dev] == SNDRV_AUTO_IRQ) {
dev_err(pdev, "please specify irq\n");
return 0;
}
if (dma8[dev] == SNDRV_AUTO_DMA) {
dev_err(pdev, "please specify dma8\n");
return 0;
}
return 1;
}
static int snd_sb8_probe(struct device *pdev, unsigned int dev)
{
struct snd_sb *chip;
struct snd_card *card;
struct snd_sb8 *acard;
struct snd_opl3 *opl3;
int err;
err = snd_devm_card_new(pdev, index[dev], id[dev], THIS_MODULE,
sizeof(struct snd_sb8), &card);
if (err < 0)
return err;
acard = card->private_data;
/*
* Block the 0x388 port to avoid PnP conflicts.
* No need to check this value after request_region,
* as we never do anything with it.
*/
acard->fm_res = devm_request_region(card->dev, 0x388, 4,
"SoundBlaster FM");
if (port[dev] != SNDRV_AUTO_PORT) {
err = snd_sbdsp_create(card, port[dev], irq[dev],
snd_sb8_interrupt, dma8[dev],
-1, SB_HW_AUTO, &chip);
if (err < 0)
return err;
} else {
/* auto-probe legacy ports */
static const unsigned long possible_ports[] = {
0x220, 0x240, 0x260,
};
int i;
for (i = 0; i < ARRAY_SIZE(possible_ports); i++) {
err = snd_sbdsp_create(card, possible_ports[i],
irq[dev],
snd_sb8_interrupt,
dma8[dev],
-1,
SB_HW_AUTO,
&chip);
if (err >= 0) {
port[dev] = possible_ports[i];
break;
}
}
if (i >= ARRAY_SIZE(possible_ports))
return -EINVAL;
}
acard->chip = chip;
if (chip->hardware >= SB_HW_16) {
if (chip->hardware == SB_HW_ALS100)
dev_warn(pdev, "ALS100 chip detected at 0x%lx, try snd-als100 module\n",
port[dev]);
else
dev_warn(pdev, "SB 16 chip detected at 0x%lx, try snd-sb16 module\n",
port[dev]);
return -ENODEV;
}
err = snd_sb8dsp_pcm(chip, 0);
if (err < 0)
return err;
err = snd_sbmixer_new(chip);
if (err < 0)
return err;
if (chip->hardware == SB_HW_10 || chip->hardware == SB_HW_20) {
err = snd_opl3_create(card, chip->port + 8, 0,
OPL3_HW_AUTO, 1, &opl3);
if (err < 0)
dev_warn(pdev, "sb8: no OPL device at 0x%lx\n", chip->port + 8);
} else {
err = snd_opl3_create(card, chip->port, chip->port + 2,
OPL3_HW_AUTO, 1, &opl3);
if (err < 0) {
dev_warn(pdev, "sb8: no OPL device at 0x%lx-0x%lx\n",
chip->port, chip->port + 2);
}
}
if (err >= 0) {
err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
if (err < 0)
return err;
}
err = snd_sb8dsp_midi(chip, 0);
if (err < 0)
return err;
strcpy(card->driver, chip->hardware == SB_HW_PRO ? "SB Pro" : "SB8");
strcpy(card->shortname, chip->name);
sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
chip->name,
chip->port,
irq[dev], dma8[dev]);
err = snd_card_register(card);
if (err < 0)
return err;
dev_set_drvdata(pdev, card);
return 0;
}
#ifdef CONFIG_PM
static int snd_sb8_suspend(struct device *dev, unsigned int n,
pm_message_t state)
{
struct snd_card *card = dev_get_drvdata(dev);
struct snd_sb8 *acard = card->private_data;
struct snd_sb *chip = acard->chip;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_sbmixer_suspend(chip);
return 0;
}
static int snd_sb8_resume(struct device *dev, unsigned int n)
{
struct snd_card *card = dev_get_drvdata(dev);
struct snd_sb8 *acard = card->private_data;
struct snd_sb *chip = acard->chip;
snd_sbdsp_reset(chip);
snd_sbmixer_resume(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
#endif
#define DEV_NAME "sb8"
static struct isa_driver snd_sb8_driver = {
.match = snd_sb8_match,
.probe = snd_sb8_probe,
#ifdef CONFIG_PM
.suspend = snd_sb8_suspend,
.resume = snd_sb8_resume,
#endif
.driver = {
.name = DEV_NAME
},
};
module_isa_driver(snd_sb8_driver, SNDRV_CARDS);
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* RMNET Data Virtual Network Device APIs
*/
#ifndef _RMNET_VND_H_
#define _RMNET_VND_H_
int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
struct rmnet_endpoint *ep,
struct netlink_ext_ack *extack);
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_setup(struct net_device *dev);
int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev);
int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
struct net_device *real_dev);
#endif /* _RMNET_VND_H_ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip_vs_est.c: simple rate estimator for IPVS
*
* Authors: Wensong Zhang <[email protected]>
*
* Changes: Hans Schillstrom <[email protected]>
* Network name space (netns) aware.
 * Global data moved to netns, i.e. struct netns_ipvs
* Affected data: est_list and est_lock.
* estimation_timer() runs with timer per netns.
 * get_stats() does the per-CPU summing.
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/rcupdate_wait.h>
#include <net/ip_vs.h>
/*
This code estimates the rate over a short interval (such as 8
seconds) for virtual services and real servers. To measure the rate over a
long interval, it is easy to implement a user-level daemon which
periodically reads the statistical counters and computes the rate.
We measure the rate over the last 8 seconds, every 2 seconds:
avgrate = avgrate*(1-W) + rate*W
where W = 2^(-2)
NOTES.
* Average bps is scaled by 2^5, while average pps and cps are scaled by 2^10.
* Netlink users can see 64-bit values but sockopt users are restricted
to 32-bit values for conns, packets, bps, cps and pps.
* A lot of code is taken from net/core/gen_estimator.c
KEY POINTS:
- cpustats counters are updated per-cpu in SoftIRQ context with BH disabled
- kthreads read the cpustats to update the estimators (svcs, dests, total)
- the states of estimators can be read (get stats) or modified (zero stats)
from processes
KTHREADS:
- estimators are added initially to est_temp_list and later kthread 0
distributes them to one or many kthreads for estimation
- kthread contexts are created and attached to array
- the kthread tasks are started when first service is added, before that
the total stats are not estimated
- when configuration (cpulist/nice) is changed, the tasks are restarted
by work (est_reload_work)
- kthread tasks are stopped while the cpulist is empty
- the kthread context holds lists with estimators (chains) which are
processed every 2 seconds
- as estimators can be added dynamically and in bursts, we try to spread
them to multiple chains which are estimated at different times
- on start, kthread 0 enters calculation phase to determine the chain limits
and the limit of estimators per kthread
- est_add_ktid: ktid where to add new ests, can point to an empty slot where
we should add kt data
*/
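/* A worked sketch of the smoothing above, assuming W = 2^(-2): the update
 * avgrate' = avgrate*(1-W) + rate*W is equivalent to
 * avgrate' = avgrate + (rate - avgrate)/4, i.e. the "avg += (rate - avg) >> 2"
 * form used in ip_vs_chain_estimation() below. For example, avgrate = 800
 * and rate = 1600 give avgrate' = 800 + 800/4 = 1000.
 */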
static struct lock_class_key __ipvs_est_key;
static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs);
static void ip_vs_est_drain_temp_list(struct netns_ipvs *ipvs);
static void ip_vs_chain_estimation(struct hlist_head *chain)
{
struct ip_vs_estimator *e;
struct ip_vs_cpu_stats *c;
struct ip_vs_stats *s;
u64 rate;
hlist_for_each_entry_rcu(e, chain, list) {
u64 conns, inpkts, outpkts, inbytes, outbytes;
u64 kconns = 0, kinpkts = 0, koutpkts = 0;
u64 kinbytes = 0, koutbytes = 0;
unsigned int start;
int i;
if (kthread_should_stop())
break;
s = container_of(e, struct ip_vs_stats, est);
for_each_possible_cpu(i) {
c = per_cpu_ptr(s->cpustats, i);
do {
start = u64_stats_fetch_begin(&c->syncp);
conns = u64_stats_read(&c->cnt.conns);
inpkts = u64_stats_read(&c->cnt.inpkts);
outpkts = u64_stats_read(&c->cnt.outpkts);
inbytes = u64_stats_read(&c->cnt.inbytes);
outbytes = u64_stats_read(&c->cnt.outbytes);
} while (u64_stats_fetch_retry(&c->syncp, start));
kconns += conns;
kinpkts += inpkts;
koutpkts += outpkts;
kinbytes += inbytes;
koutbytes += outbytes;
}
spin_lock(&s->lock);
s->kstats.conns = kconns;
s->kstats.inpkts = kinpkts;
s->kstats.outpkts = koutpkts;
s->kstats.inbytes = kinbytes;
s->kstats.outbytes = koutbytes;
/* scaled by 2^10, but divided by 2 seconds (hence the << 9) */
rate = (s->kstats.conns - e->last_conns) << 9;
e->last_conns = s->kstats.conns;
e->cps += ((s64)rate - (s64)e->cps) >> 2;
rate = (s->kstats.inpkts - e->last_inpkts) << 9;
e->last_inpkts = s->kstats.inpkts;
e->inpps += ((s64)rate - (s64)e->inpps) >> 2;
rate = (s->kstats.outpkts - e->last_outpkts) << 9;
e->last_outpkts = s->kstats.outpkts;
e->outpps += ((s64)rate - (s64)e->outpps) >> 2;
/* scaled by 2^5, but divided by 2 seconds (hence the << 4) */
rate = (s->kstats.inbytes - e->last_inbytes) << 4;
e->last_inbytes = s->kstats.inbytes;
e->inbps += ((s64)rate - (s64)e->inbps) >> 2;
rate = (s->kstats.outbytes - e->last_outbytes) << 4;
e->last_outbytes = s->kstats.outbytes;
e->outbps += ((s64)rate - (s64)e->outbps) >> 2;
spin_unlock(&s->lock);
}
}
static void ip_vs_tick_estimation(struct ip_vs_est_kt_data *kd, int row)
{
struct ip_vs_est_tick_data *td;
int cid;
rcu_read_lock();
td = rcu_dereference(kd->ticks[row]);
if (!td)
goto out;
for_each_set_bit(cid, td->present, IPVS_EST_TICK_CHAINS) {
if (kthread_should_stop())
break;
ip_vs_chain_estimation(&td->chains[cid]);
cond_resched_rcu();
td = rcu_dereference(kd->ticks[row]);
if (!td)
break;
}
out:
rcu_read_unlock();
}
static int ip_vs_estimation_kthread(void *data)
{
struct ip_vs_est_kt_data *kd = data;
struct netns_ipvs *ipvs = kd->ipvs;
int row = kd->est_row;
unsigned long now;
int id = kd->id;
long gap;
if (id > 0) {
if (!ipvs->est_chain_max)
return 0;
} else {
if (!ipvs->est_chain_max) {
ipvs->est_calc_phase = 1;
/* commit est_calc_phase before reading est_genid */
smp_mb();
}
/* kthread 0 will handle the calc phase */
if (ipvs->est_calc_phase)
ip_vs_est_calc_phase(ipvs);
}
while (1) {
if (!id && !hlist_empty(&ipvs->est_temp_list))
ip_vs_est_drain_temp_list(ipvs);
set_current_state(TASK_IDLE);
if (kthread_should_stop())
break;
/* before estimation, check if we should sleep */
now = jiffies;
gap = kd->est_timer - now;
if (gap > 0) {
if (gap > IPVS_EST_TICK) {
kd->est_timer = now - IPVS_EST_TICK;
gap = IPVS_EST_TICK;
}
schedule_timeout(gap);
} else {
__set_current_state(TASK_RUNNING);
if (gap < -8 * IPVS_EST_TICK)
kd->est_timer = now;
}
if (kd->tick_len[row])
ip_vs_tick_estimation(kd, row);
row++;
if (row >= IPVS_EST_NTICKS)
row = 0;
WRITE_ONCE(kd->est_row, row);
kd->est_timer += IPVS_EST_TICK;
}
__set_current_state(TASK_RUNNING);
return 0;
}
/* Schedule stop/start for kthread tasks */
void ip_vs_est_reload_start(struct netns_ipvs *ipvs)
{
/* Ignore reloads before first service is added */
if (!ipvs->enable)
return;
ip_vs_est_stopped_recalc(ipvs);
/* Bump the kthread configuration genid */
atomic_inc(&ipvs->est_genid);
queue_delayed_work(system_long_wq, &ipvs->est_reload_work, 0);
}
/* Start kthread task with current configuration */
int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
struct ip_vs_est_kt_data *kd)
{
unsigned long now;
int ret = 0;
long gap;
lockdep_assert_held(&ipvs->est_mutex);
if (kd->task)
goto out;
now = jiffies;
gap = kd->est_timer - now;
/* Sync est_timer if task is starting later */
if (abs(gap) > 4 * IPVS_EST_TICK)
kd->est_timer = now;
kd->task = kthread_create(ip_vs_estimation_kthread, kd, "ipvs-e:%d:%d",
ipvs->gen, kd->id);
if (IS_ERR(kd->task)) {
ret = PTR_ERR(kd->task);
kd->task = NULL;
goto out;
}
set_user_nice(kd->task, sysctl_est_nice(ipvs));
set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs));
pr_info("starting estimator thread %d...\n", kd->id);
wake_up_process(kd->task);
out:
return ret;
}
void ip_vs_est_kthread_stop(struct ip_vs_est_kt_data *kd)
{
if (kd->task) {
pr_info("stopping estimator thread %d...\n", kd->id);
kthread_stop(kd->task);
kd->task = NULL;
}
}
/* Apply parameters to kthread */
static void ip_vs_est_set_params(struct netns_ipvs *ipvs,
struct ip_vs_est_kt_data *kd)
{
kd->chain_max = ipvs->est_chain_max;
/* A single chain per tick is used when RCU preemption is enabled */
if (IPVS_EST_TICK_CHAINS == 1)
kd->chain_max *= IPVS_EST_CHAIN_FACTOR;
kd->tick_max = IPVS_EST_TICK_CHAINS * kd->chain_max;
kd->est_max_count = IPVS_EST_NTICKS * kd->tick_max;
}
/* Create and start estimation kthread in a free or new array slot */
static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
{
struct ip_vs_est_kt_data *kd = NULL;
int id = ipvs->est_kt_count;
int ret = -ENOMEM;
void *arr = NULL;
int i;
if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
ipvs->enable && ipvs->est_max_threads)
return -EINVAL;
mutex_lock(&ipvs->est_mutex);
for (i = 0; i < id; i++) {
if (!ipvs->est_kt_arr[i])
break;
}
if (i >= id) {
arr = krealloc_array(ipvs->est_kt_arr, id + 1,
sizeof(struct ip_vs_est_kt_data *),
GFP_KERNEL);
if (!arr)
goto out;
ipvs->est_kt_arr = arr;
} else {
id = i;
}
kd = kzalloc(sizeof(*kd), GFP_KERNEL);
if (!kd)
goto out;
kd->ipvs = ipvs;
bitmap_fill(kd->avail, IPVS_EST_NTICKS);
kd->est_timer = jiffies;
kd->id = id;
ip_vs_est_set_params(ipvs, kd);
/* Pre-allocate stats used in calc phase */
if (!id && !kd->calc_stats) {
kd->calc_stats = ip_vs_stats_alloc();
if (!kd->calc_stats)
goto out;
}
/* Start kthread tasks only when services are present */
if (ipvs->enable && !ip_vs_est_stopped(ipvs)) {
ret = ip_vs_est_kthread_start(ipvs, kd);
if (ret < 0)
goto out;
}
if (arr)
ipvs->est_kt_count++;
ipvs->est_kt_arr[id] = kd;
kd = NULL;
/* Use most recent kthread for new ests */
ipvs->est_add_ktid = id;
ret = 0;
out:
mutex_unlock(&ipvs->est_mutex);
if (kd) {
ip_vs_stats_free(kd->calc_stats);
kfree(kd);
}
return ret;
}
/* Select ktid where to add new ests: available, unused or new slot */
static void ip_vs_est_update_ktid(struct netns_ipvs *ipvs)
{
int ktid, best = ipvs->est_kt_count;
struct ip_vs_est_kt_data *kd;
for (ktid = 0; ktid < ipvs->est_kt_count; ktid++) {
kd = ipvs->est_kt_arr[ktid];
if (kd) {
if (kd->est_count < kd->est_max_count) {
best = ktid;
break;
}
} else if (ktid < best) {
best = ktid;
}
}
ipvs->est_add_ktid = best;
}
/* Add estimator to current kthread (est_add_ktid) */
static int ip_vs_enqueue_estimator(struct netns_ipvs *ipvs,
struct ip_vs_estimator *est)
{
struct ip_vs_est_kt_data *kd = NULL;
struct ip_vs_est_tick_data *td;
int ktid, row, crow, cid, ret;
int delay = est->ktrow;
BUILD_BUG_ON_MSG(IPVS_EST_TICK_CHAINS > 127,
"Too many chains for ktcid");
if (ipvs->est_add_ktid < ipvs->est_kt_count) {
kd = ipvs->est_kt_arr[ipvs->est_add_ktid];
if (kd)
goto add_est;
}
ret = ip_vs_est_add_kthread(ipvs);
if (ret < 0)
goto out;
kd = ipvs->est_kt_arr[ipvs->est_add_ktid];
add_est:
ktid = kd->id;
/* For a small number of estimators prefer to use few ticks,
 * otherwise try to add into the last estimated row.
 * est_row and add_row point just after the row we should use.
 */
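/* For illustration (hypothetical values, assuming IPVS_EST_NTICKS = 50):
 * with est_row = 10 and delay = 3, crow becomes 13 and the estimator is
 * placed in the first available row at or after tick 13, wrapping around
 * to the first available row if none is found.
 */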
if (kd->est_count >= 2 * kd->tick_max || delay < IPVS_EST_NTICKS - 1)
crow = READ_ONCE(kd->est_row);
else
crow = kd->add_row;
crow += delay;
if (crow >= IPVS_EST_NTICKS)
crow -= IPVS_EST_NTICKS;
/* Assume initial delay ? */
if (delay >= IPVS_EST_NTICKS - 1) {
/* Preserve initial delay or decrease it if no space in tick */
row = crow;
if (crow < IPVS_EST_NTICKS - 1) {
crow++;
row = find_last_bit(kd->avail, crow);
}
if (row >= crow)
row = find_last_bit(kd->avail, IPVS_EST_NTICKS);
} else {
/* Preserve delay or increase it if no space in tick */
row = IPVS_EST_NTICKS;
if (crow > 0)
row = find_next_bit(kd->avail, IPVS_EST_NTICKS, crow);
if (row >= IPVS_EST_NTICKS)
row = find_first_bit(kd->avail, IPVS_EST_NTICKS);
}
td = rcu_dereference_protected(kd->ticks[row], 1);
if (!td) {
td = kzalloc(sizeof(*td), GFP_KERNEL);
if (!td) {
ret = -ENOMEM;
goto out;
}
rcu_assign_pointer(kd->ticks[row], td);
}
cid = find_first_zero_bit(td->full, IPVS_EST_TICK_CHAINS);
kd->est_count++;
kd->tick_len[row]++;
if (!td->chain_len[cid])
__set_bit(cid, td->present);
td->chain_len[cid]++;
est->ktid = ktid;
est->ktrow = row;
est->ktcid = cid;
hlist_add_head_rcu(&est->list, &td->chains[cid]);
if (td->chain_len[cid] >= kd->chain_max) {
__set_bit(cid, td->full);
if (kd->tick_len[row] >= kd->tick_max)
__clear_bit(row, kd->avail);
}
/* Update est_add_ktid to point to first available/empty kt slot */
if (kd->est_count == kd->est_max_count)
ip_vs_est_update_ktid(ipvs);
ret = 0;
out:
return ret;
}
/* Start estimation for stats */
int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est = &stats->est;
int ret;
if (!ipvs->est_max_threads && ipvs->enable)
ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
est->ktid = -1;
est->ktrow = IPVS_EST_NTICKS - 1; /* Initial delay */
/* We prefer this code to be short, kthread 0 will requeue the
 * estimator to an available chain. If tasks are disabled, we
 * will not allocate much memory, just for kt 0.
 */
ret = 0;
if (!ipvs->est_kt_count || !ipvs->est_kt_arr[0])
ret = ip_vs_est_add_kthread(ipvs);
if (ret >= 0)
hlist_add_head(&est->list, &ipvs->est_temp_list);
else
INIT_HLIST_NODE(&est->list);
return ret;
}
static void ip_vs_est_kthread_destroy(struct ip_vs_est_kt_data *kd)
{
if (kd) {
if (kd->task) {
pr_info("stop unused estimator thread %d...\n", kd->id);
kthread_stop(kd->task);
}
ip_vs_stats_free(kd->calc_stats);
kfree(kd);
}
}
/* Unlink estimator from chain */
void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est = &stats->est;
struct ip_vs_est_tick_data *td;
struct ip_vs_est_kt_data *kd;
int ktid = est->ktid;
int row = est->ktrow;
int cid = est->ktcid;
/* Failed to add to chain ? */
if (hlist_unhashed(&est->list))
return;
/* On return, estimator can be freed, dequeue it now */
/* In est_temp_list ? */
if (ktid < 0) {
hlist_del(&est->list);
goto end_kt0;
}
hlist_del_rcu(&est->list);
kd = ipvs->est_kt_arr[ktid];
td = rcu_dereference_protected(kd->ticks[row], 1);
__clear_bit(cid, td->full);
td->chain_len[cid]--;
if (!td->chain_len[cid])
__clear_bit(cid, td->present);
kd->tick_len[row]--;
__set_bit(row, kd->avail);
if (!kd->tick_len[row]) {
RCU_INIT_POINTER(kd->ticks[row], NULL);
kfree_rcu(td, rcu_head);
}
kd->est_count--;
if (kd->est_count) {
/* This kt slot may have just become available, prefer it */
if (ktid < ipvs->est_add_ktid)
ipvs->est_add_ktid = ktid;
return;
}
if (ktid > 0) {
mutex_lock(&ipvs->est_mutex);
ip_vs_est_kthread_destroy(kd);
ipvs->est_kt_arr[ktid] = NULL;
if (ktid == ipvs->est_kt_count - 1) {
ipvs->est_kt_count--;
while (ipvs->est_kt_count > 1 &&
!ipvs->est_kt_arr[ipvs->est_kt_count - 1])
ipvs->est_kt_count--;
}
mutex_unlock(&ipvs->est_mutex);
/* This slot is now empty, prefer another available kt slot */
if (ktid == ipvs->est_add_ktid)
ip_vs_est_update_ktid(ipvs);
}
end_kt0:
/* kt 0 is freed after all other kthreads and chains are empty */
if (ipvs->est_kt_count == 1 && hlist_empty(&ipvs->est_temp_list)) {
kd = ipvs->est_kt_arr[0];
if (!kd || !kd->est_count) {
mutex_lock(&ipvs->est_mutex);
if (kd) {
ip_vs_est_kthread_destroy(kd);
ipvs->est_kt_arr[0] = NULL;
}
ipvs->est_kt_count--;
mutex_unlock(&ipvs->est_mutex);
ipvs->est_add_ktid = 0;
}
}
}
/* Register all ests from est_temp_list to kthreads */
static void ip_vs_est_drain_temp_list(struct netns_ipvs *ipvs)
{
struct ip_vs_estimator *est;
while (1) {
int max = 16;
mutex_lock(&__ip_vs_mutex);
while (max-- > 0) {
est = hlist_entry_safe(ipvs->est_temp_list.first,
struct ip_vs_estimator, list);
if (est) {
if (kthread_should_stop())
goto unlock;
hlist_del_init(&est->list);
if (ip_vs_enqueue_estimator(ipvs, est) >= 0)
continue;
est->ktid = -1;
hlist_add_head(&est->list,
&ipvs->est_temp_list);
/* Abort, some entries will not be estimated
* until next attempt
*/
}
goto unlock;
}
mutex_unlock(&__ip_vs_mutex);
cond_resched();
}
unlock:
mutex_unlock(&__ip_vs_mutex);
}
/* Calculate limits for all kthreads */
static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct ip_vs_est_kt_data *kd;
struct hlist_head chain;
struct ip_vs_stats *s;
int cache_factor = 4;
int i, loops, ntest;
s32 min_est = 0;
ktime_t t1, t2;
int max = 8;
int ret = 1;
s64 diff;
u64 val;
INIT_HLIST_HEAD(&chain);
mutex_lock(&__ip_vs_mutex);
kd = ipvs->est_kt_arr[0];
mutex_unlock(&__ip_vs_mutex);
s = kd ? kd->calc_stats : NULL;
if (!s)
goto out;
hlist_add_head(&s->est.list, &chain);
loops = 1;
/* Get best result from many tests */
for (ntest = 0; ntest < 12; ntest++) {
if (!(ntest & 3)) {
/* Wait for cpufreq frequency transition */
wait_event_idle_timeout(wq, kthread_should_stop(),
HZ / 50);
if (!ipvs->enable || kthread_should_stop())
goto stop;
}
local_bh_disable();
rcu_read_lock();
/* Put stats in cache */
ip_vs_chain_estimation(&chain);
t1 = ktime_get();
for (i = loops * cache_factor; i > 0; i--)
ip_vs_chain_estimation(&chain);
t2 = ktime_get();
rcu_read_unlock();
local_bh_enable();
if (!ipvs->enable || kthread_should_stop())
goto stop;
cond_resched();
diff = ktime_to_ns(ktime_sub(t2, t1));
if (diff <= 1 * NSEC_PER_USEC) {
/* Do more loops on low time resolution */
loops *= 2;
continue;
}
if (diff >= NSEC_PER_SEC)
continue;
val = diff;
do_div(val, loops);
if (!min_est || val < min_est) {
min_est = val;
/* goal: 95usec per chain */
val = 95 * NSEC_PER_USEC;
if (val >= min_est) {
do_div(val, min_est);
max = (int)val;
} else {
max = 1;
}
}
}
out:
if (s)
hlist_del_init(&s->est.list);
*chain_max = max;
return ret;
stop:
ret = 0;
goto out;
}
/* Calculate the parameters and apply them in context of kt #0
* ECP: est_calc_phase
* ECM: est_chain_max
* ECP ECM Insert Chain enable Description
* ---------------------------------------------------------------------------
* 0 0 est_temp_list 0 create kt #0 context
* 0 0 est_temp_list 0->1 service added, start kthread #0 task
* 0->1 0 est_temp_list 1 kt task #0 started, enters calc phase
* 1 0 est_temp_list 1 kt #0: determine est_chain_max,
* stop tasks, move ests to est_temp_list
* and free kd for kthreads 1..last
* 1->0 0->N kt chains 1 ests can go to kthreads
* 0 N kt chains 1 drain est_temp_list, create new kthread
* contexts, start tasks, estimate
*/
static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
{
int genid = atomic_read(&ipvs->est_genid);
struct ip_vs_est_tick_data *td;
struct ip_vs_est_kt_data *kd;
struct ip_vs_estimator *est;
struct ip_vs_stats *stats;
int id, row, cid, delay;
bool last, last_td;
int chain_max;
int step;
if (!ip_vs_est_calc_limits(ipvs, &chain_max))
return;
mutex_lock(&__ip_vs_mutex);
/* Stop all other tasks, so that we can immediately move the
* estimators to est_temp_list without RCU grace period
*/
mutex_lock(&ipvs->est_mutex);
for (id = 1; id < ipvs->est_kt_count; id++) {
/* netns clean up started, abort */
if (!ipvs->enable)
goto unlock2;
kd = ipvs->est_kt_arr[id];
if (!kd)
continue;
ip_vs_est_kthread_stop(kd);
}
mutex_unlock(&ipvs->est_mutex);
/* Move all estimators to est_temp_list but carefully,
* all estimators and kthread data can be released while
* we reschedule. Even for kthread 0.
*/
step = 0;
/* Order entries in est_temp_list in ascending delay, so now
* walk delay(desc), id(desc), cid(asc)
*/
delay = IPVS_EST_NTICKS;
next_delay:
delay--;
if (delay < 0)
goto end_dequeue;
last_kt:
/* Destroy contexts backwards */
id = ipvs->est_kt_count;
next_kt:
if (!ipvs->enable || kthread_should_stop())
goto unlock;
id--;
if (id < 0)
goto next_delay;
kd = ipvs->est_kt_arr[id];
if (!kd)
goto next_kt;
/* kt 0 can exist with empty chains */
if (!id && kd->est_count <= 1)
goto next_delay;
row = kd->est_row + delay;
if (row >= IPVS_EST_NTICKS)
row -= IPVS_EST_NTICKS;
td = rcu_dereference_protected(kd->ticks[row], 1);
if (!td)
goto next_kt;
cid = 0;
walk_chain:
if (kthread_should_stop())
goto unlock;
step++;
if (!(step & 63)) {
/* Give estimators a chance to be added (to est_temp_list)
 * and deleted (releasing kthread contexts)
 */
mutex_unlock(&__ip_vs_mutex);
cond_resched();
mutex_lock(&__ip_vs_mutex);
/* Current kt released ? */
if (id >= ipvs->est_kt_count)
goto last_kt;
if (kd != ipvs->est_kt_arr[id])
goto next_kt;
/* Current td released ? */
if (td != rcu_dereference_protected(kd->ticks[row], 1))
goto next_kt;
/* No fatal changes on the current kd and td */
}
est = hlist_entry_safe(td->chains[cid].first, struct ip_vs_estimator,
list);
if (!est) {
cid++;
if (cid >= IPVS_EST_TICK_CHAINS)
goto next_kt;
goto walk_chain;
}
/* We can cheat and increase est_count to protect kt 0 context
 * from release, but we prefer to keep the last estimator
*/
last = kd->est_count <= 1;
/* Do not free kt #0 data */
if (!id && last)
goto next_delay;
last_td = kd->tick_len[row] <= 1;
stats = container_of(est, struct ip_vs_stats, est);
ip_vs_stop_estimator(ipvs, stats);
/* Tasks are stopped, move without RCU grace period */
est->ktid = -1;
est->ktrow = row - kd->est_row;
if (est->ktrow < 0)
est->ktrow += IPVS_EST_NTICKS;
hlist_add_head(&est->list, &ipvs->est_temp_list);
/* kd freed ? */
if (last)
goto next_kt;
/* td freed ? */
if (last_td)
goto next_kt;
goto walk_chain;
end_dequeue:
/* All estimators removed while calculating ? */
if (!ipvs->est_kt_count)
goto unlock;
kd = ipvs->est_kt_arr[0];
if (!kd)
goto unlock;
kd->add_row = kd->est_row;
ipvs->est_chain_max = chain_max;
ip_vs_est_set_params(ipvs, kd);
pr_info("using max %d ests per chain, %d per kthread\n",
kd->chain_max, kd->est_max_count);
/* Try to keep tot_stats in kt0, enqueue it early */
if (ipvs->tot_stats && !hlist_unhashed(&ipvs->tot_stats->s.est.list) &&
ipvs->tot_stats->s.est.ktid == -1) {
hlist_del(&ipvs->tot_stats->s.est.list);
hlist_add_head(&ipvs->tot_stats->s.est.list,
&ipvs->est_temp_list);
}
mutex_lock(&ipvs->est_mutex);
/* We completed the calc phase, new calc phase not requested */
if (genid == atomic_read(&ipvs->est_genid))
ipvs->est_calc_phase = 0;
unlock2:
mutex_unlock(&ipvs->est_mutex);
unlock:
mutex_unlock(&__ip_vs_mutex);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est = &stats->est;
struct ip_vs_kstats *k = &stats->kstats;
/* reset counters, caller must hold the stats->lock lock */
est->last_inbytes = k->inbytes;
est->last_outbytes = k->outbytes;
est->last_conns = k->conns;
est->last_inpkts = k->inpkts;
est->last_outpkts = k->outpkts;
est->cps = 0;
est->inpps = 0;
est->outpps = 0;
est->inbps = 0;
est->outbps = 0;
}
/* Get decoded rates */
void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats)
{
struct ip_vs_estimator *e = &stats->est;
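/* Adding 0x1FF (about half of 2^10) or 0xF (about half of 2^5) before the
 * shift rounds the scaled value to the nearest unit instead of truncating.
 */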
dst->cps = (e->cps + 0x1FF) >> 10;
dst->inpps = (e->inpps + 0x1FF) >> 10;
dst->outpps = (e->outpps + 0x1FF) >> 10;
dst->inbps = (e->inbps + 0xF) >> 5;
dst->outbps = (e->outbps + 0xF) >> 5;
}
int __net_init ip_vs_estimator_net_init(struct netns_ipvs *ipvs)
{
INIT_HLIST_HEAD(&ipvs->est_temp_list);
ipvs->est_kt_arr = NULL;
ipvs->est_max_threads = 0;
ipvs->est_calc_phase = 0;
ipvs->est_chain_max = 0;
ipvs->est_kt_count = 0;
ipvs->est_add_ktid = 0;
atomic_set(&ipvs->est_genid, 0);
atomic_set(&ipvs->est_genid_done, 0);
__mutex_init(&ipvs->est_mutex, "ipvs->est_mutex", &__ipvs_est_key);
return 0;
}
void __net_exit ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs)
{
int i;
for (i = 0; i < ipvs->est_kt_count; i++)
ip_vs_est_kthread_destroy(ipvs->est_kt_arr[i]);
kfree(ipvs->est_kt_arr);
mutex_destroy(&ipvs->est_mutex);
}
|
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2023, Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_TC_CONNTRACK_H
#define EFX_TC_CONNTRACK_H
#include "net_driver.h"
#if IS_ENABLED(CONFIG_SFC_SRIOV)
#include <linux/refcount.h>
#include <net/netfilter/nf_flow_table.h>
struct efx_tc_ct_zone {
u16 zone;
struct rhash_head linkage;
refcount_t ref;
struct nf_flowtable *nf_ft;
struct efx_nic *efx;
struct mutex mutex; /* protects cts list */
struct list_head cts; /* list of efx_tc_ct_entry in this zone */
};
/* create/uncreate/teardown hashtables */
int efx_tc_init_conntrack(struct efx_nic *efx);
void efx_tc_destroy_conntrack(struct efx_nic *efx);
void efx_tc_fini_conntrack(struct efx_nic *efx);
struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
struct nf_flowtable *ct_ft);
void efx_tc_ct_unregister_zone(struct efx_nic *efx,
struct efx_tc_ct_zone *ct_zone);
struct efx_tc_ct_entry {
unsigned long cookie;
struct rhash_head linkage;
__be16 eth_proto;
u8 ip_proto;
bool dnat;
__be32 src_ip, dst_ip, nat_ip;
struct in6_addr src_ip6, dst_ip6;
__be16 l4_sport, l4_dport, l4_natport; /* Ports (UDP, TCP) */
struct efx_tc_ct_zone *zone;
u32 mark;
struct efx_tc_counter *cnt;
struct list_head list; /* entry on zone->cts */
};
#endif /* CONFIG_SFC_SRIOV */
#endif /* EFX_TC_CONNTRACK_H */
|
// SPDX-License-Identifier: GPL-2.0
/*
* Navman Serial USB driver
*
* Copyright (C) 2006 Greg Kroah-Hartman <[email protected]>
*
* TODO:
 *	Add a termios method that uses copy_hw but also kills all echo
 *	flags, as the navman is rx only and so cannot echo.
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
{ USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static void navman_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
int result;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, result);
}
static int navman_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result = 0;
if (port->interrupt_in_urb) {
dev_dbg(&port->dev, "%s - adding interrupt input for treo\n",
__func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed submitting interrupt urb, error %d\n",
__func__, result);
}
return result;
}
static void navman_close(struct usb_serial_port *port)
{
usb_kill_urb(port->interrupt_in_urb);
}
static int navman_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
/*
* This device can't write any data, only read from the device
*/
return -EOPNOTSUPP;
}
static struct usb_serial_driver navman_device = {
.driver = {
.name = "navman",
},
.id_table = id_table,
.num_ports = 1,
.open = navman_open,
.close = navman_close,
.write = navman_write,
.read_int_callback = navman_read_int_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&navman_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION("Navman USB Serial driver");
MODULE_LICENSE("GPL v2");
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definition for kernel virtual machines on s390.
*
* Adapted copy of struct definition kvm_s390_sie_block from
* arch/s390/include/asm/kvm_host.h for use in userspace selftest programs.
*
* Copyright IBM Corp. 2008, 2024
*
* Authors:
* Christoph Schlameuss <[email protected]>
* Carsten Otte <[email protected]>
*/
#ifndef SELFTEST_KVM_SIE_H
#define SELFTEST_KVM_SIE_H
#include <linux/types.h>
struct kvm_s390_sie_block {
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT 0x04000000
#define CPUSTAT_IO_INT 0x02000000
#define CPUSTAT_EXT_INT 0x01000000
#define CPUSTAT_RUNNING 0x00800000
#define CPUSTAT_RETAINED 0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB 0x00010000
#define CPUSTAT_RRF 0x00008000
#define CPUSTAT_SLSV 0x00004000
#define CPUSTAT_SLSR 0x00002000
#define CPUSTAT_ZARCH 0x00000800
#define CPUSTAT_MCDS 0x00000100
#define CPUSTAT_KSS 0x00000200
#define CPUSTAT_SM 0x00000080
#define CPUSTAT_IBS 0x00000040
#define CPUSTAT_GED2 0x00000010
#define CPUSTAT_G 0x00000008
#define CPUSTAT_GED 0x00000004
#define CPUSTAT_J 0x00000002
#define CPUSTAT_P 0x00000001
__u32 cpuflags; /* 0x0000 */
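/* e.g. (sie_block->cpuflags & CPUSTAT_STOPPED) tests for a stopped vCPU */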
__u32: 1; /* 0x0004 */
__u32 prefix : 18;
__u32: 1;
__u32 ibc : 12;
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE BIT(0)
__u32 prog0c; /* 0x000c */
union {
__u8 reserved10[16]; /* 0x0010 */
struct {
__u64 pv_handle_cpu;
__u64 pv_handle_config;
};
};
#define PROG_BLOCK_SIE BIT(0)
#define PROG_REQUEST BIT(1)
__u32 prog20; /* 0x0020 */
__u8 reserved24[4]; /* 0x0024 */
__u64 cputm; /* 0x0028 */
__u64 ckc; /* 0x0030 */
__u64 epoch; /* 0x0038 */
__u32 svcc; /* 0x0040 */
#define LCTL_CR0 0x8000
#define LCTL_CR6 0x0200
#define LCTL_CR9 0x0040
#define LCTL_CR10 0x0020
#define LCTL_CR11 0x0010
#define LCTL_CR14 0x0002
__u16 lctl; /* 0x0044 */
__s16 icpua; /* 0x0046 */
#define ICTL_OPEREXC 0x80000000
#define ICTL_PINT 0x20000000
#define ICTL_LPSW 0x00400000
#define ICTL_STCTL 0x00040000
#define ICTL_ISKE 0x00004000
#define ICTL_SSKE 0x00002000
#define ICTL_RRBE 0x00001000
#define ICTL_TPROT 0x00000200
__u32 ictl; /* 0x0048 */
#define ECA_CEI 0x80000000
#define ECA_IB 0x40000000
#define ECA_SIGPI 0x10000000
#define ECA_MVPGI 0x01000000
#define ECA_AIV 0x00200000
#define ECA_VX 0x00020000
#define ECA_PROTEXCI 0x00002000
#define ECA_APIE 0x00000008
#define ECA_SII 0x00000001
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
#define ICPT_PROGI 0x08
#define ICPT_INSTPROGI 0x0C
#define ICPT_EXTREQ 0x10
#define ICPT_EXTINT 0x14
#define ICPT_IOREQ 0x18
#define ICPT_WAIT 0x1c
#define ICPT_VALIDITY 0x20
#define ICPT_STOP 0x28
#define ICPT_OPEREXC 0x2C
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
#define ICPT_KSS 0x5c
#define ICPT_MCHKREQ 0x60
#define ICPT_INT_ENABLE 0x64
#define ICPT_PV_INSTR 0x68
#define ICPT_PV_NOTIFY 0x6c
#define ICPT_PV_PREF 0x70
__u8 icptcode; /* 0x0050 */
__u8 icptstatus; /* 0x0051 */
__u16 ihcpu; /* 0x0052 */
__u8 reserved54; /* 0x0054 */
#define IICTL_CODE_NONE 0x00
#define IICTL_CODE_MCHK 0x01
#define IICTL_CODE_EXT 0x02
#define IICTL_CODE_IO 0x03
#define IICTL_CODE_RESTART 0x04
#define IICTL_CODE_SPECIFICATION 0x10
#define IICTL_CODE_OPERAND 0x11
__u8 iictl; /* 0x0055 */
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
#define FPF_BPBC 0x20
__u8 fpf; /* 0x0060 */
#define ECB_GS 0x40
#define ECB_TE 0x10
#define ECB_SPECI 0x08
#define ECB_SRSI 0x04
#define ECB_HOSTPROTINT 0x02
#define ECB_PTF 0x01
__u8 ecb; /* 0x0061 */
#define ECB2_CMMA 0x80
#define ECB2_IEP 0x20
#define ECB2_PFMFI 0x08
#define ECB2_ESCA 0x04
#define ECB2_ZPCI_LSI 0x02
__u8 ecb2; /* 0x0062 */
#define ECB3_AISI 0x20
#define ECB3_AISII 0x10
#define ECB3_DEA 0x08
#define ECB3_AES 0x04
#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
#define ESCA_SCAOL_MASK ~0x3fU
__u32 scaol; /* 0x0064 */
__u8 sdf; /* 0x0068 */
__u8 epdx; /* 0x0069 */
__u8 cpnc; /* 0x006a */
__u8 reserved6b; /* 0x006b */
__u32 todpr; /* 0x006c */
#define GISA_FORMAT1 0x00000001
__u32 gd; /* 0x0070 */
__u8 reserved74[12]; /* 0x0074 */
__u64 mso; /* 0x0080 */
__u64 msl; /* 0x0088 */
__u64 psw_mask; /* 0x0090 */
__u64 psw_addr; /* 0x0098 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
__u8 reservedb0[8]; /* 0x00b0 */
#define HPID_KVM 0x4
#define HPID_VSIE 0x5
__u8 hpid; /* 0x00b8 */
__u8 reservedb9[7]; /* 0x00b9 */
union {
struct {
__u32 eiparams; /* 0x00c0 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
};
__u64 mcic; /* 0x00c0 */
} __packed;
__u32 reservedc8; /* 0x00c8 */
union {
struct {
__u16 pgmilc; /* 0x00cc */
__u16 iprcc; /* 0x00ce */
};
__u32 edc; /* 0x00cc */
} __packed;
union {
struct {
__u32 dxc; /* 0x00d0 */
__u16 mcn; /* 0x00d4 */
__u8 perc; /* 0x00d6 */
__u8 peratmid; /* 0x00d7 */
};
__u64 faddr; /* 0x00d0 */
} __packed;
__u64 peraddr; /* 0x00d8 */
__u8 eai; /* 0x00e0 */
__u8 peraid; /* 0x00e1 */
__u8 oai; /* 0x00e2 */
__u8 armid; /* 0x00e3 */
__u8 reservede4[4]; /* 0x00e4 */
union {
__u64 tecmc; /* 0x00e8 */
struct {
__u16 subchannel_id; /* 0x00e8 */
__u16 subchannel_nr; /* 0x00ea */
__u32 io_int_parm; /* 0x00ec */
__u32 io_int_word; /* 0x00f0 */
};
} __packed;
__u8 reservedf4[8]; /* 0x00f4 */
#define CRYCB_FORMAT_MASK 0x00000003
#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
union {
__u64 gbea; /* 0x0180 */
__u64 sidad;
};
__u8 reserved188[8]; /* 0x0188 */
__u64 sdnxo; /* 0x0190 */
__u8 reserved198[8]; /* 0x0198 */
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
__u8 reserved1c0[8]; /* 0x01c0 */
#define ECD_HOSTREGMGMT 0x20000000
#define ECD_MEF 0x08000000
#define ECD_ETOKENF 0x02000000
#define ECD_ECC 0x00200000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
__u64 riccbd; /* 0x01f0 */
__u64 gvrd; /* 0x01f8 */
} __packed __aligned(512);
#endif /* SELFTEST_KVM_SIE_H */
|
/* SPDX-License-Identifier: MIT */
/******************************************************************************
* vscsiif.h
*
* Based on the blkif.h code.
*
* Copyright(c) FUJITSU Limited 2008.
*/
#ifndef __XEN__PUBLIC_IO_SCSI_H__
#define __XEN__PUBLIC_IO_SCSI_H__
#include "ring.h"
#include "../grant_table.h"
/*
* Feature and Parameter Negotiation
* =================================
* The two halves of a Xen pvSCSI driver utilize nodes within the XenStore to
* communicate capabilities and to negotiate operating parameters. This
* section enumerates these nodes which reside in the respective front and
* backend portions of the XenStore, following the XenBus convention.
*
* Any specified default value is in effect if the corresponding XenBus node
* is not present in the XenStore.
*
* XenStore nodes in sections marked "PRIVATE" are solely for use by the
* driver side whose XenBus tree contains them.
*
*****************************************************************************
* Backend XenBus Nodes
*****************************************************************************
*
*------------------ Backend Device Identification (PRIVATE) ------------------
*
* p-devname
* Values: string
*
* A free string used to identify the physical device (e.g. a disk name).
*
* p-dev
* Values: string
*
* A string specifying the backend device: either a 4-tuple "h:c:t:l"
* (host, controller, target, lun, all integers), or a WWN (e.g.
* "naa.60014054ac780582:0").
*
* v-dev
* Values: string
*
* A string specifying the frontend device in form of a 4-tuple "h:c:t:l"
* (host, controller, target, lun, all integers).
*
*--------------------------------- Features ---------------------------------
*
* feature-sg-grant
* Values: unsigned [VSCSIIF_SG_TABLESIZE...65535]
* Default Value: 0
*
* Specifies the maximum number of scatter/gather elements in grant pages
* supported. If not set, the backend supports up to VSCSIIF_SG_TABLESIZE
* SG elements specified directly in the request.
*
*****************************************************************************
* Frontend XenBus Nodes
*****************************************************************************
*
*----------------------- Request Transport Parameters -----------------------
*
* event-channel
* Values: unsigned
*
* The identifier of the Xen event channel used to signal activity
* in the ring buffer.
*
* ring-ref
* Values: unsigned
*
* The Xen grant reference granting permission for the backend to map
* the sole page in a single page sized ring buffer.
*
* protocol
* Values: string (XEN_IO_PROTO_ABI_*)
* Default Value: XEN_IO_PROTO_ABI_NATIVE
*
* The machine ABI rules governing the format of all ring request and
* response structures.
*/
/*
* Xenstore format in practice
* ===========================
*
* The backend driver uses a single_host:many_devices notation to manage domU
* devices. Everything is stored in /local/domain/<backend_domid>/backend/vscsi/.
* The xenstore layout looks like this (dom0 is assumed to be the backend_domid):
*
* <domid>/<vhost>/feature-host = "0"
* <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0"
* <domid>/<vhost>/frontend-id = "<domid>"
* <domid>/<vhost>/online = "1"
* <domid>/<vhost>/state = "4"
* <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1" or "naa.wwn:lun"
* <domid>/<vhost>/vscsi-devs/dev-0/state = "4"
* <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0"
* <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2"
* <domid>/<vhost>/vscsi-devs/dev-1/state = "4"
* <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0"
*
* The frontend driver maintains its state in
* /local/domain/<domid>/device/vscsi/.
*
* <vhost>/backend = "/local/domain/0/backend/vscsi/<domid>/<vhost>"
* <vhost>/backend-id = "0"
* <vhost>/event-channel = "20"
* <vhost>/ring-ref = "43"
* <vhost>/state = "4"
* <vhost>/vscsi-devs/dev-0/state = "4"
* <vhost>/vscsi-devs/dev-1/state = "4"
*
 * In addition to the entries for backend and frontend, these flags are
 * stored for the toolstack:
*
* <domid>/<vhost>/vscsi-devs/dev-1/p-devname = "/dev/$device"
* <domid>/<vhost>/libxl_ctrl_index = "0"
*
*
* Backend/frontend protocol
* =========================
*
* To create a vhost along with a device:
* <domid>/<vhost>/feature-host = "0"
* <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0"
* <domid>/<vhost>/frontend-id = "<domid>"
* <domid>/<vhost>/online = "1"
* <domid>/<vhost>/state = "1"
* <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1"
* <domid>/<vhost>/vscsi-devs/dev-0/state = "1"
* <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0"
 * Wait for <domid>/<vhost>/state and <domid>/<vhost>/vscsi-devs/dev-0/state to become 4
*
* To add another device to a vhost:
* <domid>/<vhost>/state = "7"
* <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2"
* <domid>/<vhost>/vscsi-devs/dev-1/state = "1"
* <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0"
 * Wait for <domid>/<vhost>/state and <domid>/<vhost>/vscsi-devs/dev-1/state to become 4
*
* To remove a device from a vhost:
* <domid>/<vhost>/state = "7"
* <domid>/<vhost>/vscsi-devs/dev-1/state = "5"
* Wait for <domid>/<vhost>/state to become 4
 * Wait for <domid>/<vhost>/vscsi-devs/dev-1/state to become 6
* Remove <domid>/<vhost>/vscsi-devs/dev-1/{state,p-dev,v-dev,p-devname}
* Remove <domid>/<vhost>/vscsi-devs/dev-1/
*
*/
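/*
 * Illustrative sketch of the protocol above (not part of this ABI header):
 * parsing the "v-dev" 4-tuple of a vscsi-devs/dev-N directory. Assumes
 * <xen/xenbus.h>, <linux/err.h> and <linux/slab.h> are available; the
 * helper name is hypothetical.
 */
static inline int vscsiif_parse_vdev(const char *dir, unsigned int *h,
				     unsigned int *c, unsigned int *t,
				     unsigned int *l)
{
	char *str = xenbus_read(XBT_NIL, dir, "v-dev", NULL);
	int ret = 0;

	if (IS_ERR(str))
		return PTR_ERR(str);
	if (sscanf(str, "%u:%u:%u:%u", h, c, t, l) != 4)
		ret = -EINVAL;	/* not a "h:c:t:l" tuple */
	kfree(str);
	return ret;
}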
/* Requests from the frontend to the backend */
/*
* Request a SCSI operation specified via a CDB in vscsiif_request.cmnd.
* The target is specified via channel, id and lun.
*
 * The operation to be performed is specified via a CDB in cmnd[]; the length
 * of the CDB is in cmd_len. sc_data_direction specifies the direction of data
 * (to the device, from the device, or none at all).
 *
 * If data is to be transferred to or from the device, the buffer(s) in the
 * guest memory are specified via one or multiple scsiif_request_segment
 * descriptors, each specifying a memory page via a grant_ref_t, an offset
 * into the page, and the length of the area in that page. All
 * scsiif_request_segment areas concatenated form the resulting data buffer
 * used by the operation. If the number of scsiif_request_segment areas is
 * not too large (less than or equal to VSCSIIF_SG_TABLESIZE), the areas can
 * be specified directly in the seg[] array and the number of valid
 * scsiif_request_segment elements is to be set in nr_segments.
*
* If "feature-sg-grant" in the Xenstore is set it is possible to specify more
* than VSCSIIF_SG_TABLESIZE scsiif_request_segment elements via indirection.
* The maximum number of allowed scsiif_request_segment elements is the value
* of the "feature-sg-grant" entry from Xenstore. When using indirection the
* seg[] array doesn't contain specifications of the data buffers, but
* references to scsiif_request_segment arrays, which in turn reference the
* data buffers. While nr_segments holds the number of populated seg[] entries
* (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment
* elements referencing the target data buffers is calculated from the lengths
* of the seg[] elements (the sum of all valid seg[].length divided by the
* size of one scsiif_request_segment structure). The frontend may use a mix of
* direct and indirect requests.
*/
#define VSCSIIF_ACT_SCSI_CDB 1
/*
* Request abort of a running operation for the specified target given by
* channel, id, lun and the operation's rqid in ref_rqid.
*/
#define VSCSIIF_ACT_SCSI_ABORT 2
/*
* Request a device reset of the specified target (channel and id).
*/
#define VSCSIIF_ACT_SCSI_RESET 3
/*
* Preset scatter/gather elements for a following request. Deprecated.
* Keeping the define only to avoid usage of the value "4" for other actions.
*/
#define VSCSIIF_ACT_SCSI_SG_PRESET 4
/*
* Maximum scatter/gather segments per request.
*
 * To balance fitting at least 16 "vscsiif_request" structures into one
 * page (4096 bytes) against the number of scatter/gather elements needed,
 * 26 was chosen as the magic number.
*
* If "feature-sg-grant" is set, more scatter/gather elements can be specified
* by placing them in one or more (up to VSCSIIF_SG_TABLESIZE) granted pages.
* In this case the vscsiif_request seg elements don't contain references to
* the user data, but to the SG elements referencing the user data.
*/
#define VSCSIIF_SG_TABLESIZE 26
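/*
 * Sketch of the negotiation described further up (illustration only, not
 * part of this ABI header): the frontend reads the backend's
 * "feature-sg-grant" node and falls back to the direct limit. Assumes
 * <xen/xenbus.h> is available.
 */
static inline unsigned int vscsiif_max_sg_segments(struct xenbus_device *dev)
{
	unsigned int max = xenbus_read_unsigned(dev->otherend,
						"feature-sg-grant", 0);

	return max ?: VSCSIIF_SG_TABLESIZE;
}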
/*
* based on Linux kernel 2.6.18, still valid
*
* Changing these values requires support of multiple protocols via the rings
* as "old clients" will blindly use these values and the resulting structure
* sizes.
*/
#define VSCSIIF_MAX_COMMAND_SIZE 16
#define VSCSIIF_SENSE_BUFFERSIZE 96
#define VSCSIIF_PAGE_SIZE 4096
struct scsiif_request_segment {
grant_ref_t gref;
uint16_t offset;
uint16_t length;
};
#define VSCSIIF_SG_PER_PAGE (VSCSIIF_PAGE_SIZE / \
sizeof(struct scsiif_request_segment))
/* Size of one request is 252 bytes */
struct vscsiif_request {
uint16_t rqid; /* private guest value, echoed in resp */
uint8_t act; /* command between backend and frontend */
uint8_t cmd_len; /* valid CDB bytes */
uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; /* the CDB */
uint16_t timeout_per_command; /* deprecated */
uint16_t channel, id, lun; /* (virtual) device specification */
uint16_t ref_rqid; /* command abort reference */
uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
DMA_FROM_DEVICE(2)
DMA_NONE(3) requests */
uint8_t nr_segments; /* Number of pieces of scatter-gather */
/*
* flag in nr_segments: SG elements via grant page
*
* If VSCSIIF_SG_GRANT is set, the low 7 bits of nr_segments specify the number
 * of grant pages containing SG elements. Usable only if "feature-sg-grant" is set.
*/
#define VSCSIIF_SG_GRANT 0x80
struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
uint32_t reserved[3];
};
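/*
 * Illustrative helper (not part of the ABI): for an indirect request the
 * number of scsiif_request_segment elements referencing the data buffers
 * is derived from the lengths of the populated seg[] entries, as described
 * in the comment for VSCSIIF_ACT_SCSI_CDB above.
 */
static inline unsigned int vscsiif_nr_indirect_segs(const struct vscsiif_request *req)
{
	unsigned int i, bytes = 0;
	unsigned int pages = req->nr_segments & ~VSCSIIF_SG_GRANT;

	for (i = 0; i < pages; i++)
		bytes += req->seg[i].length;

	return bytes / sizeof(struct scsiif_request_segment);
}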
/* Size of one response is 252 bytes */
struct vscsiif_response {
uint16_t rqid; /* identifies request */
uint8_t padding;
uint8_t sense_len;
uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
int32_t rslt;
	uint32_t residual_len;		/* request bufflen minus the number of
					   bytes actually transferred, as
					   reported by the physical device */
uint32_t reserved[36];
};
/* SCSI I/O status from vscsiif_response->rslt */
#define XEN_VSCSIIF_RSLT_STATUS(x) ((x) & 0x00ff)
/* Host I/O status from vscsiif_response->rslt */
#define XEN_VSCSIIF_RSLT_HOST(x) (((x) & 0x00ff0000) >> 16)
#define XEN_VSCSIIF_RSLT_HOST_OK 0
/* Couldn't connect before timeout */
#define XEN_VSCSIIF_RSLT_HOST_NO_CONNECT 1
/* Bus busy through timeout */
#define XEN_VSCSIIF_RSLT_HOST_BUS_BUSY 2
/* Timed out for other reason */
#define XEN_VSCSIIF_RSLT_HOST_TIME_OUT 3
/* Bad target */
#define XEN_VSCSIIF_RSLT_HOST_BAD_TARGET 4
/* Abort for some other reason */
#define XEN_VSCSIIF_RSLT_HOST_ABORT 5
/* Parity error */
#define XEN_VSCSIIF_RSLT_HOST_PARITY 6
/* Internal error */
#define XEN_VSCSIIF_RSLT_HOST_ERROR 7
/* Reset by somebody */
#define XEN_VSCSIIF_RSLT_HOST_RESET 8
/* Unexpected interrupt */
#define XEN_VSCSIIF_RSLT_HOST_BAD_INTR 9
/* Force command past mid-layer */
#define XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH 10
/* Retry requested */
#define XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR 11
/* Hidden retry requested */
#define XEN_VSCSIIF_RSLT_HOST_IMM_RETRY 12
/* Requeue command requested */
#define XEN_VSCSIIF_RSLT_HOST_REQUEUE 13
/* Transport error disrupted I/O */
#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED 14
/* Transport class fastfailed */
#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST 15
/* Permanent target failure */
#define XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE 16
/* Permanent nexus failure on path */
#define XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE 17
/* Space allocation on device failed */
#define XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE 18
/* Medium error */
#define XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR 19
/* Transport marginal errors */
#define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL 20
/* Result values of reset operations */
#define XEN_VSCSIIF_RSLT_RESET_SUCCESS 0x2002
#define XEN_VSCSIIF_RSLT_RESET_FAILED 0x2003
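/*
 * Small illustrative decoder for vscsiif_response->rslt using the macros
 * above (a sketch; real frontends do more fine-grained error handling):
 */
static inline int vscsiif_rslt_ok(int32_t rslt)
{
	return XEN_VSCSIIF_RSLT_HOST(rslt) == XEN_VSCSIIF_RSLT_HOST_OK &&
	       XEN_VSCSIIF_RSLT_STATUS(rslt) == 0;
}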
DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2022-2024 Rivos Inc.
* Copyright © 2023 FORTH-ICS/CARV
*
* RISCV IOMMU as a PCIe device
*
* Authors
* Tomasz Jeznach <[email protected]>
* Nick Kossifidis <[email protected]>
*/
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include "iommu-bits.h"
#include "iommu.h"
/* QEMU RISC-V IOMMU implementation */
#define PCI_DEVICE_ID_REDHAT_RISCV_IOMMU 0x0014
/* Rivos Inc. assigned PCI Vendor and Device IDs */
#ifndef PCI_VENDOR_ID_RIVOS
#define PCI_VENDOR_ID_RIVOS 0x1efd
#endif
#define PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA 0x0008
static int riscv_iommu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct riscv_iommu_device *iommu;
int rc, vec;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
return -ENODEV;
if (pci_resource_len(pdev, 0) < RISCV_IOMMU_REG_SIZE)
return -ENODEV;
rc = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
if (rc)
return dev_err_probe(dev, rc, "pcim_iomap_regions failed\n");
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return -ENOMEM;
iommu->dev = dev;
iommu->reg = pcim_iomap_table(pdev)[0];
pci_set_master(pdev);
dev_set_drvdata(dev, iommu);
/* Check device reported capabilities / features. */
iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES);
iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL);
	/* The PCI driver uses only MSIs; make sure the IOMMU supports this */
switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) {
case RISCV_IOMMU_CAPABILITIES_IGS_MSI:
case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
break;
default:
return dev_err_probe(dev, -ENODEV,
"unable to use message-signaled interrupts\n");
}
/* Allocate and assign IRQ vectors for the various events */
rc = pci_alloc_irq_vectors(pdev, 1, RISCV_IOMMU_INTR_COUNT,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (rc <= 0)
return dev_err_probe(dev, -ENODEV,
"unable to allocate irq vectors\n");
iommu->irqs_count = rc;
for (vec = 0; vec < iommu->irqs_count; vec++)
iommu->irqs[vec] = msi_get_virq(dev, vec);
	/* MSIs are used, so clear fctl.WSI (wire-signaled interrupts) if set */
if (iommu->fctl & RISCV_IOMMU_FCTL_WSI) {
iommu->fctl ^= RISCV_IOMMU_FCTL_WSI;
riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl);
}
return riscv_iommu_init(iommu);
}
static void riscv_iommu_pci_remove(struct pci_dev *pdev)
{
struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev);
riscv_iommu_remove(iommu);
}
static const struct pci_device_id riscv_iommu_pci_tbl[] = {
{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_RISCV_IOMMU), 0},
{PCI_VDEVICE(RIVOS, PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA), 0},
{0,}
};
static struct pci_driver riscv_iommu_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = riscv_iommu_pci_tbl,
.probe = riscv_iommu_pci_probe,
.remove = riscv_iommu_pci_remove,
.driver = {
.suppress_bind_attrs = true,
},
};
builtin_pci_driver(riscv_iommu_pci_driver);
|
// SPDX-License-Identifier: GPL-2.0
/*
* QNX6 file system, Linux implementation.
*
* Version : 1.0.0
*
* History :
*
* 01-02-2012 by Kai Bankett ([email protected]) : first release.
*
*/
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "qnx6.h"
static void qnx6_mmi_copy_sb(struct qnx6_super_block *qsb,
struct qnx6_mmi_super_block *sb)
{
qsb->sb_magic = sb->sb_magic;
qsb->sb_checksum = sb->sb_checksum;
qsb->sb_serial = sb->sb_serial;
qsb->sb_blocksize = sb->sb_blocksize;
qsb->sb_num_inodes = sb->sb_num_inodes;
qsb->sb_free_inodes = sb->sb_free_inodes;
qsb->sb_num_blocks = sb->sb_num_blocks;
qsb->sb_free_blocks = sb->sb_free_blocks;
/* the rest of the superblock is the same */
memcpy(&qsb->Inode, &sb->Inode, sizeof(sb->Inode));
memcpy(&qsb->Bitmap, &sb->Bitmap, sizeof(sb->Bitmap));
memcpy(&qsb->Longfile, &sb->Longfile, sizeof(sb->Longfile));
}
struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, int silent)
{
struct buffer_head *bh1, *bh2 = NULL;
struct qnx6_mmi_super_block *sb1, *sb2;
struct qnx6_super_block *qsb = NULL;
struct qnx6_sb_info *sbi;
__u64 offset;
	/*
	 * Check the superblock signatures,
	 * starting with the first superblock
	 */
bh1 = sb_bread(s, 0);
if (!bh1) {
pr_err("Unable to read first mmi superblock\n");
return NULL;
}
sb1 = (struct qnx6_mmi_super_block *)bh1->b_data;
sbi = QNX6_SB(s);
	if (fs32_to_cpu(sbi, sb1->sb_magic) != QNX6_SUPER_MAGIC) {
		if (!silent)
			pr_err("wrong signature (magic) in superblock #1.\n");
		goto out;
	}
/* checksum check - start at byte 8 and end at byte 512 */
if (fs32_to_cpu(sbi, sb1->sb_checksum) !=
crc32_be(0, (char *)(bh1->b_data + 8), 504)) {
pr_err("superblock #1 checksum error\n");
goto out;
}
/* calculate second superblock blocknumber */
	offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) +
		 (QNX6_SUPERBLOCK_AREA / fs32_to_cpu(sbi, sb1->sb_blocksize));
/* set new blocksize */
if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) {
pr_err("unable to set blocksize\n");
goto out;
}
/* blocksize invalidates bh - pull it back in */
brelse(bh1);
bh1 = sb_bread(s, 0);
if (!bh1)
goto out;
sb1 = (struct qnx6_mmi_super_block *)bh1->b_data;
/* read second superblock */
bh2 = sb_bread(s, offset);
if (!bh2) {
pr_err("unable to read the second superblock\n");
goto out;
}
sb2 = (struct qnx6_mmi_super_block *)bh2->b_data;
if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) {
if (!silent)
pr_err("wrong signature (magic) in superblock #2.\n");
goto out;
}
/* checksum check - start at byte 8 and end at byte 512 */
if (fs32_to_cpu(sbi, sb2->sb_checksum)
!= crc32_be(0, (char *)(bh2->b_data + 8), 504)) {
pr_err("superblock #1 checksum error\n");
goto out;
}
qsb = kmalloc(sizeof(*qsb), GFP_KERNEL);
if (!qsb) {
pr_err("unable to allocate memory.\n");
goto out;
}
if (fs64_to_cpu(sbi, sb1->sb_serial) >
fs64_to_cpu(sbi, sb2->sb_serial)) {
/* superblock #1 active */
qnx6_mmi_copy_sb(qsb, sb1);
#ifdef CONFIG_QNX6FS_DEBUG
qnx6_superblock_debug(qsb, s);
#endif
memcpy(bh1->b_data, qsb, sizeof(struct qnx6_super_block));
sbi->sb_buf = bh1;
sbi->sb = (struct qnx6_super_block *)bh1->b_data;
brelse(bh2);
pr_info("superblock #1 active\n");
} else {
/* superblock #2 active */
qnx6_mmi_copy_sb(qsb, sb2);
#ifdef CONFIG_QNX6FS_DEBUG
qnx6_superblock_debug(qsb, s);
#endif
memcpy(bh2->b_data, qsb, sizeof(struct qnx6_super_block));
sbi->sb_buf = bh2;
sbi->sb = (struct qnx6_super_block *)bh2->b_data;
brelse(bh1);
pr_info("superblock #2 active\n");
}
kfree(qsb);
/* offset for mmi_fs is just SUPERBLOCK_AREA bytes */
sbi->s_blks_off = QNX6_SUPERBLOCK_AREA / s->s_blocksize;
/* success */
return sbi->sb;
out:
if (bh1 != NULL)
brelse(bh1);
if (bh2 != NULL)
brelse(bh2);
return NULL;
}
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2023 Edgeble AI Technologies Pvt. Ltd.
*/
#include <dt-bindings/gpio/gpio.h>
/ {
chosen {
stdout-path = "serial2:1500000n8";
};
vcc3v3_pcie2x1l0: regulator-vcc3v3-pcie2x1l0 {
compatible = "regulator-fixed";
regulator-name = "vcc3v3_pcie2x1l0";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
startup-delay-us = <5000>;
vin-supply = <&vcc_3v3_s3>;
};
vcc3v3_pcie3x2: regulator-vcc3v3-pcie3x2 {
compatible = "regulator-fixed";
enable-active-high;
gpios = <&gpio2 RK_PC4 GPIO_ACTIVE_HIGH>; /* PCIE_4G_PWEN */
pinctrl-names = "default";
pinctrl-0 = <&pcie3x2_vcc3v3_en>;
regulator-name = "vcc3v3_pcie3x2";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
startup-delay-us = <5000>;
vin-supply = <&vcc5v0_sys>;
};
vcc3v3_pcie3x4: regulator-vcc3v3-pcie3x4 {
compatible = "regulator-fixed";
enable-active-high;
gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>; /* PCIE30x4_PWREN_H */
pinctrl-names = "default";
pinctrl-0 = <&pcie3x4_vcc3v3_en>;
regulator-name = "vcc3v3_pcie3x4";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
startup-delay-us = <5000>;
vin-supply = <&vcc5v0_sys>;
};
vcc5v0_host: regulator-vcc5v0-host {
compatible = "regulator-fixed";
enable-active-high;
gpio = <&gpio3 RK_PC7 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_host_en>;
regulator-name = "vcc5v0_host";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-boot-on;
regulator-always-on;
vin-supply = <&vcc5v0_sys>;
};
};
&combphy0_ps {
status = "okay";
};
&combphy1_ps {
status = "okay";
};
&combphy2_psu {
status = "okay";
};
&i2c6 {
status = "okay";
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
interrupt-parent = <&gpio0>;
interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <0>;
clock-output-names = "hym8563";
pinctrl-names = "default";
pinctrl-0 = <&hym8563_int>;
wakeup-source;
};
};
/* ETH */
&pcie2x1l0 {
pinctrl-names = "default";
pinctrl-0 = <&pcie2_0_rst>;
reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>; /* PCIE20_1_PERST_L */
vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
status = "okay";
};
&pcie30phy {
status = "okay";
};
/* B-Key and E-Key */
&pcie3x2 {
pinctrl-names = "default";
pinctrl-0 = <&pcie3x2_rst>;
reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; /* PCIE30X4_PERSTn_M1_L */
vpcie3v3-supply = <&vcc3v3_pcie3x2>;
status = "okay";
};
/* M-Key */
&pcie3x4 {
pinctrl-names = "default";
pinctrl-0 = <&pcie3x4_rst>;
reset-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>; /* PCIE30X2_PERSTn_M1_L */
vpcie3v3-supply = <&vcc3v3_pcie3x4>;
status = "okay";
};
&pinctrl {
pcie2 {
pcie2_0_rst: pcie2-0-rst {
rockchip,pins = <4 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pcie3 {
pcie3x2_rst: pcie3x2-rst {
rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
pcie3x2_vcc3v3_en: pcie3x2-vcc3v3-en {
rockchip,pins = <2 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
};
pcie3x4_rst: pcie3x4-rst {
rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
pcie3x4_vcc3v3_en: pcie3x4-vcc3v3-en {
rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
hym8563 {
hym8563_int: hym8563-int {
rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb {
vcc5v0_host_en: vcc5v0-host-en {
rockchip,pins = <3 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
/* FAN */
&pwm2 {
pinctrl-0 = <&pwm2m1_pins>;
pinctrl-names = "default";
status = "okay";
};
&sata0 {
status = "okay";
};
&sdmmc {
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
disable-wp;
no-sdio;
no-mmc;
sd-uhs-sdr104;
vmmc-supply = <&vcc_3v3_s3>;
vqmmc-supply = <&vccio_sd_s0>;
status = "okay";
};
&uart2 {
pinctrl-0 = <&uart2m0_xfer>;
status = "okay";
};
/* RS232 */
&uart6 {
pinctrl-0 = <&uart6m0_xfer>;
pinctrl-names = "default";
status = "okay";
};
/* RS485 */
&uart7 {
pinctrl-0 = <&uart7m2_xfer>;
pinctrl-names = "default";
status = "okay";
};
&u2phy2 {
status = "okay";
};
&u2phy2_host {
/* connected to USB hub, which is powered by vcc5v0_sys */
phy-supply = <&vcc5v0_sys>;
status = "okay";
};
&u2phy3 {
status = "okay";
};
&u2phy3_host {
phy-supply = <&vcc5v0_host>;
status = "okay";
};
&usb_host0_ehci {
status = "okay";
};
&usb_host0_ohci {
status = "okay";
};
&usb_host1_ehci {
status = "okay";
};
&usb_host1_ohci {
status = "okay";
};
&usb_host2_xhci {
status = "okay";
};
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ZD1211 USB-WLAN driver for Linux
*
* Copyright (C) 2005-2007 Ulrich Kunitz <[email protected]>
* Copyright (C) 2006-2007 Daniel Drake <[email protected]>
*/
#ifndef _ZD_CHIP_H
#define _ZD_CHIP_H
#include <net/mac80211.h>
#include "zd_rf.h"
#include "zd_usb.h"
/* Header for the Media Access Controller (MAC) and the Baseband Processor
* (BBP). It appears that the ZD1211 wraps the old ZD1205 with USB glue and
* adds a processor for handling the USB protocol.
*/
/* Address space */
enum {
/* CONTROL REGISTERS */
CR_START = 0x9000,
/* FIRMWARE */
FW_START = 0xee00,
/* EEPROM */
E2P_START = 0xf800,
E2P_LEN = 0x800,
/* EEPROM layout */
E2P_LOAD_CODE_LEN = 0xe, /* base 0xf800 */
E2P_LOAD_VECT_LEN = 0x9, /* base 0xf80e */
/* E2P_DATA indexes into this */
E2P_DATA_LEN = 0x7e, /* base 0xf817 */
E2P_BOOT_CODE_LEN = 0x760, /* base 0xf895 */
E2P_INTR_VECT_LEN = 0xb, /* base 0xfff5 */
/* Some precomputed offsets into the EEPROM */
E2P_DATA_OFFSET = E2P_LOAD_CODE_LEN + E2P_LOAD_VECT_LEN,
E2P_BOOT_CODE_OFFSET = E2P_DATA_OFFSET + E2P_DATA_LEN,
};
#define CTL_REG(offset) ((zd_addr_t)(CR_START + (offset)))
#define E2P_DATA(offset) ((zd_addr_t)(E2P_START + E2P_DATA_OFFSET + (offset)))
#define FWRAW_DATA(offset) ((zd_addr_t)(FW_START + (offset)))
/* 8-bit hardware registers */
#define ZD_CR0 CTL_REG(0x0000)
#define ZD_CR1 CTL_REG(0x0004)
#define ZD_CR2 CTL_REG(0x0008)
#define ZD_CR3 CTL_REG(0x000C)
#define ZD_CR5 CTL_REG(0x0010)
/* bit 5: if set, short preamble is used
 * bit 6: filter band - Japan channel 14 on, else off
 */
#define ZD_CR6 CTL_REG(0x0014)
#define ZD_CR7 CTL_REG(0x0018)
#define ZD_CR8 CTL_REG(0x001C)
#define ZD_CR4 CTL_REG(0x0020)
#define ZD_CR9 CTL_REG(0x0024)
/* bit 2: antenna switch (together with ZD_CR10) */
#define ZD_CR10 CTL_REG(0x0028)
/* bit 1: antenna switch (together with ZD_CR9)
 * RF2959 controls radio on and off via ZD_CR11
 */
#define ZD_CR11 CTL_REG(0x002C)
/* bit 6: TX power control for OFDM
 * RF2959 controls radio on and off via ZD_CR10
 */
#define ZD_CR12 CTL_REG(0x0030)
#define ZD_CR13 CTL_REG(0x0034)
#define ZD_CR14 CTL_REG(0x0038)
#define ZD_CR15 CTL_REG(0x003C)
#define ZD_CR16 CTL_REG(0x0040)
#define ZD_CR17 CTL_REG(0x0044)
#define ZD_CR18 CTL_REG(0x0048)
#define ZD_CR19 CTL_REG(0x004C)
#define ZD_CR20 CTL_REG(0x0050)
#define ZD_CR21 CTL_REG(0x0054)
#define ZD_CR22 CTL_REG(0x0058)
#define ZD_CR23 CTL_REG(0x005C)
#define ZD_CR24 CTL_REG(0x0060) /* CCA threshold */
#define ZD_CR25 CTL_REG(0x0064)
#define ZD_CR26 CTL_REG(0x0068)
#define ZD_CR27 CTL_REG(0x006C)
#define ZD_CR28 CTL_REG(0x0070)
#define ZD_CR29 CTL_REG(0x0074)
#define ZD_CR30 CTL_REG(0x0078)
#define ZD_CR31 CTL_REG(0x007C) /* TX power control for RF in
* CCK mode
*/
#define ZD_CR32 CTL_REG(0x0080)
#define ZD_CR33 CTL_REG(0x0084)
#define ZD_CR34 CTL_REG(0x0088)
#define ZD_CR35 CTL_REG(0x008C)
#define ZD_CR36 CTL_REG(0x0090)
#define ZD_CR37 CTL_REG(0x0094)
#define ZD_CR38 CTL_REG(0x0098)
#define ZD_CR39 CTL_REG(0x009C)
#define ZD_CR40 CTL_REG(0x00A0)
#define ZD_CR41 CTL_REG(0x00A4)
#define ZD_CR42 CTL_REG(0x00A8)
#define ZD_CR43 CTL_REG(0x00AC)
#define ZD_CR44 CTL_REG(0x00B0)
#define ZD_CR45 CTL_REG(0x00B4)
#define ZD_CR46 CTL_REG(0x00B8)
#define ZD_CR47 CTL_REG(0x00BC) /* CCK baseband gain
* (patch value might be in EEPROM)
*/
#define ZD_CR48 CTL_REG(0x00C0)
#define ZD_CR49 CTL_REG(0x00C4)
#define ZD_CR50 CTL_REG(0x00C8)
#define ZD_CR51 CTL_REG(0x00CC) /* TX power control for RF in
* 6-36M modes
*/
#define ZD_CR52 CTL_REG(0x00D0) /* TX power control for RF in
* 48M mode
*/
#define ZD_CR53 CTL_REG(0x00D4) /* TX power control for RF in
* 54M mode
*/
#define ZD_CR54 CTL_REG(0x00D8)
#define ZD_CR55 CTL_REG(0x00DC)
#define ZD_CR56 CTL_REG(0x00E0)
#define ZD_CR57 CTL_REG(0x00E4)
#define ZD_CR58 CTL_REG(0x00E8)
#define ZD_CR59 CTL_REG(0x00EC)
#define ZD_CR60 CTL_REG(0x00F0)
#define ZD_CR61 CTL_REG(0x00F4)
#define ZD_CR62 CTL_REG(0x00F8)
#define ZD_CR63 CTL_REG(0x00FC)
#define ZD_CR64 CTL_REG(0x0100)
#define ZD_CR65 CTL_REG(0x0104) /* OFDM 54M calibration */
#define ZD_CR66 CTL_REG(0x0108) /* OFDM 48M calibration */
#define ZD_CR67 CTL_REG(0x010C) /* OFDM 36M calibration */
#define ZD_CR68 CTL_REG(0x0110) /* CCK calibration */
#define ZD_CR69 CTL_REG(0x0114)
#define ZD_CR70 CTL_REG(0x0118)
#define ZD_CR71 CTL_REG(0x011C)
#define ZD_CR72 CTL_REG(0x0120)
#define ZD_CR73 CTL_REG(0x0124)
#define ZD_CR74 CTL_REG(0x0128)
#define ZD_CR75 CTL_REG(0x012C)
#define ZD_CR76 CTL_REG(0x0130)
#define ZD_CR77 CTL_REG(0x0134)
#define ZD_CR78 CTL_REG(0x0138)
#define ZD_CR79 CTL_REG(0x013C)
#define ZD_CR80 CTL_REG(0x0140)
#define ZD_CR81 CTL_REG(0x0144)
#define ZD_CR82 CTL_REG(0x0148)
#define ZD_CR83 CTL_REG(0x014C)
#define ZD_CR84 CTL_REG(0x0150)
#define ZD_CR85 CTL_REG(0x0154)
#define ZD_CR86 CTL_REG(0x0158)
#define ZD_CR87 CTL_REG(0x015C)
#define ZD_CR88 CTL_REG(0x0160)
#define ZD_CR89 CTL_REG(0x0164)
#define ZD_CR90 CTL_REG(0x0168)
#define ZD_CR91 CTL_REG(0x016C)
#define ZD_CR92 CTL_REG(0x0170)
#define ZD_CR93 CTL_REG(0x0174)
#define ZD_CR94 CTL_REG(0x0178)
#define ZD_CR95 CTL_REG(0x017C)
#define ZD_CR96 CTL_REG(0x0180)
#define ZD_CR97 CTL_REG(0x0184)
#define ZD_CR98 CTL_REG(0x0188)
#define ZD_CR99 CTL_REG(0x018C)
#define ZD_CR100 CTL_REG(0x0190)
#define ZD_CR101 CTL_REG(0x0194)
#define ZD_CR102 CTL_REG(0x0198)
#define ZD_CR103 CTL_REG(0x019C)
#define ZD_CR104 CTL_REG(0x01A0)
#define ZD_CR105 CTL_REG(0x01A4)
#define ZD_CR106 CTL_REG(0x01A8)
#define ZD_CR107 CTL_REG(0x01AC)
#define ZD_CR108 CTL_REG(0x01B0)
#define ZD_CR109 CTL_REG(0x01B4)
#define ZD_CR110 CTL_REG(0x01B8)
#define ZD_CR111 CTL_REG(0x01BC)
#define ZD_CR112 CTL_REG(0x01C0)
#define ZD_CR113 CTL_REG(0x01C4)
#define ZD_CR114 CTL_REG(0x01C8)
#define ZD_CR115 CTL_REG(0x01CC)
#define ZD_CR116 CTL_REG(0x01D0)
#define ZD_CR117 CTL_REG(0x01D4)
#define ZD_CR118 CTL_REG(0x01D8)
#define ZD_CR119 CTL_REG(0x01DC)
#define ZD_CR120 CTL_REG(0x01E0)
#define ZD_CR121 CTL_REG(0x01E4)
#define ZD_CR122 CTL_REG(0x01E8)
#define ZD_CR123 CTL_REG(0x01EC)
#define ZD_CR124 CTL_REG(0x01F0)
#define ZD_CR125 CTL_REG(0x01F4)
#define ZD_CR126 CTL_REG(0x01F8)
#define ZD_CR127 CTL_REG(0x01FC)
#define ZD_CR128 CTL_REG(0x0200)
#define ZD_CR129 CTL_REG(0x0204)
#define ZD_CR130 CTL_REG(0x0208)
#define ZD_CR131 CTL_REG(0x020C)
#define ZD_CR132 CTL_REG(0x0210)
#define ZD_CR133 CTL_REG(0x0214)
#define ZD_CR134 CTL_REG(0x0218)
#define ZD_CR135 CTL_REG(0x021C)
#define ZD_CR136 CTL_REG(0x0220)
#define ZD_CR137 CTL_REG(0x0224)
#define ZD_CR138 CTL_REG(0x0228)
#define ZD_CR139 CTL_REG(0x022C)
#define ZD_CR140 CTL_REG(0x0230)
#define ZD_CR141 CTL_REG(0x0234)
#define ZD_CR142 CTL_REG(0x0238)
#define ZD_CR143 CTL_REG(0x023C)
#define ZD_CR144 CTL_REG(0x0240)
#define ZD_CR145 CTL_REG(0x0244)
#define ZD_CR146 CTL_REG(0x0248)
#define ZD_CR147 CTL_REG(0x024C)
#define ZD_CR148 CTL_REG(0x0250)
#define ZD_CR149 CTL_REG(0x0254)
#define ZD_CR150 CTL_REG(0x0258)
#define ZD_CR151 CTL_REG(0x025C)
#define ZD_CR152 CTL_REG(0x0260)
#define ZD_CR153 CTL_REG(0x0264)
#define ZD_CR154 CTL_REG(0x0268)
#define ZD_CR155 CTL_REG(0x026C)
#define ZD_CR156 CTL_REG(0x0270)
#define ZD_CR157 CTL_REG(0x0274)
#define ZD_CR158 CTL_REG(0x0278)
#define ZD_CR159 CTL_REG(0x027C)
#define ZD_CR160 CTL_REG(0x0280)
#define ZD_CR161 CTL_REG(0x0284)
#define ZD_CR162 CTL_REG(0x0288)
#define ZD_CR163 CTL_REG(0x028C)
#define ZD_CR164 CTL_REG(0x0290)
#define ZD_CR165 CTL_REG(0x0294)
#define ZD_CR166 CTL_REG(0x0298)
#define ZD_CR167 CTL_REG(0x029C)
#define ZD_CR168 CTL_REG(0x02A0)
#define ZD_CR169 CTL_REG(0x02A4)
#define ZD_CR170 CTL_REG(0x02A8)
#define ZD_CR171 CTL_REG(0x02AC)
#define ZD_CR172 CTL_REG(0x02B0)
#define ZD_CR173 CTL_REG(0x02B4)
#define ZD_CR174 CTL_REG(0x02B8)
#define ZD_CR175 CTL_REG(0x02BC)
#define ZD_CR176 CTL_REG(0x02C0)
#define ZD_CR177 CTL_REG(0x02C4)
#define ZD_CR178 CTL_REG(0x02C8)
#define ZD_CR179 CTL_REG(0x02CC)
#define ZD_CR180 CTL_REG(0x02D0)
#define ZD_CR181 CTL_REG(0x02D4)
#define ZD_CR182 CTL_REG(0x02D8)
#define ZD_CR183 CTL_REG(0x02DC)
#define ZD_CR184 CTL_REG(0x02E0)
#define ZD_CR185 CTL_REG(0x02E4)
#define ZD_CR186 CTL_REG(0x02E8)
#define ZD_CR187 CTL_REG(0x02EC)
#define ZD_CR188 CTL_REG(0x02F0)
#define ZD_CR189 CTL_REG(0x02F4)
#define ZD_CR190 CTL_REG(0x02F8)
#define ZD_CR191 CTL_REG(0x02FC)
#define ZD_CR192 CTL_REG(0x0300)
#define ZD_CR193 CTL_REG(0x0304)
#define ZD_CR194 CTL_REG(0x0308)
#define ZD_CR195 CTL_REG(0x030C)
#define ZD_CR196 CTL_REG(0x0310)
#define ZD_CR197 CTL_REG(0x0314)
#define ZD_CR198 CTL_REG(0x0318)
#define ZD_CR199 CTL_REG(0x031C)
#define ZD_CR200 CTL_REG(0x0320)
#define ZD_CR201 CTL_REG(0x0324)
#define ZD_CR202 CTL_REG(0x0328)
#define ZD_CR203 CTL_REG(0x032C) /* I2C bus template value & flash
* control
*/
#define ZD_CR204 CTL_REG(0x0330)
#define ZD_CR205 CTL_REG(0x0334)
#define ZD_CR206 CTL_REG(0x0338)
#define ZD_CR207 CTL_REG(0x033C)
#define ZD_CR208 CTL_REG(0x0340)
#define ZD_CR209 CTL_REG(0x0344)
#define ZD_CR210 CTL_REG(0x0348)
#define ZD_CR211 CTL_REG(0x034C)
#define ZD_CR212 CTL_REG(0x0350)
#define ZD_CR213 CTL_REG(0x0354)
#define ZD_CR214 CTL_REG(0x0358)
#define ZD_CR215 CTL_REG(0x035C)
#define ZD_CR216 CTL_REG(0x0360)
#define ZD_CR217 CTL_REG(0x0364)
#define ZD_CR218 CTL_REG(0x0368)
#define ZD_CR219 CTL_REG(0x036C)
#define ZD_CR220 CTL_REG(0x0370)
#define ZD_CR221 CTL_REG(0x0374)
#define ZD_CR222 CTL_REG(0x0378)
#define ZD_CR223 CTL_REG(0x037C)
#define ZD_CR224 CTL_REG(0x0380)
#define ZD_CR225 CTL_REG(0x0384)
#define ZD_CR226 CTL_REG(0x0388)
#define ZD_CR227 CTL_REG(0x038C)
#define ZD_CR228 CTL_REG(0x0390)
#define ZD_CR229 CTL_REG(0x0394)
#define ZD_CR230 CTL_REG(0x0398)
#define ZD_CR231 CTL_REG(0x039C)
#define ZD_CR232 CTL_REG(0x03A0)
#define ZD_CR233 CTL_REG(0x03A4)
#define ZD_CR234 CTL_REG(0x03A8)
#define ZD_CR235 CTL_REG(0x03AC)
#define ZD_CR236 CTL_REG(0x03B0)
#define ZD_CR240 CTL_REG(0x03C0)
/* bit 7: host-controlled RF register writes
* ZD_CR241-ZD_CR245: for hardware controlled writing of RF bits, not needed for
* USB
*/
#define ZD_CR241 CTL_REG(0x03C4)
#define ZD_CR242 CTL_REG(0x03C8)
#define ZD_CR243 CTL_REG(0x03CC)
#define ZD_CR244 CTL_REG(0x03D0)
#define ZD_CR245 CTL_REG(0x03D4)
#define ZD_CR251 CTL_REG(0x03EC) /* only used for activation and
* deactivation of Airoha RFs AL2230
* and AL7230B
*/
#define ZD_CR252 CTL_REG(0x03F0)
#define ZD_CR253 CTL_REG(0x03F4)
#define ZD_CR254 CTL_REG(0x03F8)
#define ZD_CR255 CTL_REG(0x03FC)
#define CR_MAX_PHY_REG 255
/* Taken from the ZYDAS driver; not all of these registers are relevant for
 * the ZD1211 driver.
 */
#define CR_RF_IF_CLK CTL_REG(0x0400)
#define CR_RF_IF_DATA CTL_REG(0x0404)
#define CR_PE1_PE2 CTL_REG(0x0408)
#define CR_PE2_DLY CTL_REG(0x040C)
#define CR_LE1 CTL_REG(0x0410)
#define CR_LE2 CTL_REG(0x0414)
/* Seems to enable/disable GPI (General Purpose IO?) */
#define CR_GPI_EN CTL_REG(0x0418)
#define CR_RADIO_PD CTL_REG(0x042C)
#define CR_RF2948_PD CTL_REG(0x042C)
#define CR_ENABLE_PS_MANUAL_AGC CTL_REG(0x043C)
#define CR_CONFIG_PHILIPS CTL_REG(0x0440)
#define CR_SA2400_SER_AP CTL_REG(0x0444)
#define CR_I2C_WRITE CTL_REG(0x0444)
#define CR_SA2400_SER_RP CTL_REG(0x0448)
#define CR_RADIO_PE CTL_REG(0x0458)
#define CR_RST_BUS_MASTER CTL_REG(0x045C)
#define CR_RFCFG CTL_REG(0x0464)
#define CR_HSTSCHG CTL_REG(0x046C)
#define CR_PHY_ON CTL_REG(0x0474)
#define CR_RX_DELAY CTL_REG(0x0478)
#define CR_RX_PE_DELAY CTL_REG(0x047C)
#define CR_GPIO_1 CTL_REG(0x0490)
#define CR_GPIO_2 CTL_REG(0x0494)
#define CR_EncryBufMux CTL_REG(0x04A8)
#define CR_PS_CTRL CTL_REG(0x0500)
#define CR_ADDA_PWR_DWN CTL_REG(0x0504)
#define CR_ADDA_MBIAS_WARMTIME CTL_REG(0x0508)
#define CR_MAC_PS_STATE CTL_REG(0x050C)
#define CR_INTERRUPT CTL_REG(0x0510)
#define INT_TX_COMPLETE (1 << 0)
#define INT_RX_COMPLETE (1 << 1)
#define INT_RETRY_FAIL (1 << 2)
#define INT_WAKEUP (1 << 3)
#define INT_DTIM_NOTIFY (1 << 5)
#define INT_CFG_NEXT_BCN (1 << 6)
#define INT_BUS_ABORT (1 << 7)
#define INT_TX_FIFO_READY (1 << 8)
#define INT_UART (1 << 9)
#define INT_TX_COMPLETE_EN (1 << 16)
#define INT_RX_COMPLETE_EN (1 << 17)
#define INT_RETRY_FAIL_EN (1 << 18)
#define INT_WAKEUP_EN (1 << 19)
#define INT_DTIM_NOTIFY_EN (1 << 21)
#define INT_CFG_NEXT_BCN_EN (1 << 22)
#define INT_BUS_ABORT_EN (1 << 23)
#define INT_TX_FIFO_READY_EN (1 << 24)
#define INT_UART_EN (1 << 25)
#define CR_TSF_LOW_PART CTL_REG(0x0514)
#define CR_TSF_HIGH_PART CTL_REG(0x0518)
/* The following three values are in time units (TU, 1024 us).
 * The following condition must be met:
 * atim < tbtt < bcn
 */
#define CR_ATIM_WND_PERIOD CTL_REG(0x051C)
#define CR_BCN_INTERVAL CTL_REG(0x0520)
#define CR_PRE_TBTT CTL_REG(0x0524)
/* in units of TU(1024us) */
/* for UART support */
#define CR_UART_RBR_THR_DLL CTL_REG(0x0540)
#define CR_UART_DLM_IER CTL_REG(0x0544)
#define CR_UART_IIR_FCR CTL_REG(0x0548)
#define CR_UART_LCR CTL_REG(0x054c)
#define CR_UART_MCR CTL_REG(0x0550)
#define CR_UART_LSR CTL_REG(0x0554)
#define CR_UART_MSR CTL_REG(0x0558)
#define CR_UART_ECR CTL_REG(0x055c)
#define CR_UART_STATUS CTL_REG(0x0560)
#define CR_PCI_TX_ADDR_P1 CTL_REG(0x0600)
#define CR_PCI_TX_AddR_P2 CTL_REG(0x0604)
#define CR_PCI_RX_AddR_P1 CTL_REG(0x0608)
#define CR_PCI_RX_AddR_P2 CTL_REG(0x060C)
/* must be overwritten if custom MAC address will be used */
#define CR_MAC_ADDR_P1 CTL_REG(0x0610)
#define CR_MAC_ADDR_P2 CTL_REG(0x0614)
#define CR_BSSID_P1 CTL_REG(0x0618)
#define CR_BSSID_P2 CTL_REG(0x061C)
#define CR_BCN_PLCP_CFG CTL_REG(0x0620)
/* Group hash table for filtering incoming packets.
*
* The group hash table is 64 bit large and split over two parts. The first
* part is the lower part. The upper 6 bits of the last byte of the target
* address are used as index. Packets are received if the hash table bit is
* set. This is used for multicast handling, but for broadcasts (address
* ff:ff:ff:ff:ff:ff) the highest bit in the second table must also be set.
*/
#define CR_GROUP_HASH_P1 CTL_REG(0x0624)
#define CR_GROUP_HASH_P2 CTL_REG(0x0628)
#define CR_RX_TIMEOUT CTL_REG(0x062C)
/* Basic rates supported by the BSS. When producing ACK or CTS messages, the
* device will use a rate in this table that is less than or equal to the rate
* of the incoming frame which prompted the response. */
#define CR_BASIC_RATE_TBL CTL_REG(0x0630)
#define CR_RATE_1M (1 << 0) /* 802.11b */
#define CR_RATE_2M (1 << 1) /* 802.11b */
#define CR_RATE_5_5M (1 << 2) /* 802.11b */
#define CR_RATE_11M (1 << 3) /* 802.11b */
#define CR_RATE_6M (1 << 8) /* 802.11g */
#define CR_RATE_9M (1 << 9) /* 802.11g */
#define CR_RATE_12M (1 << 10) /* 802.11g */
#define CR_RATE_18M (1 << 11) /* 802.11g */
#define CR_RATE_24M (1 << 12) /* 802.11g */
#define CR_RATE_36M (1 << 13) /* 802.11g */
#define CR_RATE_48M (1 << 14) /* 802.11g */
#define CR_RATE_54M (1 << 15) /* 802.11g */
#define CR_RATES_80211G 0xff00
#define CR_RATES_80211B 0x000f
/* Mandatory rates required in the BSS. When producing ACK or CTS messages, if
* the device could not find an appropriate rate in CR_BASIC_RATE_TBL, it will
* look for a rate in this table that is less than or equal to the rate of
* the incoming frame. */
#define CR_MANDATORY_RATE_TBL CTL_REG(0x0634)
#define CR_RTS_CTS_RATE CTL_REG(0x0638)
/* These are all bit indexes in CR_RTS_CTS_RATE, so remember to shift. */
#define RTSCTS_SH_RTS_RATE 0
#define RTSCTS_SH_EXP_CTS_RATE 4
#define RTSCTS_SH_RTS_MOD_TYPE 8
#define RTSCTS_SH_RTS_PMB_TYPE 9
#define RTSCTS_SH_CTS_RATE 16
#define RTSCTS_SH_CTS_MOD_TYPE 24
#define RTSCTS_SH_CTS_PMB_TYPE 25
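/*
 * Illustrative composition of a CR_RTS_CTS_RATE value from the shift
 * constants above (a sketch only; the rate and modulation encodings
 * actually programmed live in zd_chip.c):
 */
static inline u32 zd_rts_cts_rate_value(u32 rts_rate, u32 cts_rate)
{
	return (rts_rate << RTSCTS_SH_RTS_RATE) |
	       (cts_rate << RTSCTS_SH_CTS_RATE);
}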
#define CR_WEP_PROTECT CTL_REG(0x063C)
#define CR_RX_THRESHOLD CTL_REG(0x0640)
/* register for controlling the LEDS */
#define CR_LED CTL_REG(0x0644)
/* masks for controlling LEDs */
#define LED1 (1 << 8)
#define LED2 (1 << 9)
#define LED_SW (1 << 10)
/* Seems to indicate that the configuration is over. */
#define CR_AFTER_PNP CTL_REG(0x0648)
#define CR_ACK_TIME_80211 CTL_REG(0x0658)
#define CR_RX_OFFSET CTL_REG(0x065c)
#define CR_BCN_LENGTH CTL_REG(0x0664)
#define CR_PHY_DELAY CTL_REG(0x066C)
#define CR_BCN_FIFO CTL_REG(0x0670)
#define CR_SNIFFER_ON CTL_REG(0x0674)
#define CR_ENCRYPTION_TYPE CTL_REG(0x0678)
#define NO_WEP 0
#define WEP64 1
#define WEP128 5
#define WEP256 6
#define ENC_SNIFFER 8
#define CR_ZD1211_RETRY_MAX CTL_REG(0x067C)
#define CR_REG1 CTL_REG(0x0680)
/* Setting the UNLOCK_PHY_REGS bit disallows write access to the PHY
 * registers, so one could argue it really is a LOCK bit. Calling it
 * LOCK_PHY_REGS, however, would be just as confusing.
 */
#define UNLOCK_PHY_REGS (1 << 7)
#define CR_DEVICE_STATE CTL_REG(0x0684)
#define CR_UNDERRUN_CNT CTL_REG(0x0688)
#define CR_RX_FILTER CTL_REG(0x068c)
#define RX_FILTER_ASSOC_REQUEST (1 << 0)
#define RX_FILTER_ASSOC_RESPONSE (1 << 1)
#define RX_FILTER_REASSOC_REQUEST (1 << 2)
#define RX_FILTER_REASSOC_RESPONSE (1 << 3)
#define RX_FILTER_PROBE_REQUEST (1 << 4)
#define RX_FILTER_PROBE_RESPONSE (1 << 5)
/* bits 6 and 7 reserved */
#define RX_FILTER_BEACON (1 << 8)
#define RX_FILTER_ATIM (1 << 9)
#define RX_FILTER_DISASSOC (1 << 10)
#define RX_FILTER_AUTH (1 << 11)
#define RX_FILTER_DEAUTH (1 << 12)
#define RX_FILTER_PSPOLL (1 << 26)
#define RX_FILTER_RTS (1 << 27)
#define RX_FILTER_CTS (1 << 28)
#define RX_FILTER_ACK (1 << 29)
#define RX_FILTER_CFEND (1 << 30)
#define RX_FILTER_CFACK (1 << 31)
/* Enable bits for all frames you are interested in. */
#define STA_RX_FILTER (RX_FILTER_ASSOC_REQUEST | RX_FILTER_ASSOC_RESPONSE | \
RX_FILTER_REASSOC_REQUEST | RX_FILTER_REASSOC_RESPONSE | \
RX_FILTER_PROBE_REQUEST | RX_FILTER_PROBE_RESPONSE | \
(0x3 << 6) /* vendor driver sets these reserved bits */ | \
RX_FILTER_BEACON | RX_FILTER_ATIM | RX_FILTER_DISASSOC | \
RX_FILTER_AUTH | RX_FILTER_DEAUTH | \
(0x7 << 13) /* vendor driver sets these reserved bits */ | \
RX_FILTER_PSPOLL | RX_FILTER_ACK) /* 0x2400ffff */
#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
RX_FILTER_CFEND | RX_FILTER_CFACK)
#define BCN_MODE_AP 0x1000000
#define BCN_MODE_IBSS 0x2000000
/* Monitor mode sets filter to 0xfffff */
#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690)
#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694)
#define CR_IFS_VALUE CTL_REG(0x0698)
#define IFS_VALUE_DIFS_SH 0
#define IFS_VALUE_EIFS_SH 12
#define IFS_VALUE_SIFS_SH 24
#define IFS_VALUE_DEFAULT (( 50 << IFS_VALUE_DIFS_SH) | \
(1148 << IFS_VALUE_EIFS_SH) | \
( 10 << IFS_VALUE_SIFS_SH))
#define CR_RX_TIME_OUT CTL_REG(0x069C)
#define CR_TOTAL_RX_FRM CTL_REG(0x06A0)
#define CR_CRC32_CNT CTL_REG(0x06A4)
#define CR_CRC16_CNT CTL_REG(0x06A8)
#define CR_DECRYPTION_ERR_UNI CTL_REG(0x06AC)
#define CR_RX_FIFO_OVERRUN CTL_REG(0x06B0)
#define CR_DECRYPTION_ERR_MUL CTL_REG(0x06BC)
#define CR_NAV_CNT CTL_REG(0x06C4)
#define CR_NAV_CCA CTL_REG(0x06C8)
#define CR_RETRY_CNT CTL_REG(0x06CC)
#define CR_READ_TCB_ADDR CTL_REG(0x06E8)
#define CR_READ_RFD_ADDR CTL_REG(0x06EC)
#define CR_CWMIN_CWMAX CTL_REG(0x06F0)
#define CR_TOTAL_TX_FRM CTL_REG(0x06F4)
/* CAM: Continuous Access Mode (power management) */
#define CR_CAM_MODE CTL_REG(0x0700)
#define MODE_IBSS 0x0
#define MODE_AP 0x1
#define MODE_STA 0x2
#define MODE_AP_WDS 0x3
#define CR_CAM_ROLL_TB_LOW CTL_REG(0x0704)
#define CR_CAM_ROLL_TB_HIGH CTL_REG(0x0708)
#define CR_CAM_ADDRESS CTL_REG(0x070C)
#define CR_CAM_DATA CTL_REG(0x0710)
#define CR_ROMDIR CTL_REG(0x0714)
#define CR_DECRY_ERR_FLG_LOW CTL_REG(0x0714)
#define CR_DECRY_ERR_FLG_HIGH CTL_REG(0x0718)
#define CR_WEPKEY0 CTL_REG(0x0720)
#define CR_WEPKEY1 CTL_REG(0x0724)
#define CR_WEPKEY2 CTL_REG(0x0728)
#define CR_WEPKEY3 CTL_REG(0x072C)
#define CR_WEPKEY4 CTL_REG(0x0730)
#define CR_WEPKEY5 CTL_REG(0x0734)
#define CR_WEPKEY6 CTL_REG(0x0738)
#define CR_WEPKEY7 CTL_REG(0x073C)
#define CR_WEPKEY8 CTL_REG(0x0740)
#define CR_WEPKEY9 CTL_REG(0x0744)
#define CR_WEPKEY10 CTL_REG(0x0748)
#define CR_WEPKEY11 CTL_REG(0x074C)
#define CR_WEPKEY12 CTL_REG(0x0750)
#define CR_WEPKEY13 CTL_REG(0x0754)
#define CR_WEPKEY14 CTL_REG(0x0758)
#define CR_WEPKEY15 CTL_REG(0x075c)
#define CR_TKIP_MODE CTL_REG(0x0760)
#define CR_EEPROM_PROTECT0 CTL_REG(0x0758)
#define CR_EEPROM_PROTECT1 CTL_REG(0x075C)
#define CR_DBG_FIFO_RD CTL_REG(0x0800)
#define CR_DBG_SELECT CTL_REG(0x0804)
#define CR_FIFO_Length CTL_REG(0x0808)
#define CR_RSSI_MGC CTL_REG(0x0810)
#define CR_PON CTL_REG(0x0818)
#define CR_RX_ON CTL_REG(0x081C)
#define CR_TX_ON CTL_REG(0x0820)
#define CR_CHIP_EN CTL_REG(0x0824)
#define CR_LO_SW CTL_REG(0x0828)
#define CR_TXRX_SW CTL_REG(0x082C)
#define CR_S_MD CTL_REG(0x0830)
#define CR_USB_DEBUG_PORT CTL_REG(0x0888)
#define CR_ZD1211B_CWIN_MAX_MIN_AC0 CTL_REG(0x0b00)
#define CR_ZD1211B_CWIN_MAX_MIN_AC1 CTL_REG(0x0b04)
#define CR_ZD1211B_CWIN_MAX_MIN_AC2 CTL_REG(0x0b08)
#define CR_ZD1211B_CWIN_MAX_MIN_AC3 CTL_REG(0x0b0c)
#define CR_ZD1211B_AIFS_CTL1 CTL_REG(0x0b10)
#define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14)
#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
/* Value for CR_ZD1211_RETRY_MAX & CR_ZD1211B_RETRY_MAX. The vendor driver
 * uses 2, we use 0. The first rate is tried (count + 2) times, then each
 * following rate is tried twice, until 1 Mbit/s is reached. */
#define ZD1211_RETRY_COUNT 0
#define ZD1211B_RETRY_COUNT \
	((ZD1211_RETRY_COUNT << 0) | \
	 (ZD1211_RETRY_COUNT << 8) | \
	 (ZD1211_RETRY_COUNT << 16) | \
	 (ZD1211_RETRY_COUNT << 24))
/* Used to detect PLL lock */
#define UW2453_INTR_REG ((zd_addr_t)0x85c1)
#define CWIN_SIZE 0x007f043f
#define HWINT_ENABLED \
(INT_TX_COMPLETE_EN| \
INT_RX_COMPLETE_EN| \
INT_RETRY_FAIL_EN| \
INT_WAKEUP_EN| \
INT_CFG_NEXT_BCN_EN)
#define HWINT_DISABLED 0
#define E2P_PWR_INT_GUARD 8
#define E2P_CHANNEL_COUNT 14
/* If you compare these addresses with the original ZYDAS driver, note
 * that we use word mapping for the EEPROM.
 */
/*
* Upper 16 bit contains the regulatory domain.
*/
#define E2P_SUBID E2P_DATA(0x00)
#define E2P_POD E2P_DATA(0x02)
#define E2P_MAC_ADDR_P1 E2P_DATA(0x04)
#define E2P_MAC_ADDR_P2 E2P_DATA(0x06)
#define E2P_PWR_CAL_VALUE1 E2P_DATA(0x08)
#define E2P_PWR_CAL_VALUE2 E2P_DATA(0x0a)
#define E2P_PWR_CAL_VALUE3 E2P_DATA(0x0c)
#define E2P_PWR_CAL_VALUE4 E2P_DATA(0x0e)
#define E2P_PWR_INT_VALUE1 E2P_DATA(0x10)
#define E2P_PWR_INT_VALUE2 E2P_DATA(0x12)
#define E2P_PWR_INT_VALUE3 E2P_DATA(0x14)
#define E2P_PWR_INT_VALUE4 E2P_DATA(0x16)
/* Contains a bit for each allowed channel. For Europe (ETSI 0x30), too, it
 * allows only 11 channels. */
#define E2P_ALLOWED_CHANNEL E2P_DATA(0x18)
#define E2P_DEVICE_VER E2P_DATA(0x20)
#define E2P_PHY_REG E2P_DATA(0x25)
#define E2P_36M_CAL_VALUE1 E2P_DATA(0x28)
#define E2P_36M_CAL_VALUE2 E2P_DATA(0x2a)
#define E2P_36M_CAL_VALUE3 E2P_DATA(0x2c)
#define E2P_36M_CAL_VALUE4 E2P_DATA(0x2e)
#define E2P_11A_INT_VALUE1 E2P_DATA(0x30)
#define E2P_11A_INT_VALUE2 E2P_DATA(0x32)
#define E2P_11A_INT_VALUE3 E2P_DATA(0x34)
#define E2P_11A_INT_VALUE4 E2P_DATA(0x36)
#define E2P_48M_CAL_VALUE1 E2P_DATA(0x38)
#define E2P_48M_CAL_VALUE2 E2P_DATA(0x3a)
#define E2P_48M_CAL_VALUE3 E2P_DATA(0x3c)
#define E2P_48M_CAL_VALUE4 E2P_DATA(0x3e)
#define E2P_48M_INT_VALUE1 E2P_DATA(0x40)
#define E2P_48M_INT_VALUE2 E2P_DATA(0x42)
#define E2P_48M_INT_VALUE3 E2P_DATA(0x44)
#define E2P_48M_INT_VALUE4 E2P_DATA(0x46)
#define E2P_54M_CAL_VALUE1 E2P_DATA(0x48) /* ??? */
#define E2P_54M_CAL_VALUE2 E2P_DATA(0x4a)
#define E2P_54M_CAL_VALUE3 E2P_DATA(0x4c)
#define E2P_54M_CAL_VALUE4 E2P_DATA(0x4e)
#define E2P_54M_INT_VALUE1 E2P_DATA(0x50)
#define E2P_54M_INT_VALUE2 E2P_DATA(0x52)
#define E2P_54M_INT_VALUE3 E2P_DATA(0x54)
#define E2P_54M_INT_VALUE4 E2P_DATA(0x56)
/* This word contains the base address of the FW_REG_ registers below */
#define FWRAW_REGS_ADDR FWRAW_DATA(0x1d)
/* All 16 bit values, offset from the address in FWRAW_REGS_ADDR */
enum {
FW_REG_FIRMWARE_VER = 0,
/* non-zero if USB high speed connection */
FW_REG_USB_SPEED = 1,
FW_REG_FIX_TX_RATE = 2,
/* Seems to be able to control LEDs over the firmware */
FW_REG_LED_LINK_STATUS = 3,
FW_REG_SOFT_RESET = 4,
FW_REG_FLASH_CHK = 5,
};
/* Values for FW_LINK_STATUS */
#define FW_LINK_OFF 0x0
#define FW_LINK_TX 0x1
/* 0x2 - link led on? */
enum {
/* indices for ofdm_cal_values */
OFDM_36M_INDEX = 0,
OFDM_48M_INDEX = 1,
OFDM_54M_INDEX = 2,
};
struct zd_chip {
struct zd_usb usb;
struct zd_rf rf;
struct mutex mutex;
/* Base address of FW_REG_ registers */
zd_addr_t fw_regs_base;
/* EepSetPoint in the vendor driver */
u8 pwr_cal_values[E2P_CHANNEL_COUNT];
/* integration values in the vendor driver */
u8 pwr_int_values[E2P_CHANNEL_COUNT];
/* SetPointOFDM in the vendor driver */
u8 ofdm_cal_values[3][E2P_CHANNEL_COUNT];
u16 link_led;
unsigned int pa_type:4,
patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1,
new_phy_layout:1, al2230s_bit:1,
supports_tx_led:1;
};
static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb)
{
return container_of(usb, struct zd_chip, usb);
}
static inline struct zd_chip *zd_rf_to_chip(struct zd_rf *rf)
{
return container_of(rf, struct zd_chip, rf);
}
#define zd_chip_dev(chip) (&(chip)->usb.intf->dev)
void zd_chip_init(struct zd_chip *chip,
struct ieee80211_hw *hw,
struct usb_interface *intf);
void zd_chip_clear(struct zd_chip *chip);
int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr);
int zd_chip_init_hw(struct zd_chip *chip);
int zd_chip_reset(struct zd_chip *chip);
static inline int zd_chip_is_zd1211b(struct zd_chip *chip)
{
return chip->usb.is_zd1211b;
}
static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values,
const zd_addr_t *addresses,
unsigned int count)
{
ZD_ASSERT(mutex_is_locked(&chip->mutex));
return zd_usb_ioread16v(&chip->usb, values, addresses, count);
}
static inline int zd_ioread16_locked(struct zd_chip *chip, u16 *value,
const zd_addr_t addr)
{
ZD_ASSERT(mutex_is_locked(&chip->mutex));
return zd_usb_ioread16(&chip->usb, value, addr);
}
int zd_ioread32v_locked(struct zd_chip *chip, u32 *values,
const zd_addr_t *addresses, unsigned int count);
static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value,
const zd_addr_t addr)
{
return zd_ioread32v_locked(chip, value, &addr, 1);
}
static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value,
zd_addr_t addr)
{
struct zd_ioreq16 ioreq;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
ioreq.addr = addr;
ioreq.value = value;
return zd_usb_iowrite16v(&chip->usb, &ioreq, 1);
}
int zd_iowrite16a_locked(struct zd_chip *chip,
const struct zd_ioreq16 *ioreqs, unsigned int count);
int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
unsigned int count);
static inline int zd_iowrite32_locked(struct zd_chip *chip, u32 value,
zd_addr_t addr)
{
struct zd_ioreq32 ioreq;
ioreq.addr = addr;
ioreq.value = value;
return _zd_iowrite32v_locked(chip, &ioreq, 1);
}
int zd_iowrite32a_locked(struct zd_chip *chip,
const struct zd_ioreq32 *ioreqs, unsigned int count);
static inline int zd_rfwrite_locked(struct zd_chip *chip, u32 value, u8 bits)
{
ZD_ASSERT(mutex_is_locked(&chip->mutex));
return zd_usb_rfwrite(&chip->usb, value, bits);
}
int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value);
int zd_rfwritev_locked(struct zd_chip *chip,
const u32* values, unsigned int count, u8 bits);
int zd_rfwritev_cr_locked(struct zd_chip *chip,
const u32* values, unsigned int count);
/* Locking functions for reading and writing registers.
* The different parameters are intentional.
*/
int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value);
int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value);
int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value);
int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value);
int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses,
u32 *values, unsigned int count);
int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
unsigned int count);
int zd_chip_set_channel(struct zd_chip *chip, u8 channel);
static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
{
return chip->rf.channel;
}
u8 zd_chip_get_channel(struct zd_chip *chip);
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
int zd_write_bssid(struct zd_chip *chip, const u8 *bssid);
int zd_chip_switch_radio_on(struct zd_chip *chip);
int zd_chip_switch_radio_off(struct zd_chip *chip);
int zd_chip_enable_int(struct zd_chip *chip);
void zd_chip_disable_int(struct zd_chip *chip);
int zd_chip_enable_rxtx(struct zd_chip *chip);
void zd_chip_disable_rxtx(struct zd_chip *chip);
int zd_chip_enable_hwint(struct zd_chip *chip);
int zd_chip_disable_hwint(struct zd_chip *chip);
int zd_chip_generic_patch_6m_band(struct zd_chip *chip, int channel);
int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip, int preamble);
static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type)
{
return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type);
}
static inline int zd_set_encryption_type(struct zd_chip *chip, u32 type)
{
return zd_iowrite32(chip, CR_ENCRYPTION_TYPE, type);
}
static inline int zd_chip_get_basic_rates(struct zd_chip *chip, u16 *cr_rates)
{
return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates);
}
int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates);
int zd_chip_lock_phy_regs(struct zd_chip *chip);
int zd_chip_unlock_phy_regs(struct zd_chip *chip);
enum led_status {
ZD_LED_OFF = 0,
ZD_LED_SCANNING = 1,
ZD_LED_ASSOCIATED = 2,
};
int zd_chip_control_leds(struct zd_chip *chip, enum led_status status);
int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
int type);
static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
{
return zd_ioread32(chip, CR_BCN_INTERVAL, interval);
}
struct rx_status;
u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
struct zd_mc_hash {
u32 low;
u32 high;
};
static inline void zd_mc_clear(struct zd_mc_hash *hash)
{
hash->low = 0;
	/* The interface must always receive broadcasts.
	 * The hash of the broadcast address ff:ff:ff:ff:ff:ff is 63.
	 */
hash->high = 0x80000000;
}
static inline void zd_mc_add_all(struct zd_mc_hash *hash)
{
hash->low = hash->high = 0xffffffff;
}
static inline void zd_mc_add_addr(struct zd_mc_hash *hash, u8 *addr)
{
unsigned int i = addr[5] >> 2;
if (i < 32) {
hash->low |= 1 << i;
} else {
hash->high |= 1 << (i-32);
}
}
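/*
 * Usage sketch for the helpers above (assumes the netdev_hw_addr_list
 * iteration from <linux/netdevice.h>, which net/mac80211.h pulls in):
 */
static inline void zd_mc_add_list(struct zd_mc_hash *hash,
				  struct netdev_hw_addr_list *mc_list)
{
	struct netdev_hw_addr *ha;

	zd_mc_clear(hash);
	netdev_hw_addr_list_for_each(ha, mc_list)
		zd_mc_add_addr(hash, ha->addr);
}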
int zd_chip_set_multicast_hash(struct zd_chip *chip,
struct zd_mc_hash *hash);
u64 zd_chip_get_tsf(struct zd_chip *chip);
#endif /* _ZD_CHIP_H */
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Toshiba PCI Secure Digital Host Controller Interface driver
*
* Copyright (C) 2014 Ondrej Zary
* Copyright (C) 2007 Richard Betts, All Rights Reserved.
*
* Based on asic3_mmc.c Copyright (c) 2005 SDG Systems, LLC
*/
#define HCLK 33000000 /* 33 MHz (PCI clock) */
#define SD_PCICFG_CLKSTOP 0x40 /* 0x1f = clock controller, 0 = stop */
#define SD_PCICFG_GATEDCLK 0x41 /* Gated clock */
#define SD_PCICFG_CLKMODE 0x42 /* Control clock of SD controller */
#define SD_PCICFG_PINSTATUS 0x44 /* R/O: read status of SD pins */
#define SD_PCICFG_POWER1 0x48
#define SD_PCICFG_POWER2 0x49
#define SD_PCICFG_POWER3 0x4a
#define SD_PCICFG_CARDDETECT 0x4c
#define SD_PCICFG_SLOTS 0x50 /* R/O: define support slot number */
#define SD_PCICFG_EXTGATECLK1 0xf0 /* Could be used for gated clock */
#define SD_PCICFG_EXTGATECLK2 0xf1 /* Could be used for gated clock */
#define SD_PCICFG_EXTGATECLK3 0xf9 /* Bit 1: double buffer/single buffer */
#define SD_PCICFG_SDLED_ENABLE1 0xfa
#define SD_PCICFG_SDLED_ENABLE2 0xfe
#define SD_PCICFG_CLKMODE_DIV_DISABLE BIT(0)
#define SD_PCICFG_CLKSTOP_ENABLE_ALL 0x1f
#define SD_PCICFG_LED_ENABLE1_START 0x12
#define SD_PCICFG_LED_ENABLE2_START 0x80
#define SD_PCICFG_PWR1_33V 0x08 /* Set for 3.3 volts */
#define SD_PCICFG_PWR1_OFF 0x00 /* Turn off power */
#define SD_PCICFG_PWR2_AUTO 0x02
#define SD_CMD 0x00 /* also for SDIO */
#define SD_ARG0 0x04 /* also for SDIO */
#define SD_ARG1 0x06 /* also for SDIO */
#define SD_STOPINTERNAL 0x08
#define SD_BLOCKCOUNT 0x0a /* also for SDIO */
#define SD_RESPONSE0 0x0c /* also for SDIO */
#define SD_RESPONSE1 0x0e /* also for SDIO */
#define SD_RESPONSE2 0x10 /* also for SDIO */
#define SD_RESPONSE3 0x12 /* also for SDIO */
#define SD_RESPONSE4 0x14 /* also for SDIO */
#define SD_RESPONSE5 0x16 /* also for SDIO */
#define SD_RESPONSE6 0x18 /* also for SDIO */
#define SD_RESPONSE7 0x1a /* also for SDIO */
#define SD_CARDSTATUS 0x1c /* also for SDIO */
#define SD_BUFFERCTRL 0x1e /* also for SDIO */
#define SD_INTMASKCARD 0x20 /* also for SDIO */
#define SD_INTMASKBUFFER 0x22 /* also for SDIO */
#define SD_CARDCLOCKCTRL 0x24
#define SD_CARDXFERDATALEN 0x26 /* also for SDIO */
#define SD_CARDOPTIONSETUP 0x28 /* also for SDIO */
#define SD_ERRORSTATUS0 0x2c /* also for SDIO */
#define SD_ERRORSTATUS1 0x2e /* also for SDIO */
#define SD_DATAPORT 0x30 /* also for SDIO */
#define SD_TRANSACTIONCTRL 0x34 /* also for SDIO */
#define SD_SOFTWARERESET 0xe0 /* also for SDIO */
/* registers above marked "also for SDIO" and all SDIO registers below can be
* accessed at SDIO_BASE + reg address */
#define SDIO_BASE 0x100
#define SDIO_CARDPORTSEL 0x02
#define SDIO_CARDINTCTRL 0x36
#define SDIO_CLOCKNWAITCTRL 0x38
#define SDIO_HOSTINFORMATION 0x3a
#define SDIO_ERRORCTRL 0x3c
#define SDIO_LEDCTRL 0x3e
#define SD_TRANSCTL_SET BIT(8)
#define SD_CARDCLK_DIV_DISABLE BIT(15)
#define SD_CARDCLK_ENABLE_CLOCK BIT(8)
#define SD_CARDCLK_CLK_DIV_512 BIT(7)
#define SD_CARDCLK_CLK_DIV_256 BIT(6)
#define SD_CARDCLK_CLK_DIV_128 BIT(5)
#define SD_CARDCLK_CLK_DIV_64 BIT(4)
#define SD_CARDCLK_CLK_DIV_32 BIT(3)
#define SD_CARDCLK_CLK_DIV_16 BIT(2)
#define SD_CARDCLK_CLK_DIV_8 BIT(1)
#define SD_CARDCLK_CLK_DIV_4 BIT(0)
#define SD_CARDCLK_CLK_DIV_2 0
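/*
 * Illustrative divider selection (a sketch, not the driver's exact
 * algorithm): pick the smallest power-of-two divider such that
 * HCLK / divider does not exceed the requested rate. Bit n above selects
 * HCLK / 2^(n + 2); the value 0 selects HCLK / 2.
 */
static inline u16 sd_cardclk_div_bits(unsigned int hz)
{
	unsigned int div = 2;
	u16 bits = SD_CARDCLK_CLK_DIV_2;	/* == 0, i.e. HCLK / 2 */

	while (div < 512 && hz < HCLK / div) {
		div *= 2;
		bits = bits ? bits << 1 : SD_CARDCLK_CLK_DIV_4;
	}
	return bits;
}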
#define SD_CARDOPT_REQUIRED 0x000e
#define SD_CARDOPT_DATA_RESP_TIMEOUT(x) (((x) & 0x0f) << 4) /* 4 bits */
#define SD_CARDOPT_C2_MODULE_ABSENT BIT(14)
#define SD_CARDOPT_DATA_XFR_WIDTH_1 (1 << 15)
#define SD_CARDOPT_DATA_XFR_WIDTH_4 (0 << 15)
#define SD_CMD_TYPE_CMD (0 << 6)
#define SD_CMD_TYPE_ACMD (1 << 6)
#define SD_CMD_TYPE_AUTHEN (2 << 6)
#define SD_CMD_RESP_TYPE_NONE (3 << 8)
#define SD_CMD_RESP_TYPE_EXT_R1 (4 << 8)
#define SD_CMD_RESP_TYPE_EXT_R1B (5 << 8)
#define SD_CMD_RESP_TYPE_EXT_R2 (6 << 8)
#define SD_CMD_RESP_TYPE_EXT_R3 (7 << 8)
#define SD_CMD_RESP_TYPE_EXT_R6 (4 << 8)
#define SD_CMD_RESP_TYPE_EXT_R7 (4 << 8)
#define SD_CMD_DATA_PRESENT BIT(11)
#define SD_CMD_TRANSFER_READ BIT(12)
#define SD_CMD_MULTI_BLOCK BIT(13)
#define SD_CMD_SECURITY_CMD BIT(14)
#define SD_STOPINT_ISSUE_CMD12 BIT(0)
#define SD_STOPINT_AUTO_ISSUE_CMD12 BIT(8)
#define SD_CARD_RESP_END BIT(0)
#define SD_CARD_RW_END BIT(2)
#define SD_CARD_CARD_REMOVED_0 BIT(3)
#define SD_CARD_CARD_INSERTED_0 BIT(4)
#define SD_CARD_PRESENT_0 BIT(5)
#define SD_CARD_UNK6 BIT(6)
#define SD_CARD_WRITE_PROTECT BIT(7)
#define SD_CARD_CARD_REMOVED_3 BIT(8)
#define SD_CARD_CARD_INSERTED_3 BIT(9)
#define SD_CARD_PRESENT_3 BIT(10)
#define SD_BUF_CMD_INDEX_ERR BIT(16)
#define SD_BUF_CRC_ERR BIT(17)
#define SD_BUF_STOP_BIT_END_ERR BIT(18)
#define SD_BUF_DATA_TIMEOUT BIT(19)
#define SD_BUF_OVERFLOW BIT(20)
#define SD_BUF_UNDERFLOW BIT(21)
#define SD_BUF_CMD_TIMEOUT BIT(22)
#define SD_BUF_UNK7 BIT(23)
#define SD_BUF_READ_ENABLE BIT(24)
#define SD_BUF_WRITE_ENABLE BIT(25)
#define SD_BUF_ILLEGAL_FUNCTION BIT(29)
#define SD_BUF_CMD_BUSY BIT(30)
#define SD_BUF_ILLEGAL_ACCESS BIT(31)
#define SD_ERR0_RESP_CMD_ERR BIT(0)
#define SD_ERR0_RESP_NON_CMD12_END_BIT_ERR BIT(2)
#define SD_ERR0_RESP_CMD12_END_BIT_ERR BIT(3)
#define SD_ERR0_READ_DATA_END_BIT_ERR BIT(4)
#define SD_ERR0_WRITE_CRC_STATUS_END_BIT_ERR BIT(5)
#define SD_ERR0_RESP_NON_CMD12_CRC_ERR BIT(8)
#define SD_ERR0_RESP_CMD12_CRC_ERR BIT(9)
#define SD_ERR0_READ_DATA_CRC_ERR BIT(10)
#define SD_ERR0_WRITE_CMD_CRC_ERR BIT(11)
#define SD_ERR1_NO_CMD_RESP BIT(16)
#define SD_ERR1_TIMEOUT_READ_DATA BIT(20)
#define SD_ERR1_TIMEOUT_CRS_STATUS BIT(21)
#define SD_ERR1_TIMEOUT_CRC_BUSY BIT(22)
#define IRQ_DONT_CARE_BITS (SD_CARD_PRESENT_3 \
| SD_CARD_WRITE_PROTECT \
| SD_CARD_UNK6 \
| SD_CARD_PRESENT_0 \
| SD_BUF_UNK7 \
| SD_BUF_CMD_BUSY)
struct toshsd_host {
struct pci_dev *pdev;
struct mmc_host *mmc;
spinlock_t lock;
struct mmc_request *mrq;/* Current request */
struct mmc_command *cmd;/* Current command */
struct mmc_data *data; /* Current data request */
struct sg_mapping_iter sg_miter; /* for PIO */
void __iomem *ioaddr; /* mapped address */
};
|
// SPDX-License-Identifier: GPL-2.0+
/*
* comedi_8255.c
* Generic 8255 digital I/O support
*
* Split from the Comedi "8255" driver module.
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <[email protected]>
*/
/*
* Module: comedi_8255
* Description: Generic 8255 support
* Author: ds
* Updated: Fri, 22 May 2015 12:14:17 +0000
* Status: works
*
* This module is not used directly by end-users. Rather, it is used by
* other drivers to provide support for an 8255 "Programmable Peripheral
* Interface" (PPI) chip.
*
 * The classic in digital I/O. The 8255 appears in Comedi as a single
 * digital I/O subdevice with 24 channels. Channel 0 corresponds to
 * the 8255's port A, bit 0; channel 23 corresponds to port C, bit 7.
* Direction configuration is done in blocks, with channels 0-7, 8-15,
* 16-19, and 20-23 making up the 4 blocks. The only 8255 mode
* supported is mode 0.
*/
#include <linux/module.h>
#include <linux/comedi/comedidev.h>
#include <linux/comedi/comedi_8255.h>
struct subdev_8255_private {
unsigned long context;
int (*io)(struct comedi_device *dev, int dir, int port, int data,
unsigned long context);
};
#ifdef CONFIG_HAS_IOPORT
static int subdev_8255_io(struct comedi_device *dev,
int dir, int port, int data, unsigned long regbase)
{
if (dir) {
outb(data, dev->iobase + regbase + port);
return 0;
}
return inb(dev->iobase + regbase + port);
}
#endif /* CONFIG_HAS_IOPORT */
static int subdev_8255_mmio(struct comedi_device *dev,
int dir, int port, int data, unsigned long regbase)
{
if (dir) {
writeb(data, dev->mmio + regbase + port);
return 0;
}
return readb(dev->mmio + regbase + port);
}
static int subdev_8255_insn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct subdev_8255_private *spriv = s->private;
unsigned long context = spriv->context;
unsigned int mask;
unsigned int v;
mask = comedi_dio_update_state(s, data);
if (mask) {
if (mask & 0xff)
spriv->io(dev, 1, I8255_DATA_A_REG,
s->state & 0xff, context);
if (mask & 0xff00)
spriv->io(dev, 1, I8255_DATA_B_REG,
(s->state >> 8) & 0xff, context);
if (mask & 0xff0000)
spriv->io(dev, 1, I8255_DATA_C_REG,
(s->state >> 16) & 0xff, context);
}
v = spriv->io(dev, 0, I8255_DATA_A_REG, 0, context);
v |= (spriv->io(dev, 0, I8255_DATA_B_REG, 0, context) << 8);
v |= (spriv->io(dev, 0, I8255_DATA_C_REG, 0, context) << 16);
data[1] = v;
return insn->n;
}
static void subdev_8255_do_config(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct subdev_8255_private *spriv = s->private;
unsigned long context = spriv->context;
int config;
config = I8255_CTRL_CW;
/* 1 in io_bits indicates output, 1 in config indicates input */
if (!(s->io_bits & 0x0000ff))
config |= I8255_CTRL_A_IO;
if (!(s->io_bits & 0x00ff00))
config |= I8255_CTRL_B_IO;
if (!(s->io_bits & 0x0f0000))
config |= I8255_CTRL_C_LO_IO;
if (!(s->io_bits & 0xf00000))
config |= I8255_CTRL_C_HI_IO;
spriv->io(dev, 1, I8255_CTRL_REG, config, context);
}
static int subdev_8255_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
int ret;
if (chan < 8)
mask = 0x0000ff;
else if (chan < 16)
mask = 0x00ff00;
else if (chan < 20)
mask = 0x0f0000;
else
mask = 0xf00000;
ret = comedi_dio_insn_config(dev, s, insn, data, mask);
if (ret)
return ret;
subdev_8255_do_config(dev, s);
return insn->n;
}
static int __subdev_8255_init(struct comedi_device *dev,
struct comedi_subdevice *s,
int (*io)(struct comedi_device *dev,
int dir, int port, int data,
unsigned long context),
unsigned long context)
{
struct subdev_8255_private *spriv;
if (!io)
return -EINVAL;
spriv = comedi_alloc_spriv(s, sizeof(*spriv));
if (!spriv)
return -ENOMEM;
spriv->context = context;
spriv->io = io;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 24;
s->range_table = &range_digital;
s->maxdata = 1;
s->insn_bits = subdev_8255_insn;
s->insn_config = subdev_8255_insn_config;
subdev_8255_do_config(dev, s);
return 0;
}
#ifdef CONFIG_HAS_IOPORT
/**
* subdev_8255_io_init - initialize DIO subdevice for driving I/O mapped 8255
* @dev: comedi device owning subdevice
* @s: comedi subdevice to initialize
* @regbase: offset of 8255 registers from dev->iobase
*
* Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
*
 * Return: -ENOMEM on memory allocation failure, zero on success.
*/
int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long regbase)
{
return __subdev_8255_init(dev, s, subdev_8255_io, regbase);
}
EXPORT_SYMBOL_GPL(subdev_8255_io_init);
#endif /* CONFIG_HAS_IOPORT */
/**
* subdev_8255_mm_init - initialize DIO subdevice for driving mmio-mapped 8255
* @dev: comedi device owning subdevice
* @s: comedi subdevice to initialize
* @regbase: offset of 8255 registers from dev->mmio
*
* Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
*
 * Return: -ENOMEM on memory allocation failure, zero on success.
*/
int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long regbase)
{
return __subdev_8255_init(dev, s, subdev_8255_mmio, regbase);
}
EXPORT_SYMBOL_GPL(subdev_8255_mm_init);
/**
* subdev_8255_cb_init - initialize DIO subdevice for driving callback-mapped 8255
* @dev: comedi device owning subdevice
* @s: comedi subdevice to initialize
* @io: register I/O call-back function
* @context: call-back context
*
* Initializes a comedi subdevice as a DIO subdevice driving an 8255 chip.
*
* The prototype of the I/O call-back function is of the following form:
*
* int my_8255_callback(struct comedi_device *dev, int dir, int port,
* int data, unsigned long context);
*
 * where 'dev' and 'context' match the values passed to this function,
 * 'port' is the 8255 port number 0 to 3 (including the control port), 'dir'
 * is the direction (0 for read, 1 for write) and 'data' is the value to be
 * written. It should return 0 when writing, or the value read when reading.
 *
 * Return: -ENOMEM on memory allocation failure, -EINVAL if @io is NULL,
 * zero on success.
*/
int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s,
int (*io)(struct comedi_device *dev, int dir, int port,
int data, unsigned long context),
unsigned long context)
{
return __subdev_8255_init(dev, s, io, context);
}
EXPORT_SYMBOL_GPL(subdev_8255_cb_init);
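/*
 * Illustrative sketch (hypothetical, not from any in-tree driver): a card
 * with its 8255 registers at a driver-chosen offset from dev->mmio could
 * implement the call-back documented above and register it like this,
 * where MY_8255_REG_OFFSET is a made-up constant:
 *
 *	static int my_8255_io(struct comedi_device *dev, int dir, int port,
 *			      int data, unsigned long context)
 *	{
 *		if (dir) {
 *			writeb(data, dev->mmio + context + port);
 *			return 0;
 *		}
 *		return readb(dev->mmio + context + port);
 *	}
 *
 *	ret = subdev_8255_cb_init(dev, s, my_8255_io, MY_8255_REG_OFFSET);
 */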
/**
* subdev_8255_regbase - get offset of 8255 registers or call-back context
* @s: comedi subdevice
*
* Returns the 'regbase' or 'context' parameter that was previously passed to
* subdev_8255_io_init(), subdev_8255_mm_init(), or subdev_8255_cb_init() to
* set up the subdevice. Only valid if the subdevice was set up successfully.
*/
unsigned long subdev_8255_regbase(struct comedi_subdevice *s)
{
struct subdev_8255_private *spriv = s->private;
return spriv->context;
}
EXPORT_SYMBOL_GPL(subdev_8255_regbase);
static int __init comedi_8255_module_init(void)
{
return 0;
}
module_init(comedi_8255_module_init);
static void __exit comedi_8255_module_exit(void)
{
}
module_exit(comedi_8255_module_exit);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi: Generic 8255 digital I/O support");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2008 Nokia Corporation
*
* Test random reads, writes and erases on MTD device.
*
* Author: Adrian Hunter <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include "mtd_test.h"
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static int count = 10000;
module_param(count, int, S_IRUGO);
MODULE_PARM_DESC(count, "Number of operations to do (default is 10000)");
static struct mtd_info *mtd;
static unsigned char *writebuf;
static unsigned char *readbuf;
static unsigned char *bbt;
static int *offsets;
static int pgsize;
static int bufsize;
static int ebcnt;
static int pgcnt;
static int rand_eb(void)
{
unsigned int eb;
again:
	/* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
eb = get_random_u32_below(ebcnt - 1);
if (bbt[eb])
goto again;
return eb;
}
static int rand_offs(void)
{
return get_random_u32_below(bufsize);
}
static int rand_len(int offs)
{
return get_random_u32_below(bufsize - offs);
}
static int do_read(void)
{
int eb = rand_eb();
int offs = rand_offs();
int len = rand_len(offs);
loff_t addr;
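	/* If the next eraseblock is bad, confine the I/O to this one. */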
if (bbt[eb + 1]) {
if (offs >= mtd->erasesize)
offs -= mtd->erasesize;
if (offs + len > mtd->erasesize)
len = mtd->erasesize - offs;
}
addr = (loff_t)eb * mtd->erasesize + offs;
return mtdtest_read(mtd, addr, len, readbuf);
}
static int do_write(void)
{
int eb = rand_eb(), offs, err, len;
loff_t addr;
offs = offsets[eb];
if (offs >= mtd->erasesize) {
err = mtdtest_erase_eraseblock(mtd, eb);
if (err)
return err;
offs = offsets[eb] = 0;
}
len = rand_len(offs);
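	/* Round the write length up to a whole number of pages. */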
len = ((len + pgsize - 1) / pgsize) * pgsize;
if (offs + len > mtd->erasesize) {
if (bbt[eb + 1])
len = mtd->erasesize - offs;
else {
err = mtdtest_erase_eraseblock(mtd, eb + 1);
if (err)
return err;
offsets[eb + 1] = 0;
}
}
addr = (loff_t)eb * mtd->erasesize + offs;
err = mtdtest_write(mtd, addr, len, writebuf);
if (unlikely(err))
return err;
offs += len;
while (offs > mtd->erasesize) {
offsets[eb++] = mtd->erasesize;
offs -= mtd->erasesize;
}
offsets[eb] = offs;
return 0;
}
static int do_operation(void)
{
if (get_random_u32_below(2))
return do_read();
else
return do_write();
}
static int __init mtd_stresstest_init(void)
{
int err;
int i, op;
uint64_t tmp;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
return err;
}
	if (mtd->writesize == 1) {
		pr_info("not NAND flash, assume page size is 512 bytes.\n");
		pgsize = 512;
	} else {
		pgsize = mtd->writesize;
	}
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
if (ebcnt < 2) {
pr_err("error: need at least 2 eraseblocks\n");
err = -ENOSPC;
goto out_put_mtd;
}
	/* Read or write up to 2 eraseblocks at a time */
bufsize = mtd->erasesize * 2;
err = -ENOMEM;
readbuf = vmalloc(bufsize);
writebuf = vmalloc(bufsize);
offsets = kmalloc_array(ebcnt, sizeof(int), GFP_KERNEL);
if (!readbuf || !writebuf || !offsets)
goto out;
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
get_random_bytes(writebuf, bufsize);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
goto out;
err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Do operations */
pr_info("doing operations\n");
for (op = 0; op < count; op++) {
if ((op & 1023) == 0)
pr_info("%d operations done\n", op);
err = do_operation();
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("finished, %d operations done\n", op);
out:
kfree(offsets);
kfree(bbt);
vfree(writebuf);
vfree(readbuf);
out_put_mtd:
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(mtd_stresstest_init);
static void __exit mtd_stresstest_exit(void)
{
}
module_exit(mtd_stresstest_exit);
MODULE_DESCRIPTION("Stress test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include "../kselftest_harness.h"
static int tun_attach(int fd, char *dev)
{
struct ifreq ifr;
memset(&ifr, 0, sizeof(ifr));
strcpy(ifr.ifr_name, dev);
ifr.ifr_flags = IFF_ATTACH_QUEUE;
return ioctl(fd, TUNSETQUEUE, (void *) &ifr);
}
static int tun_detach(int fd, char *dev)
{
struct ifreq ifr;
memset(&ifr, 0, sizeof(ifr));
strcpy(ifr.ifr_name, dev);
ifr.ifr_flags = IFF_DETACH_QUEUE;
return ioctl(fd, TUNSETQUEUE, (void *) &ifr);
}
static int tun_alloc(char *dev)
{
struct ifreq ifr;
int fd, err;
fd = open("/dev/net/tun", O_RDWR);
if (fd < 0) {
fprintf(stderr, "can't open tun: %s\n", strerror(errno));
return fd;
}
memset(&ifr, 0, sizeof(ifr));
strcpy(ifr.ifr_name, dev);
ifr.ifr_flags = IFF_TAP | IFF_NAPI | IFF_MULTI_QUEUE;
err = ioctl(fd, TUNSETIFF, (void *) &ifr);
if (err < 0) {
fprintf(stderr, "can't TUNSETIFF: %s\n", strerror(errno));
close(fd);
return err;
}
strcpy(dev, ifr.ifr_name);
return fd;
}
static int tun_delete(char *dev)
{
struct {
struct nlmsghdr nh;
struct ifinfomsg ifm;
unsigned char data[64];
} req;
struct rtattr *rta;
int ret, rtnl;
rtnl = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
if (rtnl < 0) {
fprintf(stderr, "can't open rtnl: %s\n", strerror(errno));
return 1;
}
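	/*
	 * Build a minimal RTM_DELLINK request: nlmsghdr + ifinfomsg,
	 * followed by an IFLA_IFNAME attribute naming the device.
	 */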
memset(&req, 0, sizeof(req));
req.nh.nlmsg_len = NLMSG_ALIGN(NLMSG_LENGTH(sizeof(req.ifm)));
req.nh.nlmsg_flags = NLM_F_REQUEST;
req.nh.nlmsg_type = RTM_DELLINK;
req.ifm.ifi_family = AF_UNSPEC;
rta = (struct rtattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len));
rta->rta_type = IFLA_IFNAME;
rta->rta_len = RTA_LENGTH(IFNAMSIZ);
req.nh.nlmsg_len += rta->rta_len;
memcpy(RTA_DATA(rta), dev, IFNAMSIZ);
ret = send(rtnl, &req, req.nh.nlmsg_len, 0);
if (ret < 0)
fprintf(stderr, "can't send: %s\n", strerror(errno));
ret = (unsigned int)ret != req.nh.nlmsg_len;
close(rtnl);
return ret;
}
FIXTURE(tun)
{
char ifname[IFNAMSIZ];
int fd, fd2;
};
FIXTURE_SETUP(tun)
{
memset(self->ifname, 0, sizeof(self->ifname));
self->fd = tun_alloc(self->ifname);
ASSERT_GE(self->fd, 0);
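	/*
	 * The device was created with IFF_MULTI_QUEUE, so a second
	 * TUNSETIFF on the same name attaches another queue.
	 */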
self->fd2 = tun_alloc(self->ifname);
ASSERT_GE(self->fd2, 0);
}
FIXTURE_TEARDOWN(tun)
{
if (self->fd >= 0)
close(self->fd);
if (self->fd2 >= 0)
close(self->fd2);
}
TEST_F(tun, delete_detach_close) {
EXPECT_EQ(tun_delete(self->ifname), 0);
EXPECT_EQ(tun_detach(self->fd, self->ifname), -1);
	EXPECT_EQ(errno, EINVAL);
}
TEST_F(tun, detach_delete_close) {
EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
EXPECT_EQ(tun_delete(self->ifname), 0);
}
TEST_F(tun, detach_close_delete) {
EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
close(self->fd);
self->fd = -1;
EXPECT_EQ(tun_delete(self->ifname), 0);
}
TEST_F(tun, reattach_delete_close) {
EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
EXPECT_EQ(tun_attach(self->fd, self->ifname), 0);
EXPECT_EQ(tun_delete(self->ifname), 0);
}
TEST_F(tun, reattach_close_delete) {
EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
EXPECT_EQ(tun_attach(self->fd, self->ifname), 0);
close(self->fd);
self->fd = -1;
EXPECT_EQ(tun_delete(self->ifname), 0);
}
TEST_HARNESS_MAIN
|
/* SPDX-License-Identifier: 0BSD */
/*
* XZ decompressor
*
* Authors: Lasse Collin <[email protected]>
* Igor Pavlov <https://7-zip.org/>
*/
#ifndef XZ_H
#define XZ_H
#ifdef __KERNEL__
# include <linux/stddef.h>
# include <linux/types.h>
#else
# include <stddef.h>
# include <stdint.h>
#endif
/**
* enum xz_mode - Operation mode
*
* @XZ_SINGLE: Single-call mode. This uses less RAM than
* multi-call modes, because the LZMA2
* dictionary doesn't need to be allocated as
* part of the decoder state. All required data
* structures are allocated at initialization,
* so xz_dec_run() cannot return XZ_MEM_ERROR.
* @XZ_PREALLOC: Multi-call mode with preallocated LZMA2
* dictionary buffer. All data structures are
* allocated at initialization, so xz_dec_run()
* cannot return XZ_MEM_ERROR.
* @XZ_DYNALLOC: Multi-call mode. The LZMA2 dictionary is
* allocated once the required size has been
* parsed from the stream headers. If the
* allocation fails, xz_dec_run() will return
* XZ_MEM_ERROR.
*
* It is possible to enable support only for a subset of the above
* modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
* or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
* with support for all operation modes, but the preboot code may
* be built with fewer features to minimize code size.
*/
enum xz_mode {
XZ_SINGLE,
XZ_PREALLOC,
XZ_DYNALLOC
};
/**
* enum xz_ret - Return codes
* @XZ_OK: Everything is OK so far. More input or more
* output space is required to continue. This
* return code is possible only in multi-call mode
* (XZ_PREALLOC or XZ_DYNALLOC).
* @XZ_STREAM_END: Operation finished successfully.
* @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding
* is still possible in multi-call mode by simply
* calling xz_dec_run() again.
* Note that this return value is used only if
* XZ_DEC_ANY_CHECK was defined at build time,
* which is not used in the kernel. Unsupported
* check types return XZ_OPTIONS_ERROR if
* XZ_DEC_ANY_CHECK was not defined at build time.
* @XZ_MEM_ERROR: Allocating memory failed. This return code is
* possible only if the decoder was initialized
 *                         with XZ_DYNALLOC. The decoder will not have tried
 *                         to allocate more memory than the dict_max argument
 *                         given to xz_dec_init().
* @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than
* allowed by the dict_max argument given to
* xz_dec_init(). This return value is possible
* only in multi-call mode (XZ_PREALLOC or
* XZ_DYNALLOC); the single-call mode (XZ_SINGLE)
* ignores the dict_max argument.
* @XZ_FORMAT_ERROR: File format was not recognized (wrong magic
* bytes).
* @XZ_OPTIONS_ERROR: This implementation doesn't support the requested
* compression options. In the decoder this means
* that the header CRC32 matches, but the header
* itself specifies something that we don't support.
* @XZ_DATA_ERROR: Compressed data is corrupt.
* @XZ_BUF_ERROR: Cannot make any progress. Details are slightly
* different between multi-call and single-call
* mode; more information below.
*
* In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls
* to XZ code cannot consume any input and cannot produce any new output.
* This happens when there is no new input available, or the output buffer
* is full while at least one output byte is still pending. Assuming your
* code is not buggy, you can get this error only when decoding a compressed
* stream that is truncated or otherwise corrupt.
*
* In single-call mode, XZ_BUF_ERROR is returned only when the output buffer
* is too small or the compressed input is corrupt in a way that makes the
* decoder produce more output than the caller expected. When it is
* (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR
* is used instead of XZ_BUF_ERROR.
*/
enum xz_ret {
XZ_OK,
XZ_STREAM_END,
XZ_UNSUPPORTED_CHECK,
XZ_MEM_ERROR,
XZ_MEMLIMIT_ERROR,
XZ_FORMAT_ERROR,
XZ_OPTIONS_ERROR,
XZ_DATA_ERROR,
XZ_BUF_ERROR
};
/**
* struct xz_buf - Passing input and output buffers to XZ code
* @in: Beginning of the input buffer. This may be NULL if and only
* if in_pos is equal to in_size.
* @in_pos: Current position in the input buffer. This must not exceed
* in_size.
* @in_size: Size of the input buffer
* @out: Beginning of the output buffer. This may be NULL if and only
* if out_pos is equal to out_size.
* @out_pos: Current position in the output buffer. This must not exceed
* out_size.
* @out_size: Size of the output buffer
*
* Only the contents of the output buffer from out[out_pos] onward, and
* the variables in_pos and out_pos are modified by the XZ code.
*/
struct xz_buf {
const uint8_t *in;
size_t in_pos;
size_t in_size;
uint8_t *out;
size_t out_pos;
size_t out_size;
};
/*
* struct xz_dec - Opaque type to hold the XZ decoder state
*/
struct xz_dec;
/**
 * xz_dec_init() - Allocate and initialize an XZ decoder state
* @mode: Operation mode
* @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for
* multi-call decoding. This is ignored in single-call mode
* (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes
* or 2^n + 2^(n-1) bytes (the latter sizes are less common
* in practice), so other values for dict_max don't make sense.
* In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB,
* 512 KiB, and 1 MiB are probably the only reasonable values,
* except for kernel and initramfs images where a bigger
* dictionary can be fine and useful.
*
* Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at
* once. The caller must provide enough output space or the decoding will
* fail. The output space is used as the dictionary buffer, which is why
* there is no need to allocate the dictionary as part of the decoder's
* internal state.
*
* Because the output buffer is used as the workspace, streams encoded using
* a big dictionary are not a problem in single-call mode. It is enough that
* the output buffer is big enough to hold the actual uncompressed data; it
* can be smaller than the dictionary size stored in the stream headers.
*
 * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes
 * of memory are preallocated for the LZMA2 dictionary. This way there is no
* risk that xz_dec_run() could run out of memory, since xz_dec_run() will
* never allocate any memory. Instead, if the preallocated dictionary is too
* small for decoding the given input stream, xz_dec_run() will return
* XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be
 * decoded to avoid allocating an excessive amount of memory for the dictionary.
*
* Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC):
* dict_max specifies the maximum allowed dictionary size that xz_dec_run()
* may allocate once it has parsed the dictionary size from the stream
* headers. This way excessive allocations can be avoided while still
* limiting the maximum memory usage to a sane value to prevent running the
* system out of memory when decompressing streams from untrusted sources.
*
* On success, xz_dec_init() returns a pointer to struct xz_dec, which is
* ready to be used with xz_dec_run(). If memory allocation fails,
* xz_dec_init() returns NULL.
*/
struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
/**
* xz_dec_run() - Run the XZ decoder
* @s: Decoder state allocated using xz_dec_init()
* @b: Input and output buffers
*
* The possible return values depend on build options and operation mode.
* See enum xz_ret for details.
*
* Note that if an error occurs in single-call mode (return value is not
* XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the
* contents of the output buffer from b->out[b->out_pos] onward are
* undefined. This is true even after XZ_BUF_ERROR, because with some filter
* chains, there may be a second pass over the output buffer, and this pass
* cannot be properly done if the output buffer is truncated. Thus, you
 * cannot give the single-call decoder a too-small buffer and then expect to
 * get that amount of valid data from the beginning of the stream. You must use
* the multi-call decoder if you don't want to uncompress the whole stream.
*/
enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
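/*
 * A minimal multi-call decoding loop might look like the sketch below
 * (illustrative only; fill() and flush() are hypothetical I/O helpers and
 * error handling is trimmed):
 *
 *	struct xz_dec *s = xz_dec_init(XZ_DYNALLOC, 1 << 26);
 *	struct xz_buf b = {
 *		.in = inbuf, .in_pos = 0, .in_size = 0,
 *		.out = outbuf, .out_pos = 0, .out_size = sizeof(outbuf)
 *	};
 *	enum xz_ret ret;
 *
 *	do {
 *		if (b.in_pos == b.in_size) {
 *			b.in_size = fill(inbuf, sizeof(inbuf));
 *			b.in_pos = 0;
 *		}
 *		ret = xz_dec_run(s, &b);
 *		flush(outbuf, b.out_pos);
 *		b.out_pos = 0;
 *	} while (ret == XZ_OK);
 *	xz_dec_end(s);
 */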
/**
* xz_dec_reset() - Reset an already allocated decoder state
* @s: Decoder state allocated using xz_dec_init()
*
* This function can be used to reset the multi-call decoder state without
* freeing and reallocating memory with xz_dec_end() and xz_dec_init().
*
* In single-call mode, xz_dec_reset() is always called in the beginning of
* xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in
* multi-call mode.
*/
void xz_dec_reset(struct xz_dec *s);
/**
* xz_dec_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_init(). If s is NULL,
* this function does nothing.
*/
void xz_dec_end(struct xz_dec *s);
/**
* DOC: MicroLZMA decompressor
*
* This MicroLZMA header format was created for use in EROFS but may be used
* by others too. **In most cases one needs the XZ APIs above instead.**
*
* The compressed format supported by this decoder is a raw LZMA stream
* whose first byte (always 0x00) has been replaced with bitwise-negation
* of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
* 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
* Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
* marker must not be used. The unused values are reserved for future use.
*/
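/*
 * The properties byte follows the usual LZMA encoding
 * (pb * 5 + lp) * 9 + lc, so for lc/lp/pb = 3/0/2 it is
 * (2 * 5 + 0) * 9 + 3 = 93 = 0x5D, whose bitwise negation is the 0xA2
 * given in the example above.
 */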
/*
* struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state
*/
struct xz_dec_microlzma;
/**
* xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder
* @mode: XZ_SINGLE or XZ_PREALLOC
* @dict_size: LZMA dictionary size. This must be at least 4 KiB and
* at most 3 GiB.
*
* In contrast to xz_dec_init(), this function only allocates the memory
* and remembers the dictionary size. xz_dec_microlzma_reset() must be used
* before calling xz_dec_microlzma_run().
*
* The amount of allocated memory is a little less than 30 KiB with XZ_SINGLE.
 * With XZ_PREALLOC, a dictionary buffer of dict_size bytes is also allocated.
*
* On success, xz_dec_microlzma_alloc() returns a pointer to
* struct xz_dec_microlzma. If memory allocation fails or
* dict_size is invalid, NULL is returned.
*/
struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
uint32_t dict_size);
/**
* xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state
* @s: Decoder state allocated using xz_dec_microlzma_alloc()
* @comp_size: Compressed size of the input stream
* @uncomp_size: Uncompressed size of the input stream. A value smaller
* than the real uncompressed size of the input stream can
* be specified if uncomp_size_is_exact is set to false.
* uncomp_size can never be set to a value larger than the
* expected real uncompressed size because it would eventually
* result in XZ_DATA_ERROR.
* @uncomp_size_is_exact: This is an int instead of bool to avoid
* requiring stdbool.h. This should normally be set to true.
* When this is set to false, error detection is weaker.
*/
void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
uint32_t uncomp_size, int uncomp_size_is_exact);
/**
* xz_dec_microlzma_run() - Run the MicroLZMA decoder
* @s: Decoder state initialized using xz_dec_microlzma_reset()
* @b: Input and output buffers
*
* This works similarly to xz_dec_run() with a few important differences.
* Only the differences are documented here.
*
* The only possible return values are XZ_OK, XZ_STREAM_END, and
* XZ_DATA_ERROR. This function cannot return XZ_BUF_ERROR: if no progress
* is possible due to lack of input data or output space, this function will
* keep returning XZ_OK. Thus, the calling code must be written so that it
* will eventually provide input and output space matching (or exceeding)
* comp_size and uncomp_size arguments given to xz_dec_microlzma_reset().
* If the caller cannot do this (for example, if the input file is truncated
* or otherwise corrupt), the caller must detect this error by itself to
* avoid an infinite loop.
*
* If the compressed data seems to be corrupt, XZ_DATA_ERROR is returned.
 * This can also happen when incorrect dictionary, uncompressed, or
* compressed sizes have been specified.
*
* With XZ_PREALLOC only: As an extra feature, b->out may be NULL to skip over
* uncompressed data. This way the caller doesn't need to provide a temporary
* output buffer for the bytes that will be ignored.
*
* With XZ_SINGLE only: In contrast to xz_dec_run(), the return value XZ_OK
* is also possible and thus XZ_SINGLE is actually a limited multi-call mode.
* After XZ_OK the bytes decoded so far may be read from the output buffer.
* It is possible to continue decoding but the variables b->out and b->out_pos
* MUST NOT be changed by the caller. Increasing the value of b->out_size is
* allowed to make more output space available; one doesn't need to provide
* space for the whole uncompressed data on the first call. The input buffer
* may be changed normally like with XZ_PREALLOC. This way input data can be
* provided from non-contiguous memory.
*/
enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s, struct xz_buf *b);
/**
* xz_dec_microlzma_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_microlzma_alloc().
* If s is NULL, this function does nothing.
*/
void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
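/*
 * Illustrative call sequence (sketch only): call xz_dec_microlzma_alloc()
 * once, then for each stream call xz_dec_microlzma_reset() followed by
 * xz_dec_microlzma_run() until it returns XZ_STREAM_END, and finally
 * xz_dec_microlzma_end() when the decoder is no longer needed.
 */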
/*
* Standalone build (userspace build or in-kernel build for boot time use)
* needs a CRC32 implementation. For normal in-kernel use, kernel's own
* CRC32 module is used instead, and users of this module don't need to
* care about the functions below.
*/
#ifndef XZ_INTERNAL_CRC32
# ifdef __KERNEL__
# define XZ_INTERNAL_CRC32 0
# else
# define XZ_INTERNAL_CRC32 1
# endif
#endif
#if XZ_INTERNAL_CRC32
/*
* This must be called before any other xz_* function to initialize
* the CRC32 lookup table.
*/
void xz_crc32_init(void);
/*
* Update CRC32 value using the polynomial from IEEE-802.3. To start a new
* calculation, the third argument must be zero. To continue the calculation,
* the previously returned value is passed as the third argument.
*/
uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
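/*
 * For example, a checksum over data arriving in two chunks can be
 * computed incrementally (illustrative only):
 *
 *	uint32_t crc = xz_crc32(buf, n1, 0);
 *	crc = xz_crc32(buf + n1, n2, crc);
 */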
#endif
#endif
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Backup battery driver for Wolfson Microelectronics wm831x PMICs
*
* Copyright 2009 Wolfson Microelectronics PLC.
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/auxadc.h>
#include <linux/mfd/wm831x/pmu.h>
#include <linux/mfd/wm831x/pdata.h>
struct wm831x_backup {
struct wm831x *wm831x;
struct power_supply *backup;
struct power_supply_desc backup_desc;
char name[20];
};
static int wm831x_backup_read_voltage(struct wm831x *wm831x,
enum wm831x_auxadc src,
union power_supply_propval *val)
{
int ret;
ret = wm831x_auxadc_read_uv(wm831x, src);
if (ret >= 0)
val->intval = ret;
return ret;
}
/*********************************************************************
* Backup supply properties
*********************************************************************/
static void wm831x_config_backup(struct wm831x *wm831x)
{
struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
struct wm831x_backup_pdata *pdata;
int ret, reg;
if (!wm831x_pdata || !wm831x_pdata->backup) {
dev_warn(wm831x->dev,
"No backup battery charger configuration\n");
return;
}
pdata = wm831x_pdata->backup;
reg = 0;
if (pdata->charger_enable)
reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
if (pdata->no_constant_voltage)
reg |= WM831X_BKUP_CHG_MODE;
switch (pdata->vlim) {
case 2500:
break;
case 3100:
reg |= WM831X_BKUP_CHG_VLIM;
break;
default:
dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
pdata->vlim);
}
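	/* The current limit field encodes 100uA..400uA in 100uA steps as 0..3. */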
switch (pdata->ilim) {
case 100:
break;
case 200:
reg |= 1;
break;
case 300:
reg |= 2;
break;
case 400:
reg |= 3;
break;
default:
dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
pdata->ilim);
}
ret = wm831x_reg_unlock(wm831x);
if (ret != 0) {
dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
return;
}
ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
WM831X_BKUP_CHG_ENA_MASK |
WM831X_BKUP_CHG_MODE_MASK |
WM831X_BKUP_BATT_DET_ENA_MASK |
WM831X_BKUP_CHG_VLIM_MASK |
WM831X_BKUP_CHG_ILIM_MASK,
reg);
if (ret != 0)
dev_err(wm831x->dev,
"Failed to set backup charger config: %d\n", ret);
wm831x_reg_lock(wm831x);
}
static int wm831x_backup_get_prop(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct wm831x_backup *devdata = dev_get_drvdata(psy->dev.parent);
struct wm831x *wm831x = devdata->wm831x;
int ret = 0;
ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
if (ret < 0)
return ret;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (ret & WM831X_BKUP_CHG_STS)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
ret = wm831x_backup_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
val);
break;
case POWER_SUPPLY_PROP_PRESENT:
if (ret & WM831X_BKUP_CHG_STS)
val->intval = 1;
else
val->intval = 0;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static enum power_supply_property wm831x_backup_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_PRESENT,
};
/*********************************************************************
* Initialisation
*********************************************************************/
static int wm831x_backup_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
struct wm831x_backup *devdata;
devdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_backup),
GFP_KERNEL);
if (devdata == NULL)
return -ENOMEM;
devdata->wm831x = wm831x;
/* We ignore configuration failures since we can still read
* back the status without enabling the charger (which may
* already be enabled anyway).
*/
wm831x_config_backup(wm831x);
if (wm831x_pdata && wm831x_pdata->wm831x_num)
snprintf(devdata->name, sizeof(devdata->name),
"wm831x-backup.%d", wm831x_pdata->wm831x_num);
else
snprintf(devdata->name, sizeof(devdata->name),
"wm831x-backup");
devdata->backup_desc.name = devdata->name;
devdata->backup_desc.type = POWER_SUPPLY_TYPE_BATTERY;
devdata->backup_desc.properties = wm831x_backup_props;
devdata->backup_desc.num_properties = ARRAY_SIZE(wm831x_backup_props);
devdata->backup_desc.get_property = wm831x_backup_get_prop;
devdata->backup = devm_power_supply_register(&pdev->dev,
&devdata->backup_desc, NULL);
return PTR_ERR_OR_ZERO(devdata->backup);
}
static struct platform_driver wm831x_backup_driver = {
.probe = wm831x_backup_probe,
.driver = {
.name = "wm831x-backup",
},
};
module_platform_driver(wm831x_backup_driver);
MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-backup");
|
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _smuio_13_0_6_SH_MASK_HEADER
#define _smuio_13_0_6_SH_MASK_HEADER
// addressBlock: smuio_smuio_reset_SmuSmuioDec
//SMUIO_MP_RESET_INTR
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
//SMUIO_SOC_HALT
#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2
#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3
#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L
#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L
// addressBlock: smuio_smuio_tsc_SmuSmuioDec
//PWROK_REFCLK_GAP_CYCLES
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL
#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L
//GOLDEN_TSC_INCREMENT_UPPER
#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0
#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL
//GOLDEN_TSC_INCREMENT_LOWER
#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0
#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL
//GOLDEN_TSC_COUNT_UPPER
#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0
#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL
//GOLDEN_TSC_COUNT_LOWER
#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0
#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL
//SOC_GOLDEN_TSC_SHADOW_UPPER
#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0
#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL
//SOC_GOLDEN_TSC_SHADOW_LOWER
#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0
#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL
//SOC_GAP_PWROK
#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0
#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L
// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
//PWR_DISP_TIMER_CONTROL
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
//PWR_DISP_TIMER_DEBUG
#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L
#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L
#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x00000004L
#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L
//PWR_DISP_TIMER2_CONTROL
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
//PWR_DISP_TIMER2_DEBUG
#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L
#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L
#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x00000004L
#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L
//PWR_DISP_TIMER_GLOBAL_CONTROL
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL
#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L
//PWR_IH_CONTROL
#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0
#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5
#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6
#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f
#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL
#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L
#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L
#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L
// addressBlock: smuio_smuio_misc_SmuSmuioDec
//SMUIO_MCM_CONFIG
#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0
#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2
#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x8
#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0xc
#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10
#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11
#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L
#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL
#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000300L
#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x00001000L
#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L
#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L
//IP_DISCOVERY_VERSION
#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0
#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER0
#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0
#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER1
#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0
#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER2
#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0
#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER3
#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0
#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER4
#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0
#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER5
#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0
#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER6
#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0
#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL
//SCRATCH_REGISTER7
#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0
#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL
// addressBlock: smuio_smuio_i2c_SmuSmuioDec
//CKSVII2C_IC_CON
#define CKSVII2C_IC_CON__IC_MASTER_MODE__SHIFT 0x0
#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE__SHIFT 0x1
#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE__SHIFT 0x3
#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER__SHIFT 0x4
#define CKSVII2C_IC_CON__IC_RESTART_EN__SHIFT 0x5
#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE__SHIFT 0x6
#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED__SHIFT 0x7
#define CKSVII2C_IC_CON__TX_EMPTY_CTRL__SHIFT 0x8
#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL__SHIFT 0x9
#define CKSVII2C_IC_CON__BUS_CLEAR_FEATURE_CTRL__SHIFT 0xb
#define CKSVII2C_IC_CON__IC_MASTER_MODE_MASK 0x00000001L
#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE_MASK 0x00000006L
#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE_MASK 0x00000008L
#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER_MASK 0x00000010L
#define CKSVII2C_IC_CON__IC_RESTART_EN_MASK 0x00000020L
#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE_MASK 0x00000040L
#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED_MASK 0x00000080L
#define CKSVII2C_IC_CON__TX_EMPTY_CTRL_MASK 0x00000100L
#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
#define CKSVII2C_IC_CON__BUS_CLEAR_FEATURE_CTRL_MASK 0x00000800L
//CKSVII2C_IC_TAR
#define CKSVII2C_IC_TAR__IC_TAR__SHIFT 0x0
#define CKSVII2C_IC_TAR__GC_OR_START__SHIFT 0xa
#define CKSVII2C_IC_TAR__SPECIAL__SHIFT 0xb
#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER__SHIFT 0xc
#define CKSVII2C_IC_TAR__IC_TAR_MASK 0x000003FFL
#define CKSVII2C_IC_TAR__GC_OR_START_MASK 0x00000400L
#define CKSVII2C_IC_TAR__SPECIAL_MASK 0x00000800L
#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER_MASK 0x00001000L
//CKSVII2C_IC_SAR
#define CKSVII2C_IC_SAR__IC_SAR__SHIFT 0x0
#define CKSVII2C_IC_SAR__IC_SAR_MASK 0x000003FFL
//CKSVII2C_IC_HS_MADDR
#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR__SHIFT 0x0
#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR_MASK 0x00000007L
//CKSVII2C_IC_DATA_CMD
#define CKSVII2C_IC_DATA_CMD__DAT__SHIFT 0x0
#define CKSVII2C_IC_DATA_CMD__CMD__SHIFT 0x8
#define CKSVII2C_IC_DATA_CMD__STOP__SHIFT 0x9
#define CKSVII2C_IC_DATA_CMD__RESTART__SHIFT 0xa
#define CKSVII2C_IC_DATA_CMD__FIRST_DATA_BYTE__SHIFT 0xb
#define CKSVII2C_IC_DATA_CMD__DAT_MASK 0x000000FFL
#define CKSVII2C_IC_DATA_CMD__CMD_MASK 0x00000100L
#define CKSVII2C_IC_DATA_CMD__STOP_MASK 0x00000200L
#define CKSVII2C_IC_DATA_CMD__RESTART_MASK 0x00000400L
#define CKSVII2C_IC_DATA_CMD__FIRST_DATA_BYTE_MASK 0x00000800L
//CKSVII2C_IC_SS_SCL_HCNT
#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT__SHIFT 0x0
#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT_MASK 0x0000FFFFL
//CKSVII2C_IC_SS_SCL_LCNT
#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT__SHIFT 0x0
#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT_MASK 0x0000FFFFL
//CKSVII2C_IC_FS_SCL_HCNT
#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT__SHIFT 0x0
#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT_MASK 0x0000FFFFL
//CKSVII2C_IC_FS_SCL_LCNT
#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT__SHIFT 0x0
#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT_MASK 0x0000FFFFL
//CKSVII2C_IC_HS_SCL_HCNT
#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT__SHIFT 0x0
#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT_MASK 0x0000FFFFL
//CKSVII2C_IC_HS_SCL_LCNT
#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT__SHIFT 0x0
#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT_MASK 0x0000FFFFL
//CKSVII2C_IC_INTR_STAT
#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER__SHIFT 0x0
#define CKSVII2C_IC_INTR_STAT__R_RX_OVER__SHIFT 0x1
#define CKSVII2C_IC_INTR_STAT__R_RX_FULL__SHIFT 0x2
#define CKSVII2C_IC_INTR_STAT__R_TX_OVER__SHIFT 0x3
#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY__SHIFT 0x4
#define CKSVII2C_IC_INTR_STAT__R_RD_REQ__SHIFT 0x5
#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT__SHIFT 0x6
#define CKSVII2C_IC_INTR_STAT__R_RX_DONE__SHIFT 0x7
#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY__SHIFT 0x8
#define CKSVII2C_IC_INTR_STAT__R_STOP_DET__SHIFT 0x9
#define CKSVII2C_IC_INTR_STAT__R_START_DET__SHIFT 0xa
#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL__SHIFT 0xb
#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET__SHIFT 0xc
#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD__SHIFT 0xd
#define CKSVII2C_IC_INTR_STAT__R_SCL_STUCK_AT_LOW__SHIFT 0xe
#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER_MASK 0x00000001L
#define CKSVII2C_IC_INTR_STAT__R_RX_OVER_MASK 0x00000002L
#define CKSVII2C_IC_INTR_STAT__R_RX_FULL_MASK 0x00000004L
#define CKSVII2C_IC_INTR_STAT__R_TX_OVER_MASK 0x00000008L
#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY_MASK 0x00000010L
#define CKSVII2C_IC_INTR_STAT__R_RD_REQ_MASK 0x00000020L
#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT_MASK 0x00000040L
#define CKSVII2C_IC_INTR_STAT__R_RX_DONE_MASK 0x00000080L
#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY_MASK 0x00000100L
#define CKSVII2C_IC_INTR_STAT__R_STOP_DET_MASK 0x00000200L
#define CKSVII2C_IC_INTR_STAT__R_START_DET_MASK 0x00000400L
#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL_MASK 0x00000800L
#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET_MASK 0x00001000L
#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD_MASK 0x00002000L
#define CKSVII2C_IC_INTR_STAT__R_SCL_STUCK_AT_LOW_MASK 0x00004000L
//CKSVII2C_IC_INTR_MASK
#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER__SHIFT 0x0
#define CKSVII2C_IC_INTR_MASK__M_RX_OVER__SHIFT 0x1
#define CKSVII2C_IC_INTR_MASK__M_RX_FULL__SHIFT 0x2
#define CKSVII2C_IC_INTR_MASK__M_TX_OVER__SHIFT 0x3
#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY__SHIFT 0x4
#define CKSVII2C_IC_INTR_MASK__M_RD_REQ__SHIFT 0x5
#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT__SHIFT 0x6
#define CKSVII2C_IC_INTR_MASK__M_RX_DONE__SHIFT 0x7
#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY__SHIFT 0x8
#define CKSVII2C_IC_INTR_MASK__M_STOP_DET__SHIFT 0x9
#define CKSVII2C_IC_INTR_MASK__M_START_DET__SHIFT 0xa
#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL__SHIFT 0xb
#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET__SHIFT 0xc
#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD__SHIFT 0xd
#define CKSVII2C_IC_INTR_MASK__M_SCL_STUCK_AT_LOW__SHIFT 0xe
#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER_MASK 0x00000001L
#define CKSVII2C_IC_INTR_MASK__M_RX_OVER_MASK 0x00000002L
#define CKSVII2C_IC_INTR_MASK__M_RX_FULL_MASK 0x00000004L
#define CKSVII2C_IC_INTR_MASK__M_TX_OVER_MASK 0x00000008L
#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY_MASK 0x00000010L
#define CKSVII2C_IC_INTR_MASK__M_RD_REQ_MASK 0x00000020L
#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT_MASK 0x00000040L
#define CKSVII2C_IC_INTR_MASK__M_RX_DONE_MASK 0x00000080L
#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY_MASK 0x00000100L
#define CKSVII2C_IC_INTR_MASK__M_STOP_DET_MASK 0x00000200L
#define CKSVII2C_IC_INTR_MASK__M_START_DET_MASK 0x00000400L
#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL_MASK 0x00000800L
#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET_MASK 0x00001000L
#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD_MASK 0x00002000L
#define CKSVII2C_IC_INTR_MASK__M_SCL_STUCK_AT_LOW_MASK 0x00004000L
//CKSVII2C_IC_RAW_INTR_STAT
//CKSVII2C_IC_RX_TL
#define CKSVII2C_IC_RX_TL__RX_TL__SHIFT 0x0
#define CKSVII2C_IC_RX_TL__RX_TL_MASK 0x000000FFL
//CKSVII2C_IC_TX_TL
#define CKSVII2C_IC_TX_TL__TX_TL__SHIFT 0x0
#define CKSVII2C_IC_TX_TL__TX_TL_MASK 0x000000FFL
//CKSVII2C_IC_CLR_INTR
//CKSVII2C_IC_CLR_RX_UNDER
//CKSVII2C_IC_CLR_RX_OVER
//CKSVII2C_IC_CLR_TX_OVER
//CKSVII2C_IC_CLR_RD_REQ
//CKSVII2C_IC_CLR_TX_ABRT
//CKSVII2C_IC_CLR_RX_DONE
//CKSVII2C_IC_CLR_ACTIVITY
//CKSVII2C_IC_CLR_STOP_DET
//CKSVII2C_IC_CLR_START_DET
//CKSVII2C_IC_CLR_GEN_CALL
//CKSVII2C_IC_ENABLE
#define CKSVII2C_IC_ENABLE__ENABLE__SHIFT 0x0
#define CKSVII2C_IC_ENABLE__ABORT__SHIFT 0x1
#define CKSVII2C_IC_ENABLE__TX_CMD_BLOCK__SHIFT 0x2
#define CKSVII2C_IC_ENABLE__SDA_STUCK_RECOVERY_ENABLE__SHIFT 0x3
#define CKSVII2C_IC_ENABLE__ENABLE_MASK 0x00000001L
#define CKSVII2C_IC_ENABLE__ABORT_MASK 0x00000002L
#define CKSVII2C_IC_ENABLE__TX_CMD_BLOCK_MASK 0x00000004L
#define CKSVII2C_IC_ENABLE__SDA_STUCK_RECOVERY_ENABLE_MASK 0x00000008L
//CKSVII2C_IC_STATUS
#define CKSVII2C_IC_STATUS__ACTIVITY__SHIFT 0x0
#define CKSVII2C_IC_STATUS__TFNF__SHIFT 0x1
#define CKSVII2C_IC_STATUS__TFE__SHIFT 0x2
#define CKSVII2C_IC_STATUS__RFNE__SHIFT 0x3
#define CKSVII2C_IC_STATUS__RFF__SHIFT 0x4
#define CKSVII2C_IC_STATUS__MST_ACTIVITY__SHIFT 0x5
#define CKSVII2C_IC_STATUS__SLV_ACTIVITY__SHIFT 0x6
#define CKSVII2C_IC_STATUS__MST_HOLD_TX_FIFO_EMPTY__SHIFT 0x7
#define CKSVII2C_IC_STATUS__MST_HOLD_RX_FIFO_FULL__SHIFT 0x8
#define CKSVII2C_IC_STATUS__SLV_HOLD_TX_FIFO_EMPTY__SHIFT 0x9
#define CKSVII2C_IC_STATUS__SLV_HOLD_RX_FIFO_FULL__SHIFT 0xa
#define CKSVII2C_IC_STATUS__SDA_STUCK_NOT_RECOVERED__SHIFT 0xb
#define CKSVII2C_IC_STATUS__ACTIVITY_MASK 0x00000001L
#define CKSVII2C_IC_STATUS__TFNF_MASK 0x00000002L
#define CKSVII2C_IC_STATUS__TFE_MASK 0x00000004L
#define CKSVII2C_IC_STATUS__RFNE_MASK 0x00000008L
#define CKSVII2C_IC_STATUS__RFF_MASK 0x00000010L
#define CKSVII2C_IC_STATUS__MST_ACTIVITY_MASK 0x00000020L
#define CKSVII2C_IC_STATUS__SLV_ACTIVITY_MASK 0x00000040L
#define CKSVII2C_IC_STATUS__MST_HOLD_TX_FIFO_EMPTY_MASK 0x00000080L
#define CKSVII2C_IC_STATUS__MST_HOLD_RX_FIFO_FULL_MASK 0x00000100L
#define CKSVII2C_IC_STATUS__SLV_HOLD_TX_FIFO_EMPTY_MASK 0x00000200L
#define CKSVII2C_IC_STATUS__SLV_HOLD_RX_FIFO_FULL_MASK 0x00000400L
#define CKSVII2C_IC_STATUS__SDA_STUCK_NOT_RECOVERED_MASK 0x00000800L
//CKSVII2C_IC_TXFLR
#define CKSVII2C_IC_TXFLR__TXFLR__SHIFT 0x0
#define CKSVII2C_IC_TXFLR__TXFLR_MASK 0x0000003FL
//CKSVII2C_IC_RXFLR
#define CKSVII2C_IC_RXFLR__RXFLR__SHIFT 0x0
#define CKSVII2C_IC_RXFLR__RXFLR_MASK 0x0000003FL
//CKSVII2C_IC_SDA_HOLD
#define CKSVII2C_IC_SDA_HOLD__IC_SDA_TX_HOLD__SHIFT 0x0
#define CKSVII2C_IC_SDA_HOLD__IC_SDA_RX_HOLD__SHIFT 0x10
#define CKSVII2C_IC_SDA_HOLD__IC_SDA_TX_HOLD_MASK 0x0000FFFFL
#define CKSVII2C_IC_SDA_HOLD__IC_SDA_RX_HOLD_MASK 0x00FF0000L
//CKSVII2C_IC_TX_ABRT_SOURCE
//CKSVII2C_IC_SLV_DATA_NACK_ONLY
//CKSVII2C_IC_DMA_CR
//CKSVII2C_IC_DMA_TDLR
//CKSVII2C_IC_DMA_RDLR
//CKSVII2C_IC_SDA_SETUP
#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP__SHIFT 0x0
#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP_MASK 0x000000FFL
//CKSVII2C_IC_ACK_GENERAL_CALL
#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL__SHIFT 0x0
#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL_MASK 0x00000001L
//CKSVII2C_IC_ENABLE_STATUS
#define CKSVII2C_IC_ENABLE_STATUS__IC_EN__SHIFT 0x0
#define CKSVII2C_IC_ENABLE_STATUS__SLV_DISABLED_WHILE_BUSY__SHIFT 0x1
#define CKSVII2C_IC_ENABLE_STATUS__SLV_RX_DATA_LOST__SHIFT 0x2
#define CKSVII2C_IC_ENABLE_STATUS__IC_EN_MASK 0x00000001L
#define CKSVII2C_IC_ENABLE_STATUS__SLV_DISABLED_WHILE_BUSY_MASK 0x00000002L
#define CKSVII2C_IC_ENABLE_STATUS__SLV_RX_DATA_LOST_MASK 0x00000004L
//CKSVII2C_IC_FS_SPKLEN
#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN__SHIFT 0x0
#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN_MASK 0x000000FFL
//CKSVII2C_IC_HS_SPKLEN
#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN__SHIFT 0x0
#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN_MASK 0x000000FFL
//CKSVII2C_IC_CLR_RESTART_DET
//CKSVII2C_IC_COMP_PARAM_1
#define CKSVII2C_IC_COMP_PARAM_1__APB_DATA_WIDTH__SHIFT 0x0
#define CKSVII2C_IC_COMP_PARAM_1__MAX_SPEED_MODE__SHIFT 0x2
#define CKSVII2C_IC_COMP_PARAM_1__HC_COUNT_VALUES__SHIFT 0x4
#define CKSVII2C_IC_COMP_PARAM_1__INTR_IO__SHIFT 0x5
#define CKSVII2C_IC_COMP_PARAM_1__HAS_DMA__SHIFT 0x6
#define CKSVII2C_IC_COMP_PARAM_1__ADD_ENCODED_PARAMS__SHIFT 0x7
#define CKSVII2C_IC_COMP_PARAM_1__RX_BUFFER_DEPTH__SHIFT 0x8
#define CKSVII2C_IC_COMP_PARAM_1__TX_BUFFER_DEPTH__SHIFT 0x10
#define CKSVII2C_IC_COMP_PARAM_1__APB_DATA_WIDTH_MASK 0x00000003L
#define CKSVII2C_IC_COMP_PARAM_1__MAX_SPEED_MODE_MASK 0x0000000CL
#define CKSVII2C_IC_COMP_PARAM_1__HC_COUNT_VALUES_MASK 0x00000010L
#define CKSVII2C_IC_COMP_PARAM_1__INTR_IO_MASK 0x00000020L
#define CKSVII2C_IC_COMP_PARAM_1__HAS_DMA_MASK 0x00000040L
#define CKSVII2C_IC_COMP_PARAM_1__ADD_ENCODED_PARAMS_MASK 0x00000080L
#define CKSVII2C_IC_COMP_PARAM_1__RX_BUFFER_DEPTH_MASK 0x0000FF00L
#define CKSVII2C_IC_COMP_PARAM_1__TX_BUFFER_DEPTH_MASK 0x00FF0000L
//CKSVII2C_IC_COMP_VERSION
#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION__SHIFT 0x0
#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION_MASK 0xFFFFFFFFL
//CKSVII2C_IC_COMP_TYPE
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL
//CKSVII2C1_IC_CON
#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0
#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1
#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3
#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4
#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5
#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6
#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7
#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8
#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9
#define CKSVII2C1_IC_CON__BUS_CLEAR_FEATURE_CTRL1__SHIFT 0xb
#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L
#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L
#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L
#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L
#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L
#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L
#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L
#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L
#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
#define CKSVII2C1_IC_CON__BUS_CLEAR_FEATURE_CTRL1_MASK 0x00000800L
//CKSVII2C1_IC_TAR
#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0
#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa
#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb
#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc
#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL
#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L
#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L
#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L
//CKSVII2C1_IC_SAR
#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0
#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL
//CKSVII2C1_IC_HS_MADDR
#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0
#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L
//CKSVII2C1_IC_DATA_CMD
#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0
#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8
#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9
#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa
#define CKSVII2C1_IC_DATA_CMD__FIRST1_DATA_BYTE__SHIFT 0xb
#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL
#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L
#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L
#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L
#define CKSVII2C1_IC_DATA_CMD__FIRST1_DATA_BYTE_MASK 0x00000800L
//CKSVII2C1_IC_SS_SCL_HCNT
#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0
#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL
//CKSVII2C1_IC_SS_SCL_LCNT
#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0
#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL
//CKSVII2C1_IC_FS_SCL_HCNT
#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0
#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL
//CKSVII2C1_IC_FS_SCL_LCNT
#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0
#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL
//CKSVII2C1_IC_HS_SCL_HCNT
#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0
#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL
//CKSVII2C1_IC_HS_SCL_LCNT
#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0
#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL
//CKSVII2C1_IC_INTR_STAT
#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0
#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1
#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2
#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3
#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4
#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5
#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6
#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7
#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8
#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9
#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa
#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb
#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc
#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd
#define CKSVII2C1_IC_INTR_STAT__R1_SCL_STUCK_AT_LOW__SHIFT 0xe
#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L
#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L
#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L
#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L
#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L
#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L
#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L
#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L
#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L
#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L
#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L
#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L
#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L
#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L
#define CKSVII2C1_IC_INTR_STAT__R1_SCL_STUCK_AT_LOW_MASK 0x00004000L
//CKSVII2C1_IC_INTR_MASK
#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0
#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1
#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2
#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3
#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4
#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5
#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6
#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7
#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8
#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9
#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa
#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb
#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc
#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd
#define CKSVII2C1_IC_INTR_MASK__M1_SCL_STUCK_AT_LOW__SHIFT 0xe
#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L
#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L
#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L
#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L
#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L
#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L
#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L
#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L
#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L
#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L
#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L
#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L
#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L
#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L
#define CKSVII2C1_IC_INTR_MASK__M1_SCL_STUCK_AT_LOW_MASK 0x00004000L
//CKSVII2C1_IC_RAW_INTR_STAT
//CKSVII2C1_IC_RX_TL
#define CKSVII2C1_IC_RX_TL__RX1_TL__SHIFT 0x0
#define CKSVII2C1_IC_RX_TL__RX1_TL_MASK 0x000000FFL
//CKSVII2C1_IC_TX_TL
#define CKSVII2C1_IC_TX_TL__TX1_TL__SHIFT 0x0
#define CKSVII2C1_IC_TX_TL__TX1_TL_MASK 0x000000FFL
//CKSVII2C1_IC_CLR_INTR
//CKSVII2C1_IC_CLR_RX_UNDER
//CKSVII2C1_IC_CLR_RX_OVER
//CKSVII2C1_IC_CLR_TX_OVER
//CKSVII2C1_IC_CLR_RD_REQ
//CKSVII2C1_IC_CLR_TX_ABRT
//CKSVII2C1_IC_CLR_RX_DONE
//CKSVII2C1_IC_CLR_ACTIVITY
//CKSVII2C1_IC_CLR_STOP_DET
//CKSVII2C1_IC_CLR_START_DET
//CKSVII2C1_IC_CLR_GEN_CALL
//CKSVII2C1_IC_ENABLE
#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0
#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1
#define CKSVII2C1_IC_ENABLE__TX1_CMD_BLOCK__SHIFT 0x2
#define CKSVII2C1_IC_ENABLE__SDA1_STUCK_RECOVERY_ENABLE__SHIFT 0x3
#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L
#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L
#define CKSVII2C1_IC_ENABLE__TX1_CMD_BLOCK_MASK 0x00000004L
#define CKSVII2C1_IC_ENABLE__SDA1_STUCK_RECOVERY_ENABLE_MASK 0x00000008L
//CKSVII2C1_IC_STATUS
#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0
#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1
#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2
#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3
#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4
#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5
#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6
#define CKSVII2C1_IC_STATUS__MST1_HOLD_TX_FIFO_EMPTY__SHIFT 0x7
#define CKSVII2C1_IC_STATUS__MST1_HOLD_RX_FIFO_FULL__SHIFT 0x8
#define CKSVII2C1_IC_STATUS__SLV1_HOLD_TX_FIFO_EMPTY__SHIFT 0x9
#define CKSVII2C1_IC_STATUS__SLV1_HOLD_RX_FIFO_FULL__SHIFT 0xa
#define CKSVII2C1_IC_STATUS__SDA1_STUCK_NOT_RECOVERED__SHIFT 0xb
#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L
#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L
#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L
#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L
#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L
#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L
#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L
#define CKSVII2C1_IC_STATUS__MST1_HOLD_TX_FIFO_EMPTY_MASK 0x00000080L
#define CKSVII2C1_IC_STATUS__MST1_HOLD_RX_FIFO_FULL_MASK 0x00000100L
#define CKSVII2C1_IC_STATUS__SLV1_HOLD_TX_FIFO_EMPTY_MASK 0x00000200L
#define CKSVII2C1_IC_STATUS__SLV1_HOLD_RX_FIFO_FULL_MASK 0x00000400L
#define CKSVII2C1_IC_STATUS__SDA1_STUCK_NOT_RECOVERED_MASK 0x00000800L
//CKSVII2C1_IC_TXFLR
#define CKSVII2C1_IC_TXFLR__TXFLR1__SHIFT 0x0
#define CKSVII2C1_IC_TXFLR__TXFLR1_MASK 0x0000003FL
//CKSVII2C1_IC_RXFLR
#define CKSVII2C1_IC_RXFLR__RXFLR1__SHIFT 0x0
#define CKSVII2C1_IC_RXFLR__RXFLR1_MASK 0x0000003FL
//CKSVII2C1_IC_SDA_HOLD
#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_TX_HOLD__SHIFT 0x0
#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_RX_HOLD__SHIFT 0x10
#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_TX_HOLD_MASK 0x0000FFFFL
#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_RX_HOLD_MASK 0x00FF0000L
//CKSVII2C1_IC_TX_ABRT_SOURCE
//CKSVII2C1_IC_SLV_DATA_NACK_ONLY
//CKSVII2C1_IC_DMA_CR
//CKSVII2C1_IC_DMA_TDLR
//CKSVII2C1_IC_DMA_RDLR
//CKSVII2C1_IC_SDA_SETUP
#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0
#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL
//CKSVII2C1_IC_ACK_GENERAL_CALL
#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0
#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L
//CKSVII2C1_IC_ENABLE_STATUS
#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0
#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_DISABLED_WHILE_BUSY__SHIFT 0x1
#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_DATA_LOST__SHIFT 0x2
#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L
#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_DISABLED_WHILE_BUSY_MASK 0x00000002L
#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_DATA_LOST_MASK 0x00000004L
//CKSVII2C1_IC_FS_SPKLEN
#define CKSVII2C1_IC_FS_SPKLEN__FS1_SPKLEN__SHIFT 0x0
#define CKSVII2C1_IC_FS_SPKLEN__FS1_SPKLEN_MASK 0x000000FFL
//CKSVII2C1_IC_HS_SPKLEN
#define CKSVII2C1_IC_HS_SPKLEN__HS1_SPKLEN__SHIFT 0x0
#define CKSVII2C1_IC_HS_SPKLEN__HS1_SPKLEN_MASK 0x000000FFL
//CKSVII2C1_IC_CLR_RESTART_DET
//CKSVII2C1_IC_COMP_PARAM_1
#define CKSVII2C1_IC_COMP_PARAM_1__APB1_DATA_WIDTH__SHIFT 0x0
#define CKSVII2C1_IC_COMP_PARAM_1__MAX1_SPEED_MODE__SHIFT 0x2
#define CKSVII2C1_IC_COMP_PARAM_1__HC1_COUNT_VALUES__SHIFT 0x4
#define CKSVII2C1_IC_COMP_PARAM_1__INTR1_IO__SHIFT 0x5
#define CKSVII2C1_IC_COMP_PARAM_1__HAS1_DMA__SHIFT 0x6
#define CKSVII2C1_IC_COMP_PARAM_1__ADD1_ENCODED_PARAMS__SHIFT 0x7
#define CKSVII2C1_IC_COMP_PARAM_1__RX1_BUFFER_DEPTH__SHIFT 0x8
#define CKSVII2C1_IC_COMP_PARAM_1__TX1_BUFFER_DEPTH__SHIFT 0x10
#define CKSVII2C1_IC_COMP_PARAM_1__APB1_DATA_WIDTH_MASK 0x00000003L
#define CKSVII2C1_IC_COMP_PARAM_1__MAX1_SPEED_MODE_MASK 0x0000000CL
#define CKSVII2C1_IC_COMP_PARAM_1__HC1_COUNT_VALUES_MASK 0x00000010L
#define CKSVII2C1_IC_COMP_PARAM_1__INTR1_IO_MASK 0x00000020L
#define CKSVII2C1_IC_COMP_PARAM_1__HAS1_DMA_MASK 0x00000040L
#define CKSVII2C1_IC_COMP_PARAM_1__ADD1_ENCODED_PARAMS_MASK 0x00000080L
#define CKSVII2C1_IC_COMP_PARAM_1__RX1_BUFFER_DEPTH_MASK 0x0000FF00L
#define CKSVII2C1_IC_COMP_PARAM_1__TX1_BUFFER_DEPTH_MASK 0x00FF0000L
//CKSVII2C1_IC_COMP_VERSION
#define CKSVII2C1_IC_COMP_VERSION__COMP1_VERSION__SHIFT 0x0
#define CKSVII2C1_IC_COMP_VERSION__COMP1_VERSION_MASK 0xFFFFFFFFL
//CKSVII2C1_IC_COMP_TYPE
#define CKSVII2C1_IC_COMP_TYPE__COMP1_TYPE__SHIFT 0x0
#define CKSVII2C1_IC_COMP_TYPE__COMP1_TYPE_MASK 0xFFFFFFFFL
//SMUIO_PWRMGT
#define SMUIO_PWRMGT__i2c_clk_gate_en__SHIFT 0x0
#define SMUIO_PWRMGT__i2c1_clk_gate_en__SHIFT 0x4
#define SMUIO_PWRMGT__i2c_clk_gate_en_MASK 0x00000001L
#define SMUIO_PWRMGT__i2c1_clk_gate_en_MASK 0x00000010L
// addressBlock: smuio_smuio_rom_SmuSmuioDec
//ROM_CNTL
#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x0
#define ROM_CNTL__READ_MODE__SHIFT 0x1
#define ROM_CNTL__READ_MODE_OVERRIDE__SHIFT 0x3
#define ROM_CNTL__SPI_TIMING_RELAX_SCK__SHIFT 0x4
#define ROM_CNTL__SPI_TIMING_RELAX_SCK_OVERRIDE__SHIFT 0x5
#define ROM_CNTL__FOUR_BYTE_ADDRESS_MODE__SHIFT 0x6
#define ROM_CNTL__DUMMY_CYCLE_NUM__SHIFT 0x8
#define ROM_CNTL__SPI_TIMING_RELAX__SHIFT 0x14
#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE__SHIFT 0x15
#define ROM_CNTL__SPI_FAST_MODE__SHIFT 0x16
#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE__SHIFT 0x17
#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x18
#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE__SHIFT 0x1c
#define ROM_CNTL__ROM_INDEX_ADDRESS_AUTO_INCREASE__SHIFT 0x1d
#define ROM_CNTL__PAD_SAMPLE_MODE__SHIFT 0x1e
#define ROM_CNTL__PAD_SAMPLE_MODE_OVERRIDE__SHIFT 0x1f
#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x00000001L
#define ROM_CNTL__READ_MODE_MASK 0x00000006L
#define ROM_CNTL__READ_MODE_OVERRIDE_MASK 0x00000008L
#define ROM_CNTL__SPI_TIMING_RELAX_SCK_MASK 0x00000010L
#define ROM_CNTL__SPI_TIMING_RELAX_SCK_OVERRIDE_MASK 0x00000020L
#define ROM_CNTL__FOUR_BYTE_ADDRESS_MODE_MASK 0x00000040L
#define ROM_CNTL__DUMMY_CYCLE_NUM_MASK 0x00000F00L
#define ROM_CNTL__SPI_TIMING_RELAX_MASK 0x00100000L
#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE_MASK 0x00200000L
#define ROM_CNTL__SPI_FAST_MODE_MASK 0x00400000L
#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE_MASK 0x00800000L
#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0x0F000000L
#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE_MASK 0x10000000L
#define ROM_CNTL__ROM_INDEX_ADDRESS_AUTO_INCREASE_MASK 0x20000000L
#define ROM_CNTL__PAD_SAMPLE_MODE_MASK 0x40000000L
#define ROM_CNTL__PAD_SAMPLE_MODE_OVERRIDE_MASK 0x80000000L
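// Illustrative only (not part of the generated register description): the
// SHIFT/MASK pairs above are meant for the usual read-modify-write idiom.
// For example, given a raw 32-bit value "rom_cntl" read from the register,
// the dummy cycle count could be extracted and updated like this:
//   u32 cycles = (rom_cntl & ROM_CNTL__DUMMY_CYCLE_NUM_MASK) >>
//                ROM_CNTL__DUMMY_CYCLE_NUM__SHIFT;
//   rom_cntl = (rom_cntl & ~ROM_CNTL__DUMMY_CYCLE_NUM_MASK) |
//              ((cycles << ROM_CNTL__DUMMY_CYCLE_NUM__SHIFT) &
//               ROM_CNTL__DUMMY_CYCLE_NUM_MASK);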
//PAGE_MIRROR_CNTL
#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0
#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19
#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a
#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x1c
#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0x01FFFFFFL
#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x02000000L
#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0x0C000000L
#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x10000000L
//ROM_STATUS
#define ROM_STATUS__ROM_BUSY__SHIFT 0x0
#define ROM_STATUS__ROM_BUSY_MASK 0x00000001L
//CGTT_ROM_CLK_CTRL0
#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
//ROM_INDEX
#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
#define ROM_INDEX__ROM_INDEX_MASK 0x01FFFFFFL
//ROM_DATA
#define ROM_DATA__ROM_DATA__SHIFT 0x0
#define ROM_DATA__ROM_DATA_MASK 0xFFFFFFFFL
//ROM_START
#define ROM_START__ROM_START__SHIFT 0x0
#define ROM_START__ROM_START_MASK 0x01FFFFFFL
//ROM_SW_CNTL
#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0
#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10
#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x13
#define ROM_SW_CNTL__DATA_SIZE_MASK 0x0000FFFFL
#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x00070000L
#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x00080000L
//ROM_SW_STATUS
#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0
#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x00000001L
//ROM_SW_COMMAND
#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0
#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8
#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0x000000FFL
#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xFFFFFF00L
//ROM_SW_DATA_1
#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_2
#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_3
#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_4
#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_5
#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_6
#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_7
#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_8
#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_9
#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_10
#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_11
#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_12
#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_13
#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_14
#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_15
#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_16
#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_17
#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_18
#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_19
#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_20
#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_21
#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_22
#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_23
#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_24
#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_25
#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_26
#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_27
#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_28
#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_29
#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_30
#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_31
#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_32
#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_33
#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_34
#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_35
#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_36
#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_37
#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_38
#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_39
#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_40
#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_41
#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_42
#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_43
#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_44
#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_45
#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_46
#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_47
#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_48
#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_49
#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_50
#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_51
#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_52
#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_53
#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_54
#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_55
#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_56
#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_57
#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_58
#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_59
#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_60
#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_61
#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_62
#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_63
#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xFFFFFFFFL
//ROM_SW_DATA_64
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xFFFFFFFFL
// addressBlock: smuio_smuio_gpio_SmuSmuioDec
//SMU_GPIOPAD_SW_INT_STAT
#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L
//SMU_GPIOPAD_MASK
#define SMU_GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
#define SMU_GPIOPAD_MASK__GPIO_MASK_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_A
#define SMU_GPIOPAD_A__GPIO_A__SHIFT 0x0
#define SMU_GPIOPAD_A__GPIO_A_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_TXIMPSEL
#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL__SHIFT 0x0
#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_EN
#define SMU_GPIOPAD_EN__GPIO_EN__SHIFT 0x0
#define SMU_GPIOPAD_EN__GPIO_EN_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_Y
#define SMU_GPIOPAD_Y__GPIO_Y__SHIFT 0x0
#define SMU_GPIOPAD_Y__GPIO_Y_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_RXEN
#define SMU_GPIOPAD_RXEN__GPIO_RXEN__SHIFT 0x0
#define SMU_GPIOPAD_RXEN__GPIO_RXEN_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_RCVR_SEL0
#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0__SHIFT 0x0
#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_RCVR_SEL1
#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1__SHIFT 0x0
#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_PU_EN
#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_PD_EN
#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_PINSTRAPS
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L
#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L
//DFT_PINSTRAPS
#define DFT_PINSTRAPS__DFT_PINSTRAPS__SHIFT 0x0
#define DFT_PINSTRAPS__DFT_PINSTRAPS_MASK 0x000000FFL
//SMU_GPIOPAD_INT_STAT_EN
#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1FFFFFFFL
#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L
//SMU_GPIOPAD_INT_STAT
#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1FFFFFFFL
#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L
//SMU_GPIOPAD_INT_STAT_AK
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L
#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L
#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L
//SMU_GPIOPAD_INT_EN
#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1FFFFFFFL
#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L
//SMU_GPIOPAD_INT_TYPE
#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1FFFFFFFL
#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L
//SMU_GPIOPAD_INT_POLARITY
#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1FFFFFFFL
#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L
//SMUIO_PCC_GPIO_SELECT
#define SMUIO_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
#define SMUIO_PCC_GPIO_SELECT__GPIO_MASK 0xFFFFFFFFL
//SMU_GPIOPAD_S0
#define SMU_GPIOPAD_S0__GPIO_S0__SHIFT 0x0
#define SMU_GPIOPAD_S0__GPIO_S0_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_S1
#define SMU_GPIOPAD_S1__GPIO_S1__SHIFT 0x0
#define SMU_GPIOPAD_S1__GPIO_S1_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_SCHMEN
#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN__SHIFT 0x0
#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_SCL_EN
#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN__SHIFT 0x0
#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN_MASK 0x7FFFFFFFL
//SMU_GPIOPAD_SDA_EN
#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN__SHIFT 0x0
#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN_MASK 0x7FFFFFFFL
//SMUIO_GPIO_INT0_SELECT
#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT__SHIFT 0x0
#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT_MASK 0xFFFFFFFFL
//SMUIO_GPIO_INT1_SELECT
#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT__SHIFT 0x0
#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT_MASK 0xFFFFFFFFL
//SMUIO_GPIO_INT2_SELECT
#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT__SHIFT 0x0
#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT_MASK 0xFFFFFFFFL
//SMUIO_GPIO_INT3_SELECT
#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT__SHIFT 0x0
#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT_MASK 0xFFFFFFFFL
//SMU_GPIOPAD_MP_INT0_STAT
#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT__SHIFT 0x0
#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT_MASK 0x1FFFFFFFL
//SMU_GPIOPAD_MP_INT1_STAT
#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT__SHIFT 0x0
#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT_MASK 0x1FFFFFFFL
//SMU_GPIOPAD_MP_INT2_STAT
#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT__SHIFT 0x0
#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT_MASK 0x1FFFFFFFL
//SMU_GPIOPAD_MP_INT3_STAT
#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT__SHIFT 0x0
#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT_MASK 0x1FFFFFFFL
//SMIO_INDEX
#define SMIO_INDEX__SW_SMIO_INDEX__SHIFT 0x0
#define SMIO_INDEX__SW_SMIO_INDEX_MASK 0x00000001L
//S0_VID_SMIO_CNTL
#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES__SHIFT 0x0
#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES_MASK 0xFFFFFFFFL
//S1_VID_SMIO_CNTL
#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES__SHIFT 0x0
#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES_MASK 0xFFFFFFFFL
//OPEN_DRAIN_SELECT
#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT__SHIFT 0x0
#define OPEN_DRAIN_SELECT__RESERVED__SHIFT 0x1f
#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT_MASK 0x7FFFFFFFL
#define OPEN_DRAIN_SELECT__RESERVED_MASK 0x80000000L
//SMIO_ENABLE
#define SMIO_ENABLE__SMIO_ENABLE__SHIFT 0x0
#define SMIO_ENABLE__SMIO_ENABLE_MASK 0xFFFFFFFFL
#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2024, Intel Corporation
*
* Author: Rafael J. Wysocki <[email protected]>
*
 * Thermal zone template handling for thermal core testing.
*/
#define pr_fmt(fmt) "thermal-testing: " fmt
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/thermal.h>
#include <linux/workqueue.h>
#include "thermal_testing.h"
#define TT_MAX_FILE_NAME_LENGTH 16
/**
* struct tt_thermal_zone - Testing thermal zone template
*
* Represents a template of a thermal zone that can be used for registering
* a test thermal zone with the thermal core.
*
* @list_node: Node in the list of all testing thermal zone templates.
* @trips: List of trip point templates for this thermal zone template.
* @d_tt_zone: Directory in debugfs representing this template.
* @tz: Test thermal zone based on this template, if present.
* @lock: Mutex for synchronizing changes of this template.
* @ida: IDA for trip point IDs.
* @id: The ID of this template for the debugfs interface.
 * @temp: Initial temperature value to set when the zone is registered.
* @tz_temp: Current thermal zone temperature (after registration).
* @num_trips: Number of trip points in the @trips list.
* @refcount: Reference counter for usage and removal synchronization.
*/
struct tt_thermal_zone {
struct list_head list_node;
struct list_head trips;
struct dentry *d_tt_zone;
struct thermal_zone_device *tz;
struct mutex lock;
struct ida ida;
int id;
int temp;
int tz_temp;
unsigned int num_trips;
unsigned int refcount;
};
DEFINE_GUARD(tt_zone, struct tt_thermal_zone *, mutex_lock(&_T->lock), mutex_unlock(&_T->lock))
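/*
 * A minimal usage sketch for the guard defined above (illustrative only):
 * instantiating it at the top of a scope takes tt_zone->lock and releases
 * it automatically when the scope is left, so early returns need no
 * explicit mutex_unlock(), e.g.
 *
 *	guard(tt_zone)(tt_zone);
 *	if (!tt_zone->tz)
 *		return -EBUSY;
 */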
/**
* struct tt_trip - Testing trip point template
*
* Represents a template of a trip point to be used for populating a trip point
* during the registration of a thermal zone based on a given zone template.
*
* @list_node: Node in the list of all trip templates in the zone template.
 * @trip: Trip point data to use for thermal zone registration.
* @id: The ID of this trip template for the debugfs interface.
*/
struct tt_trip {
struct list_head list_node;
struct thermal_trip trip;
int id;
};
/*
 * It is both questionable and potentially problematic from the synchronization
 * perspective to attempt to manipulate debugfs from within a debugfs file
 * "write" operation, so auxiliary work items are used for that. The majority
 * of zone-related command functions have a part that runs from a workqueue and
 * makes changes in debugfs, among other things.
*/
struct tt_work {
struct work_struct work;
struct tt_thermal_zone *tt_zone;
struct tt_trip *tt_trip;
};
static inline struct tt_work *tt_work_of_work(struct work_struct *work)
{
return container_of(work, struct tt_work, work);
}
static LIST_HEAD(tt_thermal_zones);
static DEFINE_IDA(tt_thermal_zones_ida);
static DEFINE_MUTEX(tt_thermal_zones_lock);
static int tt_int_get(void *data, u64 *val)
{
*val = *(int *)data;
return 0;
}
static int tt_int_set(void *data, u64 val)
{
if ((int)val < THERMAL_TEMP_INVALID)
return -EINVAL;
*(int *)data = val;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(tt_int_attr, tt_int_get, tt_int_set, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(tt_unsigned_int_attr, tt_int_get, tt_int_set, "%llu\n");
static int tt_zone_tz_temp_get(void *data, u64 *val)
{
struct tt_thermal_zone *tt_zone = data;
guard(tt_zone)(tt_zone);
if (!tt_zone->tz)
return -EBUSY;
*val = tt_zone->tz_temp;
return 0;
}
static int tt_zone_tz_temp_set(void *data, u64 val)
{
struct tt_thermal_zone *tt_zone = data;
guard(tt_zone)(tt_zone);
if (!tt_zone->tz)
return -EBUSY;
WRITE_ONCE(tt_zone->tz_temp, val);
thermal_zone_device_update(tt_zone->tz, THERMAL_EVENT_TEMP_SAMPLE);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(tt_zone_tz_temp_attr, tt_zone_tz_temp_get,
tt_zone_tz_temp_set, "%lld\n");
static void tt_zone_free_trips(struct tt_thermal_zone *tt_zone)
{
struct tt_trip *tt_trip, *aux;
list_for_each_entry_safe(tt_trip, aux, &tt_zone->trips, list_node) {
list_del(&tt_trip->list_node);
ida_free(&tt_zone->ida, tt_trip->id);
kfree(tt_trip);
}
}
static void tt_zone_free(struct tt_thermal_zone *tt_zone)
{
tt_zone_free_trips(tt_zone);
ida_free(&tt_thermal_zones_ida, tt_zone->id);
ida_destroy(&tt_zone->ida);
kfree(tt_zone);
}
static void tt_add_tz_work_fn(struct work_struct *work)
{
struct tt_work *tt_work = tt_work_of_work(work);
struct tt_thermal_zone *tt_zone = tt_work->tt_zone;
char f_name[TT_MAX_FILE_NAME_LENGTH];
kfree(tt_work);
snprintf(f_name, TT_MAX_FILE_NAME_LENGTH, "tz%d", tt_zone->id);
tt_zone->d_tt_zone = debugfs_create_dir(f_name, d_testing);
if (IS_ERR(tt_zone->d_tt_zone)) {
tt_zone_free(tt_zone);
return;
}
debugfs_create_file_unsafe("temp", 0600, tt_zone->d_tt_zone, tt_zone,
&tt_zone_tz_temp_attr);
debugfs_create_file_unsafe("init_temp", 0600, tt_zone->d_tt_zone,
&tt_zone->temp, &tt_int_attr);
guard(mutex)(&tt_thermal_zones_lock);
list_add_tail(&tt_zone->list_node, &tt_thermal_zones);
}
int tt_add_tz(void)
{
struct tt_thermal_zone *tt_zone __free(kfree);
struct tt_work *tt_work __free(kfree) = NULL;
int ret;
tt_zone = kzalloc(sizeof(*tt_zone), GFP_KERNEL);
if (!tt_zone)
return -ENOMEM;
tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
if (!tt_work)
return -ENOMEM;
INIT_LIST_HEAD(&tt_zone->trips);
mutex_init(&tt_zone->lock);
ida_init(&tt_zone->ida);
tt_zone->temp = THERMAL_TEMP_INVALID;
ret = ida_alloc(&tt_thermal_zones_ida, GFP_KERNEL);
if (ret < 0)
return ret;
tt_zone->id = ret;
INIT_WORK(&tt_work->work, tt_add_tz_work_fn);
tt_work->tt_zone = no_free_ptr(tt_zone);
schedule_work(&(no_free_ptr(tt_work)->work));
return 0;
}
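/*
 * Note on the pattern above (editorial sketch): tt_zone and tt_work are
 * allocated with __free(kfree) cleanup, and no_free_ptr() transfers
 * ownership out of the cleanup scope only once all failure paths are
 * behind us, so the error returns free both allocations while the success
 * path hands them to the work item without double-free risk.
 */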
static void tt_del_tz_work_fn(struct work_struct *work)
{
struct tt_work *tt_work = tt_work_of_work(work);
struct tt_thermal_zone *tt_zone = tt_work->tt_zone;
kfree(tt_work);
debugfs_remove(tt_zone->d_tt_zone);
tt_zone_free(tt_zone);
}
static void tt_zone_unregister_tz(struct tt_thermal_zone *tt_zone)
{
guard(tt_zone)(tt_zone);
if (tt_zone->tz) {
thermal_zone_device_unregister(tt_zone->tz);
tt_zone->tz = NULL;
}
}
int tt_del_tz(const char *arg)
{
struct tt_work *tt_work __free(kfree) = NULL;
struct tt_thermal_zone *tt_zone, *aux;
int ret;
int id;
ret = sscanf(arg, "%d", &id);
if (ret != 1)
return -EINVAL;
tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
if (!tt_work)
return -ENOMEM;
guard(mutex)(&tt_thermal_zones_lock);
ret = -EINVAL;
list_for_each_entry_safe(tt_zone, aux, &tt_thermal_zones, list_node) {
if (tt_zone->id == id) {
if (tt_zone->refcount) {
ret = -EBUSY;
} else {
list_del(&tt_zone->list_node);
ret = 0;
}
break;
}
}
if (ret)
return ret;
tt_zone_unregister_tz(tt_zone);
INIT_WORK(&tt_work->work, tt_del_tz_work_fn);
tt_work->tt_zone = tt_zone;
schedule_work(&(no_free_ptr(tt_work)->work));
return 0;
}
static struct tt_thermal_zone *tt_get_tt_zone(const char *arg)
{
struct tt_thermal_zone *tt_zone;
int ret, id;
ret = sscanf(arg, "%d", &id);
if (ret != 1)
return ERR_PTR(-EINVAL);
guard(mutex)(&tt_thermal_zones_lock);
list_for_each_entry(tt_zone, &tt_thermal_zones, list_node) {
if (tt_zone->id == id) {
tt_zone->refcount++;
return tt_zone;
}
}
return ERR_PTR(-EINVAL);
}
static void tt_put_tt_zone(struct tt_thermal_zone *tt_zone)
{
guard(mutex)(&tt_thermal_zones_lock);
tt_zone->refcount--;
}
DEFINE_FREE(put_tt_zone, struct tt_thermal_zone *,
if (!IS_ERR_OR_NULL(_T)) tt_put_tt_zone(_T))
static void tt_zone_add_trip_work_fn(struct work_struct *work)
{
struct tt_work *tt_work = tt_work_of_work(work);
struct tt_thermal_zone *tt_zone = tt_work->tt_zone;
struct tt_trip *tt_trip = tt_work->tt_trip;
char d_name[TT_MAX_FILE_NAME_LENGTH];
kfree(tt_work);
snprintf(d_name, TT_MAX_FILE_NAME_LENGTH, "trip_%d_temp", tt_trip->id);
debugfs_create_file_unsafe(d_name, 0600, tt_zone->d_tt_zone,
&tt_trip->trip.temperature, &tt_int_attr);
snprintf(d_name, TT_MAX_FILE_NAME_LENGTH, "trip_%d_hyst", tt_trip->id);
debugfs_create_file_unsafe(d_name, 0600, tt_zone->d_tt_zone,
&tt_trip->trip.hysteresis, &tt_unsigned_int_attr);
tt_put_tt_zone(tt_zone);
}
int tt_zone_add_trip(const char *arg)
{
struct tt_thermal_zone *tt_zone __free(put_tt_zone) = NULL;
struct tt_trip *tt_trip __free(kfree) = NULL;
struct tt_work *tt_work __free(kfree);
int id;
tt_work = kzalloc(sizeof(*tt_work), GFP_KERNEL);
if (!tt_work)
return -ENOMEM;
tt_trip = kzalloc(sizeof(*tt_trip), GFP_KERNEL);
if (!tt_trip)
return -ENOMEM;
tt_zone = tt_get_tt_zone(arg);
if (IS_ERR(tt_zone))
return PTR_ERR(tt_zone);
id = ida_alloc(&tt_zone->ida, GFP_KERNEL);
if (id < 0)
return id;
tt_trip->trip.type = THERMAL_TRIP_ACTIVE;
tt_trip->trip.temperature = THERMAL_TEMP_INVALID;
tt_trip->trip.flags = THERMAL_TRIP_FLAG_RW;
tt_trip->id = id;
guard(tt_zone)(tt_zone);
list_add_tail(&tt_trip->list_node, &tt_zone->trips);
tt_zone->num_trips++;
INIT_WORK(&tt_work->work, tt_zone_add_trip_work_fn);
tt_work->tt_zone = no_free_ptr(tt_zone);
tt_work->tt_trip = no_free_ptr(tt_trip);
schedule_work(&(no_free_ptr(tt_work)->work));
return 0;
}
static int tt_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct tt_thermal_zone *tt_zone = thermal_zone_device_priv(tz);
*temp = READ_ONCE(tt_zone->tz_temp);
if (*temp < THERMAL_TEMP_INVALID)
return -ENODATA;
return 0;
}
static struct thermal_zone_device_ops tt_zone_ops = {
.get_temp = tt_zone_get_temp,
};
static int tt_zone_register_tz(struct tt_thermal_zone *tt_zone)
{
struct thermal_trip *trips __free(kfree) = NULL;
struct thermal_zone_device *tz;
struct tt_trip *tt_trip;
int i;
guard(tt_zone)(tt_zone);
if (tt_zone->tz)
return -EINVAL;
trips = kcalloc(tt_zone->num_trips, sizeof(*trips), GFP_KERNEL);
if (!trips)
return -ENOMEM;
i = 0;
list_for_each_entry(tt_trip, &tt_zone->trips, list_node)
trips[i++] = tt_trip->trip;
tt_zone->tz_temp = tt_zone->temp;
tz = thermal_zone_device_register_with_trips("test_tz", trips, i, tt_zone,
&tt_zone_ops, NULL, 0, 0);
if (IS_ERR(tz))
return PTR_ERR(tz);
tt_zone->tz = tz;
thermal_zone_device_enable(tz);
return 0;
}
int tt_zone_reg(const char *arg)
{
struct tt_thermal_zone *tt_zone __free(put_tt_zone);
tt_zone = tt_get_tt_zone(arg);
if (IS_ERR(tt_zone))
return PTR_ERR(tt_zone);
return tt_zone_register_tz(tt_zone);
}
int tt_zone_unreg(const char *arg)
{
struct tt_thermal_zone *tt_zone __free(put_tt_zone);
tt_zone = tt_get_tt_zone(arg);
if (IS_ERR(tt_zone))
return PTR_ERR(tt_zone);
tt_zone_unregister_tz(tt_zone);
return 0;
}
void tt_zone_cleanup(void)
{
struct tt_thermal_zone *tt_zone, *aux;
list_for_each_entry_safe(tt_zone, aux, &tt_thermal_zones, list_node) {
tt_zone_unregister_tz(tt_zone);
list_del(&tt_zone->list_node);
tt_zone_free(tt_zone);
}
}
|
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
* Copyright 2022 Toradex
*/
/dts-v1/;
#include "imx7d-colibri.dtsi"
#include "imx7-colibri-iris-v2.dtsi"
/ {
model = "Toradex Colibri iMX7D on Iris V2 Carrier Board";
compatible = "toradex,colibri-imx7d-iris-v2",
"toradex,colibri-imx7d",
"fsl,imx7d";
};
&ad7879_ts {
status = "okay";
};
&atmel_mxt_ts {
status = "okay";
};
&backlight {
status = "okay";
};
&gpio2 {
/*
* This switches the LVDS transceiver to VESA color mapping mode.
*/
lvds-color-map-hog {
gpio-hog;
gpios = <13 GPIO_ACTIVE_HIGH>; /* SODIMM 95 */
line-name = "LVDS_COLOR_MAP";
output-low;
};
};
&gpio7 {
/*
* This switches the LVDS transceiver to the 24-bit RGB mode.
*/
lvds-rgb-mode-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>; /* SODIMM 63 */
line-name = "LVDS_RGB_MODE";
output-low;
};
/*
* This switches the LVDS transceiver to the single-channel
* output mode.
*/
lvds-ch-mode-hog {
gpio-hog;
gpios = <3 GPIO_ACTIVE_HIGH>; /* SODIMM 55 */
line-name = "LVDS_CH_MODE";
output-high;
};
/* This turns the LVDS transceiver on */
lvds-power-on-hog {
gpio-hog;
gpios = <11 GPIO_ACTIVE_HIGH>; /* SODIMM 99 */
line-name = "LVDS_POWER_ON";
output-high;
};
};
&lcdif {
status = "okay";
};
&panel_dpi {
status = "okay";
};
/* Colibri USBH */
&usbotg2 {
disable-over-current;
status = "okay";
};
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2014 Oleksij Rempel <[email protected]>
*/
#ifndef _ALPHASCALE_ASM9260_ICOLL_H
#define _ALPHASCALE_ASM9260_ICOLL_H
#define ASM9260_NUM_IRQS 64
/*
 * This device provides 4 offsets for each register:
 * 0x0 - plain read/write mode
 * 0x4 - set mode (OR logic)
 * 0x8 - clear mode (AND-NOT logic)
 * 0xc - toggle mode (XOR logic)
*/
#define ASM9260_HW_ICOLL_VECTOR 0x0000
/*
* bits 31:2
* This register presents the vector address for the interrupt currently
* active on the CPU IRQ input. Writing to this register notifies the
* interrupt collector that the interrupt service routine for the current
* interrupt has been entered.
* The exception trap should have a LDPC instruction from this address:
* LDPC ASM9260_HW_ICOLL_VECTOR_ADDR; IRQ exception at 0xffff0018
*/
/*
* The Interrupt Collector Level Acknowledge Register is used by software to
* indicate the completion of an interrupt on a specific level.
* This register is written at the very end of an interrupt service routine. If
* nesting is used then the CPU irq must be turned on before writing to this
* register to avoid a race condition in the CPU interrupt hardware.
*/
#define ASM9260_HW_ICOLL_LEVELACK 0x0010
#define ASM9260_BM_LEVELn(nr) BIT(nr)
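/*
 * Illustrative only: completing an interrupt at a given level is a single
 * write, e.g. (assuming a hypothetical "icoll_base" register mapping)
 *
 *	writel(ASM9260_BM_LEVELn(level), icoll_base + ASM9260_HW_ICOLL_LEVELACK);
 */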
#define ASM9260_HW_ICOLL_CTRL 0x0020
/*
* ASM9260_BM_CTRL_SFTRST and ASM9260_BM_CTRL_CLKGATE are not available on
* asm9260.
*/
#define ASM9260_BM_CTRL_SFTRST BIT(31)
#define ASM9260_BM_CTRL_CLKGATE BIT(30)
/* disable interrupt level nesting */
#define ASM9260_BM_CTRL_NO_NESTING BIT(19)
/*
 * Set this bit to one to enable the RISC32-style read side effect associated with
* the vector address register. In this mode, interrupt in-service is signaled
* by the read of the ASM9260_HW_ICOLL_VECTOR register to acquire the interrupt
* vector address. Set this bit to zero for normal operation, in which the ISR
* signals in-service explicitly by means of a write to the
* ASM9260_HW_ICOLL_VECTOR register.
* 0 - Must Write to Vector register to go in-service.
* 1 - Go in-service as a read side effect
*/
#define ASM9260_BM_CTRL_ARM_RSE_MODE BIT(18)
#define ASM9260_BM_CTRL_IRQ_ENABLE BIT(16)
#define ASM9260_HW_ICOLL_STAT_OFFSET 0x0030
/*
* bits 5:0
* Vector number of current interrupt. Multiply by 4 and add to vector base
* address to obtain the value in ASM9260_HW_ICOLL_VECTOR.
*/
/*
 * RAW0 and RAW1 provide a read-only view of the raw interrupt request lines
 * coming from various parts of the chip. Their purpose is to improve diagnostic
* observability.
*/
#define ASM9260_HW_ICOLL_RAW0 0x0040
#define ASM9260_HW_ICOLL_RAW1 0x0050
#define ASM9260_HW_ICOLL_INTERRUPT0 0x0060
#define ASM9260_HW_ICOLL_INTERRUPTn(n) (0x0060 + ((n) >> 2) * 0x10)
/*
* WARNING: Modifying the priority of an enabled interrupt may result in
* undefined behavior.
*/
#define ASM9260_BM_INT_PRIORITY_MASK 0x3
#define ASM9260_BM_INT_ENABLE BIT(2)
#define ASM9260_BM_INT_SOFTIRQ BIT(3)
#define ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n) (((n) & 0x3) << 3)
#define ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n) (1 << (2 + \
ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n)))
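/*
 * Illustrative only: with the set/clear/toggle banks described at the top of
 * this file, interrupt n can be enabled without a read-modify-write cycle by
 * writing its enable bit to the OR ("set") view of its INTERRUPTn register,
 * e.g. (assuming a hypothetical "icoll_base" register mapping)
 *
 *	writel(ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n),
 *	       icoll_base + ASM9260_HW_ICOLL_INTERRUPTn(n) + 0x4);
 */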
#define ASM9260_HW_ICOLL_VBASE 0x0160
/*
* bits 31:2
* This bitfield holds the upper 30 bits of the base address of the vector
* table.
*/
#define ASM9260_HW_ICOLL_CLEAR0 0x01d0
#define ASM9260_HW_ICOLL_CLEAR1 0x01e0
#define ASM9260_HW_ICOLL_CLEARn(n) ((((n) >> 5) * 0x10) \
				+ SET_REG)
#define ASM9260_BM_CLEAR_BIT(n) BIT((n) & 0x1f)
/* Scratchpad */
#define ASM9260_HW_ICOLL_UNDEF_VECTOR 0x01f0
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (c) International Business Machines Corp., 2000,2009
* Modified by Steve French ([email protected])
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "cifs_fs_sb.h"
#include "cifs_unicode.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifs_debug.h"
int cifs_remap(struct cifs_sb_info *cifs_sb)
{
int map_type;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
map_type = SFM_MAP_UNI_RSVD;
else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
map_type = SFU_MAP_UNI_RSVD;
else
map_type = NO_MAP_UNI_RSVD;
return map_type;
}
/* Convert character using the SFU - "Services for Unix" remapping range */
static bool
convert_sfu_char(const __u16 src_char, char *target)
{
/*
* BB: Cannot handle remapping UNI_SLASH until all the calls to
* build_path_from_dentry are modified, as they use slash as
* separator.
*/
switch (src_char) {
case UNI_COLON:
*target = ':';
break;
case UNI_ASTERISK:
*target = '*';
break;
case UNI_QUESTION:
*target = '?';
break;
case UNI_PIPE:
*target = '|';
break;
case UNI_GRTRTHAN:
*target = '>';
break;
case UNI_LESSTHAN:
*target = '<';
break;
default:
return false;
}
return true;
}
/* Convert character using the SFM - "Services for Mac" remapping range */
static bool
convert_sfm_char(const __u16 src_char, char *target)
{
if (src_char >= 0xF001 && src_char <= 0xF01F) {
*target = src_char - 0xF000;
return true;
}
switch (src_char) {
case SFM_COLON:
*target = ':';
break;
case SFM_DOUBLEQUOTE:
*target = '"';
break;
case SFM_ASTERISK:
*target = '*';
break;
case SFM_QUESTION:
*target = '?';
break;
case SFM_PIPE:
*target = '|';
break;
case SFM_GRTRTHAN:
*target = '>';
break;
case SFM_LESSTHAN:
*target = '<';
break;
case SFM_SPACE:
*target = ' ';
break;
case SFM_PERIOD:
*target = '.';
break;
default:
return false;
}
return true;
}
/*
* cifs_mapchar - convert a host-endian char to proper char in codepage
 * @target - where the converted character should be copied
 * @from - pointer to the 2-byte host-endian source character
 * @cp - codepage to which the character should be converted
 * @maptype - how should the 7 NTFS/SMB reserved characters be mapped to UCS2?
*
* This function handles the conversion of a single character. It is the
* responsibility of the caller to ensure that the target buffer is large
* enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
*/
static int
cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
int maptype)
{
int len = 1;
__u16 src_char;
src_char = *from;
if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target))
return len;
else if ((maptype == SFU_MAP_UNI_RSVD) &&
convert_sfu_char(src_char, target))
return len;
/* if character not one of seven in special remap set */
len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
if (len <= 0)
goto surrogate_pair;
return len;
surrogate_pair:
/* convert SURROGATE_PAIR and IVS */
if (strcmp(cp->charset, "utf8"))
goto unknown;
len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
if (len <= 0)
goto unknown;
return len;
unknown:
*target = '?';
len = 1;
return len;
}
/*
* cifs_from_utf16 - convert utf16le string to local charset
* @to - destination buffer
* @from - source buffer
* @tolen - destination buffer size (in bytes)
* @fromlen - source buffer size (in bytes)
* @codepage - codepage to which characters should be converted
 * @map_type - should characters be remapped according to the mapchars option?
*
* Convert a little-endian utf16le string (as sent by the server) to a string
* in the provided codepage. The tolen and fromlen parameters are to ensure
* that the code doesn't walk off of the end of the buffer (which is always
* a danger if the alignment of the source buffer is off). The destination
* string is always properly null terminated and fits in the destination
* buffer. Returns the length of the destination string in bytes (including
* null terminator).
*
 * Note that some Windows versions actually send multiword UTF-16 characters
 * (surrogate pairs) instead of plain UCS-2. The Linux nls routines, however,
 * aren't able to deal with those characters properly. In the event that we
 * get some of those characters, they won't be translated properly.
*/
int
cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
const struct nls_table *codepage, int map_type)
{
int i, charlen, safelen;
int outlen = 0;
int nullsize = nls_nullsize(codepage);
int fromwords = fromlen / 2;
char tmp[NLS_MAX_CHARSET_SIZE];
__u16 ftmp[3]; /* 3 elements x 2 bytes = 6 bytes of UTF-16 */
/*
* because the chars can be of varying widths, we need to take care
* not to overflow the destination buffer when we get close to the
* end of it. Until we get to this offset, we don't need to check
* for overflow however.
*/
safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
for (i = 0; i < fromwords; i++) {
ftmp[0] = get_unaligned_le16(&from[i]);
if (ftmp[0] == 0)
break;
if (i + 1 < fromwords)
ftmp[1] = get_unaligned_le16(&from[i + 1]);
else
ftmp[1] = 0;
if (i + 2 < fromwords)
ftmp[2] = get_unaligned_le16(&from[i + 2]);
else
ftmp[2] = 0;
/*
* check to see if converting this character might make the
* conversion bleed into the null terminator
*/
if (outlen >= safelen) {
charlen = cifs_mapchar(tmp, ftmp, codepage, map_type);
if ((outlen + charlen) > (tolen - nullsize))
break;
}
/* put converted char into 'to' buffer */
charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
outlen += charlen;
/*
 * charlen is the number of UTF-8 bytes produced for one character:
 * a surrogate pair (4 bytes of UTF-16) yields charlen == 4, and an
 * IVS sequence (2 UTF-16 pairs) yields 7-8 bytes of UTF-8
 * (charlen == 3+4 or 4+4), so skip the extra source words that were
 * already consumed.
 */
if (charlen == 4)
i++;
else if (charlen >= 5)
/* 5-6bytes UTF-8 */
i += 2;
}
/* properly null-terminate string */
for (i = 0; i < nullsize; i++)
to[outlen++] = 0;
return outlen;
}
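/*
 * Illustrative call sequence (a sketch; the buffer names are hypothetical):
 * a caller converting a wire-format name while honoring the mount's
 * remapping choice would do something like
 *
 *	outlen = cifs_from_utf16(dst, (__le16 *)src, dstlen, srclen,
 *				 codepage, cifs_remap(cifs_sb));
 *
 * where dst is at least cifs_utf16_bytes(...) plus nls_nullsize(codepage)
 * bytes long.
 */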
/*
* NAME: cifs_strtoUTF16()
*
* FUNCTION: Convert character string to unicode string
*
*/
int
cifs_strtoUTF16(__le16 *to, const char *from, int len,
const struct nls_table *codepage)
{
int charlen;
int i;
wchar_t wchar_to; /* needed to quiet sparse */
/* special case for utf8 to handle no plane0 chars */
if (!strcmp(codepage->charset, "utf8")) {
/*
 * Convert utf8 -> utf16. We assume we have enough space, as the
 * caller should have ensured that the conversion does not overflow
 * the destination; len is the length in wchar_t units (16 bits).
*/
i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
(wchar_t *) to, len);
/* if success terminate and exit */
if (i >= 0)
goto success;
/*
* if fails fall back to UCS encoding as this
* function should not return negative values
* currently can fail only if source contains
* invalid encoded characters
*/
}
for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
charlen = codepage->char2uni(from, len, &wchar_to);
if (charlen < 1) {
cifs_dbg(VFS, "strtoUTF16: char2uni of 0x%x returned %d\n",
*from, charlen);
/* A question mark */
wchar_to = 0x003f;
charlen = 1;
}
put_unaligned_le16(wchar_to, &to[i]);
}
success:
put_unaligned_le16(0, &to[i]);
return i;
}
/*
* cifs_utf16_bytes - how long will a string be after conversion?
 * @from - pointer to input string
* @maxbytes - don't go past this many bytes of input string
* @codepage - destination codepage
*
* Walk a utf16le string and return the number of bytes that the string will
* be after being converted to the given charset, not including any null
* termination required. Don't walk past maxbytes in the source buffer.
*/
int
cifs_utf16_bytes(const __le16 *from, int maxbytes,
const struct nls_table *codepage)
{
int i;
int charlen, outlen = 0;
int maxwords = maxbytes / 2;
char tmp[NLS_MAX_CHARSET_SIZE];
__u16 ftmp[3];
for (i = 0; i < maxwords; i++) {
ftmp[0] = get_unaligned_le16(&from[i]);
if (ftmp[0] == 0)
break;
if (i + 1 < maxwords)
ftmp[1] = get_unaligned_le16(&from[i + 1]);
else
ftmp[1] = 0;
if (i + 2 < maxwords)
ftmp[2] = get_unaligned_le16(&from[i + 2]);
else
ftmp[2] = 0;
charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD);
outlen += charlen;
}
return outlen;
}
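/*
 * Usage sketch (illustrative, not part of the original file): sizing a
 * destination buffer with cifs_utf16_bytes() and nls_nullsize() before
 * calling cifs_from_utf16(). This mirrors what cifs_strndup_from_utf16()
 * below does internally; the helper name is hypothetical.
 */
static inline char *example_utf16_to_local(const __le16 *src, int srclen,
					   const struct nls_table *cp)
{
	/* worst-case output size plus room for the null terminator */
	int len = cifs_utf16_bytes(src, srclen, cp) + nls_nullsize(cp);
	char *buf = kmalloc(len, GFP_KERNEL);
	if (buf)
		cifs_from_utf16(buf, src, len, srclen, cp, NO_MAP_UNI_RSVD);
	return buf;
}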
/*
* cifs_strndup_from_utf16 - copy a string from wire format to the local
* codepage
* @src - source string
* @maxlen - don't walk past this many bytes in the source string
* @is_unicode - is this a unicode string?
* @codepage - destination codepage
*
* Take a string given by the server, convert it to the local codepage and
* put it in a new buffer. Returns a pointer to the new string or NULL on
* error.
*/
char *
cifs_strndup_from_utf16(const char *src, const int maxlen,
const bool is_unicode, const struct nls_table *codepage)
{
int len;
char *dst;
if (is_unicode) {
len = cifs_utf16_bytes((__le16 *) src, maxlen, codepage);
len += nls_nullsize(codepage);
dst = kmalloc(len, GFP_KERNEL);
if (!dst)
return NULL;
cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
NO_MAP_UNI_RSVD);
} else {
dst = kstrndup(src, maxlen, GFP_KERNEL);
}
return dst;
}
static __le16 convert_to_sfu_char(char src_char)
{
__le16 dest_char;
switch (src_char) {
case ':':
dest_char = cpu_to_le16(UNI_COLON);
break;
case '*':
dest_char = cpu_to_le16(UNI_ASTERISK);
break;
case '?':
dest_char = cpu_to_le16(UNI_QUESTION);
break;
case '<':
dest_char = cpu_to_le16(UNI_LESSTHAN);
break;
case '>':
dest_char = cpu_to_le16(UNI_GRTRTHAN);
break;
case '|':
dest_char = cpu_to_le16(UNI_PIPE);
break;
default:
dest_char = 0;
}
return dest_char;
}
static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
{
__le16 dest_char;
if (src_char >= 0x01 && src_char <= 0x1F) {
dest_char = cpu_to_le16(src_char + 0xF000);
return dest_char;
}
switch (src_char) {
case ':':
dest_char = cpu_to_le16(SFM_COLON);
break;
case '"':
dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
break;
case '*':
dest_char = cpu_to_le16(SFM_ASTERISK);
break;
case '?':
dest_char = cpu_to_le16(SFM_QUESTION);
break;
case '<':
dest_char = cpu_to_le16(SFM_LESSTHAN);
break;
case '>':
dest_char = cpu_to_le16(SFM_GRTRTHAN);
break;
case '|':
dest_char = cpu_to_le16(SFM_PIPE);
break;
case '.':
if (end_of_string)
dest_char = cpu_to_le16(SFM_PERIOD);
else
dest_char = 0;
break;
case ' ':
if (end_of_string)
dest_char = cpu_to_le16(SFM_SPACE);
else
dest_char = 0;
break;
default:
dest_char = 0;
}
return dest_char;
}
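/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * combining the SFM remap with a plain codepage conversion, the way
 * cifsConvertToUTF16() below does — reserved characters map into the SFM
 * range, everything else goes through char2uni(), with '?' as the
 * last-resort fallback.
 */
static inline __le16 example_sfm_or_codepage(char c, bool end_of_string,
					     const struct nls_table *cp)
{
	__le16 mapped = convert_to_sfm_char(c, end_of_string);
	wchar_t tmp;
	if (mapped)
		return mapped;
	/* not a reserved character: convert through the codepage */
	if (cp->char2uni(&c, 1, &tmp) > 0)
		return cpu_to_le16(tmp);
	return cpu_to_le16(0x003f);	/* '?', as in the fallback above */
}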
/*
 * Convert 16 bit Unicode pathname to wire format from string in current code
 * page. Conversion may involve remapping up to the six characters that are
 * only legal in POSIX-like OS (if they are present in the string). Path
 * names are little endian 16 bit Unicode on the wire.
 */
int
cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
const struct nls_table *cp, int map_chars)
{
int i, charlen;
int j = 0;
char src_char;
__le16 dst_char;
wchar_t tmp;
wchar_t *wchar_to; /* UTF-16 */
int ret;
unicode_t u;
if (map_chars == NO_MAP_UNI_RSVD)
return cifs_strtoUTF16(target, source, PATH_MAX, cp);
wchar_to = kzalloc(6, GFP_KERNEL);
for (i = 0; i < srclen; j++) {
src_char = source[i];
charlen = 1;
/* check if end of string */
if (src_char == 0)
goto ctoUTF16_out;
/* see if we must remap this char */
if (map_chars == SFU_MAP_UNI_RSVD)
dst_char = convert_to_sfu_char(src_char);
else if (map_chars == SFM_MAP_UNI_RSVD) {
bool end_of_string;
			/*
			 * Remap spaces and periods found at the end of every
			 * component of the path. The special cases of '.' and
			 * '..' need to be handled because of symlinks.
			 * They are treated as non-end-of-string to avoid
			 * remapping and breaking symlinks pointing to . or ..
			 */
if ((i == 0 || source[i-1] == '\\') &&
source[i] == '.' &&
(i == srclen-1 || source[i+1] == '\\'))
end_of_string = false; /* "." case */
else if (i >= 1 &&
(i == 1 || source[i-2] == '\\') &&
source[i-1] == '.' &&
source[i] == '.' &&
(i == srclen-1 || source[i+1] == '\\'))
end_of_string = false; /* ".." case */
else if ((i == srclen - 1) || (source[i+1] == '\\'))
end_of_string = true;
else
end_of_string = false;
dst_char = convert_to_sfm_char(src_char, end_of_string);
} else
dst_char = 0;
/*
* FIXME: We can not handle remapping backslash (UNI_SLASH)
* until all the calls to build_path_from_dentry are modified,
* as they use backslash as separator.
*/
if (dst_char == 0) {
charlen = cp->char2uni(source + i, srclen - i, &tmp);
dst_char = cpu_to_le16(tmp);
/*
* if no match, use question mark, which at least in
* some cases serves as wild card
*/
if (charlen > 0)
goto ctoUTF16;
/* convert SURROGATE_PAIR */
if (strcmp(cp->charset, "utf8") || !wchar_to)
goto unknown;
if (*(source + i) & 0x80) {
charlen = utf8_to_utf32(source + i, 6, &u);
if (charlen < 0)
goto unknown;
} else
goto unknown;
ret = utf8s_to_utf16s(source + i, charlen,
UTF16_LITTLE_ENDIAN,
wchar_to, 6);
if (ret < 0)
goto unknown;
i += charlen;
dst_char = cpu_to_le16(*wchar_to);
			if (charlen <= 3)
				/* 1-3 byte UTF-8 maps to one UTF-16 word */
				put_unaligned(dst_char, &target[j]);
			else if (charlen == 4) {
				/* 4-byte UTF-8 (surrogate pair) maps to two
				 * UTF-16 words; 7-8 byte IVS sequences are
				 * split across two conversions (3+4 or 4+4
				 * bytes).
				 */
				put_unaligned(dst_char, &target[j]);
				dst_char = cpu_to_le16(*(wchar_to + 1));
				j++;
				put_unaligned(dst_char, &target[j]);
			} else if (charlen >= 5) {
				/* 5-6 byte UTF-8 (IVS) maps to three UTF-16
				 * words
				 */
				put_unaligned(dst_char, &target[j]);
				dst_char = cpu_to_le16(*(wchar_to + 1));
				j++;
				put_unaligned(dst_char, &target[j]);
				dst_char = cpu_to_le16(*(wchar_to + 2));
				j++;
				put_unaligned(dst_char, &target[j]);
			}
continue;
unknown:
dst_char = cpu_to_le16(0x003f);
charlen = 1;
}
ctoUTF16:
/*
* character may take more than one byte in the source string,
* but will take exactly two bytes in the target string
*/
i += charlen;
put_unaligned(dst_char, &target[j]);
}
ctoUTF16_out:
put_unaligned(0, &target[j]); /* Null terminate target unicode string */
kfree(wchar_to);
return j;
}
/*
 * cifs_local_to_utf16_bytes - how long will a string be after conversion?
 * @from - pointer to input string
 * @len - don't go past this many bytes of input string
 * @codepage - source codepage
 *
 * Walk a string and return the number of bytes that the string will
 * be after being converted to the given charset, not including any null
 * termination required. Don't walk past len bytes in the source buffer.
 */
static int
cifs_local_to_utf16_bytes(const char *from, int len,
const struct nls_table *codepage)
{
int charlen;
int i;
wchar_t wchar_to;
for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
charlen = codepage->char2uni(from, len, &wchar_to);
/* Failed conversion defaults to a question mark */
if (charlen < 1)
charlen = 1;
}
return 2 * i; /* UTF16 characters are two bytes */
}
/*
* cifs_strndup_to_utf16 - copy a string to wire format from the local codepage
* @src - source string
* @maxlen - don't walk past this many bytes in the source string
* @utf16_len - the length of the allocated string in bytes (including null)
* @cp - source codepage
* @remap - map special chars
*
 * Take a string, convert it from the local codepage to UTF16, and
* put it in a new buffer. Returns a pointer to the new string or NULL on
* error.
*/
__le16 *
cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len,
const struct nls_table *cp, int remap)
{
int len;
__le16 *dst;
len = cifs_local_to_utf16_bytes(src, maxlen, cp);
len += 2; /* NULL */
dst = kmalloc(len, GFP_KERNEL);
if (!dst) {
*utf16_len = 0;
return NULL;
}
cifsConvertToUTF16(dst, src, strlen(src), cp, remap);
*utf16_len = len;
return dst;
}
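/*
 * Round-trip usage sketch (hypothetical, not in the original file):
 * convert a local path to the wire format with SFM remapping, then back.
 * Assumes 'cp' is a valid nls_table (e.g. from load_nls_default());
 * error handling is trimmed for brevity.
 */
static inline void example_path_roundtrip(const char *path,
					  const struct nls_table *cp)
{
	int utf16_len;
	__le16 *wire = cifs_strndup_to_utf16(path, PATH_MAX, &utf16_len,
					     cp, SFM_MAP_UNI_RSVD);
	char *back;
	if (!wire)
		return;
	/* note: the reverse conversion does not undo the SFM remapping */
	back = cifs_strndup_from_utf16((char *)wire, utf16_len, true, cp);
	kfree(back);
	kfree(wire);
}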
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2020 Kévin L'hôpital <[email protected]>
* Copyright 2020-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include "sun8i_a83t_dphy.h"
#include "sun8i_a83t_mipi_csi2.h"
#include "sun8i_a83t_mipi_csi2_reg.h"
/* Format */
static const struct sun8i_a83t_mipi_csi2_format
sun8i_a83t_mipi_csi2_formats[] = {
{
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.data_type = MIPI_CSI2_DT_RAW8,
.bpp = 8,
},
{
.mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
.data_type = MIPI_CSI2_DT_RAW8,
.bpp = 8,
},
{
.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
.data_type = MIPI_CSI2_DT_RAW8,
.bpp = 8,
},
{
.mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
.data_type = MIPI_CSI2_DT_RAW8,
.bpp = 8,
},
{
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.data_type = MIPI_CSI2_DT_RAW10,
.bpp = 10,
},
{
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.data_type = MIPI_CSI2_DT_RAW10,
.bpp = 10,
},
{
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.data_type = MIPI_CSI2_DT_RAW10,
.bpp = 10,
},
{
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.data_type = MIPI_CSI2_DT_RAW10,
.bpp = 10,
},
};
static const struct sun8i_a83t_mipi_csi2_format *
sun8i_a83t_mipi_csi2_format_find(u32 mbus_code)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(sun8i_a83t_mipi_csi2_formats); i++)
if (sun8i_a83t_mipi_csi2_formats[i].mbus_code == mbus_code)
return &sun8i_a83t_mipi_csi2_formats[i];
return NULL;
}
/* Controller */
static void
sun8i_a83t_mipi_csi2_init(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
struct regmap *regmap = csi2_dev->regmap;
	/*
	 * The Allwinner BSP sets various magic values on a bunch of registers.
	 * This initialization appears to be necessary: if it is skipped,
	 * capture fails with unsolicited interrupts hitting.
	 *
	 * Most of the registers are set to proper values later, except for the
	 * two reserved registers. They are said to hold a "hardware lock"
	 * value, with no further information available.
	 */
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG, 0);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG,
SUN8I_A83T_MIPI_CSI2_CTRL_INIT_VALUE);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG, 0);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG,
SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_INIT_VALUE);
regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG, 0);
regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG,
SUN8I_A83T_DPHY_CTRL_INIT_VALUE);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD1_REG, 0);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD1_REG,
SUN8I_A83T_MIPI_CSI2_RSVD1_HW_LOCK_VALUE);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD2_REG, 0);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD2_REG,
SUN8I_A83T_MIPI_CSI2_RSVD2_HW_LOCK_VALUE);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG, 0);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
SUN8I_A83T_MIPI_CSI2_CFG_INIT_VALUE);
}
static void
sun8i_a83t_mipi_csi2_enable(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
struct regmap *regmap = csi2_dev->regmap;
regmap_update_bits(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN,
SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN);
}
static void
sun8i_a83t_mipi_csi2_disable(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
struct regmap *regmap = csi2_dev->regmap;
regmap_update_bits(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN, 0);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG, 0);
}
static void
sun8i_a83t_mipi_csi2_configure(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
struct regmap *regmap = csi2_dev->regmap;
unsigned int lanes_count =
csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
const struct sun8i_a83t_mipi_csi2_format *format;
struct device *dev = csi2_dev->dev;
u32 version = 0;
format = sun8i_a83t_mipi_csi2_format_find(mbus_format->code);
if (WARN_ON(!format))
return;
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG,
SUN8I_A83T_MIPI_CSI2_CTRL_RESET_N);
regmap_read(regmap, SUN8I_A83T_MIPI_CSI2_VERSION_REG, &version);
dev_dbg(dev, "A83T MIPI CSI-2 version: %04x\n", version);
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
SUN8I_A83T_MIPI_CSI2_CFG_UNPKT_EN |
SUN8I_A83T_MIPI_CSI2_CFG_SYNC_DLY_CYCLE(8) |
SUN8I_A83T_MIPI_CSI2_CFG_N_CHANNEL(1) |
SUN8I_A83T_MIPI_CSI2_CFG_N_LANE(lanes_count));
	/*
	 * Only a single virtual channel (index 0) is currently supported.
	 * While the registers do mention multiple physical channels being
	 * available (which can be configured to match a specific virtual
	 * channel or data type), it's unclear whether channels > 0 are
	 * actually connected and available, and the reference source code
	 * only makes use of channel 0.
	 *
	 * Using extra channels would also require matching channels to be
	 * available on the CSI (and ISP) side, which is also uncertain,
	 * although some CSI implementations are said to support multiple
	 * channels for BT656 time-sharing.
	 *
	 * We still configure virtual channel numbers to ensure that virtual
	 * channel 0 only goes to channel 0.
	 */
regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_VCDT0_REG,
SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(3, 3) |
SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(2, 2) |
SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(1, 1) |
SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(0, 0) |
SUN8I_A83T_MIPI_CSI2_VCDT0_CH_DT(0, format->data_type));
}
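/*
 * Worked example for the VCDT0 write above (illustrative): with a RAW10
 * mbus format the MIPI CSI-2 data type is 0x2b, so channel 0 is set to
 * match virtual channel 0 with data type 0x2b, while channels 1-3 are
 * mapped one-to-one to virtual channels 1-3 with no data type programmed.
 */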
/* V4L2 Subdev */
static int sun8i_a83t_mipi_csi2_s_stream(struct v4l2_subdev *subdev, int on)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
v4l2_get_subdevdata(subdev);
struct v4l2_subdev *source_subdev = csi2_dev->bridge.source_subdev;
union phy_configure_opts dphy_opts = { 0 };
struct phy_configure_opts_mipi_dphy *dphy_cfg = &dphy_opts.mipi_dphy;
struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
const struct sun8i_a83t_mipi_csi2_format *format;
struct phy *dphy = csi2_dev->dphy;
struct device *dev = csi2_dev->dev;
struct v4l2_ctrl *ctrl;
unsigned int lanes_count =
csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
unsigned long pixel_rate;
int ret;
if (!source_subdev)
return -ENODEV;
if (!on) {
v4l2_subdev_call(source_subdev, video, s_stream, 0);
ret = 0;
goto disable;
}
/* Runtime PM */
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
/* Sensor pixel rate */
ctrl = v4l2_ctrl_find(source_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
if (!ctrl) {
dev_err(dev, "missing sensor pixel rate\n");
ret = -ENODEV;
goto error_pm;
}
pixel_rate = (unsigned long)v4l2_ctrl_g_ctrl_int64(ctrl);
if (!pixel_rate) {
dev_err(dev, "missing (zero) sensor pixel rate\n");
ret = -ENODEV;
goto error_pm;
}
/* D-PHY */
if (!lanes_count) {
dev_err(dev, "missing (zero) MIPI CSI-2 lanes count\n");
ret = -ENODEV;
goto error_pm;
}
format = sun8i_a83t_mipi_csi2_format_find(mbus_format->code);
if (WARN_ON(!format)) {
ret = -ENODEV;
goto error_pm;
}
phy_mipi_dphy_get_default_config(pixel_rate, format->bpp, lanes_count,
dphy_cfg);
	/*
	 * Note that our hardware is using DDR, which is not taken into
	 * account by phy_mipi_dphy_get_default_config when calculating
	 * hs_clk_rate from the pixel rate, lanes count and bpp.
	 *
	 * The resulting clock rate is basically the symbol rate over the whole
	 * link. The actual clock rate is obtained by dividing it by two, since
	 * DDR samples on both rising and falling edges.
	 */
dev_dbg(dev, "A83T MIPI CSI-2 config:\n");
dev_dbg(dev, "%ld pixels/s, %u bits/pixel, %u lanes, %lu Hz clock\n",
pixel_rate, format->bpp, lanes_count,
dphy_cfg->hs_clk_rate / 2);
ret = phy_reset(dphy);
if (ret) {
dev_err(dev, "failed to reset MIPI D-PHY\n");
goto error_pm;
}
ret = phy_configure(dphy, &dphy_opts);
if (ret) {
dev_err(dev, "failed to configure MIPI D-PHY\n");
goto error_pm;
}
/* Controller */
sun8i_a83t_mipi_csi2_configure(csi2_dev);
sun8i_a83t_mipi_csi2_enable(csi2_dev);
/* D-PHY */
ret = phy_power_on(dphy);
if (ret) {
dev_err(dev, "failed to power on MIPI D-PHY\n");
goto error_pm;
}
/* Source */
ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD)
goto disable;
return 0;
disable:
phy_power_off(dphy);
sun8i_a83t_mipi_csi2_disable(csi2_dev);
error_pm:
pm_runtime_put(dev);
return ret;
}
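/*
 * Worked example for the DDR note above (illustrative numbers): with a
 * 148500000 pixels/s source, 10 bits per pixel and 4 lanes,
 * phy_mipi_dphy_get_default_config() yields
 * hs_clk_rate = 148500000 * 10 / 4 = 371250000 (the per-lane symbol
 * rate); the D-PHY clock itself runs at half of that, 185625000 Hz,
 * since DDR transfers a symbol on each edge.
 */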
static const struct v4l2_subdev_video_ops
sun8i_a83t_mipi_csi2_video_ops = {
.s_stream = sun8i_a83t_mipi_csi2_s_stream,
};
static void
sun8i_a83t_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
{
if (!sun8i_a83t_mipi_csi2_format_find(mbus_format->code))
mbus_format->code = sun8i_a83t_mipi_csi2_formats[0].mbus_code;
mbus_format->field = V4L2_FIELD_NONE;
mbus_format->colorspace = V4L2_COLORSPACE_RAW;
mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
static int sun8i_a83t_mipi_csi2_init_state(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
v4l2_get_subdevdata(subdev);
unsigned int pad = SUN8I_A83T_MIPI_CSI2_PAD_SINK;
struct v4l2_mbus_framefmt *mbus_format =
v4l2_subdev_state_get_format(state, pad);
struct mutex *lock = &csi2_dev->bridge.lock;
mutex_lock(lock);
mbus_format->code = sun8i_a83t_mipi_csi2_formats[0].mbus_code;
mbus_format->width = 640;
mbus_format->height = 480;
sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);
mutex_unlock(lock);
return 0;
}
static int
sun8i_a83t_mipi_csi2_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code_enum)
{
if (code_enum->index >= ARRAY_SIZE(sun8i_a83t_mipi_csi2_formats))
return -EINVAL;
code_enum->code =
sun8i_a83t_mipi_csi2_formats[code_enum->index].mbus_code;
return 0;
}
static int sun8i_a83t_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
v4l2_get_subdevdata(subdev);
struct v4l2_mbus_framefmt *mbus_format = &format->format;
struct mutex *lock = &csi2_dev->bridge.lock;
mutex_lock(lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
*mbus_format = *v4l2_subdev_state_get_format(state,
format->pad);
else
*mbus_format = csi2_dev->bridge.mbus_format;
mutex_unlock(lock);
return 0;
}
static int sun8i_a83t_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
v4l2_get_subdevdata(subdev);
struct v4l2_mbus_framefmt *mbus_format = &format->format;
struct mutex *lock = &csi2_dev->bridge.lock;
mutex_lock(lock);
sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
*v4l2_subdev_state_get_format(state, format->pad) =
*mbus_format;
else
csi2_dev->bridge.mbus_format = *mbus_format;
mutex_unlock(lock);
return 0;
}
static const struct v4l2_subdev_pad_ops sun8i_a83t_mipi_csi2_pad_ops = {
.enum_mbus_code = sun8i_a83t_mipi_csi2_enum_mbus_code,
.get_fmt = sun8i_a83t_mipi_csi2_get_fmt,
.set_fmt = sun8i_a83t_mipi_csi2_set_fmt,
};
static const struct v4l2_subdev_ops sun8i_a83t_mipi_csi2_subdev_ops = {
.video = &sun8i_a83t_mipi_csi2_video_ops,
.pad = &sun8i_a83t_mipi_csi2_pad_ops,
};
static const struct v4l2_subdev_internal_ops sun8i_a83t_mipi_csi2_internal_ops = {
.init_state = sun8i_a83t_mipi_csi2_init_state,
};
/* Media Entity */
static const struct media_entity_operations sun8i_a83t_mipi_csi2_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
/* V4L2 Async */
static int
sun8i_a83t_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *remote_subdev,
struct v4l2_async_connection *async_subdev)
{
struct v4l2_subdev *subdev = notifier->sd;
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
container_of(notifier, struct sun8i_a83t_mipi_csi2_device,
bridge.notifier);
struct media_entity *sink_entity = &subdev->entity;
struct media_entity *source_entity = &remote_subdev->entity;
struct device *dev = csi2_dev->dev;
int sink_pad_index = 0;
int source_pad_index;
int ret;
ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
MEDIA_PAD_FL_SOURCE);
if (ret < 0) {
dev_err(dev, "missing source pad in external entity %s\n",
source_entity->name);
return -EINVAL;
}
source_pad_index = ret;
dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
source_pad_index, sink_entity->name, sink_pad_index);
ret = media_create_pad_link(source_entity, source_pad_index,
sink_entity, sink_pad_index,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
if (ret) {
dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
source_entity->name, source_pad_index,
sink_entity->name, sink_pad_index);
return ret;
}
csi2_dev->bridge.source_subdev = remote_subdev;
return 0;
}
static const struct v4l2_async_notifier_operations
sun8i_a83t_mipi_csi2_notifier_ops = {
.bound = sun8i_a83t_mipi_csi2_notifier_bound,
};
/* Bridge */
static int
sun8i_a83t_mipi_csi2_bridge_source_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
struct v4l2_async_connection *subdev_async;
struct fwnode_handle *handle;
struct device *dev = csi2_dev->dev;
int ret;
handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
if (!handle)
return -ENODEV;
endpoint->bus_type = V4L2_MBUS_CSI2_DPHY;
ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
if (ret)
goto complete;
subdev_async =
v4l2_async_nf_add_fwnode_remote(notifier, handle,
struct v4l2_async_connection);
if (IS_ERR(subdev_async))
ret = PTR_ERR(subdev_async);
complete:
fwnode_handle_put(handle);
return ret;
}
static int
sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
struct sun8i_a83t_mipi_csi2_bridge *bridge = &csi2_dev->bridge;
struct v4l2_subdev *subdev = &bridge->subdev;
struct v4l2_async_notifier *notifier = &bridge->notifier;
struct media_pad *pads = bridge->pads;
struct device *dev = csi2_dev->dev;
bool notifier_registered = false;
int ret;
mutex_init(&bridge->lock);
/* V4L2 Subdev */
v4l2_subdev_init(subdev, &sun8i_a83t_mipi_csi2_subdev_ops);
subdev->internal_ops = &sun8i_a83t_mipi_csi2_internal_ops;
strscpy(subdev->name, SUN8I_A83T_MIPI_CSI2_NAME, sizeof(subdev->name));
subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
subdev->owner = THIS_MODULE;
subdev->dev = dev;
v4l2_set_subdevdata(subdev, csi2_dev);
/* Media Entity */
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
subdev->entity.ops = &sun8i_a83t_mipi_csi2_entity_ops;
/* Media Pads */
pads[SUN8I_A83T_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
MEDIA_PAD_FL_MUST_CONNECT;
pads[SUN8I_A83T_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE |
MEDIA_PAD_FL_MUST_CONNECT;
ret = media_entity_pads_init(&subdev->entity,
SUN8I_A83T_MIPI_CSI2_PAD_COUNT, pads);
if (ret)
return ret;
/* V4L2 Async */
v4l2_async_subdev_nf_init(notifier, subdev);
notifier->ops = &sun8i_a83t_mipi_csi2_notifier_ops;
ret = sun8i_a83t_mipi_csi2_bridge_source_setup(csi2_dev);
if (ret && ret != -ENODEV)
goto error_v4l2_notifier_cleanup;
/* Only register the notifier when a sensor is connected. */
if (ret != -ENODEV) {
ret = v4l2_async_nf_register(notifier);
if (ret < 0)
goto error_v4l2_notifier_cleanup;
notifier_registered = true;
}
/* V4L2 Subdev */
ret = v4l2_async_register_subdev(subdev);
if (ret < 0)
goto error_v4l2_notifier_unregister;
return 0;
error_v4l2_notifier_unregister:
if (notifier_registered)
v4l2_async_nf_unregister(notifier);
error_v4l2_notifier_cleanup:
v4l2_async_nf_cleanup(notifier);
media_entity_cleanup(&subdev->entity);
return ret;
}
static void
sun8i_a83t_mipi_csi2_bridge_cleanup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
struct v4l2_subdev *subdev = &csi2_dev->bridge.subdev;
struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
v4l2_async_unregister_subdev(subdev);
v4l2_async_nf_unregister(notifier);
v4l2_async_nf_cleanup(notifier);
media_entity_cleanup(&subdev->entity);
}
/* Platform */
static int sun8i_a83t_mipi_csi2_suspend(struct device *dev)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
clk_disable_unprepare(csi2_dev->clock_misc);
clk_disable_unprepare(csi2_dev->clock_mipi);
clk_disable_unprepare(csi2_dev->clock_mod);
reset_control_assert(csi2_dev->reset);
return 0;
}
static int sun8i_a83t_mipi_csi2_resume(struct device *dev)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
int ret;
ret = reset_control_deassert(csi2_dev->reset);
if (ret) {
dev_err(dev, "failed to deassert reset\n");
return ret;
}
ret = clk_prepare_enable(csi2_dev->clock_mod);
if (ret) {
dev_err(dev, "failed to enable module clock\n");
goto error_reset;
}
ret = clk_prepare_enable(csi2_dev->clock_mipi);
if (ret) {
dev_err(dev, "failed to enable MIPI clock\n");
goto error_clock_mod;
}
ret = clk_prepare_enable(csi2_dev->clock_misc);
if (ret) {
dev_err(dev, "failed to enable CSI misc clock\n");
goto error_clock_mipi;
}
sun8i_a83t_mipi_csi2_init(csi2_dev);
return 0;
error_clock_mipi:
clk_disable_unprepare(csi2_dev->clock_mipi);
error_clock_mod:
clk_disable_unprepare(csi2_dev->clock_mod);
error_reset:
reset_control_assert(csi2_dev->reset);
return ret;
}
static const struct dev_pm_ops sun8i_a83t_mipi_csi2_pm_ops = {
.runtime_suspend = sun8i_a83t_mipi_csi2_suspend,
.runtime_resume = sun8i_a83t_mipi_csi2_resume,
};
static const struct regmap_config sun8i_a83t_mipi_csi2_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x120,
};
static int
sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev,
struct platform_device *platform_dev)
{
struct device *dev = csi2_dev->dev;
void __iomem *io_base;
int ret;
/* Registers */
io_base = devm_platform_ioremap_resource(platform_dev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
csi2_dev->regmap =
devm_regmap_init_mmio_clk(dev, "bus", io_base,
&sun8i_a83t_mipi_csi2_regmap_config);
if (IS_ERR(csi2_dev->regmap)) {
dev_err(dev, "failed to init register map\n");
return PTR_ERR(csi2_dev->regmap);
}
/* Clocks */
csi2_dev->clock_mod = devm_clk_get(dev, "mod");
if (IS_ERR(csi2_dev->clock_mod)) {
dev_err(dev, "failed to acquire mod clock\n");
return PTR_ERR(csi2_dev->clock_mod);
}
ret = clk_set_rate_exclusive(csi2_dev->clock_mod, 297000000);
if (ret) {
dev_err(dev, "failed to set mod clock rate\n");
return ret;
}
csi2_dev->clock_mipi = devm_clk_get(dev, "mipi");
if (IS_ERR(csi2_dev->clock_mipi)) {
dev_err(dev, "failed to acquire mipi clock\n");
ret = PTR_ERR(csi2_dev->clock_mipi);
goto error_clock_rate_exclusive;
}
csi2_dev->clock_misc = devm_clk_get(dev, "misc");
if (IS_ERR(csi2_dev->clock_misc)) {
dev_err(dev, "failed to acquire misc clock\n");
ret = PTR_ERR(csi2_dev->clock_misc);
goto error_clock_rate_exclusive;
}
/* Reset */
csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(csi2_dev->reset)) {
dev_err(dev, "failed to get reset controller\n");
ret = PTR_ERR(csi2_dev->reset);
goto error_clock_rate_exclusive;
}
/* D-PHY */
ret = sun8i_a83t_dphy_register(csi2_dev);
if (ret) {
dev_err(dev, "failed to initialize MIPI D-PHY\n");
goto error_clock_rate_exclusive;
}
/* Runtime PM */
pm_runtime_enable(dev);
return 0;
error_clock_rate_exclusive:
clk_rate_exclusive_put(csi2_dev->clock_mod);
return ret;
}
static void
sun8i_a83t_mipi_csi2_resources_cleanup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
pm_runtime_disable(csi2_dev->dev);
phy_exit(csi2_dev->dphy);
clk_rate_exclusive_put(csi2_dev->clock_mod);
}
static int sun8i_a83t_mipi_csi2_probe(struct platform_device *platform_dev)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev;
struct device *dev = &platform_dev->dev;
int ret;
csi2_dev = devm_kzalloc(dev, sizeof(*csi2_dev), GFP_KERNEL);
if (!csi2_dev)
return -ENOMEM;
csi2_dev->dev = dev;
platform_set_drvdata(platform_dev, csi2_dev);
ret = sun8i_a83t_mipi_csi2_resources_setup(csi2_dev, platform_dev);
if (ret)
return ret;
ret = sun8i_a83t_mipi_csi2_bridge_setup(csi2_dev);
if (ret)
goto error_resources;
return 0;
error_resources:
sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);
return ret;
}
static void sun8i_a83t_mipi_csi2_remove(struct platform_device *platform_dev)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
platform_get_drvdata(platform_dev);
sun8i_a83t_mipi_csi2_bridge_cleanup(csi2_dev);
sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);
}
static const struct of_device_id sun8i_a83t_mipi_csi2_of_match[] = {
{ .compatible = "allwinner,sun8i-a83t-mipi-csi2" },
{},
};
MODULE_DEVICE_TABLE(of, sun8i_a83t_mipi_csi2_of_match);
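/*
 * Minimal devicetree sketch matching the compatible string and the
 * resources requested in sun8i_a83t_mipi_csi2_resources_setup() above
 * (illustrative only: the unit address and the clock/reset specifiers
 * are placeholders; see the dt-binding for the authoritative layout):
 *
 *	mipi_csi2: csi@... {
 *		compatible = "allwinner,sun8i-a83t-mipi-csi2";
 *		reg = <...>;
 *		clocks = <&ccu ...>, <&ccu ...>, <&ccu ...>, <&ccu ...>;
 *		clock-names = "bus", "mod", "mipi", "misc";
 *		resets = <&ccu ...>;
 *	};
 */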
static struct platform_driver sun8i_a83t_mipi_csi2_platform_driver = {
.probe = sun8i_a83t_mipi_csi2_probe,
.remove = sun8i_a83t_mipi_csi2_remove,
.driver = {
.name = SUN8I_A83T_MIPI_CSI2_NAME,
.of_match_table = sun8i_a83t_mipi_csi2_of_match,
.pm = &sun8i_a83t_mipi_csi2_pm_ops,
},
};
module_platform_driver(sun8i_a83t_mipi_csi2_platform_driver);
MODULE_DESCRIPTION("Allwinner A83T MIPI CSI-2 and D-PHY Controller Driver");
MODULE_AUTHOR("Paul Kocialkowski <[email protected]>");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <signal.h>
#include <errno.h>
#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "perf-sys.h"
#include "trace_helpers.h"
#define SAMPLE_FREQ 50
static int pid;
/* counts, stackmap */
static int map_fd[2];
struct bpf_program *prog;
static bool sys_read_seen, sys_write_seen;
static void print_ksym(__u64 addr)
{
struct ksym *sym;
if (!addr)
return;
sym = ksym_search(addr);
if (!sym) {
printf("ksym not found. Is kallsyms loaded?\n");
return;
}
printf("%s;", sym->name);
	if (strstr(sym->name, "sys_read"))
		sys_read_seen = true;
	else if (strstr(sym->name, "sys_write"))
		sys_write_seen = true;
}
static void print_addr(__u64 addr)
{
if (!addr)
return;
printf("%llx;", addr);
}
#define TASK_COMM_LEN 16
struct key_t {
char comm[TASK_COMM_LEN];
__u32 kernstack;
__u32 userstack;
};
static void print_stack(struct key_t *key, __u64 count)
{
__u64 ip[PERF_MAX_STACK_DEPTH] = {};
static bool warned;
int i;
printf("%3lld %s;", count, key->comm);
if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
printf("---;");
} else {
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
print_ksym(ip[i]);
}
printf("-;");
if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
printf("---;");
} else {
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
print_addr(ip[i]);
}
if (count < 6)
printf("\r");
else
printf("\n");
if (key->kernstack == -EEXIST && !warned) {
printf("stackmap collisions seen. Consider increasing size\n");
warned = true;
} else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
printf("err stackid %d %d\n", key->kernstack, key->userstack);
}
}
static void err_exit(int err)
{
kill(pid, SIGKILL);
exit(err);
}
static void print_stacks(void)
{
struct key_t key = {}, next_key;
__u64 value;
__u32 stackid = 0, next_id;
int error = 1, fd = map_fd[0], stack_map = map_fd[1];
sys_read_seen = sys_write_seen = false;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
bpf_map_lookup_elem(fd, &next_key, &value);
print_stack(&next_key, value);
bpf_map_delete_elem(fd, &next_key);
key = next_key;
}
printf("\n");
if (!sys_read_seen || !sys_write_seen) {
printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
err_exit(error);
}
/* clear stack map */
while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
bpf_map_delete_elem(stack_map, &next_id);
stackid = next_id;
}
}
static inline int generate_load(void)
{
if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
printf("failed to generate some load with dd: %s\n", strerror(errno));
return -1;
}
return 0;
}
static void test_perf_event_all_cpu(struct perf_event_attr *attr)
{
int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
struct bpf_link **links = calloc(nr_cpus, sizeof(struct bpf_link *));
int i, pmu_fd, error = 1;
if (!links) {
printf("malloc of links failed\n");
goto err;
}
/* system wide perf event, no need to inherit */
attr->inherit = 0;
/* open perf_event on all cpus */
for (i = 0; i < nr_cpus; i++) {
pmu_fd = sys_perf_event_open(attr, -1, i, -1, 0);
if (pmu_fd < 0) {
printf("sys_perf_event_open failed\n");
goto all_cpu_err;
}
links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
if (libbpf_get_error(links[i])) {
printf("bpf_program__attach_perf_event failed\n");
links[i] = NULL;
close(pmu_fd);
goto all_cpu_err;
}
}
if (generate_load() < 0)
goto all_cpu_err;
print_stacks();
error = 0;
all_cpu_err:
for (i--; i >= 0; i--)
bpf_link__destroy(links[i]);
err:
free(links);
if (error)
err_exit(error);
}
static void test_perf_event_task(struct perf_event_attr *attr)
{
struct bpf_link *link = NULL;
int pmu_fd, error = 1;
/* per task perf event, enable inherit so the "dd ..." command can be traced properly.
* Enabling inherit will cause bpf_perf_prog_read_time helper failure.
*/
attr->inherit = 1;
/* open task bound event */
pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
if (pmu_fd < 0) {
printf("sys_perf_event_open failed\n");
goto err;
}
link = bpf_program__attach_perf_event(prog, pmu_fd);
if (libbpf_get_error(link)) {
printf("bpf_program__attach_perf_event failed\n");
link = NULL;
close(pmu_fd);
goto err;
}
if (generate_load() < 0)
goto err;
print_stacks();
error = 0;
err:
bpf_link__destroy(link);
if (error)
err_exit(error);
}
static void test_bpf_perf_event(void)
{
struct perf_event_attr attr_type_hw = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
struct perf_event_attr attr_type_sw = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
struct perf_event_attr attr_hw_cache_l1d = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
};
struct perf_event_attr attr_hw_cache_branch_miss = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_BPU |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
};
struct perf_event_attr attr_type_raw = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_RAW,
/* Intel Instruction Retired */
.config = 0xc0,
};
struct perf_event_attr attr_type_raw_lock_load = {
.sample_freq = SAMPLE_FREQ,
.freq = 1,
.type = PERF_TYPE_RAW,
/* Intel MEM_UOPS_RETIRED.LOCK_LOADS */
.config = 0x21d0,
/* Request to record lock address from PEBS */
.sample_type = PERF_SAMPLE_ADDR,
/* Record address value requires precise event */
.precise_ip = 2,
};
printf("Test HW_CPU_CYCLES\n");
test_perf_event_all_cpu(&attr_type_hw);
test_perf_event_task(&attr_type_hw);
printf("Test SW_CPU_CLOCK\n");
test_perf_event_all_cpu(&attr_type_sw);
test_perf_event_task(&attr_type_sw);
printf("Test HW_CACHE_L1D\n");
test_perf_event_all_cpu(&attr_hw_cache_l1d);
test_perf_event_task(&attr_hw_cache_l1d);
printf("Test HW_CACHE_BPU\n");
test_perf_event_all_cpu(&attr_hw_cache_branch_miss);
test_perf_event_task(&attr_hw_cache_branch_miss);
printf("Test Instruction Retired\n");
test_perf_event_all_cpu(&attr_type_raw);
test_perf_event_task(&attr_type_raw);
printf("Test Lock Load\n");
test_perf_event_all_cpu(&attr_type_raw_lock_load);
test_perf_event_task(&attr_type_raw_lock_load);
printf("*** PASS ***\n");
}
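/*
 * Sketch of the PERF_TYPE_HW_CACHE config encoding used above, as
 * documented in linux/perf_event.h:
 *	config = cache_id | (op_id << 8) | (result_id << 16)
 * The macro below is illustrative and not part of the original sample.
 */
#define EXAMPLE_HW_CACHE_CONFIG(cache, op, result) \
	((cache) | ((op) << 8) | ((result) << 16))
/*
 * e.g. EXAMPLE_HW_CACHE_CONFIG(PERF_COUNT_HW_CACHE_L1D,
 *			       PERF_COUNT_HW_CACHE_OP_READ,
 *			       PERF_COUNT_HW_CACHE_RESULT_ACCESS)
 * equals the .config used by attr_hw_cache_l1d above.
 */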
int main(int argc, char **argv)
{
struct bpf_object *obj = NULL;
char filename[256];
int error = 1;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
signal(SIGINT, err_exit);
signal(SIGTERM, err_exit);
if (load_kallsyms()) {
printf("failed to process /proc/kallsyms\n");
goto cleanup;
}
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
printf("opening BPF object file failed\n");
obj = NULL;
goto cleanup;
}
prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
if (!prog) {
printf("finding a prog in obj file failed\n");
goto cleanup;
}
/* load BPF program */
if (bpf_object__load(obj)) {
printf("loading BPF object file failed\n");
goto cleanup;
}
map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
if (map_fd[0] < 0 || map_fd[1] < 0) {
printf("finding a counts/stackmap map in obj file failed\n");
goto cleanup;
}
pid = fork();
if (pid == 0) {
read_trace_pipe();
return 0;
} else if (pid == -1) {
printf("couldn't spawn process\n");
goto cleanup;
}
test_bpf_perf_event();
error = 0;
cleanup:
bpf_object__close(obj);
err_exit(error);
}
|
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <net/netdev_queues.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
struct workqueue_struct *octep_vf_wq;
/* Supported Devices */
static const struct pci_device_id octep_vf_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)},
{0, },
};
MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl);
MODULE_AUTHOR("Veerasenareddy Burru <[email protected]>");
MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING);
MODULE_LICENSE("GPL");
/**
* octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
*
* @oct: Octeon device private data structure.
*
* Allocate resources to hold per Tx/Rx queue interrupt info.
* This is the information passed to interrupt handler, from which napi poll
* is scheduled and includes quick access to private data of Tx/Rx queue
* corresponding to the interrupt being handled.
*
* Return: 0, on successful allocation of resources for all queue interrupts.
* -1, if failed to allocate any resource.
*/
static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct)
{
struct octep_vf_ioq_vector *ioq_vector;
int i;
for (i = 0; i < oct->num_oqs; i++) {
oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
if (!oct->ioq_vector[i])
goto free_ioq_vector;
ioq_vector = oct->ioq_vector[i];
ioq_vector->iq = oct->iq[i];
ioq_vector->oq = oct->oq[i];
ioq_vector->octep_vf_dev = oct;
}
dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
return 0;
free_ioq_vector:
while (i) {
i--;
vfree(oct->ioq_vector[i]);
oct->ioq_vector[i] = NULL;
}
return -1;
}
/**
* octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
if (oct->ioq_vector[i]) {
vfree(oct->ioq_vector[i]);
oct->ioq_vector[i] = NULL;
}
}
netdev_info(oct->netdev, "Freed IOQ Vectors\n");
}
/**
* octep_vf_enable_msix_range() - enable MSI-x interrupts.
*
* @oct: Octeon device private data structure.
*
* Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
* for the Octeon device.
*
* Return: 0, on successfully enabling all MSI-x interrupts.
* -1, if failed to enable any MSI-x interrupt.
*/
static int octep_vf_enable_msix_range(struct octep_vf_device *oct)
{
int num_msix, msix_allocated;
int i;
	/* Only Tx/Rx queue interrupts are used; the VF requests no extra
	 * generic (non-IOQ) vectors.
	 */
	num_msix = oct->num_oqs;
oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL);
if (!oct->msix_entries)
goto msix_alloc_err;
for (i = 0; i < num_msix; i++)
oct->msix_entries[i].entry = i;
msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
num_msix, num_msix);
if (msix_allocated != num_msix) {
dev_err(&oct->pdev->dev,
"Failed to enable %d msix irqs; got only %d\n",
num_msix, msix_allocated);
goto enable_msix_err;
}
oct->num_irqs = msix_allocated;
dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
return 0;
enable_msix_err:
if (msix_allocated > 0)
pci_disable_msix(oct->pdev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
msix_alloc_err:
return -1;
}
/**
* octep_vf_disable_msix() - disable MSI-x interrupts.
*
* @oct: Octeon device private data structure.
*
* Disable MSI-x on the Octeon device.
*/
static void octep_vf_disable_msix(struct octep_vf_device *oct)
{
pci_disable_msix(oct->pdev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
}
/**
* octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
*
* @irq: Interrupt number.
* @data: interrupt data contains pointers to Tx/Rx queue private data
 *        and corresponding NAPI context.
 *
 * This is the common handler for all Tx/Rx queue interrupts.
*/
static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data)
{
struct octep_vf_ioq_vector *ioq_vector = data;
struct octep_vf_device *oct = ioq_vector->octep_vf_dev;
return oct->hw_ops.ioq_intr_handler(ioq_vector);
}
/**
* octep_vf_request_irqs() - Register interrupt handlers.
*
* @oct: Octeon device private data structure.
*
* Register handlers for all queue and non-queue interrupts.
*
* Return: 0, on successful registration of all interrupt handlers.
* -1, on any error.
*/
static int octep_vf_request_irqs(struct octep_vf_device *oct)
{
struct net_device *netdev = oct->netdev;
struct octep_vf_ioq_vector *ioq_vector;
struct msix_entry *msix_entry;
int ret, i;
/* Request IRQs for Tx/Rx queues */
for (i = 0; i < oct->num_oqs; i++) {
ioq_vector = oct->ioq_vector[i];
msix_entry = &oct->msix_entries[i];
snprintf(ioq_vector->name, sizeof(ioq_vector->name),
"%s-q%d", netdev->name, i);
ret = request_irq(msix_entry->vector,
octep_vf_ioq_intr_handler, 0,
ioq_vector->name, ioq_vector);
if (ret) {
netdev_err(netdev,
"request_irq failed for Q-%d; err=%d",
i, ret);
goto ioq_irq_err;
}
cpumask_set_cpu(i % num_online_cpus(),
&ioq_vector->affinity_mask);
irq_set_affinity_hint(msix_entry->vector,
&ioq_vector->affinity_mask);
}
return 0;
ioq_irq_err:
while (i) {
--i;
		free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
}
return -1;
}
/**
* octep_vf_free_irqs() - free all registered interrupts.
*
* @oct: Octeon device private data structure.
*
* Free all queue and non-queue interrupts of the Octeon device.
*/
static void octep_vf_free_irqs(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_irqs; i++) {
irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
}
netdev_info(oct->netdev, "IRQs freed\n");
}
/**
* octep_vf_setup_irqs() - setup interrupts for the Octeon device.
*
* @oct: Octeon device private data structure.
*
* Allocate data structures to hold per interrupt information, allocate/enable
* MSI-x interrupt and register interrupt handlers.
*
* Return: 0, on successful allocation and registration of all interrupts.
* -1, on any error.
*/
static int octep_vf_setup_irqs(struct octep_vf_device *oct)
{
if (octep_vf_alloc_ioq_vectors(oct))
goto ioq_vector_err;
if (octep_vf_enable_msix_range(oct))
goto enable_msix_err;
if (octep_vf_request_irqs(oct))
goto request_irq_err;
return 0;
request_irq_err:
octep_vf_disable_msix(oct);
enable_msix_err:
octep_vf_free_ioq_vectors(oct);
ioq_vector_err:
return -1;
}
/**
* octep_vf_clean_irqs() - free all interrupts and its resources.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_clean_irqs(struct octep_vf_device *oct)
{
octep_vf_free_irqs(oct);
octep_vf_disable_msix(oct);
octep_vf_free_ioq_vectors(oct);
}
/**
* octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
*
* @iq: Octeon Tx queue data structure.
* @oq: Octeon Rx queue data structure.
*/
static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
{
u32 pkts_pend = oq->pkts_pending;
netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
if (iq->pkts_processed) {
writel(iq->pkts_processed, iq->inst_cnt_reg);
iq->pkt_in_done -= iq->pkts_processed;
iq->pkts_processed = 0;
}
if (oq->last_pkt_count - pkts_pend) {
writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
oq->last_pkt_count = pkts_pend;
}
	/* Flush the previous writes before writing to the RESEND bit */
smp_wmb();
writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}
/**
* octep_vf_napi_poll() - NAPI poll function for Tx/Rx.
*
* @napi: pointer to napi context.
* @budget: max number of packets to be processed in single invocation.
*/
static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
{
struct octep_vf_ioq_vector *ioq_vector =
container_of(napi, struct octep_vf_ioq_vector, napi);
u32 tx_pending, rx_done;
tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, 64);
rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);
	/* Need more polling if Tx completion processing is still pending or
	 * if we processed at least 'budget' Rx packets.
	 */
if (tx_pending || rx_done >= budget)
return budget;
if (likely(napi_complete_done(napi, rx_done)))
octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
return rx_done;
}
/**
* octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_napi_add(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
}
}
/**
* octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_napi_delete(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
netif_napi_del(&oct->ioq_vector[i]->napi);
oct->oq[i]->napi = NULL;
}
}
/**
* octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_napi_enable(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
napi_enable(&oct->ioq_vector[i]->napi);
}
}
/**
* octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_napi_disable(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
napi_disable(&oct->ioq_vector[i]->napi);
}
}
static void octep_vf_link_up(struct net_device *netdev)
{
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
}
static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
{
int err;
err = octep_vf_mbox_set_rx_state(oct, up);
if (err)
netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
}
static int octep_vf_get_link_status(struct octep_vf_device *oct)
{
int err;
err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
if (err)
netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
return oct->link_info.oper_up;
}
static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
{
int err;
err = octep_vf_mbox_set_link_status(oct, up);
if (err) {
netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
return;
}
oct->link_info.oper_up = up;
}
/**
* octep_vf_open() - start the octeon network device.
*
* @netdev: pointer to kernel network device.
*
 * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
 * Tx/Rx queues and interrupts.
*
* Return: 0, on successfully setting up device and bring it up.
* -1, on any error.
*/
static int octep_vf_open(struct net_device *netdev)
{
struct octep_vf_device *oct = netdev_priv(netdev);
int err, ret;
netdev_info(netdev, "Starting netdev ...\n");
netif_carrier_off(netdev);
oct->hw_ops.reset_io_queues(oct);
if (octep_vf_setup_iqs(oct))
goto setup_iq_err;
if (octep_vf_setup_oqs(oct))
goto setup_oq_err;
if (octep_vf_setup_irqs(oct))
goto setup_irq_err;
err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
if (err)
goto set_queues_err;
err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
if (err)
goto set_queues_err;
octep_vf_napi_add(oct);
octep_vf_napi_enable(oct);
oct->link_info.admin_up = 1;
octep_vf_set_rx_state(oct, true);
ret = octep_vf_get_link_status(oct);
if (!ret)
octep_vf_set_link_status(oct, true);
/* Enable the input and output queues for this Octeon device */
oct->hw_ops.enable_io_queues(oct);
/* Enable Octeon device interrupts */
oct->hw_ops.enable_interrupts(oct);
octep_vf_oq_dbell_init(oct);
ret = octep_vf_get_link_status(oct);
if (ret)
octep_vf_link_up(netdev);
return 0;
set_queues_err:
octep_vf_napi_disable(oct);
octep_vf_napi_delete(oct);
octep_vf_clean_irqs(oct);
setup_irq_err:
octep_vf_free_oqs(oct);
setup_oq_err:
octep_vf_free_iqs(oct);
setup_iq_err:
return -1;
}
/**
* octep_vf_stop() - stop the octeon network device.
*
* @netdev: pointer to kernel network device.
*
 * Stop the device's Tx/Rx operations, bring down the link, and
 * free up all resources allocated for Tx/Rx queues and interrupts.
*/
static int octep_vf_stop(struct net_device *netdev)
{
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_info(netdev, "Stopping the device ...\n");
/* Stop Tx from stack */
netif_carrier_off(netdev);
netif_tx_disable(netdev);
octep_vf_set_link_status(oct, false);
octep_vf_set_rx_state(oct, false);
oct->link_info.admin_up = 0;
oct->link_info.oper_up = 0;
oct->hw_ops.disable_interrupts(oct);
octep_vf_napi_disable(oct);
octep_vf_napi_delete(oct);
octep_vf_clean_irqs(oct);
octep_vf_clean_iqs(oct);
oct->hw_ops.disable_io_queues(oct);
oct->hw_ops.reset_io_queues(oct);
octep_vf_free_oqs(oct);
octep_vf_free_iqs(oct);
netdev_info(netdev, "Device stopped !!\n");
return 0;
}
/**
* octep_vf_iq_full_check() - check if a Tx queue is full.
*
* @iq: Octeon Tx queue data structure.
*
* Return: 0, if the Tx queue is not full.
* 1, if the Tx queue is full.
*/
static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
{
int ret;
ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq),
OCTEP_VF_WAKE_QUEUE_THRESHOLD,
OCTEP_VF_WAKE_QUEUE_THRESHOLD);
switch (ret) {
case 0: /* Stopped the queue, since IQ is full */
return 1;
	case -1: /*
		  * Pending write-index updates from
		  * octep_vf_iq_process_completions() on other
		  * CPUs re-enabled the queue after it had
		  * been stopped
		  */
iq->stats.restart_cnt++;
fallthrough;
case 1: /* Queue left enabled, since IQ is not yet full*/
return 0;
}
return 1;
}
/**
 * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
*
* @skb: packet skbuff pointer.
* @netdev: kernel network device.
*
* Return: NETDEV_TX_BUSY, if Tx Queue is full.
* NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
*/
static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_features_t feat = netdev->features;
struct octep_vf_tx_sglist_desc *sglist;
struct octep_vf_tx_buffer *tx_buffer;
struct octep_vf_tx_desc_hw *hw_desc;
struct skb_shared_info *shinfo;
struct octep_vf_instr_hdr *ih;
struct octep_vf_iq *iq;
skb_frag_t *frag;
u16 nr_frags, si;
int xmit_more;
u16 q_no, wi;
if (skb_put_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
q_no = skb_get_queue_mapping(skb);
if (q_no >= oct->num_iqs) {
netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
q_no = q_no % oct->num_iqs;
}
iq = oct->iq[q_no];
shinfo = skb_shinfo(skb);
nr_frags = shinfo->nr_frags;
wi = iq->host_write_index;
hw_desc = &iq->desc_ring[wi];
hw_desc->ih64 = 0;
tx_buffer = iq->buff_info + wi;
tx_buffer->skb = skb;
ih = &hw_desc->ih;
ih->tlen = skb->len;
ih->pkind = oct->fw_info.pkind;
ih->fsz = oct->fw_info.fsz;
ih->tlen = skb->len + ih->fsz;
if (!nr_frags) {
tx_buffer->gather = 0;
tx_buffer->dma = dma_map_single(iq->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(iq->dev, tx_buffer->dma))
goto dma_map_err;
hw_desc->dptr = tx_buffer->dma;
} else {
/* Scatter/Gather */
dma_addr_t dma;
u16 len;
sglist = tx_buffer->sglist;
ih->gsz = nr_frags + 1;
ih->gather = 1;
tx_buffer->gather = 1;
len = skb_headlen(skb);
dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(iq->dev, dma))
goto dma_map_err;
memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT);
sglist[0].len[3] = len;
sglist[0].dma_ptr[0] = dma;
si = 1; /* entry 0 is main skb, mapped above */
frag = &shinfo->frags[0];
while (nr_frags--) {
len = skb_frag_size(frag);
dma = skb_frag_dma_map(iq->dev, frag, 0,
len, DMA_TO_DEVICE);
if (dma_mapping_error(iq->dev, dma))
goto dma_map_sg_err;
sglist[si >> 2].len[3 - (si & 3)] = len;
sglist[si >> 2].dma_ptr[si & 3] = dma;
frag++;
si++;
}
hw_desc->dptr = tx_buffer->sglist_dma;
}
if (oct->fw_info.tx_ol_flags) {
if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO;
hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
} else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
}
		/* due to ESR, txm will be byte-swapped by hw; store it big-endian */
hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
}
xmit_more = netdev_xmit_more();
netdev_tx_sent_queue(iq->netdev_q, skb->len);
skb_tx_timestamp(skb);
iq->fill_cnt++;
wi++;
iq->host_write_index = wi & iq->ring_size_mask;
	/* octep_vf_iq_full_check() stops the queue and returns 1 if
	 * inserting the current packet made the queue full; in that case
	 * we must ring the doorbell. Otherwise, defer the doorbell write
	 * while more packets are coming (xmit_more) and the fill
	 * threshold has not been reached.
	 */
if (!octep_vf_iq_full_check(iq) && xmit_more &&
iq->fill_cnt < iq->fill_threshold)
return NETDEV_TX_OK;
goto ring_dbell;
dma_map_sg_err:
if (si > 0) {
dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
sglist[0].len[0], DMA_TO_DEVICE);
sglist[0].len[0] = 0;
}
while (si > 1) {
dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
sglist[si >> 2].len[si & 3], DMA_TO_DEVICE);
sglist[si >> 2].len[si & 3] = 0;
si--;
}
tx_buffer->gather = 0;
dma_map_err:
dev_kfree_skb_any(skb);
ring_dbell:
/* Flush the hw descriptors before writing to doorbell */
smp_wmb();
writel(iq->fill_cnt, iq->doorbell_reg);
iq->stats.instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
}
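/*
 * Worked example of the gather-list packing used in octep_vf_start_xmit()
 * above (illustrative): each octep_vf_tx_sglist_desc holds four lengths
 * and four DMA pointers, so scatter entry 'si' lands in descriptor
 * si >> 2 at slot si & 3, with lengths stored most-significant-first
 * (index 3 - (si & 3)). For si = 5, the length goes to sglist[1].len[2]
 * and the DMA address to sglist[1].dma_ptr[1].
 */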
int octep_vf_get_if_stats(struct octep_vf_device *oct)
{
struct octep_vf_iface_rxtx_stats vf_stats;
int ret, size;
memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats));
ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS,
(u8 *)&vf_stats, &size);
if (ret)
return ret;
memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats,
sizeof(struct octep_vf_iface_rx_stats));
memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats,
sizeof(struct octep_vf_iface_tx_stats));
return 0;
}
int octep_vf_get_link_info(struct octep_vf_device *oct)
{
int ret, size;
ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
(u8 *)&oct->link_info, &size);
if (ret) {
dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n");
return ret;
}
return 0;
}
/**
* octep_vf_get_stats64() - Get Octeon network device statistics.
*
* @netdev: kernel network device.
* @stats: pointer to stats structure to be filled in.
*/
static void octep_vf_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct octep_vf_device *oct = netdev_priv(netdev);
u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
int q;
tx_packets = 0;
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
for (q = 0; q < oct->num_oqs; q++) {
struct octep_vf_iq *iq = oct->iq[q];
struct octep_vf_oq *oq = oct->oq[q];
tx_packets += iq->stats.instr_completed;
tx_bytes += iq->stats.bytes_sent;
rx_packets += oq->stats.packets;
rx_bytes += oq->stats.bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
if (!octep_vf_get_if_stats(oct)) {
stats->multicast = oct->iface_rx_stats.mcast_pkts;
stats->rx_errors = oct->iface_rx_stats.err_pkts;
stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
oct->iface_rx_stats.err_pkts;
stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
stats->tx_dropped = oct->iface_tx_stats.dropped;
}
}
/**
* octep_vf_tx_timeout_task - work queue task to handle Tx queue timeout.
*
* @work: pointer to Tx queue timeout work_struct
*
* Stop and start the device so that it frees up all queue resources
* and restarts the queues; this potentially clears the Tx queue
* timeout condition.
*/
static void octep_vf_tx_timeout_task(struct work_struct *work)
{
struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
tx_timeout_task);
struct net_device *netdev = oct->netdev;
rtnl_lock();
if (netif_running(netdev)) {
octep_vf_stop(netdev);
octep_vf_open(netdev);
}
rtnl_unlock();
netdev_put(netdev, NULL);
}
/**
* octep_vf_tx_timeout() - Handle Tx Queue timeout.
*
* @netdev: pointer to kernel network device.
* @txqueue: Timed out Tx queue number.
*
* Schedule a work to handle Tx queue timeout.
*/
static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_hold(netdev, NULL, GFP_ATOMIC);
schedule_work(&oct->tx_timeout_task);
}
static int octep_vf_set_mac(struct net_device *netdev, void *p)
{
struct octep_vf_device *oct = netdev_priv(netdev);
struct sockaddr *addr = (struct sockaddr *)p;
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data);
if (err)
return err;
memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct octep_vf_device *oct = netdev_priv(netdev);
struct octep_vf_iface_link_info *link_info;
int err;
link_info = &oct->link_info;
if (link_info->mtu == new_mtu)
return 0;
err = octep_vf_mbox_set_mtu(oct, new_mtu);
if (!err) {
oct->link_info.mtu = new_mtu;
WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
}
static int octep_vf_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct octep_vf_device *oct = netdev_priv(netdev);
u16 rx_offloads = 0, tx_offloads = 0;
int err;
/* We only support features received from firmware */
if ((features & netdev->hw_features) != features)
return -EINVAL;
if (features & NETIF_F_TSO)
tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
if (features & NETIF_F_TSO6)
tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
if (features & NETIF_F_IP_CSUM)
tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
if (features & NETIF_F_IPV6_CSUM)
tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
if (features & NETIF_F_RXCSUM)
rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM;
err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads);
if (!err)
netdev->features = features;
return err;
}
static const struct net_device_ops octep_vf_netdev_ops = {
.ndo_open = octep_vf_open,
.ndo_stop = octep_vf_stop,
.ndo_start_xmit = octep_vf_start_xmit,
.ndo_get_stats64 = octep_vf_get_stats64,
.ndo_tx_timeout = octep_vf_tx_timeout,
.ndo_set_mac_address = octep_vf_set_mac,
.ndo_change_mtu = octep_vf_change_mtu,
.ndo_set_features = octep_vf_set_features,
};
static const char *octep_vf_devid_to_str(struct octep_vf_device *oct)
{
switch (oct->chip_id) {
case OCTEP_PCI_DEVICE_ID_CN93_VF:
return "CN93XX";
case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
return "CNF95N";
case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
return "CN10KA";
case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
return "CNF10KA";
case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
return "CNF10KB";
case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
return "CN10KB";
default:
return "Unsupported";
}
}
/**
* octep_vf_device_setup() - Setup Octeon Device.
*
* @oct: Octeon device private data structure.
*
* Set up Octeon device hardware operations, configuration, etc.
*/
int octep_vf_device_setup(struct octep_vf_device *oct)
{
struct pci_dev *pdev = oct->pdev;
/* allocate memory for oct->conf */
oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
if (!oct->conf)
return -ENOMEM;
/* Map BAR region 0 */
oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0),
pci_resource_len(oct->pdev, 0));
if (!oct->mmio.hw_addr) {
dev_err(&pdev->dev,
"Failed to remap BAR0; start=0x%llx len=0x%llx\n",
pci_resource_start(oct->pdev, 0),
pci_resource_len(oct->pdev, 0));
goto ioremap_err;
}
oct->mmio.mapped = 1;
oct->chip_id = pdev->device;
oct->rev_id = pdev->revision;
dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
switch (oct->chip_id) {
case OCTEP_PCI_DEVICE_ID_CN93_VF:
case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
case OCTEP_PCI_DEVICE_ID_CN98_VF:
dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
OCTEP_VF_MINOR_REV(oct));
octep_vf_device_setup_cn93(oct);
break;
case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
OCTEP_VF_MINOR_REV(oct));
octep_vf_device_setup_cnxk(oct);
break;
default:
dev_err(&pdev->dev, "Unsupported device\n");
goto unsupported_dev;
}
return 0;
unsupported_dev:
iounmap(oct->mmio.hw_addr);
ioremap_err:
kfree(oct->conf);
return -EOPNOTSUPP;
}
/**
* octep_vf_device_cleanup() - Cleanup Octeon Device.
*
* @oct: Octeon device private data structure.
*
* Cleanup Octeon device allocated resources.
*/
static void octep_vf_device_cleanup(struct octep_vf_device *oct)
{
dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
if (oct->mmio.mapped)
iounmap(oct->mmio.hw_addr);
kfree(oct->conf);
oct->conf = NULL;
}
static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr)
{
return octep_vf_mbox_get_mac_addr(oct, addr);
}
/**
* octep_vf_probe() - Octeon PCI device probe handler.
*
* @pdev: PCI device structure.
* @ent: entry in Octeon PCI device ID table.
*
* Initializes and enables the Octeon PCI device for network operations.
* Initializes Octeon private data structure and registers a network device.
*/
static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct octep_vf_device *octep_vf_dev;
struct net_device *netdev;
int err;
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Failed to enable PCI device\n");
return err;
}
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
goto disable_pci_device;
}
err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME);
if (err) {
dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
goto disable_pci_device;
}
pci_set_master(pdev);
netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device),
OCTEP_VF_MAX_QUEUES);
if (!netdev) {
dev_err(&pdev->dev, "Failed to allocate netdev\n");
err = -ENOMEM;
goto mem_regions_release;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
octep_vf_dev = netdev_priv(netdev);
octep_vf_dev->netdev = netdev;
octep_vf_dev->pdev = pdev;
octep_vf_dev->dev = &pdev->dev;
pci_set_drvdata(pdev, octep_vf_dev);
err = octep_vf_device_setup(octep_vf_dev);
if (err) {
dev_err(&pdev->dev, "Device setup failed\n");
goto netdevice_free;
}
INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task);
netdev->netdev_ops = &octep_vf_netdev_ops;
octep_vf_set_ethtool_ops(netdev);
netif_carrier_off(netdev);
if (octep_vf_setup_mbox(octep_vf_dev)) {
dev_err(&pdev->dev, "VF Mailbox setup failed\n");
err = -ENOMEM;
goto device_cleanup;
}
if (octep_vf_mbox_version_check(octep_vf_dev)) {
dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n");
err = -EINVAL;
goto delete_mbox;
}
if (octep_vf_mbox_get_fw_info(octep_vf_dev)) {
dev_err(&pdev->dev, "unable to get fw info\n");
err = -EINVAL;
goto delete_mbox;
}
netdev->hw_features = NETIF_F_SG;
if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags))
netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags))
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->min_mtu = OCTEP_VF_MIN_MTU;
netdev->max_mtu = OCTEP_VF_MAX_MTU;
netdev->mtu = OCTEP_VF_DEFAULT_MTU;
if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) {
netdev->hw_features |= NETIF_F_TSO;
netif_set_tso_max_size(netdev, netdev->max_mtu);
}
netdev->features |= netdev->hw_features;
octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr);
eth_hw_addr_set(netdev, octep_vf_dev->mac_addr);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
goto delete_mbox;
}
dev_info(&pdev->dev, "Device probe successful\n");
return 0;
delete_mbox:
octep_vf_delete_mbox(octep_vf_dev);
device_cleanup:
octep_vf_device_cleanup(octep_vf_dev);
netdevice_free:
free_netdev(netdev);
mem_regions_release:
pci_release_mem_regions(pdev);
disable_pci_device:
pci_disable_device(pdev);
dev_err(&pdev->dev, "Device probe failed\n");
return err;
}
/**
* octep_vf_remove() - Remove Octeon PCI device from driver control.
*
* @pdev: PCI device structure of the Octeon device.
*
* Cleanup all resources allocated for the Octeon device.
* Unregister from network device and disable the PCI device.
*/
static void octep_vf_remove(struct pci_dev *pdev)
{
struct octep_vf_device *oct = pci_get_drvdata(pdev);
struct net_device *netdev;
if (!oct)
return;
octep_vf_mbox_dev_remove(oct);
cancel_work_sync(&oct->tx_timeout_task);
netdev = oct->netdev;
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
octep_vf_delete_mbox(oct);
octep_vf_device_cleanup(oct);
pci_release_mem_regions(pdev);
free_netdev(netdev);
pci_disable_device(pdev);
}
static struct pci_driver octep_vf_driver = {
.name = OCTEP_VF_DRV_NAME,
.id_table = octep_vf_pci_id_tbl,
.probe = octep_vf_probe,
.remove = octep_vf_remove,
};
/**
* octep_vf_init_module() - Module initialization.
*
* Create common resources for the driver and register the PCI driver.
*/
static int __init octep_vf_init_module(void)
{
int ret;
pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING);
ret = pci_register_driver(&octep_vf_driver);
if (ret < 0) {
pr_err("%s: Failed to register PCI driver; err=%d\n",
OCTEP_VF_DRV_NAME, ret);
return ret;
}
return ret;
}
/**
* octep_vf_exit_module() - Module exit routine.
*
* Unregister the driver from the PCI subsystem and clean up common resources.
*/
static void __exit octep_vf_exit_module(void)
{
pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME);
pci_unregister_driver(&octep_vf_driver);
pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME);
}
module_init(octep_vf_init_module);
module_exit(octep_vf_exit_module);
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Originally from efivars.c
*
* Copyright (C) 2001,2003,2004 Dell <[email protected]>
* Copyright (C) 2004 Intel Corporation <[email protected]>
*/
#define pr_fmt(fmt) "efivars: " fmt
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/ucs2_string.h>
/* Private pointer to registered efivars */
static struct efivars *__efivars;
static DEFINE_SEMAPHORE(efivars_lock, 1);
static efi_status_t check_var_size(bool nonblocking, u32 attributes,
unsigned long size)
{
const struct efivar_operations *fops;
efi_status_t status;
fops = __efivars->ops;
if (!fops->query_variable_store)
status = EFI_UNSUPPORTED;
else
status = fops->query_variable_store(attributes, size,
nonblocking);
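/* If the firmware cannot report store usage, optimistically allow
* writes up to 64 KiB, comfortably above typical variable sizes.
*/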
if (status == EFI_UNSUPPORTED)
return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
return status;
}
/**
* efivar_is_available - check if efivars is available
*
* @return true iff efivars is currently registered
*/
bool efivar_is_available(void)
{
return __efivars != NULL;
}
EXPORT_SYMBOL_GPL(efivar_is_available);
/**
* efivars_register - register an efivars
* @efivars: efivars to register
* @ops: efivars operations
*
* Only a single efivars can be registered at any time.
*/
int efivars_register(struct efivars *efivars,
const struct efivar_operations *ops)
{
int rv;
int event;
if (down_interruptible(&efivars_lock))
return -EINTR;
if (__efivars) {
pr_warn("efivars already registered\n");
rv = -EBUSY;
goto out;
}
efivars->ops = ops;
__efivars = efivars;
if (efivar_supports_writes())
event = EFIVAR_OPS_RDWR;
else
event = EFIVAR_OPS_RDONLY;
blocking_notifier_call_chain(&efivar_ops_nh, event, NULL);
pr_info("Registered efivars operations\n");
rv = 0;
out:
up(&efivars_lock);
return rv;
}
EXPORT_SYMBOL_GPL(efivars_register);
/**
* efivars_unregister - unregister an efivars
* @efivars: efivars to unregister
*
* The caller must have already removed every entry from the list,
* failure to do so is an error.
*/
int efivars_unregister(struct efivars *efivars)
{
int rv;
if (down_interruptible(&efivars_lock))
return -EINTR;
if (!__efivars) {
pr_err("efivars not registered\n");
rv = -EINVAL;
goto out;
}
if (__efivars != efivars) {
rv = -EINVAL;
goto out;
}
pr_info("Unregistered efivars operations\n");
__efivars = NULL;
rv = 0;
out:
up(&efivars_lock);
return rv;
}
EXPORT_SYMBOL_GPL(efivars_unregister);
bool efivar_supports_writes(void)
{
return __efivars && __efivars->ops->set_variable;
}
EXPORT_SYMBOL_GPL(efivar_supports_writes);
/*
* efivar_lock() - obtain the efivar lock, wait for it if needed
* @return 0 on success, error code on failure
*/
int efivar_lock(void)
{
if (down_interruptible(&efivars_lock))
return -EINTR;
if (!__efivars->ops) {
up(&efivars_lock);
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL_NS_GPL(efivar_lock, "EFIVAR");
/*
* efivar_trylock() - obtain the efivar lock if it is free
* @return 0 on success, error code on failure
*/
int efivar_trylock(void)
{
if (down_trylock(&efivars_lock))
return -EBUSY;
if (!__efivars->ops) {
up(&efivars_lock);
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL_NS_GPL(efivar_trylock, "EFIVAR");
/*
* efivar_unlock() - release the efivar lock
*/
void efivar_unlock(void)
{
up(&efivars_lock);
}
EXPORT_SYMBOL_NS_GPL(efivar_unlock, "EFIVAR");
/*
* efivar_get_variable() - retrieve a variable identified by name/vendor
*
* Must be called with efivars_lock held.
*/
efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 *attr, unsigned long *size, void *data)
{
return __efivars->ops->get_variable(name, vendor, attr, size, data);
}
EXPORT_SYMBOL_NS_GPL(efivar_get_variable, "EFIVAR");
/*
* efivar_get_next_variable() - enumerate the next name/vendor pair
*
* Must be called with efivars_lock held.
*/
efi_status_t efivar_get_next_variable(unsigned long *name_size,
efi_char16_t *name, efi_guid_t *vendor)
{
return __efivars->ops->get_next_variable(name_size, name, vendor);
}
EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, "EFIVAR");
/*
* efivar_set_variable_locked() - set a variable identified by name/vendor
*
* Must be called with efivars_lock held. If @nonblocking is set, it will use
* non-blocking primitives so it is guaranteed not to sleep.
*/
efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data, bool nonblocking)
{
efi_set_variable_t *setvar;
efi_status_t status;
if (data_size > 0) {
status = check_var_size(nonblocking, attr,
data_size + ucs2_strsize(name, EFI_VAR_NAME_LEN));
if (status != EFI_SUCCESS)
return status;
}
/*
* If no _nonblocking variant exists, the ordinary one
* is assumed to be non-blocking.
*/
setvar = __efivars->ops->set_variable_nonblocking;
if (!setvar || !nonblocking)
setvar = __efivars->ops->set_variable;
return setvar(name, vendor, attr, data_size, data);
}
EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, "EFIVAR");
/*
* efivar_set_variable() - set a variable identified by name/vendor
*
* Can be called without holding the efivars_lock. Will sleep on obtaining the
* lock, or on obtaining other locks that are needed in order to complete the
* call.
*/
efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size, void *data)
{
efi_status_t status;
if (efivar_lock())
return EFI_ABORTED;
status = efivar_set_variable_locked(name, vendor, attr, data_size,
data, false);
efivar_unlock();
return status;
}
EXPORT_SYMBOL_NS_GPL(efivar_set_variable, "EFIVAR");
efi_status_t efivar_query_variable_info(u32 attr,
u64 *storage_space,
u64 *remaining_space,
u64 *max_variable_size)
{
if (!__efivars->ops->query_variable_info)
return EFI_UNSUPPORTED;
return __efivars->ops->query_variable_info(attr, storage_space,
remaining_space, max_variable_size);
}
EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, "EFIVAR");
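/*
* Illustrative usage of this API (sketch only, not part of the driver):
* consumers in the EFIVAR namespace bracket the accessors with the lock,
* e.g. to read a variable:
*
* int err = efivar_lock();
*
* if (err)
* return err;
* status = efivar_get_variable(name, &vendor, &attr, &size, buf);
* efivar_unlock();
*/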
|
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
**************************************************************************/
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "gma_device.h"
#include "intel_bios.h"
#include "psb_device.h"
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_reg.h"
static int psb_output_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_intel_lvds_init(dev, &dev_priv->mode_dev);
psb_intel_sdvo_init(dev, SDVOB);
return 0;
}
/*
* Poulsbo Backlight Interfaces
*/
#define BLC_PWM_PRECISION_FACTOR 100
#define BLC_PWM_FREQ_CALC_CONSTANT 32
#define MHz 1000000
#define PSB_BLC_PWM_PRECISION_FACTOR 10
#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
static int psb_backlight_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
u16 bl_max_freq;
u32 value;
u32 blc_pwm_precision_factor;
/* get bl_max_freq and pol from dev_priv */
if (!dev_priv->lvds_bl) {
dev_err(dev->dev, "Has no valid LVDS backlight info\n");
return -ENOENT;
}
bl_max_freq = dev_priv->lvds_bl->freq;
blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
core_clock = dev_priv->core_freq;
value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
value *= blc_pwm_precision_factor;
value /= bl_max_freq;
value /= blc_pwm_precision_factor;
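/* The multiply/divide by blc_pwm_precision_factor keeps precision in
* the intermediate division; the net result is
* value = (core_clock * MHz) / (BLC_PWM_FREQ_CALC_CONSTANT * bl_max_freq),
* i.e. the PWM counter period for the requested backlight frequency.
*/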
if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
return -ERANGE;
value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
REG_WRITE(BLC_PWM_CTL,
(value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
return 0;
}
/*
* Provide the Poulsbo specific chip logic and low level methods
* for power management
*/
static void psb_init_pm(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
gating &= ~3; /* Disable 2D clock gating */
gating |= 1;
PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
PSB_RSGX32(PSB_CR_CLKGATECTL);
}
/**
* psb_save_display_registers - save registers lost on suspend
* @dev: our DRM device
*
* Save the state we need in order to be able to restore the interface
* upon resume from suspend
*/
static int psb_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_connector *gma_connector;
struct drm_crtc *crtc;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
/* Display arbitration control + watermarks */
regs->saveDSPARB = PSB_RVDC32(DSPARB);
regs->saveDSPFW1 = PSB_RVDC32(DSPFW1);
regs->saveDSPFW2 = PSB_RVDC32(DSPFW2);
regs->saveDSPFW3 = PSB_RVDC32(DSPFW3);
regs->saveDSPFW4 = PSB_RVDC32(DSPFW4);
regs->saveDSPFW5 = PSB_RVDC32(DSPFW5);
regs->saveDSPFW6 = PSB_RVDC32(DSPFW6);
regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Save crtc and output state */
drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (drm_helper_crtc_in_use(crtc))
dev_priv->ops->save_crtc(crtc);
}
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
gma_connector = to_gma_connector(connector);
if (gma_connector->save)
gma_connector->save(connector);
}
drm_connector_list_iter_end(&conn_iter);
drm_modeset_unlock_all(dev);
return 0;
}
/**
* psb_restore_display_registers - restore lost register state
* @dev: our DRM device
*
* Restore register state that was lost during suspend and resume.
*/
static int psb_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_connector *gma_connector;
struct drm_crtc *crtc;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
/* Display arbitration + watermarks */
PSB_WVDC32(regs->saveDSPARB, DSPARB);
PSB_WVDC32(regs->saveDSPFW1, DSPFW1);
PSB_WVDC32(regs->saveDSPFW2, DSPFW2);
PSB_WVDC32(regs->saveDSPFW3, DSPFW3);
PSB_WVDC32(regs->saveDSPFW4, DSPFW4);
PSB_WVDC32(regs->saveDSPFW5, DSPFW5);
PSB_WVDC32(regs->saveDSPFW6, DSPFW6);
PSB_WVDC32(regs->saveCHICKENBIT, DSPCHICKENBIT);
/* Make sure VGA plane is off; it initializes to on after reset! */
PSB_WVDC32(0x80000000, VGACNTRL);
drm_modeset_lock_all(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (drm_helper_crtc_in_use(crtc))
dev_priv->ops->restore_crtc(crtc);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
gma_connector = to_gma_connector(connector);
if (gma_connector->restore)
gma_connector->restore(connector);
}
drm_connector_list_iter_end(&conn_iter);
drm_modeset_unlock_all(dev);
return 0;
}
static int psb_power_down(struct drm_device *dev)
{
return 0;
}
static int psb_power_up(struct drm_device *dev)
{
return 0;
}
/* Poulsbo */
static const struct psb_offset psb_regmap[2] = {
{
.fp0 = FPA0,
.fp1 = FPA1,
.cntr = DSPACNTR,
.conf = PIPEACONF,
.src = PIPEASRC,
.dpll = DPLL_A,
.htotal = HTOTAL_A,
.hblank = HBLANK_A,
.hsync = HSYNC_A,
.vtotal = VTOTAL_A,
.vblank = VBLANK_A,
.vsync = VSYNC_A,
.stride = DSPASTRIDE,
.size = DSPASIZE,
.pos = DSPAPOS,
.base = DSPABASE,
.surf = DSPASURF,
.addr = DSPABASE,
.status = PIPEASTAT,
.linoff = DSPALINOFF,
.tileoff = DSPATILEOFF,
.palette = PALETTE_A,
},
{
.fp0 = FPB0,
.fp1 = FPB1,
.cntr = DSPBCNTR,
.conf = PIPEBCONF,
.src = PIPEBSRC,
.dpll = DPLL_B,
.htotal = HTOTAL_B,
.hblank = HBLANK_B,
.hsync = HSYNC_B,
.vtotal = VTOTAL_B,
.vblank = VBLANK_B,
.vsync = VSYNC_B,
.stride = DSPBSTRIDE,
.size = DSPBSIZE,
.pos = DSPBPOS,
.base = DSPBBASE,
.surf = DSPBSURF,
.addr = DSPBBASE,
.status = PIPEBSTAT,
.linoff = DSPBLINOFF,
.tileoff = DSPBTILEOFF,
.palette = PALETTE_B,
}
};
static int psb_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->regmap = psb_regmap;
gma_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
return 0;
}
static void psb_chip_teardown(struct drm_device *dev)
{
gma_intel_teardown_gmbus(dev);
}
const struct psb_ops psb_chip_ops = {
.name = "Poulsbo",
.pipes = 2,
.crtcs = 2,
.hdmi_mask = (1 << 0),
.lvds_mask = (1 << 1),
.sdvo_mask = (1 << 0),
.cursor_needs_phys = 1,
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
.chip_teardown = psb_chip_teardown,
.crtc_helper = &psb_intel_helper_funcs,
.clock_funcs = &psb_clock_funcs,
.output_init = psb_output_init,
.backlight_init = psb_backlight_setup,
.backlight_set = psb_intel_lvds_set_brightness,
.backlight_name = "psb-bl",
.init_pm = psb_init_pm,
.save_regs = psb_save_display_registers,
.restore_regs = psb_restore_display_registers,
.save_crtc = gma_crtc_save,
.restore_crtc = gma_crtc_restore,
.power_down = psb_power_down,
.power_up = psb_power_up,
};
|
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
#define _ICP_QAT_FW_INIT_ADMIN_H_
#include "icp_qat_fw.h"
#define RL_MAX_RP_IDS 16
enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_INIT_AE = 0,
ICP_QAT_FW_TRNG_ENABLE = 1,
ICP_QAT_FW_TRNG_DISABLE = 2,
ICP_QAT_FW_CONSTANTS_CFG = 3,
ICP_QAT_FW_STATUS_GET = 4,
ICP_QAT_FW_COUNTERS_GET = 5,
ICP_QAT_FW_LOOPBACK = 6,
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
ICP_QAT_FW_HEARTBEAT_GET = 8,
ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10,
ICP_QAT_FW_DC_CHAIN_INIT = 11,
ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
ICP_QAT_FW_RL_INIT = 15,
ICP_QAT_FW_TIMER_GET = 19,
ICP_QAT_FW_CNV_STATS_GET = 20,
ICP_QAT_FW_PM_STATE_CONFIG = 128,
ICP_QAT_FW_PM_INFO = 129,
ICP_QAT_FW_RL_ADD = 134,
ICP_QAT_FW_RL_UPDATE = 135,
ICP_QAT_FW_RL_REMOVE = 136,
ICP_QAT_FW_TL_START = 137,
ICP_QAT_FW_TL_STOP = 138,
};
enum icp_qat_fw_init_admin_resp_status {
ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
ICP_QAT_FW_INIT_RESP_STATUS_FAIL
};
struct icp_qat_fw_init_admin_tl_rp_indexes {
__u8 rp_num_index_0;
__u8 rp_num_index_1;
__u8 rp_num_index_2;
__u8 rp_num_index_3;
};
struct icp_qat_fw_init_admin_slice_cnt {
__u8 cpr_cnt;
__u8 xlt_cnt;
__u8 dcpr_cnt;
__u8 pke_cnt;
__u8 wat_cnt;
__u8 wcp_cnt;
__u8 ucs_cnt;
__u8 cph_cnt;
__u8 ath_cnt;
};
struct icp_qat_fw_init_admin_sla_config_params {
__u32 pcie_in_cir;
__u32 pcie_in_pir;
__u32 pcie_out_cir;
__u32 pcie_out_pir;
__u32 slice_util_cir;
__u32 slice_util_pir;
__u32 ae_util_cir;
__u32 ae_util_pir;
__u16 rp_ids[RL_MAX_RP_IDS];
};
struct icp_qat_fw_init_admin_req {
__u16 init_cfg_sz;
__u8 resrvd1;
__u8 cmd_id;
__u32 resrvd2;
__u64 opaque_data;
__u64 init_cfg_ptr;
union {
struct {
__u16 ibuf_size_in_kb;
__u16 resrvd3;
};
struct {
__u32 int_timer_ticks;
};
struct {
__u32 heartbeat_ticks;
};
struct {
__u16 node_id;
__u8 node_type;
__u8 svc_type;
__u8 resrvd5[3];
__u8 rp_count;
};
__u32 idle_filter;
struct icp_qat_fw_init_admin_tl_rp_indexes rp_indexes;
};
__u32 resrvd4;
} __packed;
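/*
* Illustrative only: a heartbeat timer configuration request would set
* the command id plus the matching union member and leave the rest zeroed:
*
* struct icp_qat_fw_init_admin_req req = { };
*
* req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
* req.int_timer_ticks = ticks;
*/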
struct icp_qat_fw_init_admin_resp {
__u8 flags;
__u8 resrvd1;
__u8 status;
__u8 cmd_id;
union {
__u32 resrvd2;
struct {
__u16 version_minor_num;
__u16 version_major_num;
};
__u32 extended_features;
struct {
__u16 error_count;
__u16 latest_error;
};
};
__u64 opaque_data;
union {
__u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4];
struct {
__u32 version_patch_num;
__u8 context_id;
__u8 ae_id;
__u16 resrvd4;
__u64 resrvd5;
};
struct {
__u64 req_rec_count;
__u64 resp_sent_count;
};
struct {
__u16 compression_algos;
__u16 checksum_algos;
__u32 deflate_capabilities;
__u32 resrvd6;
__u32 lzs_capabilities;
};
struct {
__u32 cipher_algos;
__u32 hash_algos;
__u16 keygen_algos;
__u16 other;
__u16 public_key_algos;
__u16 prime_algos;
};
struct {
__u64 timestamp;
__u64 resrvd7;
};
struct {
__u32 successful_count;
__u32 unsuccessful_count;
__u64 resrvd8;
};
struct icp_qat_fw_init_admin_slice_cnt slices;
__u16 fw_capabilities;
};
} __packed;
#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC
#define ICP_QAT_FW_CAPABILITIES_GET ICP_QAT_FW_CRYPTO_CAPABILITY_GET
#define ICP_QAT_NUMBER_OF_PM_EVENTS 8
struct icp_qat_fw_init_admin_pm_info {
__u16 max_pwrreq;
__u16 min_pwrreq;
__u16 resvrd1;
__u8 pwr_state;
__u8 resvrd2;
__u32 fusectl0;
struct_group(event_counters,
__u32 sys_pm;
__u32 host_msg;
__u32 unknown;
__u32 local_ssm;
__u32 timer;
);
__u32 event_log[ICP_QAT_NUMBER_OF_PM_EVENTS];
struct_group(pm,
__u32 fw_init;
__u32 pwrreq;
__u32 status;
__u32 main;
__u32 thread;
);
struct_group(ssm,
__u32 pm_enable;
__u32 pm_active_status;
__u32 pm_managed_status;
__u32 pm_domain_status;
__u32 active_constraint;
);
__u32 resvrd3[6];
};
#endif
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DVB USB Linux driver for Alcor Micro AU6610 DVB-T USB2.0.
*
* Copyright (C) 2006 Antti Palosaari <[email protected]>
*/
#ifndef AU6610_H
#define AU6610_H
#include "dvb_usb.h"
#define AU6610_REQ_I2C_WRITE 0x14
#define AU6610_REQ_I2C_READ 0x13
#define AU6610_REQ_USB_WRITE 0x16
#define AU6610_REQ_USB_READ 0x15
#define AU6610_USB_TIMEOUT 1000
#endif
|
#include "locking-selftest-wlock.h"
#include "locking-selftest-softirq.h"
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* wm9713.c -- Codec touch driver for Wolfson WM9713 AC97 Codec.
*
* Copyright 2003, 2004, 2005, 2006, 2007, 2008 Wolfson Microelectronics PLC.
* Author: Liam Girdwood <[email protected]>
* Parts Copyright : Ian Molton <[email protected]>
* Andrew Zabolotny <[email protected]>
* Russell King <[email protected]>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/wm97xx.h>
#define TS_NAME "wm97xx"
#define WM9713_VERSION "1.00"
#define DEFAULT_PRESSURE 0xb0c0
/*
* Module parameters
*/
/*
* Set internal pull up for pen detect.
*
* Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive)
* i.e. pull up resistance = 64k Ohms / rpu.
*
* Adjust this value if you are having problems with pen detect not
* detecting any down event.
*/
static int rpu = 8;
module_param(rpu, int, 0);
MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect.");
/*
* Set current used for pressure measurement.
*
* Set pil = 2 to use 400uA
* pil = 1 to use 200uA and
* pil = 0 to disable pressure measurement.
*
* This is used to increase the range of values returned by the adc
* when measuring touchpanel pressure.
*/
static int pil;
module_param(pil, int, 0);
MODULE_PARM_DESC(pil, "Set current used for pressure measurement.");
/*
* Set threshold for pressure measurement.
*
* Pen down pressure below threshold is ignored.
*/
static int pressure = DEFAULT_PRESSURE & 0xfff;
module_param(pressure, int, 0);
MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement.");
/*
* Set adc sample delay.
*
* For accurate touchpanel measurements, some settling time may be
* required between the switch matrix applying a voltage across the
* touchpanel plate and the ADC sampling the signal.
*
* This delay can be set by setting delay = n, where n is the array
* position of the delay in the array delay_table below.
* Long delays > 1ms are supported for completeness, but are not
* recommended.
*/
static int delay = 4;
module_param(delay, int, 0);
MODULE_PARM_DESC(delay, "Set adc sample delay.");
/*
* Set five_wire = 1 to use a 5 wire touchscreen.
*
* NOTE: Five wire mode does not allow for readback of pressure.
*/
static int five_wire;
module_param(five_wire, int, 0);
MODULE_PARM_DESC(five_wire, "Set to '1' to use 5-wire touchscreen.");
/*
* Set adc mask function.
*
* Sources of glitch noise, such as signals driving an LCD display, may feed
* through to the touch screen plates and affect measurement accuracy. In
* order to minimise this, a signal may be applied to the MASK pin to delay or
* synchronise the sampling.
*
* 0 = No delay or sync
* 1 = High on pin stops conversions
* 2 = Edge triggered, edge on pin delays conversion by delay param (above)
* 3 = Edge triggered, edge on pin starts conversion after delay param
*/
static int mask;
module_param(mask, int, 0);
MODULE_PARM_DESC(mask, "Set adc mask function.");
/*
* Coordinate Polling Enable.
*
* Set to 1 to enable coordinate polling, i.e. x, y (and optionally p)
* are sampled together on every poll.
*/
static int coord;
module_param(coord, int, 0);
MODULE_PARM_DESC(coord, "Polling coordinate mode");
/*
* ADC sample delay times in uS
*/
static const int delay_table[] = {
21, /* 1 AC97 Link frames */
42, /* 2 */
84, /* 4 */
167, /* 8 */
333, /* 16 */
667, /* 32 */
1000, /* 48 */
1333, /* 64 */
2000, /* 96 */
2667, /* 128 */
3333, /* 160 */
4000, /* 192 */
4667, /* 224 */
5333, /* 256 */
6000, /* 288 */
0 /* No delay, switch matrix always on */
};
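/*
* For example, the default delay=4 selects delay_table[4], i.e. a 333 uS
* settling time (16 AC97 link frames) between the switch matrix applying
* the voltage and the ADC sampling it.
*/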
/*
* Delay after issuing a POLL command.
*
* The delay is 3 AC97 link frames + the touchpanel settling delay
*/
static inline void poll_delay(int d)
{
udelay(3 * AC97_LINK_FRAME + delay_table[d]);
}
/*
* set up the physical settings of the WM9713
*/
static void wm9713_phy_init(struct wm97xx *wm)
{
u16 dig1 = 0, dig2, dig3;
/* default values */
dig2 = WM97XX_DELAY(4) | WM97XX_SLT(5);
dig3 = WM9712_RPU(1);
/* rpu */
if (rpu) {
dig3 &= 0xffc0;
dig3 |= WM9712_RPU(rpu);
dev_info(wm->dev, "setting pen detect pull-up to %d Ohms\n",
64000 / rpu);
}
/* Five wire panel? */
if (five_wire) {
dig3 |= WM9713_45W;
dev_info(wm->dev, "setting 5-wire touchscreen mode.");
if (pil) {
dev_warn(wm->dev,
"Pressure measurement not supported in 5 "
"wire mode, disabling\n");
pil = 0;
}
}
/* touchpanel pressure */
if (pil == 2) {
dig3 |= WM9712_PIL;
dev_info(wm->dev,
"setting pressure measurement current to 400uA.\n");
} else if (pil)
dev_info(wm->dev,
"setting pressure measurement current to 200uA.\n");
if (!pil)
pressure = 0;
/* sample settling delay */
if (delay < 0 || delay > 15) {
dev_info(wm->dev, "supplied delay out of range, using default.\n");
delay = 4;
}
dev_info(wm->dev, "setting adc sample delay to %d uSecs.\n",
delay_table[delay]);
dig2 &= 0xff0f;
dig2 |= WM97XX_DELAY(delay);
/* mask */
dig3 |= ((mask & 0x3) << 4);
if (coord)
dig3 |= WM9713_WAIT;
wm->misc = wm97xx_reg_read(wm, 0x5a);
wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
wm97xx_reg_write(wm, AC97_GPIO_STICKY, 0x0);
}
static void wm9713_dig_enable(struct wm97xx *wm, int enable)
{
u16 val;
if (enable) {
val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
wm97xx_reg_write(wm, AC97_EXTENDED_MID, val & 0x7fff);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] |
WM97XX_PRP_DET_DIG);
wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */
} else {
wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] &
~WM97XX_PRP_DET_DIG);
val = wm97xx_reg_read(wm, AC97_EXTENDED_MID);
wm97xx_reg_write(wm, AC97_EXTENDED_MID, val | 0x8000);
}
}
static void wm9713_dig_restore(struct wm97xx *wm)
{
wm97xx_reg_write(wm, AC97_WM9713_DIG1, wm->dig_save[0]);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, wm->dig_save[1]);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig_save[2]);
}
static void wm9713_aux_prepare(struct wm97xx *wm)
{
memcpy(wm->dig_save, wm->dig, sizeof(wm->dig));
wm97xx_reg_write(wm, AC97_WM9713_DIG1, 0);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, 0);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, WM97XX_PRP_DET_DIG);
}
static inline int is_pden(struct wm97xx *wm)
{
return wm->dig[2] & WM9713_PDEN;
}
/*
* Read a sample from the WM9713 adc in polling mode.
*/
static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
{
u16 dig1;
int timeout = 5 * delay;
bool wants_pen = adcsel & WM97XX_PEN_DOWN;
if (wants_pen && !wm->pen_probably_down) {
u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(data & WM97XX_PEN_DOWN))
return RC_PENUP;
wm->pen_probably_down = 1;
}
/* set up digitiser */
dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
dig1 &= ~WM9713_ADCSEL_MASK;
/* WM97XX_ADCSEL_* channels need to be converted to WM9713 format */
dig1 |= 1 << ((adcsel & WM97XX_ADCSEL_MASK) >> 12);
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(adcsel);
wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL);
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
/* wait for POLL to go low */
while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL) &&
timeout) {
udelay(AC97_LINK_FRAME);
timeout--;
}
if (timeout <= 0) {
/* If PDEN is set, we can get a timeout when pen goes up */
if (is_pden(wm))
wm->pen_probably_down = 0;
else
dev_dbg(wm->dev, "adc sample timeout");
return RC_PENUP;
}
*sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (wm->mach_ops && wm->mach_ops->post_sample)
wm->mach_ops->post_sample(adcsel);
/* check we have correct sample */
if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
adcsel & WM97XX_ADCSEL_MASK,
*sample & WM97XX_ADCSEL_MASK);
return RC_PENUP;
}
if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
return RC_VALID;
}
/*
* Read a coordinate from the WM9713 adc in polling mode.
*/
static int wm9713_poll_coord(struct wm97xx *wm, struct wm97xx_data *data)
{
u16 dig1;
int timeout = 5 * delay;
if (!wm->pen_probably_down) {
u16 val = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(val & WM97XX_PEN_DOWN))
return RC_PENUP;
wm->pen_probably_down = 1;
}
/* set up digitiser */
dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
dig1 &= ~WM9713_ADCSEL_MASK;
if (pil)
dig1 |= WM9713_ADCSEL_PRES;
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
wm97xx_reg_write(wm, AC97_WM9713_DIG1,
dig1 | WM9713_POLL | WM9713_COO);
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
data->x = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
/* wait for POLL to go low */
while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL)
&& timeout) {
udelay(AC97_LINK_FRAME);
timeout--;
}
if (timeout <= 0) {
/* If PDEN is set, we can get a timeout when pen goes up */
if (is_pden(wm))
wm->pen_probably_down = 0;
else
dev_dbg(wm->dev, "adc sample timeout");
return RC_PENUP;
}
/* read back data */
data->y = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (pil)
data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
else
data->p = DEFAULT_PRESSURE;
if (wm->mach_ops && wm->mach_ops->post_sample)
wm->mach_ops->post_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y);
/* check we have correct sample */
if (!(data->x & WM97XX_ADCSEL_X) || !(data->y & WM97XX_ADCSEL_Y))
goto err;
if (pil && !(data->p & WM97XX_ADCSEL_PRES))
goto err;
if (!(data->x & WM97XX_PEN_DOWN) || !(data->y & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
return RC_VALID;
err:
return 0;
}
/*
* Sample the WM9713 touchscreen in polling mode
*/
static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
{
int rc;
if (coord) {
rc = wm9713_poll_coord(wm, data);
if (rc != RC_VALID)
return rc;
} else {
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
if (rc != RC_VALID)
return rc;
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
if (rc != RC_VALID)
return rc;
if (pil) {
rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
&data->p);
if (rc != RC_VALID)
return rc;
} else
data->p = DEFAULT_PRESSURE;
}
return RC_VALID;
}
/*
* Enable WM9713 continuous mode, i.e. touch data is streamed across
* an AC97 slot
*/
static int wm9713_acc_enable(struct wm97xx *wm, int enable)
{
u16 dig1, dig2, dig3;
int ret = 0;
dig1 = wm->dig[0];
dig2 = wm->dig[1];
dig3 = wm->dig[2];
if (enable) {
/* continuous mode */
if (wm->mach_ops->acc_startup &&
(ret = wm->mach_ops->acc_startup(wm)) < 0)
return ret;
dig1 &= ~WM9713_ADCSEL_MASK;
dig1 |= WM9713_CTC | WM9713_COO | WM9713_ADCSEL_X |
WM9713_ADCSEL_Y;
if (pil)
dig1 |= WM9713_ADCSEL_PRES;
dig2 &= ~(WM97XX_DELAY_MASK | WM97XX_SLT_MASK |
WM97XX_CM_RATE_MASK);
dig2 |= WM97XX_SLEN | WM97XX_DELAY(delay) |
WM97XX_SLT(wm->acc_slot) | WM97XX_RATE(wm->acc_rate);
dig3 |= WM9713_PDEN;
} else {
dig1 &= ~(WM9713_CTC | WM9713_COO);
dig2 &= ~WM97XX_SLEN;
dig3 &= ~WM9713_PDEN;
if (wm->mach_ops->acc_shutdown)
wm->mach_ops->acc_shutdown(wm);
}
wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1);
wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2);
wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3);
return ret;
}
struct wm97xx_codec_drv wm9713_codec = {
.id = WM9713_ID2,
.name = "wm9713",
.poll_sample = wm9713_poll_sample,
.poll_touch = wm9713_poll_touch,
.acc_enable = wm9713_acc_enable,
.phy_init = wm9713_phy_init,
.dig_enable = wm9713_dig_enable,
.dig_restore = wm9713_dig_restore,
.aux_prepare = wm9713_aux_prepare,
};
EXPORT_SYMBOL_GPL(wm9713_codec);
/* Module information */
MODULE_AUTHOR("Liam Girdwood <[email protected]>");
MODULE_DESCRIPTION("WM9713 Touch Screen Driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
*
* Module Author: Heinz Mauelshagen
*
* This file is released under the GPL.
*
* Round-robin path selector.
*/
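/*
* An illustrative dm table line using this selector (device numbers are
* hypothetical); each path takes one optional argument, its repeat_count:
*
* 0 409600 multipath 0 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
*/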
#include <linux/device-mapper.h>
#include "dm-path-selector.h"
#include <linux/slab.h>
#include <linux/module.h>
#define DM_MSG_PREFIX "multipath round-robin"
#define RR_MIN_IO 1
#define RR_VERSION "1.2.0"
/*
*---------------------------------------------------------------
* Path-handling code, paths are held in lists
*---------------------------------------------------------------
*/
struct path_info {
struct list_head list;
struct dm_path *path;
unsigned int repeat_count;
};
static void free_paths(struct list_head *paths)
{
struct path_info *pi, *next;
list_for_each_entry_safe(pi, next, paths, list) {
list_del(&pi->list);
kfree(pi);
}
}
/*
*---------------------------------------------------------------
* Round-robin selector
*---------------------------------------------------------------
*/
struct selector {
struct list_head valid_paths;
struct list_head invalid_paths;
spinlock_t lock;
};
static struct selector *alloc_selector(void)
{
struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s) {
INIT_LIST_HEAD(&s->valid_paths);
INIT_LIST_HEAD(&s->invalid_paths);
spin_lock_init(&s->lock);
}
return s;
}
static int rr_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s;
s = alloc_selector();
if (!s)
return -ENOMEM;
ps->context = s;
return 0;
}
static void rr_destroy(struct path_selector *ps)
{
struct selector *s = ps->context;
free_paths(&s->valid_paths);
free_paths(&s->invalid_paths);
kfree(s);
ps->context = NULL;
}
static int rr_status(struct path_selector *ps, struct dm_path *path,
status_type_t type, char *result, unsigned int maxlen)
{
struct path_info *pi;
int sz = 0;
if (!path)
DMEMIT("0 ");
else {
switch (type) {
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
pi = path->pscontext;
DMEMIT("%u ", pi->repeat_count);
break;
case STATUSTYPE_IMA:
*result = '\0';
break;
}
}
return sz;
}
/*
* Called during initialisation to register each path with an
* optional repeat_count.
*/
static int rr_add_path(struct path_selector *ps, struct dm_path *path,
int argc, char **argv, char **error)
{
struct selector *s = ps->context;
struct path_info *pi;
unsigned int repeat_count = RR_MIN_IO;
char dummy;
unsigned long flags;
if (argc > 1) {
*error = "round-robin ps: incorrect number of arguments";
return -EINVAL;
}
/* First path argument is number of I/Os before switching path */
if ((argc == 1) && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
*error = "round-robin ps: invalid repeat count";
return -EINVAL;
}
if (repeat_count > 1) {
DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
repeat_count = 1;
}
/* allocate the path */
pi = kmalloc(sizeof(*pi), GFP_KERNEL);
if (!pi) {
*error = "round-robin ps: Error allocating path context";
return -ENOMEM;
}
pi->path = path;
pi->repeat_count = repeat_count;
path->pscontext = pi;
spin_lock_irqsave(&s->lock, flags);
list_add_tail(&pi->list, &s->valid_paths);
spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
{
unsigned long flags;
struct selector *s = ps->context;
struct path_info *pi = p->pscontext;
spin_lock_irqsave(&s->lock, flags);
list_move(&pi->list, &s->invalid_paths);
spin_unlock_irqrestore(&s->lock, flags);
}
static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p)
{
unsigned long flags;
struct selector *s = ps->context;
struct path_info *pi = p->pscontext;
spin_lock_irqsave(&s->lock, flags);
list_move(&pi->list, &s->valid_paths);
spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
{
unsigned long flags;
struct selector *s = ps->context;
struct path_info *pi = NULL;
spin_lock_irqsave(&s->lock, flags);
if (!list_empty(&s->valid_paths)) {
pi = list_entry(s->valid_paths.next, struct path_info, list);
list_move_tail(&pi->list, &s->valid_paths);
}
spin_unlock_irqrestore(&s->lock, flags);
return pi ? pi->path : NULL;
}
static struct path_selector_type rr_ps = {
.name = "round-robin",
.module = THIS_MODULE,
.table_args = 1,
.info_args = 0,
.create = rr_create,
.destroy = rr_destroy,
.status = rr_status,
.add_path = rr_add_path,
.fail_path = rr_fail_path,
.reinstate_path = rr_reinstate_path,
.select_path = rr_select_path,
};
static int __init dm_rr_init(void)
{
int r = dm_register_path_selector(&rr_ps);
if (r < 0)
DMERR("register failed %d", r);
DMINFO("version " RR_VERSION " loaded");
return r;
}
static void __exit dm_rr_exit(void)
{
int r = dm_unregister_path_selector(&rr_ps);
if (r < 0)
DMERR("unregister failed %d", r);
}
module_init(dm_rr_init);
module_exit(dm_rr_exit);
MODULE_DESCRIPTION(DM_NAME " round-robin multipath path selector");
MODULE_AUTHOR("Sistina Software <[email protected]>");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: BSD-3-Clause
/*
* Copyright (c) 2021, AngeloGioacchino Del Regno
* <[email protected]>
* Copyright (c) 2021, Konrad Dybcio <[email protected]>
*/
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include "msm8998.dtsi"
#include "pm8005.dtsi"
#include "pm8998.dtsi"
#include "pmi8998.dtsi"
/ {
/* required for bootloader to select correct board */
qcom,msm-id = <0x124 0x20000>, <0x124 0x20001>; /* 8998v2, v2.1 */
qcom,board-id = <8 0>;
clocks {
div1_mclk: divclk1 {
compatible = "gpio-gate-clock";
pinctrl-0 = <&div_clk1>;
pinctrl-names = "default";
clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
#clock-cells = <0>;
enable-gpios = <&pm8998_gpios 13 GPIO_ACTIVE_HIGH>;
};
};
board_vbat: vbat-regulator {
compatible = "regulator-fixed";
regulator-name = "VBAT";
regulator-min-microvolt = <4000000>;
regulator-max-microvolt = <4000000>;
regulator-always-on;
regulator-boot-on;
};
cam0_vdig_vreg: cam0-vdig {
compatible = "regulator-fixed";
regulator-name = "cam0_vdig";
startup-delay-us = <0>;
enable-active-high;
gpio = <&tlmm 21 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&main_cam_pwr_en>;
};
cam1_vdig_vreg: cam1-vdig {
compatible = "regulator-fixed";
regulator-name = "cam1_vdig";
startup-delay-us = <0>;
enable-active-high;
gpio = <&tlmm 25 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&chat_cam_pwr_en>;
vin-supply = <&vreg_s3a_1p35>;
};
cam_vio_vreg: cam-vio-vreg {
compatible = "regulator-fixed";
regulator-name = "cam_vio_vreg";
startup-delay-us = <0>;
enable-active-high;
gpio = <&pmi8998_gpios 1 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&main_cam_pwr_io_en>;
vin-supply = <&vreg_lvs1a_1p8>;
};
touch_vddio_vreg: touch-vddio-vreg {
compatible = "regulator-fixed";
regulator-name = "touch_vddio_vreg";
startup-delay-us = <10000>;
gpio = <&tlmm 133 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&ts_vddio_en>;
};
vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
regulator-name = "vph_pwr";
regulator-always-on;
regulator-boot-on;
};
extcon_usb: extcon-usb {
compatible = "linux,extcon-usb-gpio";
id-gpios = <&tlmm 38 GPIO_ACTIVE_HIGH>;
vbus-gpios = <&tlmm 128 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&cc_dir_default &usb_detect_en>;
};
gpio-keys {
compatible = "gpio-keys";
label = "Side buttons";
pinctrl-0 = <&focus_n &snapshot_n &vol_down_n &vol_up_n>;
pinctrl-names = "default";
button-camera-focus {
label = "Camera Focus";
gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
linux,code = <KEY_CAMERA_FOCUS>;
debounce-interval = <15>;
};
button-camera-snapshot {
label = "Camera Snapshot";
gpios = <&pm8998_gpios 7 GPIO_ACTIVE_LOW>;
linux,code = <KEY_CAMERA>;
debounce-interval = <15>;
};
button-vol-down {
label = "Volume Down";
gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
linux,code = <KEY_VOLUMEDOWN>;
wakeup-source;
debounce-interval = <15>;
};
button-vol-up {
label = "Volume Up";
gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
linux,code = <KEY_VOLUMEUP>;
wakeup-source;
debounce-interval = <15>;
};
};
gpio-hall-sensor {
compatible = "gpio-keys";
label = "Hall sensors";
pinctrl-names = "default";
pinctrl-0 = <&acc_cover_open>;
event-hall-sensor0 {
label = "Cover Hall Sensor";
gpios = <&tlmm 124 GPIO_ACTIVE_LOW>;
linux,input-type = <EV_SW>;
linux,code = <SW_LID>;
wakeup-source;
debounce-interval = <30>;
};
};
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
hyp_mem: memory@85800000 {
reg = <0x0 0x85800000 0x0 0x3700000>;
no-map;
};
cont_splash_mem: memory@9d400000 {
reg = <0x0 0x9d400000 0x0 0x2400000>;
no-map;
};
zap_shader_region: memory@f6400000 {
compatible = "shared-dma-pool";
reg = <0x0 0xf6400000 0x0 0x2000>;
no-map;
};
adsp_region: memory@fe000000 {
reg = <0x0 0xfe000000 0x0 0x800000>;
no-map;
};
qseecom_region: memory@fe800000 {
reg = <0x0 0xfe800000 0x0 0x1400000>;
no-map;
};
ramoops@ffc00000 {
compatible = "ramoops";
reg = <0x0 0xffc00000 0x0 0x100000>;
record-size = <0x10000>;
console-size = <0x60000>;
ftrace-size = <0x10000>;
pmsg-size = <0x20000>;
ecc-size = <16>;
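/* console + ftrace + pmsg carve 0x90000 out of the 0x100000 region;
* the remainder is split into 0x10000-sized dump records (minus the
* per-block ECC overhead).
*/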
};
};
vibrator {
compatible = "gpio-vibrator";
enable-gpios = <&pmi8998_gpios 5 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&vib_ldo_en>;
};
};
&blsp1_i2c5 {
status = "okay";
clock-frequency = <355000>;
touchscreen@2c {
compatible = "syna,rmi4-i2c";
reg = <0x2c>;
#address-cells = <1>;
#size-cells = <0>;
interrupts-extended = <&tlmm 125 IRQ_TYPE_EDGE_FALLING>;
pinctrl-names = "default";
pinctrl-0 = <&ts_int_n>;
vdd-supply = <&vreg_l28_3p0>;
vio-supply = <&touch_vddio_vreg>;
syna,reset-delay-ms = <220>;
syna,startup-delay-ms = <1000>;
rmi4-f01@1 {
reg = <0x01>;
syna,nosleep-mode = <1>;
};
rmi4-f11@11 {
reg = <0x11>;
syna,sensor-type = <1>;
};
};
};
&blsp1_i2c5_sleep {
bias-disable;
};
&blsp1_uart3 {
status = "okay";
bluetooth {
compatible = "qcom,wcn3990-bt";
vddio-supply = <&vreg_s4a_1p8>;
vddxo-supply = <&vreg_l7a_1p8>;
vddrf-supply = <&vreg_l17a_1p3>;
vddch0-supply = <&vreg_l25a_3p3>;
max-speed = <3200000>;
clocks = <&rpmcc RPM_SMD_RF_CLK2_PIN>;
};
};
&blsp2_uart1 {
status = "okay";
};
&blsp2_i2c2 {
status = "okay";
proximity@29 {
compatible = "st,vl53l0x";
reg = <0x29>;
interrupt-parent = <&tlmm>;
interrupts = <22 IRQ_TYPE_EDGE_FALLING>;
reset-gpios = <&tlmm 27 GPIO_ACTIVE_LOW>;
vdd-supply = <&cam_vio_vreg>;
pinctrl-names = "default";
pinctrl-0 = <&tof_int_n &tof_reset>;
};
};
&ibb {
regulator-min-microamp = <800000>;
regulator-max-microamp = <800000>;
regulator-enable-ramp-delay = <200>;
regulator-over-current-protection;
regulator-pull-down;
regulator-ramp-delay = <1>;
regulator-settling-time-up-us = <600>;
regulator-settling-time-down-us = <1000>;
regulator-soft-start;
qcom,discharge-resistor-kohms = <300>;
};
&lab {
regulator-min-microamp = <200000>;
regulator-max-microamp = <200000>;
regulator-enable-ramp-delay = <500>;
regulator-over-current-protection;
regulator-pull-down;
regulator-ramp-delay = <1>;
regulator-settling-time-up-us = <50000>;
regulator-settling-time-down-us = <3000>;
regulator-soft-start;
};
&pm8005_gpios {
gpio-line-names = "NC", /* GPIO_1 */
"NC",
"SLB",
"OPTION_1_PM8005";
};
&pm8005_regulators {
/* VDD_GFX supply */
pm8005_s1: s1 {
regulator-min-microvolt = <524000>;
regulator-max-microvolt = <1088000>;
regulator-enable-ramp-delay = <500>;
/* Hack until we rig up the gpu consumer */
regulator-always-on;
};
};
&pm8998_gpios {
gpio-line-names = "UIM_BATT_ALARM", /* GPIO_1 */
"NC",
"WLAN_SW_CTRL (DISALLOWED)",
"SSC_PWR_EN",
"VOL_DOWN_N",
"VOL_UP_N",
"SNAPSHOT_N",
"FOCUS_N",
"FLASH_THERM",
"", /* GPIO_10 */
"",
"",
"DIV_CLK1",
"NC",
"NC (DISALLOWED)",
"DIV_CLK3",
"NC",
"NC",
"NC",
"NC (DISALLOWED)", /* GPIO_20 */
"NFC_CLK_REQ",
"NC (DISALLOWED)",
"WCSS_PWR_REQ",
"OPTION_1 (DISALLOWED)",
"OPTION_2 (DISALLOWED)",
"PM_SLB (DISALLOWED)";
vol_down_n: vol-down-n-state {
pins = "gpio5";
function = PMIC_GPIO_FUNC_NORMAL;
bias-pull-up;
input-enable;
qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
};
vol_up_n: vol-up-n-state {
pins = "gpio6";
function = PMIC_GPIO_FUNC_NORMAL;
bias-pull-up;
input-enable;
qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
};
focus_n: focus-n-state {
pins = "gpio7";
function = PMIC_GPIO_FUNC_NORMAL;
bias-pull-up;
input-enable;
qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
};
snapshot_n: snapshot-n-state {
pins = "gpio8";
function = PMIC_GPIO_FUNC_NORMAL;
bias-pull-up;
input-enable;
qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
};
div_clk1: div-clk1-state {
pins = "gpio13";
function = "func2";
power-source = <0>;
};
};
&pmi8998_gpios {
gpio-line-names = "MAIN_CAM_PWR_IO_EN", /* GPIO_1 */
"NC",
"NC",
"TYPEC_UUSB_SEL",
"VIB_LDO_EN",
"NC",
"DISPLAY_TYPE_SEL",
"NC",
"NC",
"NC", /* GPIO_10 */
"NC",
"DIV_CLK3",
"SPMI_I2C_SEL",
"NC";
main_cam_pwr_io_en: main-cam-pwr-io-en-state {
pins = "gpio1";
function = PMIC_GPIO_FUNC_NORMAL;
bias-disable;
drive-push-pull;
output-low;
qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
power-source = <1>;
};
vib_ldo_en: vib-ldo-en-state {
pins = "gpio5";
function = PMIC_GPIO_FUNC_NORMAL;
bias-disable;
drive-push-pull;
output-low;
qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
power-source = <0>;
};
};
&pmi8998_lpg {
qcom,power-source = <1>;
status = "okay";
multi-led {
color = <LED_COLOR_ID_RGB>;
function = LED_FUNCTION_STATUS;
#address-cells = <1>;
#size-cells = <0>;
led@3 {
reg = <3>;
color = <LED_COLOR_ID_BLUE>;
};
led@4 {
reg = <4>;
color = <LED_COLOR_ID_GREEN>;
};
led@5 {
reg = <5>;
color = <LED_COLOR_ID_RED>;
};
};
};
&qusb2phy {
status = "okay";
vdda-pll-supply = <&vreg_l12a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
};
&rpm_requests {
regulators-0 {
compatible = "qcom,rpm-pm8998-regulators";
vdd_s1-supply = <&vph_pwr>;
vdd_s2-supply = <&vph_pwr>;
vdd_s3-supply = <&vph_pwr>;
vdd_s4-supply = <&vph_pwr>;
vdd_s5-supply = <&vph_pwr>;
vdd_s6-supply = <&vph_pwr>;
vdd_s7-supply = <&vph_pwr>;
vdd_s8-supply = <&vph_pwr>;
vdd_s9-supply = <&vph_pwr>;
vdd_s10-supply = <&vph_pwr>;
vdd_s11-supply = <&vph_pwr>;
vdd_s12-supply = <&vph_pwr>;
vdd_s13-supply = <&vph_pwr>;
vdd_l1_l27-supply = <&vreg_s7a_1p025>;
vdd_l2_l8_l17-supply = <&vreg_s3a_1p35>;
vdd_l3_l11-supply = <&vreg_s7a_1p025>;
vdd_l4_l5-supply = <&vreg_s7a_1p025>;
vdd_l6-supply = <&vreg_s5a_2p04>;
vdd_l7_l12_l14_l15-supply = <&vreg_s5a_2p04>;
vdd_l9-supply = <&vreg_bob>;
vdd_l10_l23_l25-supply = <&vreg_bob>;
vdd_l13_l19_l21-supply = <&vreg_bob>;
vdd_l16_l28-supply = <&vreg_bob>;
vdd_l18_l22-supply = <&vreg_bob>;
vdd_l20_l24-supply = <&vreg_bob>;
vdd_l26-supply = <&vreg_s3a_1p35>;
vdd_lvs1_lvs2-supply = <&vreg_s4a_1p8>;
vreg_s3a_1p35: s3 {
regulator-min-microvolt = <1352000>;
regulator-max-microvolt = <1352000>;
};
vreg_s4a_1p8: s4 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-system-load = <100000>;
regulator-allow-set-load;
};
vreg_s5a_2p04: s5 {
regulator-min-microvolt = <1904000>;
regulator-max-microvolt = <2032000>;
};
vreg_s7a_1p025: s7 {
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1028000>;
};
vreg_l1a_0p875: l1 {
regulator-min-microvolt = <880000>;
regulator-max-microvolt = <880000>;
regulator-system-load = <73400>;
regulator-allow-set-load;
};
vreg_l2a_1p2: l2 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-system-load = <12560>;
regulator-allow-set-load;
};
vreg_l3a_1p0: l3 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
};
vreg_l5a_0p8: l5 {
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <800000>;
};
vreg_l6a_1p8: l6 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
vreg_l7a_1p8: l7 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
vreg_l8a_1p2: l8 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
};
vreg_l9a_1p8: l9 {
regulator-min-microvolt = <1808000>;
regulator-max-microvolt = <2960000>;
};
vreg_l10a_1p8: l10 {
regulator-min-microvolt = <1808000>;
regulator-max-microvolt = <2960000>;
};
vreg_l11a_1p0: l11 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
};
vreg_l12a_1p8: l12 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
vreg_l13a_2p95: l13 {
regulator-min-microvolt = <1808000>;
regulator-max-microvolt = <2960000>;
regulator-allow-set-load;
};
vreg_l14a_1p85: l14 {
regulator-min-microvolt = <1848000>;
regulator-max-microvolt = <1856000>;
regulator-system-load = <32000>;
regulator-allow-set-load;
};
vreg_l15a_1p8: l15 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
vreg_l16a_2p7: l16 {
regulator-min-microvolt = <2704000>;
regulator-max-microvolt = <2704000>;
};
vreg_l17a_1p3: l17 {
regulator-min-microvolt = <1304000>;
regulator-max-microvolt = <1304000>;
};
vreg_l18a_2p85: l18 { };
vreg_l19a_2p7: l19 {
regulator-min-microvolt = <2696000>;
regulator-max-microvolt = <2704000>;
};
vreg_l20a_2p95: l20 {
regulator-min-microvolt = <2960000>;
regulator-max-microvolt = <2960000>;
regulator-system-load = <10000>;
regulator-allow-set-load;
};
vreg_l21a_2p95: l21 {
regulator-min-microvolt = <2960000>;
regulator-max-microvolt = <2960000>;
regulator-system-load = <800000>;
regulator-allow-set-load;
};
vreg_l22a_2p85: l22 { };
vreg_l23a_3p3: l23 {
regulator-min-microvolt = <3312000>;
regulator-max-microvolt = <3312000>;
};
vreg_l24a_3p075: l24 {
regulator-min-microvolt = <3088000>;
regulator-max-microvolt = <3088000>;
};
vreg_l25a_3p3: l25 {
regulator-min-microvolt = <3104000>;
regulator-max-microvolt = <3312000>;
};
vreg_l26a_1p2: l26 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-allow-set-load;
};
vreg_l28_3p0: l28 {
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
};
vreg_lvs1a_1p8: lvs1 { };
vreg_lvs2a_1p8: lvs2 { };
};
regulators-1 {
compatible = "qcom,rpm-pmi8998-regulators";
vdd_bob-supply = <&vph_pwr>;
vreg_bob: bob {
regulator-min-microvolt = <3312000>;
regulator-max-microvolt = <3600000>;
};
};
};
&sdhc2 {
status = "okay";
cd-gpios = <&tlmm 95 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&vreg_l21a_2p95>;
vqmmc-supply = <&vreg_l13a_2p95>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&sdc2_on &sdc2_cd>;
pinctrl-1 = <&sdc2_off &sdc2_cd>;
};
&tlmm {
gpio-reserved-ranges = <0 4>, <81 4>;
gpio-line-names = "", /* GPIO_0 */
"",
"",
"",
"DEBUG_UART_TX",
"DEBUG_UART_RX",
"CAMSENSOR_I2C_SDA",
"CAMSENSOR_I2C_SCL",
"NC",
"NC",
"MDP_VSYNC_P", /* GPIO_10 */
"RGBC_IR_INT",
"NFC_VEN",
"CAM_MCLK0",
"CAM_MCLK1",
"NC",
"NC",
"CCI_I2C_SDA0",
"CCI_I2C_SCL0",
"CCI_I2C_SDA1",
"CCI_I2C_SCL1", /* GPIO_20 */
"MAIN_CAM_PWR_EN",
"TOF_INT_N",
"NC",
"NC",
"CHAT_CAM_PWR_EN",
"NC",
"TOF_RESET_N",
"CAM2_RSTN",
"NC",
"CAM1_RSTN", /* GPIO_30 */
"NC",
"NC",
"NC",
"NC",
"NC",
"NC",
"NC",
"CC_DIR",
"UIM2_DETECT_EN",
"FP_RESET_N", /* GPIO_40 */
"NC",
"NC",
"NC",
"NC",
"BT_HCI_UART_TXD",
"BT_HCI_UART_RXD",
"BT_HCI_UART_CTS_N",
"BT_HCI_UART_RFR_N",
"NC",
"NC", /* GPIO_50 */
"NC",
"NC",
"CODEC_INT2_N",
"CODEC_INT1_N",
"APPS_I2C_SDA",
"APPS_I2C_SCL",
"FORCED_USB_BOOT",
"NC",
"NC",
"NC", /* GPIO_60 */
"NC",
"NC",
"TRAY2_DET_DS",
"CODEC_RST_N",
"WSA_L_EN",
"WSA_R_EN",
"NC",
"NC",
"NC",
"LPASS_SLIMBUS_CLK", /* GPIO_70 */
"LPASS_SLIMBUS_DATA0",
"LPASS_SLIMBUS_DATA1",
"BT_FM_SLIMBUS_DATA",
"BT_FM_SLIMBUS_CLK",
"NC",
"RF_LCD_ID_EN",
"NC",
"NC",
"NC",
"NC", /* GPIO_80 */
"SW_SERVICE",
"TX_GTR_THRES_IN",
"HW_ID0",
"HW_ID1",
"NC",
"NC",
"TS_I2C_SDA",
"TS_I2C_SCL",
"TS_RESET_N",
"NC", /* GPIO_90 */
"NC",
"NFC_IRQ",
"NFC_DWLD_EN",
"DISP_RESET_N",
"TRAY2_DET",
"CAM_SOF",
"RFFE6_CLK",
"RFFE6_DATA",
"DEBUG_GPIO0",
"DEBUG_GPIO1", /* GPIO_100 */
"GRFC4",
"NC",
"NC",
"RSVD",
"UIM2_DATA",
"UIM2_CLK",
"UIM2_RESET",
"UIM2_PRESENT",
"UIM1_DATA",
"UIM1_CLK", /* GPIO_110 */
"UIM1_RST",
"UIM1_PRESENT",
"UIM_BATT_ALARM",
"RSVD",
"NC",
"NC",
"ACCEL_INT",
"GYRO_INT",
"COMPASS_INT",
"ALS_PROX_INT_N", /* GPIO_120 */
"FP_INT_N",
"NC",
"BAROMETER_INT",
"ACC_COVER_OPEN",
"TS_INT_N",
"NC",
"NC",
"USB_DETECT_EN",
"NC",
"QLINK_REQUEST", /* GPIO_130 */
"QLINK_ENABLE",
"NC",
"NC",
"WMSS_RESET_N",
"PA_INDICATOR_OR",
"NC",
"RFFE3_DATA",
"RFFE3_CLK",
"RFFE4_DATA",
"RFFE4_CLK", /* GPIO_140 */
"RFFE5_DATA",
"RFFE5_CLK",
"GNSS_EN",
"MSS_LTE_COXM_TXD",
"MSS_LTE_COXM_RXD",
"RFFE2_DATA",
"RFFE2_CLK",
"RFFE1_DATA",
"RFFE1_CLK";
mdp_vsync_p: mdp-vsync-p-state {
pins = "gpio10";
function = "mdp_vsync_a";
drive-strength = <2>;
bias-pull-down;
};
nfc_ven: nfc-ven-state {
pins = "gpio12";
function = "gpio";
bias-disable;
drive-strength = <2>;
output-low;
};
cam_mclk0_active: cam-mclk0-active-state {
pins = "gpio13";
function = "cam_mclk";
drive-strength = <2>;
bias-disable;
};
cam_mclk1_active: cam-mclk1-active-state {
pins = "gpio14";
function = "cam_mclk";
drive-strength = <2>;
bias-disable;
};
cci0_default: cci0-default-state {
pins = "gpio18", "gpio19";
function = "cci_i2c";
bias-disable;
drive-strength = <2>;
};
cci1_default: cci1-default-state {
pins = "gpio19", "gpio20";
function = "cci_i2c";
bias-disable;
drive-strength = <2>;
};
main_cam_pwr_en: main-cam-pwr-en-default-state {
pins = "gpio21";
function = "gpio";
bias-disable;
drive-strength = <2>;
};
tof_int_n: tof-int-n-state {
pins = "gpio22";
function = "gpio";
bias-pull-up;
drive-strength = <2>;
};
chat_cam_pwr_en: chat-cam-pwr-en-default-state {
pins = "gpio25";
function = "gpio";
bias-disable;
drive-strength = <2>;
};
tof_reset: tof-reset-state {
pins = "gpio27";
function = "gpio";
bias-disable;
drive-strength = <2>;
};
cc_dir_default: cc-dir-active-state {
pins = "gpio38";
function = "gpio";
bias-disable;
drive-strength = <16>;
};
acc_cover_open: acc-cover-open-state {
pins = "gpio124";
function = "gpio";
bias-disable;
drive-strength = <2>;
};
ts_int_n: ts-int-n-state {
pins = "gpio125";
function = "gpio";
drive-strength = <8>;
bias-pull-up;
};
usb_detect_en: usb-detect-en-active-state {
pins = "gpio128";
function = "gpio";
bias-disable;
drive-strength = <2>;
output-low;
};
ts_vddio_en: ts-vddio-en-default-state {
pins = "gpio133";
function = "gpio";
bias-disable;
drive-strength = <2>;
output-low;
};
};
/*
* WARNING:
* Keep UFS disabled until the required card quirks are merged: without
* them, the bootloader is erased as soon as the UFS card is probed,
* hard-bricking the device beyond recovery.
*/
&ufshc {
status = "disabled";
};
&ufsphy {
status = "disabled";
};
&usb3 {
status = "okay";
};
&usb3_dwc3 {
/* Force to peripheral until we have Type-C hooked up */
dr_mode = "peripheral";
extcon = <&extcon_usb>;
};
&usb3phy {
status = "okay";
vdda-phy-supply = <&vreg_l1a_0p875>;
vdda-pll-supply = <&vreg_l2a_1p2>;
};
|
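The *-supply properties in the board file above (for example vdda-phy-supply = <&vreg_l1a_0p875> under &usb3phy) are resolved at probe time through the regulator framework. A minimal consumer sketch, assuming a hypothetical driver bound to a node carrying a vdda-phy-supply property; illustrative only, not part of the board file:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/*
 * Hypothetical consumer sketch: resolve the "vdda-phy" supply that the
 * device tree above wires to vreg_l1a_0p875, then enable it. "dev" is
 * assumed to be the probing device.
 */
static int example_enable_phy_supply(struct device *dev)
{
	struct regulator *vdda_phy;
	/* Looks up the vdda-phy-supply phandle on this device's DT node */
	vdda_phy = devm_regulator_get(dev, "vdda-phy");
	if (IS_ERR(vdda_phy))
		return PTR_ERR(vdda_phy);
	/* The RPM clamps the request to the min/max microvolts set above */
	return regulator_enable(vdda_phy);
}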
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hisilicon Hi6220 clock driver
*
* Copyright (c) 2015 Hisilicon Limited.
*
* Author: Bintian Wang <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <dt-bindings/clock/hi6220-clock.h>
#include "clk.h"
/* clocks in AO (always on) controller */
static struct hisi_fixed_rate_clock hi6220_fixed_rate_clks[] __initdata = {
{ HI6220_REF32K, "ref32k", NULL, 0, 32764, },
{ HI6220_CLK_TCXO, "clk_tcxo", NULL, 0, 19200000, },
{ HI6220_MMC1_PAD, "mmc1_pad", NULL, 0, 100000000, },
{ HI6220_MMC2_PAD, "mmc2_pad", NULL, 0, 100000000, },
{ HI6220_MMC0_PAD, "mmc0_pad", NULL, 0, 200000000, },
{ HI6220_PLL_BBP, "bbppll0", NULL, 0, 245760000, },
{ HI6220_PLL_GPU, "gpupll", NULL, 0, 1000000000,},
{ HI6220_PLL1_DDR, "ddrpll1", NULL, 0, 1066000000,},
{ HI6220_PLL_SYS, "syspll", NULL, 0, 1190400000,},
{ HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, 0, 1190400000,},
{ HI6220_DDR_SRC, "ddr_sel_src", NULL, 0, 1200000000,},
{ HI6220_PLL_MEDIA, "media_pll", NULL, 0, 1440000000,},
{ HI6220_PLL_DDR, "ddrpll0", NULL, 0, 1600000000,},
};
static struct hisi_fixed_factor_clock hi6220_fixed_factor_clks[] __initdata = {
{ HI6220_300M, "clk_300m", "syspll", 1, 4, 0, },
{ HI6220_150M, "clk_150m", "clk_300m", 1, 2, 0, },
{ HI6220_PICOPHY_SRC, "picophy_src", "clk_150m", 1, 4, 0, },
{ HI6220_MMC0_SRC_SEL, "mmc0srcsel", "mmc0_sel", 1, 8, 0, },
{ HI6220_MMC1_SRC_SEL, "mmc1srcsel", "mmc1_sel", 1, 8, 0, },
{ HI6220_MMC2_SRC_SEL, "mmc2srcsel", "mmc2_sel", 1, 8, 0, },
{ HI6220_VPU_CODEC, "vpucodec", "codec_jpeg_aclk", 1, 2, 0, },
{ HI6220_MMC0_SMP, "mmc0_sample", "mmc0_sel", 1, 8, 0, },
{ HI6220_MMC1_SMP, "mmc1_sample", "mmc1_sel", 1, 8, 0, },
{ HI6220_MMC2_SMP, "mmc2_sample", "mmc2_sel", 1, 8, 0, },
};
static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = {
{ HI6220_WDT0_PCLK, "wdt0_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, },
{ HI6220_WDT1_PCLK, "wdt1_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, },
{ HI6220_WDT2_PCLK, "wdt2_pclk", "ref32k", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, },
{ HI6220_TIMER0_PCLK, "timer0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 15, 0, },
{ HI6220_TIMER1_PCLK, "timer1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 16, 0, },
{ HI6220_TIMER2_PCLK, "timer2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 17, 0, },
{ HI6220_TIMER3_PCLK, "timer3_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 18, 0, },
{ HI6220_TIMER4_PCLK, "timer4_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 19, 0, },
{ HI6220_TIMER5_PCLK, "timer5_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 20, 0, },
{ HI6220_TIMER6_PCLK, "timer6_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 21, 0, },
{ HI6220_TIMER7_PCLK, "timer7_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 22, 0, },
{ HI6220_TIMER8_PCLK, "timer8_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 23, 0, },
{ HI6220_UART0_PCLK, "uart0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 24, 0, },
{ HI6220_RTC0_PCLK, "rtc0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 25, 0, },
{ HI6220_RTC1_PCLK, "rtc1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 26, 0, },
};
static void __init hi6220_clk_ao_init(struct device_node *np)
{
struct hisi_clock_data *clk_data_ao;
clk_data_ao = hisi_clk_init(np, HI6220_AO_NR_CLKS);
if (!clk_data_ao)
return;
hisi_clk_register_fixed_rate(hi6220_fixed_rate_clks,
ARRAY_SIZE(hi6220_fixed_rate_clks), clk_data_ao);
hisi_clk_register_fixed_factor(hi6220_fixed_factor_clks,
ARRAY_SIZE(hi6220_fixed_factor_clks), clk_data_ao);
hisi_clk_register_gate_sep(hi6220_separated_gate_clks_ao,
ARRAY_SIZE(hi6220_separated_gate_clks_ao), clk_data_ao);
}
/* Allow reset driver to probe as well */
CLK_OF_DECLARE_DRIVER(hi6220_clk_ao, "hisilicon,hi6220-aoctrl", hi6220_clk_ao_init);
/* clocks in sysctrl */
static const char *mmc0_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", };
static const char *mmc0_mux1_p[] __initdata = { "mmc0_mux0", "pll_media_gate", };
static const char *mmc0_src_p[] __initdata = { "mmc0srcsel", "mmc0_div", };
static const char *mmc1_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", };
static const char *mmc1_mux1_p[] __initdata = { "mmc1_mux0", "pll_media_gate", };
static const char *mmc1_src_p[] __initdata = { "mmc1srcsel", "mmc1_div", };
static const char *mmc2_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", };
static const char *mmc2_mux1_p[] __initdata = { "mmc2_mux0", "pll_media_gate", };
static const char *mmc2_src_p[] __initdata = { "mmc2srcsel", "mmc2_div", };
static const char *mmc0_sample_in[] __initdata = { "mmc0_sample", "mmc0_pad", };
static const char *mmc1_sample_in[] __initdata = { "mmc1_sample", "mmc1_pad", };
static const char *mmc2_sample_in[] __initdata = { "mmc2_sample", "mmc2_pad", };
static const char *uart1_src[] __initdata = { "clk_tcxo", "clk_150m", };
static const char *uart2_src[] __initdata = { "clk_tcxo", "clk_150m", };
static const char *uart3_src[] __initdata = { "clk_tcxo", "clk_150m", };
static const char *uart4_src[] __initdata = { "clk_tcxo", "clk_150m", };
static const char *hifi_src[] __initdata = { "syspll", "pll_media_gate", };
static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
{ HI6220_MMC0_CLK, "mmc0_clk", "mmc0_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0, 0, },
{ HI6220_MMC0_CIUCLK, "mmc0_ciuclk", "mmc0_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0, 0, },
{ HI6220_MMC1_CLK, "mmc1_clk", "mmc1_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1, 0, },
{ HI6220_MMC1_CIUCLK, "mmc1_ciuclk", "mmc1_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1, 0, },
{ HI6220_MMC2_CLK, "mmc2_clk", "mmc2_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2, 0, },
{ HI6220_MMC2_CIUCLK, "mmc2_ciuclk", "mmc2_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2, 0, },
{ HI6220_USBOTG_HCLK, "usbotg_hclk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 4, 0, },
{ HI6220_CLK_PICOPHY, "clk_picophy", "cs_dapb", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 5, 0, },
{ HI6220_HIFI, "hifi_clk", "hifi_div", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x210, 0, 0, },
{ HI6220_DACODEC_PCLK, "dacodec_pclk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x210, 5, 0, },
{ HI6220_EDMAC_ACLK, "edmac_aclk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x220, 2, 0, },
{ HI6220_CS_ATB, "cs_atb", "cs_atb_div", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 0, 0, },
{ HI6220_I2C0_CLK, "i2c0_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 1, 0, },
{ HI6220_I2C1_CLK, "i2c1_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 2, 0, },
{ HI6220_I2C2_CLK, "i2c2_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 3, 0, },
{ HI6220_I2C3_CLK, "i2c3_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 4, 0, },
{ HI6220_UART1_PCLK, "uart1_pclk", "uart1_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 5, 0, },
{ HI6220_UART2_PCLK, "uart2_pclk", "uart2_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 6, 0, },
{ HI6220_UART3_PCLK, "uart3_pclk", "uart3_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 7, 0, },
{ HI6220_UART4_PCLK, "uart4_pclk", "uart4_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 8, 0, },
{ HI6220_SPI_CLK, "spi_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 9, 0, },
{ HI6220_TSENSOR_CLK, "tsensor_clk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 12, 0, },
{ HI6220_DAPB_CLK, "dapb_clk", "cs_dapb", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x230, 18, 0, },
{ HI6220_MMU_CLK, "mmu_clk", "ddrc_axi1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x240, 11, 0, },
{ HI6220_HIFI_SEL, "hifi_sel", "hifi_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 0, 0, },
{ HI6220_MMC0_SYSPLL, "mmc0_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 1, 0, },
{ HI6220_MMC1_SYSPLL, "mmc1_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 2, 0, },
{ HI6220_MMC2_SYSPLL, "mmc2_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 3, 0, },
{ HI6220_MMC0_SEL, "mmc0_sel", "mmc0_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 6, 0, },
{ HI6220_MMC1_SEL, "mmc1_sel", "mmc1_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 7, 0, },
{ HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, },
{ HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
{ HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
{ HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x270, 12, 0, },
};
static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
{ HI6220_MMC0_SRC, "mmc0_src", mmc0_src_p, ARRAY_SIZE(mmc0_src_p), CLK_SET_RATE_PARENT, 0x4, 0, 1, 0, },
{ HI6220_MMC0_SMP_IN, "mmc0_smp_in", mmc0_sample_in, ARRAY_SIZE(mmc0_sample_in), CLK_SET_RATE_PARENT, 0x4, 0, 1, 0, },
{ HI6220_MMC1_SRC, "mmc1_src", mmc1_src_p, ARRAY_SIZE(mmc1_src_p), CLK_SET_RATE_PARENT, 0x4, 2, 1, 0, },
{ HI6220_MMC1_SMP_IN, "mmc1_smp_in", mmc1_sample_in, ARRAY_SIZE(mmc1_sample_in), CLK_SET_RATE_PARENT, 0x4, 2, 1, 0, },
{ HI6220_MMC2_SRC, "mmc2_src", mmc2_src_p, ARRAY_SIZE(mmc2_src_p), CLK_SET_RATE_PARENT, 0x4, 4, 1, 0, },
{ HI6220_MMC2_SMP_IN, "mmc2_smp_in", mmc2_sample_in, ARRAY_SIZE(mmc2_sample_in), CLK_SET_RATE_PARENT, 0x4, 4, 1, 0, },
{ HI6220_HIFI_SRC, "hifi_src", hifi_src, ARRAY_SIZE(hifi_src), CLK_SET_RATE_PARENT, 0x400, 0, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_UART1_SRC, "uart1_src", uart1_src, ARRAY_SIZE(uart1_src), CLK_SET_RATE_PARENT, 0x400, 1, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_UART2_SRC, "uart2_src", uart2_src, ARRAY_SIZE(uart2_src), CLK_SET_RATE_PARENT, 0x400, 2, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_UART3_SRC, "uart3_src", uart3_src, ARRAY_SIZE(uart3_src), CLK_SET_RATE_PARENT, 0x400, 3, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_UART4_SRC, "uart4_src", uart4_src, ARRAY_SIZE(uart4_src), CLK_SET_RATE_PARENT, 0x400, 4, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_MMC0_MUX0, "mmc0_mux0", mmc0_mux0_p, ARRAY_SIZE(mmc0_mux0_p), CLK_SET_RATE_PARENT, 0x400, 5, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_MMC1_MUX0, "mmc1_mux0", mmc1_mux0_p, ARRAY_SIZE(mmc1_mux0_p), CLK_SET_RATE_PARENT, 0x400, 11, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_MMC2_MUX0, "mmc2_mux0", mmc2_mux0_p, ARRAY_SIZE(mmc2_mux0_p), CLK_SET_RATE_PARENT, 0x400, 12, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_MMC0_MUX1, "mmc0_mux1", mmc0_mux1_p, ARRAY_SIZE(mmc0_mux1_p), CLK_SET_RATE_PARENT, 0x400, 13, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_MMC1_MUX1, "mmc1_mux1", mmc1_mux1_p, ARRAY_SIZE(mmc1_mux1_p), CLK_SET_RATE_PARENT, 0x400, 14, 1, CLK_MUX_HIWORD_MASK,},
{ HI6220_MMC2_MUX1, "mmc2_mux1", mmc2_mux1_p, ARRAY_SIZE(mmc2_mux1_p), CLK_SET_RATE_PARENT, 0x400, 15, 1, CLK_MUX_HIWORD_MASK,},
};
static struct hi6220_divider_clock hi6220_div_clks_sys[] __initdata = {
{ HI6220_CLK_BUS, "clk_bus", "clk_300m", CLK_SET_RATE_PARENT, 0x490, 0, 4, 7, },
{ HI6220_MMC0_DIV, "mmc0_div", "mmc0_syspll", CLK_SET_RATE_PARENT, 0x494, 0, 6, 7, },
{ HI6220_MMC1_DIV, "mmc1_div", "mmc1_syspll", CLK_SET_RATE_PARENT, 0x498, 0, 6, 7, },
{ HI6220_MMC2_DIV, "mmc2_div", "mmc2_syspll", CLK_SET_RATE_PARENT, 0x49c, 0, 6, 7, },
{ HI6220_HIFI_DIV, "hifi_div", "hifi_sel", CLK_SET_RATE_PARENT, 0x4a0, 0, 4, 7, },
{ HI6220_BBPPLL0_DIV, "bbppll0_div", "bbppll_sel", CLK_SET_RATE_PARENT, 0x4a0, 8, 6, 15,},
{ HI6220_CS_DAPB, "cs_dapb", "picophy_src", CLK_SET_RATE_PARENT, 0x4a0, 24, 2, 31,},
{ HI6220_CS_ATB_DIV, "cs_atb_div", "cs_atb_syspll", CLK_SET_RATE_PARENT, 0x4a4, 0, 4, 7, },
};
static void __init hi6220_clk_sys_init(struct device_node *np)
{
struct hisi_clock_data *clk_data;
clk_data = hisi_clk_init(np, HI6220_SYS_NR_CLKS);
if (!clk_data)
return;
hisi_clk_register_gate_sep(hi6220_separated_gate_clks_sys,
ARRAY_SIZE(hi6220_separated_gate_clks_sys), clk_data);
hisi_clk_register_mux(hi6220_mux_clks_sys,
ARRAY_SIZE(hi6220_mux_clks_sys), clk_data);
hi6220_clk_register_divider(hi6220_div_clks_sys,
ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
}
CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
/* clocks in media controller */
static const char *clk_1000_1200_src[] __initdata = { "pll_gpu_gate", "media_syspll_src", };
static const char *clk_1440_1200_src[] __initdata = { "media_syspll_src", "media_pll_src", };
static const char *clk_1000_1440_src[] __initdata = { "pll_gpu_gate", "media_pll_src", };
static struct hisi_gate_clock hi6220_separated_gate_clks_media[] __initdata = {
{ HI6220_DSI_PCLK, "dsi_pclk", "vpucodec", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 0, 0, },
{ HI6220_G3D_PCLK, "g3d_pclk", "vpucodec", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 1, 0, },
{ HI6220_ACLK_CODEC_VPU, "aclk_codec_vpu", "ade_core_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 3, 0, },
{ HI6220_ISP_SCLK, "isp_sclk", "isp_sclk_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 5, 0, },
{ HI6220_ADE_CORE, "ade_core", "ade_core_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 6, 0, },
{ HI6220_MED_MMU, "media_mmu", "mmu_clk", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 8, 0, },
{ HI6220_CFG_CSI4PHY, "cfg_csi4phy", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 9, 0, },
{ HI6220_CFG_CSI2PHY, "cfg_csi2phy", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 10, 0, },
{ HI6220_ISP_SCLK_GATE, "isp_sclk_gate", "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 11, 0, },
{ HI6220_ISP_SCLK_GATE1, "isp_sclk_gate1", "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 12, 0, },
{ HI6220_ADE_CORE_GATE, "ade_core_gate", "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 14, 0, },
{ HI6220_CODEC_VPU_GATE, "codec_vpu_gate", "clk_1000_1440", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 15, 0, },
{ HI6220_MED_SYSPLL, "media_syspll_src", "media_syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 17, 0, },
};
static struct hisi_mux_clock hi6220_mux_clks_media[] __initdata = {
{ HI6220_1440_1200, "clk_1440_1200", clk_1440_1200_src, ARRAY_SIZE(clk_1440_1200_src), CLK_SET_RATE_PARENT, 0x51c, 0, 1, 0, },
{ HI6220_1000_1200, "clk_1000_1200", clk_1000_1200_src, ARRAY_SIZE(clk_1000_1200_src), CLK_SET_RATE_PARENT, 0x51c, 1, 1, 0, },
{ HI6220_1000_1440, "clk_1000_1440", clk_1000_1440_src, ARRAY_SIZE(clk_1000_1440_src), CLK_SET_RATE_PARENT, 0x51c, 6, 1, 0, },
};
static struct hi6220_divider_clock hi6220_div_clks_media[] __initdata = {
{ HI6220_CODEC_JPEG, "codec_jpeg_aclk", "media_pll_src", CLK_SET_RATE_PARENT, 0xcbc, 0, 4, 23, },
{ HI6220_ISP_SCLK_SRC, "isp_sclk_src", "isp_sclk_gate", CLK_SET_RATE_PARENT, 0xcbc, 8, 4, 15, },
{ HI6220_ISP_SCLK1, "isp_sclk1", "isp_sclk_gate1", CLK_SET_RATE_PARENT, 0xcbc, 24, 4, 31, },
{ HI6220_ADE_CORE_SRC, "ade_core_src", "ade_core_gate", CLK_SET_RATE_PARENT, 0xcc0, 16, 3, 23, },
{ HI6220_ADE_PIX_SRC, "ade_pix_src", "clk_1440_1200", CLK_SET_RATE_PARENT, 0xcc0, 24, 6, 31, },
{ HI6220_G3D_CLK, "g3d_clk", "clk_1000_1200", CLK_SET_RATE_PARENT, 0xcc4, 8, 4, 15, },
{ HI6220_CODEC_VPU_SRC, "codec_vpu_src", "codec_vpu_gate", CLK_SET_RATE_PARENT, 0xcc4, 24, 6, 31, },
};
static void __init hi6220_clk_media_init(struct device_node *np)
{
struct hisi_clock_data *clk_data;
clk_data = hisi_clk_init(np, HI6220_MEDIA_NR_CLKS);
if (!clk_data)
return;
hisi_clk_register_gate_sep(hi6220_separated_gate_clks_media,
ARRAY_SIZE(hi6220_separated_gate_clks_media), clk_data);
hisi_clk_register_mux(hi6220_mux_clks_media,
ARRAY_SIZE(hi6220_mux_clks_media), clk_data);
hi6220_clk_register_divider(hi6220_div_clks_media,
ARRAY_SIZE(hi6220_div_clks_media), clk_data);
}
CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
/* clocks in pmctrl */
static struct hisi_gate_clock hi6220_gate_clks_power[] __initdata = {
{ HI6220_PLL_GPU_GATE, "pll_gpu_gate", "gpupll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x8, 0, 0, },
{ HI6220_PLL1_DDR_GATE, "pll1_ddr_gate", "ddrpll1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x10, 0, 0, },
{ HI6220_PLL_DDR_GATE, "pll_ddr_gate", "ddrpll0", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x18, 0, 0, },
{ HI6220_PLL_MEDIA_GATE, "pll_media_gate", "media_pll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x38, 0, 0, },
{ HI6220_PLL0_BBP_GATE, "pll0_bbp_gate", "bbppll0", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x48, 0, 0, },
};
static struct hi6220_divider_clock hi6220_div_clks_power[] __initdata = {
{ HI6220_DDRC_SRC, "ddrc_src", "ddr_sel_src", CLK_SET_RATE_PARENT, 0x5a8, 0, 4, 0, },
{ HI6220_DDRC_AXI1, "ddrc_axi1", "ddrc_src", CLK_SET_RATE_PARENT, 0x5a8, 8, 2, 0, },
};
static void __init hi6220_clk_power_init(struct device_node *np)
{
struct hisi_clock_data *clk_data;
clk_data = hisi_clk_init(np, HI6220_POWER_NR_CLKS);
if (!clk_data)
return;
hisi_clk_register_gate(hi6220_gate_clks_power,
ARRAY_SIZE(hi6220_gate_clks_power), clk_data);
hi6220_clk_register_divider(hi6220_div_clks_power,
ARRAY_SIZE(hi6220_div_clks_power), clk_data);
}
CLK_OF_DECLARE(hi6220_clk_power, "hisilicon,hi6220-pmctrl", hi6220_clk_power_init);
/* clocks in acpu */
static const struct hisi_gate_clock hi6220_acpu_sc_gate_sep_clks[] = {
{ HI6220_ACPU_SFT_AT_S, "sft_at_s", "cs_atb",
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0xc, 11, 0, },
};
static void __init hi6220_clk_acpu_init(struct device_node *np)
{
struct hisi_clock_data *clk_data;
int nr = ARRAY_SIZE(hi6220_acpu_sc_gate_sep_clks);
clk_data = hisi_clk_init(np, nr);
if (!clk_data)
return;
hisi_clk_register_gate_sep(hi6220_acpu_sc_gate_sep_clks,
ARRAY_SIZE(hi6220_acpu_sc_gate_sep_clks),
clk_data);
}
CLK_OF_DECLARE(hi6220_clk_acpu, "hisilicon,hi6220-acpu-sctrl", hi6220_clk_acpu_init);
|
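The hi6220_fixed_factor_clks[] table above encodes pure mult/div relations to a parent: clk_300m is syspll/4 (1190400000 / 4 = 297.6 MHz, despite the name) and clk_150m is clk_300m/2. A hedged sketch of the same chain using the generic clk-provider helper rather than the driver's hisi_clk_register_fixed_factor() wrapper:

#include <linux/clk-provider.h>

/*
 * Illustrative only: register the syspll -> clk_300m -> clk_150m chain
 * with the generic fixed-factor helper. The driver's wrapper does this
 * for every table row and also stores the result for the DT provider.
 */
static void example_register_fixed_factors(void)
{
	/* clk_300m = syspll * 1 / 4 */
	clk_hw_register_fixed_factor(NULL, "clk_300m", "syspll", 0, 1, 4);
	/* clk_150m = clk_300m * 1 / 2 */
	clk_hw_register_fixed_factor(NULL, "clk_150m", "clk_300m", 0, 1, 2);
}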
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Hardware monitoring driver for Maxim MAX8688
*
* Copyright (c) 2011 Ericsson AB.
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include "pmbus.h"
#define MAX8688_MFR_VOUT_PEAK 0xd4
#define MAX8688_MFR_IOUT_PEAK 0xd5
#define MAX8688_MFR_TEMPERATURE_PEAK 0xd6
#define MAX8688_MFG_STATUS 0xd8
#define MAX8688_STATUS_OC_FAULT BIT(4)
#define MAX8688_STATUS_OV_FAULT BIT(5)
#define MAX8688_STATUS_OV_WARNING BIT(8)
#define MAX8688_STATUS_UV_FAULT BIT(9)
#define MAX8688_STATUS_UV_WARNING BIT(10)
#define MAX8688_STATUS_UC_FAULT BIT(11)
#define MAX8688_STATUS_OC_WARNING BIT(12)
#define MAX8688_STATUS_OT_FAULT BIT(13)
#define MAX8688_STATUS_OT_WARNING BIT(14)
static int max8688_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
int ret;
if (page > 0)
return -ENXIO;
switch (reg) {
case PMBUS_VIRT_READ_VOUT_MAX:
ret = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFR_IOUT_PEAK);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
ret = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFR_TEMPERATURE_PEAK);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
case PMBUS_VIRT_RESET_IOUT_HISTORY:
case PMBUS_VIRT_RESET_TEMP_HISTORY:
ret = 0;
break;
default:
ret = -ENODATA;
break;
}
return ret;
}
static int max8688_write_word_data(struct i2c_client *client, int page, int reg,
u16 word)
{
int ret;
switch (reg) {
case PMBUS_VIRT_RESET_VOUT_HISTORY:
ret = pmbus_write_word_data(client, 0, MAX8688_MFR_VOUT_PEAK,
0);
break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, 0, MAX8688_MFR_IOUT_PEAK,
0);
break;
case PMBUS_VIRT_RESET_TEMP_HISTORY:
ret = pmbus_write_word_data(client, 0,
MAX8688_MFR_TEMPERATURE_PEAK,
0xffff);
break;
default:
ret = -ENODATA;
break;
}
return ret;
}
static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
{
int ret = 0;
int mfg_status;
if (page > 0)
return -ENXIO;
switch (reg) {
case PMBUS_STATUS_VOUT:
mfg_status = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFG_STATUS);
if (mfg_status < 0)
return mfg_status;
if (mfg_status & MAX8688_STATUS_UV_WARNING)
ret |= PB_VOLTAGE_UV_WARNING;
if (mfg_status & MAX8688_STATUS_UV_FAULT)
ret |= PB_VOLTAGE_UV_FAULT;
if (mfg_status & MAX8688_STATUS_OV_WARNING)
ret |= PB_VOLTAGE_OV_WARNING;
if (mfg_status & MAX8688_STATUS_OV_FAULT)
ret |= PB_VOLTAGE_OV_FAULT;
break;
case PMBUS_STATUS_IOUT:
mfg_status = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFG_STATUS);
if (mfg_status < 0)
return mfg_status;
if (mfg_status & MAX8688_STATUS_UC_FAULT)
ret |= PB_IOUT_UC_FAULT;
if (mfg_status & MAX8688_STATUS_OC_WARNING)
ret |= PB_IOUT_OC_WARNING;
if (mfg_status & MAX8688_STATUS_OC_FAULT)
ret |= PB_IOUT_OC_FAULT;
break;
case PMBUS_STATUS_TEMPERATURE:
mfg_status = pmbus_read_word_data(client, 0, 0xff,
MAX8688_MFG_STATUS);
if (mfg_status < 0)
return mfg_status;
if (mfg_status & MAX8688_STATUS_OT_WARNING)
ret |= PB_TEMP_OT_WARNING;
if (mfg_status & MAX8688_STATUS_OT_FAULT)
ret |= PB_TEMP_OT_FAULT;
break;
default:
ret = -ENODATA;
break;
}
return ret;
}
static struct pmbus_driver_info max8688_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = direct,
.format[PSC_VOLTAGE_OUT] = direct,
.format[PSC_TEMPERATURE] = direct,
.format[PSC_CURRENT_OUT] = direct,
.m[PSC_VOLTAGE_IN] = 19995,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = -1,
.m[PSC_VOLTAGE_OUT] = 19995,
.b[PSC_VOLTAGE_OUT] = 0,
.R[PSC_VOLTAGE_OUT] = -1,
.m[PSC_CURRENT_OUT] = 23109,
.b[PSC_CURRENT_OUT] = 0,
.R[PSC_CURRENT_OUT] = -2,
.m[PSC_TEMPERATURE] = -7612,
.b[PSC_TEMPERATURE] = 335,
.R[PSC_TEMPERATURE] = -3,
.func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP
| PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max8688_read_byte_data,
.read_word_data = max8688_read_word_data,
.write_word_data = max8688_write_word_data,
};
static int max8688_probe(struct i2c_client *client)
{
return pmbus_do_probe(client, &max8688_info);
}
static const struct i2c_device_id max8688_id[] = {
{"max8688"},
{ }
};
MODULE_DEVICE_TABLE(i2c, max8688_id);
/* This is the driver that will be inserted */
static struct i2c_driver max8688_driver = {
.driver = {
.name = "max8688",
},
.probe = max8688_probe,
.id_table = max8688_id,
};
module_i2c_driver(max8688_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX8688");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("PMBUS");
|
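The direct-format coefficients in max8688_info above follow the PMBus relation Y = (m * X + b) * 10^R between a real-world value X and a raw register word Y; the pmbus core inverts this as X = (Y * 10^-R - b) / m when reporting readings. A hedged sketch of that inversion (the real conversion lives in the pmbus core, not in this driver):

#include <linux/math64.h>
#include <linux/types.h>

/*
 * Illustrative decode of the PMBus "direct" format:
 * X = (Y * 10^-R - b) / m.
 * With the PSC_TEMPERATURE tuple above (m = -7612, b = 335, R = -3),
 * a raw word y decodes to (y * 1000 - 335) / -7612 degrees Celsius.
 */
static s64 example_direct_to_real(s64 y, s32 m, s64 b, int r)
{
	/* Only negative R occurs in this driver: scale Y by 10^-R */
	while (r++ < 0)
		y *= 10;
	return div_s64(y - b, m);
}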
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the uPD161704 LCD Controller
*
* Copyright (C) 2014 Seong-Woo Kim
*
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_bd663474"
#define WIDTH 240
#define HEIGHT 320
#define BPP 16
static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
/* Initialization sequence from Lib_UTFT */
/* oscillator start */
write_reg(par, 0x000, 0x0001); /* oscillator 0: stop, 1: operation */
mdelay(10);
/* Power settings */
write_reg(par, 0x100, 0x0000); /* power supply setup */
write_reg(par, 0x101, 0x0000);
write_reg(par, 0x102, 0x3110);
write_reg(par, 0x103, 0xe200);
write_reg(par, 0x110, 0x009d);
write_reg(par, 0x111, 0x0022);
write_reg(par, 0x100, 0x0120);
mdelay(20);
write_reg(par, 0x100, 0x3120);
mdelay(80);
/* Display control */
write_reg(par, 0x001, 0x0100);
write_reg(par, 0x002, 0x0000);
write_reg(par, 0x003, 0x1230);
write_reg(par, 0x006, 0x0000);
write_reg(par, 0x007, 0x0101);
write_reg(par, 0x008, 0x0808);
write_reg(par, 0x009, 0x0000);
write_reg(par, 0x00b, 0x0000);
write_reg(par, 0x00c, 0x0000);
write_reg(par, 0x00d, 0x0018);
/* LTPS control settings */
write_reg(par, 0x012, 0x0000);
write_reg(par, 0x013, 0x0000);
write_reg(par, 0x018, 0x0000);
write_reg(par, 0x019, 0x0000);
write_reg(par, 0x203, 0x0000);
write_reg(par, 0x204, 0x0000);
write_reg(par, 0x210, 0x0000);
write_reg(par, 0x211, 0x00ef);
write_reg(par, 0x212, 0x0000);
write_reg(par, 0x213, 0x013f);
write_reg(par, 0x214, 0x0000);
write_reg(par, 0x215, 0x0000);
write_reg(par, 0x216, 0x0000);
write_reg(par, 0x217, 0x0000);
/* Gray scale settings */
write_reg(par, 0x300, 0x5343);
write_reg(par, 0x301, 0x1021);
write_reg(par, 0x302, 0x0003);
write_reg(par, 0x303, 0x0011);
write_reg(par, 0x304, 0x050a);
write_reg(par, 0x305, 0x4342);
write_reg(par, 0x306, 0x1100);
write_reg(par, 0x307, 0x0003);
write_reg(par, 0x308, 0x1201);
write_reg(par, 0x309, 0x050a);
/* RAM access settings */
write_reg(par, 0x400, 0x4027);
write_reg(par, 0x401, 0x0000);
write_reg(par, 0x402, 0x0000); /* First screen drive position (1) */
write_reg(par, 0x403, 0x013f); /* First screen drive position (2) */
write_reg(par, 0x404, 0x0000);
write_reg(par, 0x200, 0x0000);
write_reg(par, 0x201, 0x0000);
write_reg(par, 0x100, 0x7120);
write_reg(par, 0x007, 0x0103);
mdelay(10);
write_reg(par, 0x007, 0x0113);
return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
switch (par->info->var.rotate) {
/* R200h = Horizontal GRAM Start Address */
/* R201h = Vertical GRAM Start Address */
case 0:
write_reg(par, 0x0200, xs);
write_reg(par, 0x0201, ys);
break;
case 180:
write_reg(par, 0x0200, WIDTH - 1 - xs);
write_reg(par, 0x0201, HEIGHT - 1 - ys);
break;
case 270:
write_reg(par, 0x0200, WIDTH - 1 - ys);
write_reg(par, 0x0201, xs);
break;
case 90:
write_reg(par, 0x0200, ys);
write_reg(par, 0x0201, HEIGHT - 1 - xs);
break;
}
write_reg(par, 0x202); /* Write Data to GRAM */
}
static int set_var(struct fbtft_par *par)
{
switch (par->info->var.rotate) {
/* AM: GRAM update direction */
case 0:
write_reg(par, 0x003, 0x1230);
break;
case 180:
write_reg(par, 0x003, 0x1200);
break;
case 270:
write_reg(par, 0x003, 0x1228);
break;
case 90:
write_reg(par, 0x003, 0x1218);
break;
}
return 0;
}
static struct fbtft_display display = {
.regwidth = 16,
.width = WIDTH,
.height = HEIGHT,
.bpp = BPP,
.fbtftops = {
.init_display = init_display,
.set_addr_win = set_addr_win,
.set_var = set_var,
},
};
FBTFT_REGISTER_DRIVER(DRVNAME, "hitachi,bd663474", &display);
MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:bd663474");
MODULE_ALIAS("platform:bd663474");
MODULE_DESCRIPTION("FB driver for the uPD161704 LCD Controller");
MODULE_AUTHOR("Seong-Woo Kim");
MODULE_LICENSE("GPL");
|
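The RAM access settings in init_display() above encode the panel geometry: register 0x211 is written with 0x00ef (239 = WIDTH - 1) and 0x213 with 0x013f (319 = HEIGHT - 1), i.e. the full 240x320 GRAM window. A hedged compile-time sanity sketch of that relationship, assuming the WIDTH/HEIGHT macros from the driver above:

#include <linux/build_bug.h>

/*
 * Illustrative only: the horizontal/vertical RAM end addresses from the
 * init sequence must match the advertised WIDTH x HEIGHT of the panel.
 */
static inline void example_window_sanity(void)
{
	BUILD_BUG_ON(0x00ef != WIDTH - 1);  /* reg 0x211 */
	BUILD_BUG_ON(0x013f != HEIGHT - 1); /* reg 0x213 */
}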
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/misc/xillybus_of.c
*
* Copyright 2011 Xillybus Ltd, http://xillybus.com
*
* Driver for the Xillybus FPGA/host framework using Open Firmware.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/err.h>
#include "xillybus.h"
MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_ALIAS("xillybus_of");
MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillybus_of";
/* Match table for of_platform binding */
static const struct of_device_id xillybus_of_match[] = {
{ .compatible = "xillybus,xillybus-1.00.a", },
{ .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
{}
};
MODULE_DEVICE_TABLE(of, xillybus_of_match);
static int xilly_drv_probe(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint;
int rc;
int irq;
endpoint = xillybus_init_endpoint(dev);
if (!endpoint)
return -ENOMEM;
dev_set_drvdata(dev, endpoint);
endpoint->owner = THIS_MODULE;
endpoint->registers = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(endpoint->registers))
return PTR_ERR(endpoint->registers);
irq = platform_get_irq(op, 0);
if (irq < 0)
return irq;
rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint);
if (rc) {
dev_err(endpoint->dev,
"Failed to register IRQ handler. Aborting.\n");
return -ENODEV;
}
return xillybus_endpoint_discovery(endpoint);
}
static void xilly_drv_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint = dev_get_drvdata(dev);
xillybus_endpoint_remove(endpoint);
}
static struct platform_driver xillybus_platform_driver = {
.probe = xilly_drv_probe,
.remove = xilly_drv_remove,
.driver = {
.name = xillyname,
.of_match_table = xillybus_of_match,
},
};
module_platform_driver(xillybus_platform_driver);
|
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
*
* Module Name: rsxface - Public interfaces to the resource manager
*
******************************************************************************/
#define EXPORT_ACPI_INTERFACES
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsxface")
/* Local macros for 16,32-bit to 64-bit conversion */
#define ACPI_COPY_FIELD(out, in, field) ((out)->field = (in)->field)
#define ACPI_COPY_ADDRESS(out, in) \
ACPI_COPY_FIELD(out, in, resource_type); \
ACPI_COPY_FIELD(out, in, producer_consumer); \
ACPI_COPY_FIELD(out, in, decode); \
ACPI_COPY_FIELD(out, in, min_address_fixed); \
ACPI_COPY_FIELD(out, in, max_address_fixed); \
ACPI_COPY_FIELD(out, in, info); \
ACPI_COPY_FIELD(out, in, address.granularity); \
ACPI_COPY_FIELD(out, in, address.minimum); \
ACPI_COPY_FIELD(out, in, address.maximum); \
ACPI_COPY_FIELD(out, in, address.translation_offset); \
ACPI_COPY_FIELD(out, in, address.address_length); \
ACPI_COPY_FIELD(out, in, resource_source);
/* Local prototypes */
static acpi_status
acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context);
static acpi_status
acpi_rs_validate_parameters(acpi_handle device_handle,
struct acpi_buffer *buffer,
struct acpi_namespace_node **return_node);
/*******************************************************************************
*
* FUNCTION: acpi_rs_validate_parameters
*
* PARAMETERS: device_handle - Handle to a device
* buffer - Pointer to a data buffer
* return_node - Pointer to where the device node is returned
*
* RETURN: Status
*
* DESCRIPTION: Common parameter validation for resource interfaces
*
******************************************************************************/
static acpi_status
acpi_rs_validate_parameters(acpi_handle device_handle,
struct acpi_buffer *buffer,
struct acpi_namespace_node **return_node)
{
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE(rs_validate_parameters);
/*
* Must have a valid handle to an ACPI device
*/
if (!device_handle) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
node = acpi_ns_validate_handle(device_handle);
if (!node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (node->type != ACPI_TYPE_DEVICE) {
return_ACPI_STATUS(AE_TYPE);
}
/*
* Validate the user buffer object
*
* If there is a non-zero buffer length, we also need a valid pointer in
* the buffer. If it's a zero buffer length, we'll be returning the
* needed buffer size (later), so keep going.
*/
status = acpi_ut_validate_buffer(buffer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
*return_node = node;
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_get_irq_routing_table
*
* PARAMETERS: device_handle - Handle to the Bus device we are querying
* ret_buffer - Pointer to a buffer to receive the
* current resources for the device
*
* RETURN: Status
*
* DESCRIPTION: This function is called to get the IRQ routing table for a
* specific bus. The caller must first acquire a handle for the
* desired bus. The routing table is placed in the buffer pointed
* to by the ret_buffer parameter.
*
* If the function fails an appropriate status will be returned
* and the value of ret_buffer is undefined.
*
* This function attempts to execute the _PRT method contained in
* the object indicated by the passed device_handle.
*
******************************************************************************/
acpi_status
acpi_get_irq_routing_table(acpi_handle device_handle,
struct acpi_buffer *ret_buffer)
{
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE(acpi_get_irq_routing_table);
/* Validate parameters then dispatch to internal routine */
status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_rs_get_prt_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_irq_routing_table)
/*******************************************************************************
*
* FUNCTION: acpi_get_current_resources
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
* ret_buffer - Pointer to a buffer to receive the
* current resources for the device
*
* RETURN: Status
*
* DESCRIPTION: This function is called to get the current resources for a
* specific device. The caller must first acquire a handle for
* the desired device. The resource data is placed in the buffer
* pointed to by the ret_buffer parameter.
*
* If the function fails an appropriate status will be returned
* and the value of ret_buffer is undefined.
*
* This function attempts to execute the _CRS method contained in
* the object indicated by the passed device_handle.
*
******************************************************************************/
acpi_status
acpi_get_current_resources(acpi_handle device_handle,
struct acpi_buffer *ret_buffer)
{
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE(acpi_get_current_resources);
/* Validate parameters then dispatch to internal routine */
status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_rs_get_crs_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
/*******************************************************************************
*
* FUNCTION: acpi_get_possible_resources
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
* ret_buffer - Pointer to a buffer to receive the
* resources for the device
*
* RETURN: Status
*
* DESCRIPTION: This function is called to get a list of the possible resources
* for a specific device. The caller must first acquire a handle
* for the desired device. The resource data is placed in the
* buffer pointed to by the ret_buffer parameter.
*
* If the function fails an appropriate status will be returned
* and the value of ret_buffer is undefined.
*
******************************************************************************/
acpi_status
acpi_get_possible_resources(acpi_handle device_handle,
struct acpi_buffer *ret_buffer)
{
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE(acpi_get_possible_resources);
/* Validate parameters then dispatch to internal routine */
status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_rs_get_prs_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
/*******************************************************************************
*
* FUNCTION: acpi_set_current_resources
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are setting resources
* in_buffer - Pointer to a buffer containing the
* resources to be set for the device
*
* RETURN: Status
*
* DESCRIPTION: This function is called to set the current resources for a
* specific device. The caller must first acquire a handle for
* the desired device. The resource data is passed to the routine in
* the buffer pointed to by the in_buffer parameter.
*
******************************************************************************/
acpi_status
acpi_set_current_resources(acpi_handle device_handle,
struct acpi_buffer *in_buffer)
{
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE(acpi_set_current_resources);
/* Validate the buffer, don't allow zero length */
if ((!in_buffer) || (!in_buffer->pointer) || (!in_buffer->length)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Validate parameters then dispatch to internal routine */
status = acpi_rs_validate_parameters(device_handle, in_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_rs_set_srs_method_data(node, in_buffer);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
/*******************************************************************************
*
* FUNCTION: acpi_get_event_resources
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are getting resources
* ret_buffer - Pointer to a buffer to receive the
* event resources for the device
*
* RETURN: Status
*
* DESCRIPTION: This function is called to get the event resources for a
* specific device. The caller must first acquire a handle for
* the desired device. The resource data is returned in the
* buffer pointed to by the ret_buffer parameter. Uses the
* _AEI method.
*
******************************************************************************/
acpi_status
acpi_get_event_resources(acpi_handle device_handle,
struct acpi_buffer *ret_buffer)
{
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE(acpi_get_event_resources);
/* Validate parameters then dispatch to internal routine */
status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_rs_get_aei_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
/******************************************************************************
*
* FUNCTION: acpi_resource_to_address64
*
* PARAMETERS: resource - Pointer to a resource
* out - Pointer to the user's return buffer
* (a struct acpi_resource_address64)
*
* RETURN: Status
*
* DESCRIPTION: If the resource is an address16, address32, or address64,
* copy it to the address64 return buffer. This saves the
* caller from having to duplicate code for different-sized
* addresses.
*
******************************************************************************/
acpi_status
acpi_resource_to_address64(struct acpi_resource *resource,
struct acpi_resource_address64 *out)
{
struct acpi_resource_address16 *address16;
struct acpi_resource_address32 *address32;
if (!resource || !out) {
return (AE_BAD_PARAMETER);
}
/* Convert 16 or 32 address descriptor to 64 */
switch (resource->type) {
case ACPI_RESOURCE_TYPE_ADDRESS16:
address16 =
ACPI_CAST_PTR(struct acpi_resource_address16,
&resource->data);
ACPI_COPY_ADDRESS(out, address16);
break;
case ACPI_RESOURCE_TYPE_ADDRESS32:
address32 =
ACPI_CAST_PTR(struct acpi_resource_address32,
&resource->data);
ACPI_COPY_ADDRESS(out, address32);
break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
/* Simple copy for 64 bit source */
memcpy(out, &resource->data,
sizeof(struct acpi_resource_address64));
break;
default:
return (AE_BAD_PARAMETER);
}
return (AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
/*******************************************************************************
*
* FUNCTION: acpi_get_vendor_resource
*
* PARAMETERS: device_handle - Handle for the parent device object
* name - Method name for the parent resource
* (METHOD_NAME__CRS or METHOD_NAME__PRS)
* uuid - Pointer to the UUID to be matched.
* Includes both subtype and 16-byte UUID
* ret_buffer - Where the vendor resource is returned
*
* RETURN: Status
*
* DESCRIPTION: Walk a resource template for the specified device to find a
* vendor-defined resource that matches the supplied UUID and
* UUID subtype. Returns a struct acpi_resource of type Vendor.
*
******************************************************************************/
acpi_status
acpi_get_vendor_resource(acpi_handle device_handle,
char *name,
struct acpi_vendor_uuid *uuid,
struct acpi_buffer *ret_buffer)
{
struct acpi_vendor_walk_info info;
acpi_status status;
/* Other parameters are validated by acpi_walk_resources */
if (!uuid || !ret_buffer) {
return (AE_BAD_PARAMETER);
}
info.uuid = uuid;
info.buffer = ret_buffer;
info.status = AE_NOT_EXIST;
/* Walk the _CRS or _PRS resource list for this device */
status =
acpi_walk_resources(device_handle, name,
acpi_rs_match_vendor_resource, &info);
if (ACPI_FAILURE(status)) {
return (status);
}
return (info.status);
}
ACPI_EXPORT_SYMBOL(acpi_get_vendor_resource)
/*******************************************************************************
*
* FUNCTION: acpi_rs_match_vendor_resource
*
* PARAMETERS: acpi_walk_resource_callback
*
* RETURN: Status
*
* DESCRIPTION: Match a vendor resource via the ACPI 3.0 UUID
*
******************************************************************************/
static acpi_status
acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
{
struct acpi_vendor_walk_info *info = context;
struct acpi_resource_vendor_typed *vendor;
struct acpi_buffer *buffer;
acpi_status status;
/* Ignore all descriptors except Vendor */
if (resource->type != ACPI_RESOURCE_TYPE_VENDOR) {
return (AE_OK);
}
vendor = &resource->data.vendor_typed;
/*
* For a valid match, these conditions must hold:
*
* 1) Length of descriptor data must be at least as long as a UUID struct
* 2) The UUID subtypes must match
* 3) The UUID data must match
*/
if ((vendor->byte_length < (ACPI_UUID_LENGTH + 1)) ||
(vendor->uuid_subtype != info->uuid->subtype) ||
(memcmp(vendor->uuid, info->uuid->data, ACPI_UUID_LENGTH))) {
return (AE_OK);
}
/* Validate/Allocate/Clear caller buffer */
buffer = info->buffer;
status = acpi_ut_initialize_buffer(buffer, resource->length);
if (ACPI_FAILURE(status)) {
return (status);
}
/* Found the correct resource, copy and return it */
memcpy(buffer->pointer, resource, resource->length);
buffer->length = resource->length;
/* Found the desired descriptor, terminate resource walk */
info->status = AE_OK;
return (AE_CTRL_TERMINATE);
}
/*******************************************************************************
*
* FUNCTION: acpi_walk_resource_buffer
*
* PARAMETERS: buffer - Formatted buffer returned by one of the
* various Get*Resource functions
* user_function - Called for each resource
* context - Passed to user_function
*
* RETURN: Status
*
* DESCRIPTION: Walks the input resource template. The user_function is called
* once for each resource in the list.
*
******************************************************************************/
acpi_status
acpi_walk_resource_buffer(struct acpi_buffer *buffer,
acpi_walk_resource_callback user_function,
void *context)
{
acpi_status status = AE_OK;
struct acpi_resource *resource;
struct acpi_resource *resource_end;
ACPI_FUNCTION_TRACE(acpi_walk_resource_buffer);
/* Parameter validation */
if (!buffer || !buffer->pointer || !user_function) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Buffer contains the resource list and length */
resource = ACPI_CAST_PTR(struct acpi_resource, buffer->pointer);
resource_end =
ACPI_ADD_PTR(struct acpi_resource, buffer->pointer, buffer->length);
/* Walk the resource list until the end_tag is found (or buffer end) */
while (resource < resource_end) {
/* Sanity check the resource type */
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
status = AE_AML_INVALID_RESOURCE_TYPE;
break;
}
/* Sanity check the length. It must not be zero, or we loop forever */
if (!resource->length) {
return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
}
/* Invoke the user function, abort on any error returned */
status = user_function(resource, context);
if (ACPI_FAILURE(status)) {
if (status == AE_CTRL_TERMINATE) {
/* This is an OK termination by the user function */
status = AE_OK;
}
break;
}
/* end_tag indicates end-of-list */
if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
break;
}
/* Get the next resource descriptor */
resource = ACPI_NEXT_RESOURCE(resource);
}
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer)
/*******************************************************************************
*
* FUNCTION: acpi_walk_resources
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
* name - Method name of the resources we want.
* (METHOD_NAME__CRS, METHOD_NAME__PRS, or
* METHOD_NAME__AEI or METHOD_NAME__DMA)
* user_function - Called for each resource
* context - Passed to user_function
*
* RETURN: Status
*
* DESCRIPTION: Retrieves the current or possible resource list for the
* specified device. The user_function is called once for
* each resource in the list.
*
******************************************************************************/
acpi_status
acpi_walk_resources(acpi_handle device_handle,
char *name,
acpi_walk_resource_callback user_function, void *context)
{
acpi_status status;
struct acpi_buffer buffer;
ACPI_FUNCTION_TRACE(acpi_walk_resources);
/* Parameter validation */
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAMESEG(name, METHOD_NAME__CRS) &&
!ACPI_COMPARE_NAMESEG(name, METHOD_NAME__PRS) &&
!ACPI_COMPARE_NAMESEG(name, METHOD_NAME__AEI) &&
!ACPI_COMPARE_NAMESEG(name, METHOD_NAME__DMA))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Get the _CRS/_PRS/_AEI/_DMA resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Walk the resource list and cleanup */
status = acpi_walk_resource_buffer(&buffer, user_function, context);
ACPI_FREE(buffer.pointer);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_walk_resources)
|
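acpi_walk_resources() above hides the buffer handling from the caller: the callback sees one struct acpi_resource at a time and may end the walk early by returning AE_CTRL_TERMINATE, exactly as acpi_rs_match_vendor_resource() does. A hedged caller sketch that counts the IRQ descriptors in a device's _CRS; "handle" is assumed to be a valid ACPI device handle obtained elsewhere:

#include <acpi/acpi.h>

/* Illustrative callback: count plain and extended IRQ descriptors */
static acpi_status example_count_irqs(struct acpi_resource *res, void *ctx)
{
	int *count = ctx;
	if (res->type == ACPI_RESOURCE_TYPE_IRQ ||
	    res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ)
		(*count)++;
	return AE_OK; /* keep walking until the end_tag */
}

static int example_irq_descriptor_count(acpi_handle handle)
{
	int count = 0;
	if (ACPI_FAILURE(acpi_walk_resources(handle, METHOD_NAME__CRS,
					     example_count_irqs, &count)))
		return -1;
	return count;
}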
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* TerraTec remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* TerraTec slim remote, 7 rows, 4 columns. */
/* Uses NEC extended 0x02bd. */
static struct rc_map_table terratec_slim[] = {
{ 0x02bd00, KEY_NUMERIC_1 },
{ 0x02bd01, KEY_NUMERIC_2 },
{ 0x02bd02, KEY_NUMERIC_3 },
{ 0x02bd03, KEY_NUMERIC_4 },
{ 0x02bd04, KEY_NUMERIC_5 },
{ 0x02bd05, KEY_NUMERIC_6 },
{ 0x02bd06, KEY_NUMERIC_7 },
{ 0x02bd07, KEY_NUMERIC_8 },
{ 0x02bd08, KEY_NUMERIC_9 },
{ 0x02bd09, KEY_NUMERIC_0 },
{ 0x02bd0a, KEY_MUTE },
{ 0x02bd0b, KEY_NEW }, /* symbol: PIP */
{ 0x02bd0e, KEY_VOLUMEDOWN },
{ 0x02bd0f, KEY_PLAYPAUSE },
{ 0x02bd10, KEY_RIGHT },
{ 0x02bd11, KEY_LEFT },
{ 0x02bd12, KEY_UP },
{ 0x02bd13, KEY_DOWN },
{ 0x02bd15, KEY_OK },
{ 0x02bd16, KEY_STOP },
{ 0x02bd17, KEY_CAMERA }, /* snapshot */
{ 0x02bd18, KEY_CHANNELUP },
{ 0x02bd19, KEY_RECORD },
{ 0x02bd1a, KEY_CHANNELDOWN },
{ 0x02bd1c, KEY_ESC },
{ 0x02bd1f, KEY_VOLUMEUP },
{ 0x02bd44, KEY_EPG },
{ 0x02bd45, KEY_POWER2 }, /* [red power button] */
};
static struct rc_map_list terratec_slim_map = {
.map = {
.scan = terratec_slim,
.size = ARRAY_SIZE(terratec_slim),
.rc_proto = RC_PROTO_NECX,
.name = RC_MAP_TERRATEC_SLIM,
}
};
static int __init init_rc_map_terratec_slim(void)
{
return rc_map_register(&terratec_slim_map);
}
static void __exit exit_rc_map_terratec_slim(void)
{
rc_map_unregister(&terratec_slim_map);
}
module_init(init_rc_map_terratec_slim)
module_exit(exit_rc_map_terratec_slim)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <[email protected]>");
MODULE_DESCRIPTION("TerraTec slim remote controller keytable");
|
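The scancodes in the keytable above are rc-core's 24-bit NECX composition: the 16-bit extended address (0x02bd, per the comment) occupies the high bytes and the 8-bit command the low byte, so KEY_EPG's 0x02bd44 is address 0x02bd, command 0x44. A hedged sketch of that composition:

#include <linux/types.h>

/*
 * Illustrative only: compose a NECX scancode the way this map expects,
 * from the two address bytes and the command byte.
 */
static u32 example_necx_scancode(u8 addr_hi, u8 addr_lo, u8 cmd)
{
	return ((u32)addr_hi << 16) | ((u32)addr_lo << 8) | cmd;
}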
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <[email protected]>
*/
#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__
#include <hyp/adjust_pc.h>
#include <hyp/fault.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
#include <kvm/arm_psci.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/traps.h>
struct kvm_exception_table_entry {
int insn, fixup;
};
extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;
/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
if (!vcpu_el1_is_32bit(vcpu))
return;
__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}
static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
/*
* We are about to set CPTR_EL2.TFP to trap all floating point
* register accesses to EL2, however, the ARM ARM clearly states that
* traps are only taken to EL2 if the operation would not otherwise
* trap to EL1. Therefore, always make sure that for 32-bit guests,
* we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
* If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
* it will cause an exception.
*/
if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
write_sysreg(1 << 30, fpexc32_el2);
isb();
}
}
#define compute_clr_set(vcpu, reg, clr, set) \
do { \
u64 hfg; \
hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0; \
set |= hfg & __ ## reg ## _MASK; \
clr |= ~hfg & __ ## reg ## _nMASK; \
} while(0)
#define reg_to_fgt_group_id(reg) \
({ \
enum fgt_group_id id; \
switch(reg) { \
case HFGRTR_EL2: \
case HFGWTR_EL2: \
id = HFGxTR_GROUP; \
break; \
case HFGITR_EL2: \
id = HFGITR_GROUP; \
break; \
case HDFGRTR_EL2: \
case HDFGWTR_EL2: \
id = HDFGRTR_GROUP; \
break; \
case HAFGRTR_EL2: \
id = HAFGRTR_GROUP; \
break; \
default: \
BUILD_BUG_ON(1); \
} \
\
id; \
})
#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
do { \
u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
set |= hfg & __ ## reg ## _MASK; \
clr |= hfg & __ ## reg ## _nMASK; \
} while(0)
#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
do { \
u64 c = 0, s = 0; \
\
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
compute_clr_set(vcpu, reg, c, s); \
\
compute_undef_clr_set(vcpu, kvm, reg, c, s); \
\
s |= set; \
c |= clr; \
if (c || s) { \
u64 val = __ ## reg ## _nMASK; \
val |= s; \
val &= ~c; \
write_sysreg_s(val, SYS_ ## reg); \
} \
} while(0)
#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
/*
* Validate the fine grain trap masks.
* Check that the masks do not overlap and that all bits are accounted for.
*/
#define CHECK_FGT_MASKS(reg) \
do { \
BUILD_BUG_ON((__ ## reg ## _MASK) & (__ ## reg ## _nMASK)); \
BUILD_BUG_ON(~((__ ## reg ## _RES0) ^ (__ ## reg ## _MASK) ^ \
(__ ## reg ## _nMASK))); \
} while(0)
static inline bool cpu_has_amu(void)
{
u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
return cpuid_feature_extract_unsigned_field(pfr0,
ID_AA64PFR0_EL1_AMU_SHIFT);
}
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
CHECK_FGT_MASKS(HFGRTR_EL2);
CHECK_FGT_MASKS(HFGWTR_EL2);
CHECK_FGT_MASKS(HFGITR_EL2);
CHECK_FGT_MASKS(HDFGRTR_EL2);
CHECK_FGT_MASKS(HDFGWTR_EL2);
CHECK_FGT_MASKS(HAFGRTR_EL2);
CHECK_FGT_MASKS(HCRX_EL2);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
HFGxTR_EL2_TCR_EL1_MASK : 0);
update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
#define __deactivate_fgt(hctxt, vcpu, kvm, reg) \
do { \
if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \
kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \
write_sysreg_s(ctxt_sys_reg(hctxt, reg), \
SYS_ ## reg); \
} while (0)
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
if (!cpus_have_final_cap(ARM64_HAS_FGT))
return;
__deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
else
__deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
__deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
__deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
__deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);
if (cpu_has_amu())
__deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
{
u64 r = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;
if (!system_supports_mpam())
return;
/* trap guest access to MPAMIDR_EL1 */
if (system_supports_mpam_hcr()) {
write_sysreg_s(MPAMHCR_EL2_TRAP_MPAMIDR_EL1, SYS_MPAMHCR_EL2);
} else {
/* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */
r |= MPAM2_EL2_TIDR;
}
write_sysreg_s(r, SYS_MPAM2_EL2);
}
static inline void __deactivate_traps_mpam(void)
{
if (!system_supports_mpam())
return;
write_sysreg_s(0, SYS_MPAM2_EL2);
if (system_supports_mpam_hcr())
write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
}
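/* Activate traps and save host state common to all world-switch flavours */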
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
/*
* Make sure we trap PMU access from EL0 to EL2. Also sanitize
* PMSELR_EL0 to make sure it never contains the cycle
* counter, which could make a PMXEVCNTR_EL0 access UNDEF at
* EL1 instead of being trapped to EL2.
*/
if (kvm_arm_support_pmu_v3()) {
struct kvm_cpu_context *hctxt;
write_sysreg(0, pmselr_el0);
hctxt = host_data_ptr(host_ctxt);
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
}
*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
u64 hcrx = vcpu->arch.hcrx_el2;
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
u64 clr = 0, set = 0;
compute_clr_set(vcpu, HCRX_EL2, clr, set);
hcrx |= set;
hcrx &= ~clr;
}
write_sysreg_s(hcrx, SYS_HCRX_EL2);
}
__activate_traps_hfgxtr(vcpu);
__activate_traps_mpam(vcpu);
}
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
write_sysreg(0, hstr_el2);
if (kvm_arm_support_pmu_v3()) {
struct kvm_cpu_context *hctxt;
hctxt = host_data_ptr(host_ctxt);
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
if (cpus_have_final_cap(ARM64_HAS_HCX))
write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
__deactivate_traps_hfgxtr(vcpu);
__deactivate_traps_mpam();
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
hcr |= HCR_TVM;
write_sysreg(hcr, hcr_el2);
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
/*
* If we pended a virtual abort, preserve it until it gets
* cleared. See D1.14.3 (Virtual Interrupts) for details, but
* the crucial bit is "On taking a vSError interrupt,
* HCR_EL2.VSE is cleared to 0."
*/
if (vcpu->arch.hcr_el2 & HCR_VSE) {
vcpu->arch.hcr_el2 &= ~HCR_VSE;
vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
}
}
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}
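/*
 * FEAT_MOPS: the guest took a Memory Copy/Set exception, typically
 * after migrating mid-sequence to a CPU with a different MOPS
 * implementation. Reset the registers so the prologue re-executes.
 */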
static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
/*
* Finish potential single step before executing the prologue
* instruction.
*/
*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
return true;
}
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
/*
* The vCPU's saved SVE state layout always matches the max VL of the
* vCPU. Start off with the max VL so we can load the SVE state.
*/
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
__sve_restore_state(vcpu_sve_pffr(vcpu),
&vcpu->arch.ctxt.fp_regs.fpsr,
true);
/*
* The effective VL for a VM could differ from the max VL when running a
* nested guest, as the guest hypervisor could select a smaller VL. Slap
* that into hardware before wrapping up.
*/
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
}
static inline void __hyp_sve_save_host(void)
{
struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
&sve_state->fpsr,
true);
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
/*
* We trap the first access to the FP/SIMD to save the host context and
* restore the guest context lazily.
* If FP/SIMD is not implemented, handle the trap and inject an undefined
* instruction exception to the guest. Similarly for trapped SVE accesses.
*/
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest;
u8 esr_ec;
if (!system_supports_fpsimd())
return false;
sve_guest = vcpu_has_sve(vcpu);
esr_ec = kvm_vcpu_trap_get_class(vcpu);
/* Only handle traps the vCPU can support here: */
switch (esr_ec) {
case ESR_ELx_EC_FP_ASIMD:
/* Forward traps to the guest hypervisor as required */
if (guest_hyp_fpsimd_traps_enabled(vcpu))
return false;
break;
case ESR_ELx_EC_SYS64:
if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
return false;
fallthrough;
case ESR_ELx_EC_SVE:
if (!sve_guest)
return false;
if (guest_hyp_sve_traps_enabled(vcpu))
return false;
break;
default:
return false;
}
/* Valid trap. Switch the context: */
/* First disable enough traps to allow us to update the registers */
if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
else
cpacr_clear_set(0, CPACR_ELx_FPEN);
isb();
/* Write out the host state if it's in the registers */
if (host_owns_fp_regs())
kvm_hyp_save_fpsimd_host(vcpu);
/* Restore the guest state */
if (sve_guest)
__hyp_sve_restore_guest(vcpu);
else
__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
return true;
}
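/*
 * Cavium ThunderX2 erratum 219 (TVM variant): HCR_EL2.TVM is set only
 * to work around the erratum, so emulate the trapped EL1 VM-register
 * writes in place rather than bouncing out to the sysreg handlers.
 */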
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
int rt = kvm_vcpu_sys_get_rt(vcpu);
u64 val = vcpu_get_reg(vcpu, rt);
/*
* The normal sysreg handling code expects to see the traps,
* let's not do anything here.
*/
if (vcpu->arch.hcr_el2 & HCR_TVM)
return false;
switch (sysreg) {
case SYS_SCTLR_EL1:
write_sysreg_el1(val, SYS_SCTLR);
break;
case SYS_TTBR0_EL1:
write_sysreg_el1(val, SYS_TTBR0);
break;
case SYS_TTBR1_EL1:
write_sysreg_el1(val, SYS_TTBR1);
break;
case SYS_TCR_EL1:
write_sysreg_el1(val, SYS_TCR);
break;
case SYS_ESR_EL1:
write_sysreg_el1(val, SYS_ESR);
break;
case SYS_FAR_EL1:
write_sysreg_el1(val, SYS_FAR);
break;
case SYS_AFSR0_EL1:
write_sysreg_el1(val, SYS_AFSR0);
break;
case SYS_AFSR1_EL1:
write_sysreg_el1(val, SYS_AFSR1);
break;
case SYS_MAIR_EL1:
write_sysreg_el1(val, SYS_MAIR);
break;
case SYS_AMAIR_EL1:
write_sysreg_el1(val, SYS_AMAIR);
break;
case SYS_CONTEXTIDR_EL1:
write_sysreg_el1(val, SYS_CONTEXTIDR);
break;
default:
return false;
}
__kvm_skip_instr(vcpu);
return true;
}
static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *ctxt;
u32 sysreg;
u64 val;
/*
* We only get here for 64bit guests, 32bit guests will hit
* the long and winding road all the way to the standard
* handling. Yes, it sucks to be irrelevant.
*/
sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
switch (sysreg) {
case SYS_CNTPCT_EL0:
case SYS_CNTPCTSS_EL0:
if (vcpu_has_nv(vcpu)) {
if (is_hyp_ctxt(vcpu)) {
ctxt = vcpu_hptimer(vcpu);
break;
}
/* Check for guest hypervisor trapping */
val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
if (!vcpu_el2_e2h_is_set(vcpu))
val = (val & CNTHCTL_EL1PCTEN) << 10;
if (!(val & (CNTHCTL_EL1PCTEN << 10)))
return false;
}
ctxt = vcpu_ptimer(vcpu);
break;
default:
return false;
}
val = arch_timer_read_cntpct_el0();
if (ctxt->offset.vm_offset)
val -= *kern_hyp_va(ctxt->offset.vm_offset);
if (ctxt->offset.vcpu_offset)
val -= *kern_hyp_va(ctxt->offset.vcpu_offset);
vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
__kvm_skip_instr(vcpu);
return true;
}
static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
int rt = kvm_vcpu_sys_get_rt(vcpu);
u64 val = vcpu_get_reg(vcpu, rt);
if (sysreg != SYS_TCR_EL1)
return false;
/*
* Affected parts do not advertise support for hardware Access Flag /
* Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
* control bits are still functional. The architecture requires these be
* RES0 on systems that do not implement FEAT_HAFDBS.
*
* Uphold the requirements of the architecture by masking guest writes
* to TCR_EL1.{HA,HD} here.
*/
val &= ~(TCR_HD | TCR_HA);
write_sysreg_el1(val, SYS_TCR);
__kvm_skip_instr(vcpu);
return true;
}
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
handle_tx2_tvm(vcpu))
return true;
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
handle_ampere1_tcr(vcpu))
return true;
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
__vgic_v3_perform_cpuif_access(vcpu) == 1)
return true;
if (kvm_hyp_handle_cntpct(vcpu))
return true;
return false;
}
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
__vgic_v3_perform_cpuif_access(vcpu) == 1)
return true;
return false;
}
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
return true;
return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
return true;
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
bool valid;
valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
kvm_vcpu_dabt_isvalid(vcpu) &&
!kvm_vcpu_abt_issea(vcpu) &&
!kvm_vcpu_abt_iss1tw(vcpu);
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
if (ret == 1)
return true;
/* Promote an illegal access to an SError. */
if (ret == -1)
*exit_code = ARM_EXCEPTION_EL1_SERROR;
}
}
return false;
}
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
/*
* Allow the hypervisor to handle the exit with an exit handler if it has one.
*
* Returns true if the hypervisor handled the exit, and control should go back
* to the guest, or false if it hasn't.
*/
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
exit_handler_fn fn;
fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
if (fn)
return fn(vcpu, exit_code);
return false;
}
static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
{
/*
* Check for the conditions of Cortex-A510's #2077057. When these occur
* SPSR_EL2 can't be trusted, but isn't needed either as it is
* unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
* Are we single-stepping the guest, and took a PAC exception from the
* active-not-pending state?
*/
if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
*vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
* main run loop.
*/
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
/*
* Save PSTATE early so that we can evaluate the vcpu mode
* early on.
*/
synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Check whether we want to repaint the state one way or
* another.
*/
early_exit_filter(vcpu, exit_code);
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
if (ARM_SERROR_PENDING(*exit_code) &&
ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
/*
* HVC already have an adjusted PC, which we need to
* correct in order to return to after having injected
* the SError.
*
* SMC, on the other hand, is *trapped*, meaning its
* preferred return address is the SMC itself.
*/
if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
}
/*
* We're using the raw exception code in order to only process
* the trap if no SError is pending. We will come back to the
* same PC once the SError has been injected, and replay the
* trapping instruction.
*/
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
/* Check if there's an exit handler and allow it to handle the exit. */
if (kvm_hyp_handle_exit(vcpu, exit_code))
goto guest;
exit:
/* Return to the host kernel and handle the exit */
return false;
guest:
/* Re-enter the guest */
asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
return true;
}
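/*
 * An unexpected exception was taken at EL2: look up ELR_EL2 in the
 * hyp exception table and resume at the fixup if one exists;
 * otherwise restore the hyp context and panic.
 */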
static inline void __kvm_unexpected_el2_exception(void)
{
extern char __guest_exit_restore_elr_and_panic[];
unsigned long addr, fixup;
struct kvm_exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2);
entry = &__start___kvm_ex_table;
end = &__stop___kvm_ex_table;
while (entry < end) {
addr = (unsigned long)&entry->insn + entry->insn;
fixup = (unsigned long)&entry->fixup + entry->fixup;
if (addr != elr_el2) {
entry++;
continue;
}
write_sysreg(fixup, elr_el2);
return;
}
/* Trigger a panic after restoring the hyp context. */
this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
}
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */
|
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/netlink.h>
#include "cfg80211.h"
#include "commands.h"
#include "core.h"
#include "util.h"
#include "bus.h"
/* Supported rates to be advertised to the cfg80211 */
static struct ieee80211_rate qtnf_rates_2g[] = {
{.bitrate = 10, .hw_value = 2, },
{.bitrate = 20, .hw_value = 4, },
{.bitrate = 55, .hw_value = 11, },
{.bitrate = 110, .hw_value = 22, },
{.bitrate = 60, .hw_value = 12, },
{.bitrate = 90, .hw_value = 18, },
{.bitrate = 120, .hw_value = 24, },
{.bitrate = 180, .hw_value = 36, },
{.bitrate = 240, .hw_value = 48, },
{.bitrate = 360, .hw_value = 72, },
{.bitrate = 480, .hw_value = 96, },
{.bitrate = 540, .hw_value = 108, },
};
/* Supported rates to be advertised to the cfg80211 */
static struct ieee80211_rate qtnf_rates_5g[] = {
{.bitrate = 60, .hw_value = 12, },
{.bitrate = 90, .hw_value = 18, },
{.bitrate = 120, .hw_value = 24, },
{.bitrate = 180, .hw_value = 36, },
{.bitrate = 240, .hw_value = 48, },
{.bitrate = 360, .hw_value = 72, },
{.bitrate = 480, .hw_value = 96, },
{.bitrate = 540, .hw_value = 108, },
};
/* Supported crypto cipher suits to be advertised to cfg80211 */
static const u32 qtnf_cipher_suites[] = {
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WLAN_CIPHER_SUITE_AES_CMAC,
};
/* Supported mgmt frame types to be advertised to cfg80211 */
static const struct ieee80211_txrx_stypes
qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4),
},
[NL80211_IFTYPE_AP] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4),
},
};
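/* Validate a requested interface add or type change against the
 * combinations advertised to cfg80211, plus the restriction that an
 * AP cannot act as the primary repeater interface.
 */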
static int
qtnf_validate_iface_combinations(struct wiphy *wiphy,
struct qtnf_vif *change_vif,
enum nl80211_iftype new_type)
{
struct qtnf_wmac *mac;
struct qtnf_vif *vif;
int i;
int ret = 0;
struct iface_combination_params params = {
.num_different_channels = 1,
};
mac = wiphy_priv(wiphy);
if (!mac)
return -EFAULT;
for (i = 0; i < QTNF_MAX_INTF; i++) {
vif = &mac->iflist[i];
if (vif->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
params.iftype_num[vif->wdev.iftype]++;
}
if (change_vif) {
params.iftype_num[new_type]++;
params.iftype_num[change_vif->wdev.iftype]--;
} else {
params.iftype_num[new_type]++;
}
ret = cfg80211_check_combinations(wiphy, ¶ms);
if (ret)
return ret;
/* Check repeater interface combination: primary VIF should be STA only.
* STA (primary) + AP (secondary) is OK.
* AP (primary) + STA (secondary) is not supported.
*/
vif = qtnf_mac_get_base_vif(mac);
if (vif && vif->wdev.iftype == NL80211_IFTYPE_AP &&
vif != change_vif && new_type == NL80211_IFTYPE_STATION) {
ret = -EINVAL;
pr_err("MAC%u invalid combination: AP as primary repeater interface is not supported\n",
mac->macid);
}
return ret;
}
static int
qtnf_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
enum nl80211_iftype type,
struct vif_params *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
u8 *mac_addr = NULL;
int use4addr = 0;
int ret;
ret = qtnf_validate_iface_combinations(wiphy, vif, type);
if (ret) {
pr_err("VIF%u.%u combination check: failed to set type %d\n",
vif->mac->macid, vif->vifid, type);
return ret;
}
if (params) {
mac_addr = params->macaddr;
use4addr = params->use_4addr;
}
qtnf_scan_done(vif->mac, true);
ret = qtnf_cmd_send_change_intf_type(vif, type, use4addr, mac_addr);
if (ret) {
pr_err("VIF%u.%u: failed to change type to %d\n",
vif->mac->macid, vif->vifid, type);
return ret;
}
vif->wdev.iftype = type;
return 0;
}
int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct net_device *netdev = wdev->netdev;
struct qtnf_vif *vif;
struct sk_buff *skb;
if (WARN_ON(!netdev))
return -EFAULT;
vif = qtnf_netdev_get_priv(wdev->netdev);
qtnf_scan_done(vif->mac, true);
/* Stop data */
netif_tx_stop_all_queues(netdev);
if (netif_carrier_ok(netdev))
netif_carrier_off(netdev);
while ((skb = skb_dequeue(&vif->high_pri_tx_queue)))
dev_kfree_skb_any(skb);
cancel_work_sync(&vif->high_pri_tx_work);
if (netdev->reg_state == NETREG_REGISTERED)
cfg80211_unregister_netdevice(netdev);
if (qtnf_cmd_send_del_intf(vif))
pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
vif->vifid);
vif->netdev->ieee80211_ptr = NULL;
vif->netdev = NULL;
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
return 0;
}
static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
const char *name,
unsigned char name_assign_t,
enum nl80211_iftype type,
struct vif_params *params)
{
struct qtnf_wmac *mac;
struct qtnf_vif *vif;
u8 *mac_addr = NULL;
int use4addr = 0;
int ret;
mac = wiphy_priv(wiphy);
if (!mac)
return ERR_PTR(-EFAULT);
ret = qtnf_validate_iface_combinations(wiphy, NULL, type);
if (ret) {
pr_err("MAC%u invalid combination: failed to add type %d\n",
mac->macid, type);
return ERR_PTR(ret);
}
switch (type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
vif = qtnf_mac_get_free_vif(mac);
if (!vif) {
pr_err("MAC%u: no free VIF available\n", mac->macid);
return ERR_PTR(-EFAULT);
}
eth_zero_addr(vif->mac_addr);
eth_zero_addr(vif->bssid);
vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
memset(&vif->wdev, 0, sizeof(vif->wdev));
vif->wdev.wiphy = wiphy;
vif->wdev.iftype = type;
break;
default:
pr_err("MAC%u: unsupported IF type %d\n", mac->macid, type);
return ERR_PTR(-EOPNOTSUPP);
}
if (params) {
mac_addr = params->macaddr;
use4addr = params->use_4addr;
}
ret = qtnf_cmd_send_add_intf(vif, type, use4addr, mac_addr);
if (ret) {
pr_err("VIF%u.%u: failed to add VIF %pM\n",
mac->macid, vif->vifid, mac_addr);
goto err_cmd;
}
if (!is_valid_ether_addr(vif->mac_addr)) {
pr_err("VIF%u.%u: FW reported bad MAC: %pM\n",
mac->macid, vif->vifid, vif->mac_addr);
ret = -EINVAL;
goto error_del_vif;
}
ret = qtnf_core_net_attach(mac, vif, name, name_assign_t);
if (ret) {
pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
vif->vifid);
goto error_del_vif;
}
if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret) {
cfg80211_unregister_netdevice(vif->netdev);
vif->netdev = NULL;
goto error_del_vif;
}
}
vif->wdev.netdev = vif->netdev;
return &vif->wdev;
error_del_vif:
qtnf_cmd_send_del_intf(vif);
err_cmd:
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
return ERR_PTR(ret);
}
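/* Push beacon, probe response and association response IEs to the
 * firmware; an empty set clears the corresponding IEs.
 */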
static int qtnf_mgmt_set_appie(struct qtnf_vif *vif,
const struct cfg80211_beacon_data *info)
{
int ret = 0;
if (!info->beacon_ies || !info->beacon_ies_len) {
ret = qtnf_cmd_send_mgmt_set_appie(vif, QLINK_IE_SET_BEACON_IES,
NULL, 0);
} else {
ret = qtnf_cmd_send_mgmt_set_appie(vif, QLINK_IE_SET_BEACON_IES,
info->beacon_ies,
info->beacon_ies_len);
}
if (ret)
goto out;
if (!info->proberesp_ies || !info->proberesp_ies_len) {
ret = qtnf_cmd_send_mgmt_set_appie(vif,
QLINK_IE_SET_PROBE_RESP_IES,
NULL, 0);
} else {
ret = qtnf_cmd_send_mgmt_set_appie(vif,
QLINK_IE_SET_PROBE_RESP_IES,
info->proberesp_ies,
info->proberesp_ies_len);
}
if (ret)
goto out;
if (!info->assocresp_ies || !info->assocresp_ies_len) {
ret = qtnf_cmd_send_mgmt_set_appie(vif,
QLINK_IE_SET_ASSOC_RESP,
NULL, 0);
} else {
ret = qtnf_cmd_send_mgmt_set_appie(vif,
QLINK_IE_SET_ASSOC_RESP,
info->assocresp_ies,
info->assocresp_ies_len);
}
out:
return ret;
}
static int qtnf_change_beacon(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_update *info)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
return qtnf_mgmt_set_appie(vif, &info->beacon);
}
static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
ret = qtnf_cmd_send_start_ap(vif, settings);
if (ret)
pr_err("VIF%u.%u: failed to start AP\n", vif->mac->macid,
vif->vifid);
return ret;
}
static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev,
unsigned int link_id)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
qtnf_scan_done(vif->mac, true);
ret = qtnf_cmd_send_stop_ap(vif);
if (ret)
pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
vif->mac->macid, vif->vifid);
netif_carrier_off(vif->netdev);
return ret;
}
static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct qtnf_vif *vif;
int ret;
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
return -EFAULT;
}
ret = qtnf_cmd_send_update_phy_params(mac, changed);
if (ret)
pr_err("MAC%u: failed to update PHY params\n", mac->macid);
return ret;
}
static void
qtnf_update_mgmt_frame_registrations(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct mgmt_frame_regs *upd)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
u16 new_mask = upd->interface_stypes;
u16 old_mask = vif->mgmt_frames_bitmask;
static const struct {
u16 mask, qlink_type;
} updates[] = {
{
.mask = BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_ASSOC_REQ >> 4),
.qlink_type = QLINK_MGMT_FRAME_ASSOC_REQ,
},
{
.mask = BIT(IEEE80211_STYPE_AUTH >> 4),
.qlink_type = QLINK_MGMT_FRAME_AUTH,
},
{
.mask = BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
.qlink_type = QLINK_MGMT_FRAME_PROBE_REQ,
},
{
.mask = BIT(IEEE80211_STYPE_ACTION >> 4),
.qlink_type = QLINK_MGMT_FRAME_ACTION,
},
};
unsigned int i;
if (new_mask == old_mask)
return;
for (i = 0; i < ARRAY_SIZE(updates); i++) {
u16 mask = updates[i].mask;
u16 qlink_frame_type = updates[i].qlink_type;
bool reg;
/* the ! are here due to the assoc/reassoc merge */
if (!(new_mask & mask) == !(old_mask & mask))
continue;
reg = new_mask & mask;
if (qtnf_cmd_send_register_mgmt(vif, qlink_frame_type, reg))
pr_warn("VIF%u.%u: failed to %sregister qlink frame type 0x%x\n",
vif->mac->macid, vif->vifid, reg ? "" : "un",
qlink_frame_type);
}
vif->mgmt_frames_bitmask = new_mask;
}
static int
qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
u32 short_cookie = get_random_u32();
u16 flags = 0;
u16 freq;
*cookie = short_cookie;
if (params->offchan)
flags |= QLINK_FRAME_TX_FLAG_OFFCHAN;
if (params->no_cck)
flags |= QLINK_FRAME_TX_FLAG_NO_CCK;
if (params->dont_wait_for_ack)
flags |= QLINK_FRAME_TX_FLAG_ACK_NOWAIT;
/* If channel is not specified, pass "freq = 0" to tell device
* firmware to use current channel.
*/
if (params->chan)
freq = params->chan->center_freq;
else
freq = 0;
pr_debug("%s freq:%u; FC:%.4X; DA:%pM; len:%zu; C:%.8X; FL:%.4X\n",
wdev->netdev->name, freq,
le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
params->len, short_cookie, flags);
return qtnf_cmd_send_frame(vif, short_cookie, flags,
freq, params->buf, params->len);
}
static int
qtnf_get_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
sinfo->generation = vif->generation;
return qtnf_cmd_get_sta_info(vif, mac, sinfo);
}
static int
qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
const struct qtnf_sta_node *sta_node;
int ret;
switch (vif->wdev.iftype) {
case NL80211_IFTYPE_STATION:
if (idx != 0 || !vif->wdev.connected)
return -ENOENT;
ether_addr_copy(mac, vif->bssid);
break;
case NL80211_IFTYPE_AP:
sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);
if (unlikely(!sta_node))
return -ENOENT;
ether_addr_copy(mac, sta_node->mac_addr);
break;
default:
return -EOPNOTSUPP;
}
ret = qtnf_cmd_get_sta_info(vif, mac, sinfo);
if (vif->wdev.iftype == NL80211_IFTYPE_AP) {
if (ret == -ENOENT) {
cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
sinfo->filled = 0;
}
}
sinfo->generation = vif->generation;
return ret;
}
static int qtnf_add_key(struct wiphy *wiphy, struct net_device *dev,
int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, struct key_params *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
ret = qtnf_cmd_send_add_key(vif, key_index, pairwise, mac_addr, params);
if (ret)
pr_err("VIF%u.%u: failed to add key: cipher=%x idx=%u pw=%u\n",
vif->mac->macid, vif->vifid, params->cipher, key_index,
pairwise);
return ret;
}
static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr);
if (ret) {
if (ret == -ENOENT) {
pr_debug("VIF%u.%u: key index %d out of bounds\n",
vif->mac->macid, vif->vifid, key_index);
} else {
pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
vif->mac->macid, vif->vifid,
key_index, pairwise);
}
}
return ret;
}
static int qtnf_set_default_key(struct wiphy *wiphy, struct net_device *dev,
int link_id, u8 key_index, bool unicast,
bool multicast)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
ret = qtnf_cmd_send_set_default_key(vif, key_index, unicast, multicast);
if (ret)
pr_err("VIF%u.%u: failed to set dflt key: idx=%u uc=%u mc=%u\n",
vif->mac->macid, vif->vifid, key_index, unicast,
multicast);
return ret;
}
static int
qtnf_set_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev,
int link_id, u8 key_index)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
ret = qtnf_cmd_send_set_default_mgmt_key(vif, key_index);
if (ret)
pr_err("VIF%u.%u: failed to set default MGMT key: idx=%u\n",
vif->mac->macid, vif->vifid, key_index);
return ret;
}
static int
qtnf_change_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_parameters *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
ret = qtnf_cmd_send_change_sta(vif, mac, params);
if (ret)
pr_err("VIF%u.%u: failed to change STA %pM\n",
vif->mac->macid, vif->vifid, mac);
return ret;
}
static int
qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
struct station_del_parameters *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
if (params->mac &&
(vif->wdev.iftype == NL80211_IFTYPE_AP) &&
!is_broadcast_ether_addr(params->mac) &&
!qtnf_sta_list_lookup(&vif->sta_list, params->mac))
return 0;
ret = qtnf_cmd_send_del_sta(vif, params);
if (ret)
pr_err("VIF%u.%u: failed to delete STA %pM\n",
vif->mac->macid, vif->vifid, params->mac);
return ret;
}
static int
qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
int ret;
cancel_delayed_work_sync(&mac->scan_timeout);
mac->scan_req = request;
ret = qtnf_cmd_send_scan(mac);
if (ret) {
pr_err("MAC%u: failed to start scan\n", mac->macid);
mac->scan_req = NULL;
goto out;
}
pr_debug("MAC%u: scan started\n", mac->macid);
queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout,
QTNF_SCAN_TIMEOUT_SEC * HZ);
out:
return ret;
}
static int
qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
if (sme->auth_type == NL80211_AUTHTYPE_SAE &&
!(sme->flags & CONNECT_REQ_EXTERNAL_AUTH_SUPPORT)) {
pr_err("can not offload authentication to userspace\n");
return -EOPNOTSUPP;
}
if (sme->bssid)
ether_addr_copy(vif->bssid, sme->bssid);
else
eth_zero_addr(vif->bssid);
ret = qtnf_cmd_send_connect(vif, sme);
if (ret)
pr_err("VIF%u.%u: failed to connect\n",
vif->mac->macid, vif->vifid);
return ret;
}
static int
qtnf_external_auth(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_external_auth_params *auth)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
!ether_addr_equal(vif->bssid, auth->bssid))
pr_warn("unexpected bssid: %pM", auth->bssid);
ret = qtnf_cmd_send_external_auth(vif, auth);
if (ret)
pr_err("VIF%u.%u: failed to report external auth\n",
vif->mac->macid, vif->vifid);
return ret;
}
static int
qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct qtnf_vif *vif;
int ret = 0;
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
return -EFAULT;
}
if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
ret = qtnf_cmd_send_disconnect(vif, reason_code);
if (ret)
pr_err("VIF%u.%u: failed to disconnect\n",
mac->macid, vif->vifid);
if (vif->wdev.connected) {
netif_carrier_off(vif->netdev);
cfg80211_disconnected(vif->netdev, reason_code,
NULL, 0, true, GFP_KERNEL);
}
return ret;
}
static int
qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
int idx, struct survey_info *survey)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct ieee80211_supported_band *sband;
const struct cfg80211_chan_def *chandef = wdev_chandef(wdev, 0);
struct ieee80211_channel *chan;
int ret;
sband = wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
sband = wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels)
return -ENOENT;
chan = &sband->channels[idx];
survey->channel = chan;
survey->filled = 0x0;
if (chandef && chan == chandef->chan)
survey->filled = SURVEY_INFO_IN_USE;
ret = qtnf_cmd_get_chan_stats(mac, chan->center_freq, survey);
if (ret)
pr_debug("failed to get chan(%d) stats from card\n",
chan->hw_value);
return ret;
}
static int
qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
unsigned int link_id, struct cfg80211_chan_def *chandef)
{
struct net_device *ndev = wdev->netdev;
struct qtnf_vif *vif;
int ret;
if (!ndev)
return -ENODEV;
vif = qtnf_netdev_get_priv(wdev->netdev);
ret = qtnf_cmd_get_channel(vif, chandef);
if (ret) {
pr_err("%s: failed to get channel: %d\n", ndev->name, ret);
ret = -ENODATA;
goto out;
}
if (!cfg80211_chandef_valid(chandef)) {
pr_err("%s: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
ndev->name, chandef->chan->center_freq,
chandef->center_freq1, chandef->center_freq2,
chandef->width);
ret = -ENODATA;
goto out;
}
out:
return ret;
}
static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_csa_settings *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
pr_debug("%s: chan(%u) count(%u) radar(%u) block_tx(%u)\n", dev->name,
params->chandef.chan->hw_value, params->count,
params->radar_required, params->block_tx);
if (!cfg80211_chandef_valid(¶ms->chandef)) {
pr_err("%s: invalid channel\n", dev->name);
return -EINVAL;
}
ret = qtnf_cmd_send_chan_switch(vif, params);
if (ret)
pr_warn("%s: failed to switch to channel (%u)\n",
dev->name, params->chandef.chan->hw_value);
return ret;
}
static int qtnf_start_radar_detection(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_chan_def *chandef,
u32 cac_time_ms, int link_id)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
int ret;
if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD))
return -EOPNOTSUPP;
ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms);
if (ret)
pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret);
return ret;
}
static int qtnf_set_mac_acl(struct wiphy *wiphy,
struct net_device *dev,
const struct cfg80211_acl_data *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
ret = qtnf_cmd_set_mac_acl(vif, params);
if (ret)
pr_err("%s: failed to set mac ACL ret=%d\n", dev->name, ret);
return ret;
}
static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
bool enabled, int timeout)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
QLINK_PM_OFF, timeout);
if (ret)
pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
return ret;
}
static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
int *dbm)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
int ret;
ret = qtnf_cmd_get_tx_power(vif, dbm);
if (ret)
pr_err("MAC%u: failed to get Tx power\n", vif->mac->macid);
return ret;
}
static int qtnf_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm)
{
struct qtnf_vif *vif;
int ret;
if (wdev) {
vif = qtnf_netdev_get_priv(wdev->netdev);
} else {
struct qtnf_wmac *mac = wiphy_priv(wiphy);
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not configured\n",
mac->macid);
return -EFAULT;
}
}
ret = qtnf_cmd_set_tx_power(vif, type, mbm);
if (ret)
pr_err("MAC%u: failed to set Tx power\n", vif->mac->macid);
return ret;
}
static int qtnf_update_owe_info(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_owe_info *owe_info)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
if (vif->wdev.iftype != NL80211_IFTYPE_AP)
return -EOPNOTSUPP;
ret = qtnf_cmd_send_update_owe(vif, owe_info);
if (ret)
pr_err("VIF%u.%u: failed to update owe info\n",
vif->mac->macid, vif->vifid);
return ret;
}
#ifdef CONFIG_PM
static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct qtnf_vif *vif;
int ret = 0;
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
ret = -EFAULT;
goto exit;
}
if (!wowlan) {
pr_debug("WoWLAN triggers are not enabled\n");
qtnf_virtual_intf_cleanup(vif->netdev);
goto exit;
}
qtnf_scan_done(vif->mac, true);
ret = qtnf_cmd_send_wowlan_set(vif, wowlan);
if (ret) {
pr_err("MAC%u: failed to set WoWLAN triggers\n",
mac->macid);
goto exit;
}
exit:
return ret;
}
static int qtnf_resume(struct wiphy *wiphy)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct qtnf_vif *vif;
int ret = 0;
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
return -EFAULT;
}
ret = qtnf_cmd_send_wowlan_set(vif, NULL);
if (ret)
pr_err("MAC%u: failed to reset WoWLAN triggers\n",
mac->macid);
return ret;
}
static void qtnf_set_wakeup(struct wiphy *wiphy, bool enabled)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct qtnf_bus *bus = mac->bus;
device_set_wakeup_enable(bus->dev, enabled);
}
#endif
static struct cfg80211_ops qtn_cfg80211_ops = {
.add_virtual_intf = qtnf_add_virtual_intf,
.change_virtual_intf = qtnf_change_virtual_intf,
.del_virtual_intf = qtnf_del_virtual_intf,
.start_ap = qtnf_start_ap,
.change_beacon = qtnf_change_beacon,
.stop_ap = qtnf_stop_ap,
.set_wiphy_params = qtnf_set_wiphy_params,
.update_mgmt_frame_registrations =
qtnf_update_mgmt_frame_registrations,
.mgmt_tx = qtnf_mgmt_tx,
.change_station = qtnf_change_station,
.del_station = qtnf_del_station,
.get_station = qtnf_get_station,
.dump_station = qtnf_dump_station,
.add_key = qtnf_add_key,
.del_key = qtnf_del_key,
.set_default_key = qtnf_set_default_key,
.set_default_mgmt_key = qtnf_set_default_mgmt_key,
.scan = qtnf_scan,
.connect = qtnf_connect,
.external_auth = qtnf_external_auth,
.disconnect = qtnf_disconnect,
.dump_survey = qtnf_dump_survey,
.get_channel = qtnf_get_channel,
.channel_switch = qtnf_channel_switch,
.start_radar_detection = qtnf_start_radar_detection,
.set_mac_acl = qtnf_set_mac_acl,
.set_power_mgmt = qtnf_set_power_mgmt,
.get_tx_power = qtnf_get_tx_power,
.set_tx_power = qtnf_set_tx_power,
.update_owe_info = qtnf_update_owe_info,
#ifdef CONFIG_PM
.suspend = qtnf_suspend,
.resume = qtnf_resume,
.set_wakeup = qtnf_set_wakeup,
#endif
};
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
enum nl80211_band band;
int ret;
pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
req->alpha2[0], req->alpha2[1]);
ret = qtnf_cmd_reg_notify(mac, req, qtnf_slave_radar_get(),
qtnf_dfs_offload_get());
if (ret) {
pr_err("MAC%u: failed to update region to %c%c: %d\n",
mac->macid, req->alpha2[0], req->alpha2[1], ret);
return;
}
for (band = 0; band < NUM_NL80211_BANDS; ++band) {
if (!wiphy->bands[band])
continue;
ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
if (ret)
pr_err("MAC%u: failed to update band %u\n",
mac->macid, band);
}
}
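/* Allocate the wiphy, dropping optional cfg80211 ops that the
 * hardware/firmware combination cannot back.
 */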
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus,
struct platform_device *pdev)
{
struct wiphy *wiphy;
if (qtnf_dfs_offload_get() &&
qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_DFS_OFFLOAD))
qtn_cfg80211_ops.start_radar_detection = NULL;
if (!qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_PWR_MGMT))
qtn_cfg80211_ops.set_power_mgmt = NULL;
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
if (!wiphy)
return NULL;
if (pdev)
set_wiphy_dev(wiphy, &pdev->dev);
else
set_wiphy_dev(wiphy, bus->dev);
return wiphy;
}
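/* Expose firmware-reported interface combinations to cfg80211 and
 * derive wiphy->interface_modes from the combination limits.
 */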
static int
qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, struct qtnf_mac_info *mac_info)
{
struct ieee80211_iface_combination *if_comb;
size_t n_if_comb;
u16 interface_modes = 0;
size_t i, j;
if_comb = mac_info->if_comb;
n_if_comb = mac_info->n_if_comb;
if (!if_comb || !n_if_comb)
return -ENOENT;
for (i = 0; i < n_if_comb; i++) {
if_comb[i].radar_detect_widths = mac_info->radar_detect_widths;
for (j = 0; j < if_comb[i].n_limits; j++)
interface_modes |= if_comb[i].limits[j].types;
}
wiphy->iface_combinations = if_comb;
wiphy->n_iface_combinations = n_if_comb;
wiphy->interface_modes = interface_modes;
return 0;
}
int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct qtnf_mac_info *macinfo = &mac->macinfo;
int ret;
bool regdomain_is_known;
if (!wiphy) {
pr_err("invalid wiphy pointer\n");
return -EFAULT;
}
wiphy->frag_threshold = macinfo->frag_thr;
wiphy->rts_threshold = macinfo->rts_thr;
wiphy->retry_short = macinfo->sretry_limit;
wiphy->retry_long = macinfo->lretry_limit;
wiphy->coverage_class = macinfo->coverage_class;
wiphy->max_scan_ssids =
(macinfo->max_scan_ssids) ? macinfo->max_scan_ssids : 1;
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
wiphy->max_acl_mac_addrs = macinfo->max_acl_mac_addrs;
wiphy->max_num_csa_counters = 2;
ret = qtnf_wiphy_setup_if_comb(wiphy, macinfo);
if (ret)
goto out;
/* Initialize cipher suits */
wiphy->cipher_suites = qtnf_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(qtnf_cipher_suites);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_4ADDR_STATION |
WIPHY_FLAG_NETNS_OK;
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (qtnf_dfs_offload_get() &&
qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_DFS_OFFLOAD))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SCAN_DWELL))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
wiphy->available_antennas_tx = macinfo->num_tx_chain;
wiphy->available_antennas_rx = macinfo->num_rx_chain;
wiphy->max_ap_assoc_sta = macinfo->max_ap_assoc_sta;
wiphy->ht_capa_mod_mask = &macinfo->ht_cap_mod_mask;
wiphy->vht_capa_mod_mask = &macinfo->vht_cap_mod_mask;
ether_addr_copy(wiphy->perm_addr, mac->macaddr);
if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_STA_INACT_TIMEOUT))
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR))
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
if (!qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_OBSS_SCAN))
wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SAE))
wiphy->features |= NL80211_FEATURE_SAE;
#ifdef CONFIG_PM
if (macinfo->wowlan)
wiphy->wowlan = macinfo->wowlan;
#endif
regdomain_is_known = isalpha(mac->rd->alpha2[0]) &&
isalpha(mac->rd->alpha2[1]);
if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_REG_UPDATE)) {
wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
if (mac->rd->alpha2[0] == '9' && mac->rd->alpha2[1] == '9') {
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_STRICT_REG;
wiphy_apply_custom_regulatory(wiphy, mac->rd);
} else if (regdomain_is_known) {
wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
}
} else {
wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
}
if (mac->macinfo.extended_capabilities_len) {
wiphy->extended_capabilities =
mac->macinfo.extended_capabilities;
wiphy->extended_capabilities_mask =
mac->macinfo.extended_capabilities_mask;
wiphy->extended_capabilities_len =
mac->macinfo.extended_capabilities_len;
}
strscpy(wiphy->fw_version, hw_info->fw_version,
sizeof(wiphy->fw_version));
wiphy->hw_version = hw_info->hw_version;
ret = wiphy_register(wiphy);
if (ret < 0)
goto out;
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
ret = regulatory_set_wiphy_regd(wiphy, mac->rd);
else if (regdomain_is_known)
ret = regulatory_hint(wiphy, mac->rd->alpha2);
out:
return ret;
}
void qtnf_netdev_updown(struct net_device *ndev, bool up)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
if (qtnf_cmd_send_updown_intf(vif, up))
pr_err("failed to send %s command to VIF%u.%u\n",
up ? "UP" : "DOWN", vif->mac->macid, vif->vifid);
}
void qtnf_virtual_intf_cleanup(struct net_device *ndev)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
struct qtnf_wmac *mac = wiphy_priv(vif->wdev.wiphy);
if (vif->wdev.iftype == NL80211_IFTYPE_STATION)
qtnf_disconnect(vif->wdev.wiphy, ndev,
WLAN_REASON_DEAUTH_LEAVING);
qtnf_scan_done(mac, true);
}
void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
{
if (vif->wdev.iftype == NL80211_IFTYPE_STATION)
cfg80211_disconnected(vif->netdev, WLAN_REASON_DEAUTH_LEAVING,
NULL, 0, 1, GFP_KERNEL);
cfg80211_shutdown_all_interfaces(vif->wdev.wiphy);
}
void qtnf_band_init_rates(struct ieee80211_supported_band *band)
{
switch (band->band) {
case NL80211_BAND_2GHZ:
band->bitrates = qtnf_rates_2g;
band->n_bitrates = ARRAY_SIZE(qtnf_rates_2g);
break;
case NL80211_BAND_5GHZ:
band->bitrates = qtnf_rates_5g;
band->n_bitrates = ARRAY_SIZE(qtnf_rates_5g);
break;
default:
band->bitrates = NULL;
band->n_bitrates = 0;
break;
}
}
|
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-2021 Intel Corporation.
/*
* SDW Intel ACPI scan helpers
*/
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fwnode.h>
#include <linux/module.h>
#include <linux/soundwire/sdw_intel.h>
#include <linux/string.h>
#define SDW_LINK_TYPE 4 /* from Intel ACPI documentation */
static int ctrl_link_mask;
module_param_named(sdw_link_mask, ctrl_link_mask, int, 0444);
MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
static ulong ctrl_addr = 0x40000000;
module_param_named(sdw_ctrl_addr, ctrl_addr, ulong, 0444);
MODULE_PARM_DESC(sdw_ctrl_addr, "Intel SoundWire Controller _ADR");
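/*
 * A link is enabled unless its 'mipi-sdw-link-N-subproperties' node is
 * missing or carries the Intel bus-disable quirk.
 */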
static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx)
{
struct fwnode_handle *link;
char name[32];
u32 quirk_mask = 0;
/* Find master handle */
snprintf(name, sizeof(name),
"mipi-sdw-link-%hhu-subproperties", idx);
link = fwnode_get_named_child_node(fw_node, name);
if (!link)
return false;
fwnode_property_read_u32(link,
"intel-quirk-mask",
&quirk_mask);
fwnode_handle_put(link);
if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
return false;
return true;
}
static int
sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(info->handle);
struct fwnode_handle *fwnode;
unsigned long list;
unsigned int i;
u32 count;
u32 tmp;
int ret;
if (!adev)
return -EINVAL;
fwnode = acpi_fwnode_handle(adev);
/*
* Found controller, find links supported
*
* In theory we could check the number of links supported in
* hardware, but in that step we cannot assume SoundWire IP is
* powered.
*
* In addition, if the BIOS doesn't even provide this
* 'master-count' property then all the inits based on link
* masks will fail as well.
*
* We will check the hardware capabilities in the startup() step
*/
ret = fwnode_property_read_u32(fwnode, "mipi-sdw-manager-list", &tmp);
if (ret) {
ret = fwnode_property_read_u32(fwnode, "mipi-sdw-master-count", &count);
if (ret) {
dev_err(&adev->dev,
"Failed to read mipi-sdw-master-count: %d\n",
ret);
return ret;
}
list = GENMASK(count - 1, 0);
} else {
list = tmp;
count = hweight32(list);
}
/* Check count is within bounds */
if (count > SDW_INTEL_MAX_LINKS) {
dev_err(&adev->dev, "Link count %d exceeds max %d\n",
count, SDW_INTEL_MAX_LINKS);
return -EINVAL;
}
if (!count) {
dev_warn(&adev->dev, "No SoundWire links detected\n");
return -EINVAL;
}
dev_dbg(&adev->dev, "ACPI reports %d SDW Link devices\n", count);
info->count = count;
info->link_mask = 0;
for_each_set_bit(i, &list, SDW_INTEL_MAX_LINKS) {
if (ctrl_link_mask && !(ctrl_link_mask & BIT(i))) {
dev_dbg(&adev->dev,
"Link %d masked, will not be enabled\n", i);
continue;
}
if (!is_link_enabled(fwnode, i)) {
dev_dbg(&adev->dev,
"Link %d not selected in firmware\n", i);
continue;
}
info->link_mask |= BIT(i);
}
return 0;
}
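/*
 * acpi_walk_namespace() callback: identify the SoundWire controller by
 * the link type in _ADR bits 31..28 and the expected controller
 * address, then stop the walk once found.
 */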
static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
void *cdata, void **return_value)
{
struct sdw_intel_acpi_info *info = cdata;
u64 adr;
int ret;
ret = acpi_get_local_u64_address(handle, &adr);
if (ret < 0)
return AE_OK; /* keep going */
if (!acpi_fetch_acpi_dev(handle)) {
pr_err("%s: Couldn't find ACPI handle\n", __func__);
return AE_NOT_FOUND;
}
/*
* On some Intel platforms, multiple children of the HDAS
* device can be found, but only one of them is the SoundWire
* controller. The SNDW device is always exposed with
* Name(_ADR, 0x40000000), with bits 31..28 representing the
* SoundWire link so filter accordingly
*/
if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
return AE_OK; /* keep going */
if (adr != ctrl_addr)
return AE_OK; /* keep going */
/* found the correct SoundWire controller */
info->handle = handle;
/* device found, stop namespace walk */
return AE_CTRL_TERMINATE;
}
/**
* sdw_intel_acpi_scan() - SoundWire Intel init routine
* @parent_handle: ACPI parent handle
* @info: description of what firmware/DSDT tables expose
*
* This scans the namespace and queries firmware to figure out which
* links to enable. A follow-up use of sdw_intel_probe() and
* sdw_intel_startup() is required for creation of devices and bus
* startup
*/
int sdw_intel_acpi_scan(acpi_handle *parent_handle,
struct sdw_intel_acpi_info *info)
{
acpi_status status;
info->handle = NULL;
/*
* In the HDAS ACPI scope, 'SNDW' may be either the child of
* 'HDAS' or the grandchild of 'HDAS'. So let's go through
* the ACPI from 'HDAS' at max depth of 2 to find the 'SNDW'
* device.
*/
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
parent_handle, 2,
sdw_intel_acpi_cb,
NULL, info, NULL);
if (ACPI_FAILURE(status) || info->handle == NULL)
return -ENODEV;
return sdw_intel_scan_controller(info);
}
EXPORT_SYMBOL_NS(sdw_intel_acpi_scan, "SND_INTEL_SOUNDWIRE_ACPI");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Soundwire ACPI helpers");
|