code
stringlengths 0
23.9M
|
---|
/*
* PQ3 eTSEC2 device tree stub [ @ offsets 0x24000/0xb0000 ]
*
* Copyright 2011 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* eTSEC2 external MDIO bus. The second reg tuple (0xb0030, 4 bytes)
 * maps a register in the first eTSEC group used by the MDIO driver —
 * presumably the MDIO interrupt-event register; confirm against the RM.
 */
mdio@24000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,etsec2-mdio";
reg = <0x24000 0x1000 0xb0030 0x4>;
};
/* eTSEC2 Ethernet controller, 8 RX + 8 TX queues, magic-packet wakeup.
 * local-mac-address is all zeros here and is expected to be filled in
 * by firmware/bootloader — TODO confirm for this platform.
 */
ethernet@b0000 {
#address-cells = <1>;
#size-cells = <1>;
device_type = "network";
model = "eTSEC";
compatible = "fsl,etsec2";
fsl,num_rx_queues = <0x8>;
fsl,num_tx_queues = <0x8>;
fsl,magic-packet;
local-mac-address = [ 00 00 00 00 00 00 ];
ranges;
/* First (and only visible) interrupt queue group. */
queue-group@b0000 {
#address-cells = <1>;
#size-cells = <1>;
reg = <0xb0000 0x1000>;
/* Three interrupts (4 cells each) — presumably TX, RX and error;
 * verify against the fsl,etsec2 binding.
 */
interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
};
};
|
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018-2019 NXP
* Dong Aisheng <[email protected]>
*/
/* The ADMA PWM instance (and its clock gate) from the common dtsi does
 * not apply to this SoC variant, so remove both nodes.
 */
/delete-node/ &adma_pwm;
/delete-node/ &adma_pwm_lpcg;
&dma_subsys {
/* Clock gate (LPCG) for UART4: baud clock from the SCU-managed
 * per-clock, ipg from the DMA subsystem ipg clock.
 */
uart4_lpcg: clock-controller@5a4a0000 {
compatible = "fsl,imx8qxp-lpcg";
reg = <0x5a4a0000 0x10000>;
#clock-cells = <1>;
clocks = <&clk IMX_SC_R_UART_4 IMX_SC_PM_CLK_PER>,
<&dma_ipg_clk>;
clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
clock-output-names = "uart4_lpcg_baud_clk",
"uart4_lpcg_ipg_clk";
power-domains = <&pd IMX_SC_R_UART_4>;
};
/* LPI2C instance 4, gated by i2c4_lpcg below; disabled until a
 * board enables it.
 */
i2c4: i2c@5a840000 {
compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x5a840000 0x4000>;
interrupts = <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&i2c4_lpcg 0>,
<&i2c4_lpcg 1>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_I2C_4 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
power-domains = <&pd IMX_SC_R_I2C_4>;
};
i2c4_lpcg: clock-controller@5ac40000 {
compatible = "fsl,imx8qxp-lpcg";
reg = <0x5ac40000 0x10000>;
#clock-cells = <1>;
clocks = <&clk IMX_SC_R_I2C_4 IMX_SC_PM_CLK_PER>,
<&dma_ipg_clk>;
clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
clock-output-names = "i2c4_lpcg_clk",
"i2c4_lpcg_ipg_clk";
power-domains = <&pd IMX_SC_R_I2C_4>;
};
/* FlexCAN clock gates: pe (protocol engine), ipg and chi clocks.
 * ipg/chi both derive from the DMA ipg clock.
 */
can1_lpcg: clock-controller@5ace0000 {
compatible = "fsl,imx8qxp-lpcg";
reg = <0x5ace0000 0x10000>;
#clock-cells = <1>;
clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>,
<&dma_ipg_clk>, <&dma_ipg_clk>;
clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>;
clock-output-names = "can1_lpcg_pe_clk",
"can1_lpcg_ipg_clk",
"can1_lpcg_chi_clk";
power-domains = <&pd IMX_SC_R_CAN_1>;
};
can2_lpcg: clock-controller@5acf0000 {
compatible = "fsl,imx8qxp-lpcg";
reg = <0x5acf0000 0x10000>;
#clock-cells = <1>;
clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>,
<&dma_ipg_clk>, <&dma_ipg_clk>;
clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>;
clock-output-names = "can2_lpcg_pe_clk",
"can2_lpcg_ipg_clk",
"can2_lpcg_chi_clk";
power-domains = <&pd IMX_SC_R_CAN_2>;
};
};
/* eDMA instance 2: 22 channels, each with its own interrupt and SCU
 * power domain. dma-channel-mask 0xf00 marks channels 8-11 unusable,
 * matching the four interrupt slots stubbed out with GIC_SPI 0 below —
 * presumably those channels are reserved elsewhere; confirm in the RM.
 */
&edma2 {
reg = <0x5a1f0000 0x170000>;
#dma-cells = <3>;
dma-channels = <22>;
dma-channel-mask = <0xf00>;
interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, /* unused */
<GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, /* unused */
<GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, /* unused */
<GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, /* unused */
<GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 439 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&pd IMX_SC_R_DMA_0_CH0>,
<&pd IMX_SC_R_DMA_0_CH1>,
<&pd IMX_SC_R_DMA_0_CH2>,
<&pd IMX_SC_R_DMA_0_CH3>,
<&pd IMX_SC_R_DMA_0_CH4>,
<&pd IMX_SC_R_DMA_0_CH5>,
<&pd IMX_SC_R_DMA_0_CH6>,
<&pd IMX_SC_R_DMA_0_CH7>,
<&pd IMX_SC_R_DMA_0_CH8>,
<&pd IMX_SC_R_DMA_0_CH9>,
<&pd IMX_SC_R_DMA_0_CH10>,
<&pd IMX_SC_R_DMA_0_CH11>,
<&pd IMX_SC_R_DMA_0_CH12>,
<&pd IMX_SC_R_DMA_0_CH13>,
<&pd IMX_SC_R_DMA_0_CH14>,
<&pd IMX_SC_R_DMA_0_CH15>,
<&pd IMX_SC_R_DMA_0_CH16>,
<&pd IMX_SC_R_DMA_0_CH17>,
<&pd IMX_SC_R_DMA_0_CH18>,
<&pd IMX_SC_R_DMA_0_CH19>,
<&pd IMX_SC_R_DMA_0_CH20>,
<&pd IMX_SC_R_DMA_0_CH21>;
status = "okay";
};
/* It is eDMA1 in 8QM RM, but 8QXP it is eDMA3 */
/* 10 channels, one interrupt and one DMA_1 power domain per channel. */
&edma3 {
reg = <0x5a9f0000 0x210000>;
dma-channels = <10>;
interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&pd IMX_SC_R_DMA_1_CH0>,
<&pd IMX_SC_R_DMA_1_CH1>,
<&pd IMX_SC_R_DMA_1_CH2>,
<&pd IMX_SC_R_DMA_1_CH3>,
<&pd IMX_SC_R_DMA_1_CH4>,
<&pd IMX_SC_R_DMA_1_CH5>,
<&pd IMX_SC_R_DMA_1_CH6>,
<&pd IMX_SC_R_DMA_1_CH7>,
<&pd IMX_SC_R_DMA_1_CH8>,
<&pd IMX_SC_R_DMA_1_CH9>;
};
/* FlexCAN overrides. Note flexcan2 is wired to can1_lpcg/IMX_SC_R_CAN_1
 * and flexcan3 to can2_lpcg/IMX_SC_R_CAN_2 — the SoC's controller
 * numbering is offset from the label numbering; confirm against the RM
 * before "fixing" this apparent mismatch.
 */
&flexcan1 {
fsl,clk-source = /bits/ 8 <1>;
};
&flexcan2 {
clocks = <&can1_lpcg IMX_LPCG_CLK_4>,
<&can1_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};
&flexcan3 {
clocks = <&can2_lpcg IMX_LPCG_CLK_4>,
<&can2_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};
/* LPUART overrides: select the 8QM compatible and hook each UART's
 * RX/TX to consecutive edma2 channel pairs (RX on the odd channel,
 * TX on the even one below it).
 */
&lpuart0 {
compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart";
dmas = <&edma2 13 0 0>, <&edma2 12 0 1>;
dma-names = "rx","tx";
};
&lpuart1 {
compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart";
dmas = <&edma2 15 0 0>, <&edma2 14 0 1>;
dma-names = "rx","tx";
};
&lpuart2 {
compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart";
dmas = <&edma2 17 0 0>, <&edma2 16 0 1>;
dma-names = "rx","tx";
};
&lpuart3 {
compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart";
dmas = <&edma2 19 0 0>, <&edma2 18 0 1>;
dma-names = "rx","tx";
};
/* Prefix the 8QM-specific LPI2C compatible on the common i2c nodes. */
&i2c0 {
compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
};
&i2c1 {
compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
};
&i2c2 {
compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
};
&i2c3 {
compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
};
|
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
* Copyright (c) Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/zstd.h>
#include "common/zstd_deps.h"
#include "common/zstd_internal.h"
/*
 * Evaluate @ret once; if it is a zstd error code, return it from the
 * enclosing function (which must return size_t).
 */
#define ZSTD_FORWARD_IF_ERR(ret) \
do { \
size_t const __ret = (ret); \
if (ZSTD_isError(__ret)) \
return __ret; \
} while (0)
/*
 * zstd_cctx_init() - reset @cctx and apply @parameters field by field
 * @cctx: compression context to (re)initialize
 * @parameters: legacy-style parameter struct to translate
 * @pledged_src_size: exact source size to pledge to the context
 *
 * Translates the old zstd_parameters struct into the advanced
 * ZSTD_CCtx_setParameter() API. Note dictIDFlag is the logical
 * inversion of fParams.noDictIDFlag.
 *
 * Return: 0 on success, a zstd error code otherwise.
 */
static size_t zstd_cctx_init(zstd_cctx *cctx, const zstd_parameters *parameters,
unsigned long long pledged_src_size)
{
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset(
cctx, ZSTD_reset_session_and_parameters));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize(
cctx, pledged_src_size));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_windowLog, parameters->cParams.windowLog));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_hashLog, parameters->cParams.hashLog));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_chainLog, parameters->cParams.chainLog));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_searchLog, parameters->cParams.searchLog));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_minMatch, parameters->cParams.minMatch));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_targetLength, parameters->cParams.targetLength));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_strategy, parameters->cParams.strategy));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_contentSizeFlag, parameters->fParams.contentSizeFlag));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_checksumFlag, parameters->fParams.checksumFlag));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_dictIDFlag, !parameters->fParams.noDictIDFlag));
return 0;
}
/* Thin exported wrappers over the upstream zstd level/bound queries. */
int zstd_min_clevel(void)
{
return ZSTD_minCLevel();
}
EXPORT_SYMBOL(zstd_min_clevel);
int zstd_max_clevel(void)
{
return ZSTD_maxCLevel();
}
EXPORT_SYMBOL(zstd_max_clevel);
int zstd_default_clevel(void)
{
return ZSTD_defaultCLevel();
}
EXPORT_SYMBOL(zstd_default_clevel);
/* Worst-case compressed size for @src_size input bytes. */
size_t zstd_compress_bound(size_t src_size)
{
return ZSTD_compressBound(src_size);
}
EXPORT_SYMBOL(zstd_compress_bound);
/* Parameter selection for @level; dict_size is fixed at 0 here. */
zstd_parameters zstd_get_params(int level,
unsigned long long estimated_src_size)
{
return ZSTD_getParams(level, estimated_src_size, 0);
}
EXPORT_SYMBOL(zstd_get_params);
zstd_compression_parameters zstd_get_cparams(int level,
unsigned long long estimated_src_size, size_t dict_size)
{
return ZSTD_getCParams(level, estimated_src_size, dict_size);
}
EXPORT_SYMBOL(zstd_get_cparams);
/* Workspace size needed for a static cctx using @cparams. */
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCCtxSize_usingCParams(*cparams);
}
EXPORT_SYMBOL(zstd_cctx_workspace_bound);
/*
 * Place a compression context inside caller-provided @workspace.
 * Returns NULL on NULL workspace or if the workspace is too small
 * (ZSTD_initStaticCCtx's failure mode).
 */
zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
{
if (workspace == NULL)
return NULL;
return ZSTD_initStaticCCtx(workspace, workspace_size);
}
EXPORT_SYMBOL(zstd_init_cctx);
/* Heap-allocated context using the caller's custom allocator. */
zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem)
{
return ZSTD_createCCtx_advanced(custom_mem);
}
EXPORT_SYMBOL(zstd_create_cctx_advanced);
size_t zstd_free_cctx(zstd_cctx *cctx)
{
return ZSTD_freeCCtx(cctx);
}
EXPORT_SYMBOL(zstd_free_cctx);
/*
 * Build a dictionary by reference: @dict is NOT copied, so the caller
 * must keep it alive for the cdict's lifetime.
 */
zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
zstd_compression_parameters cparams,
zstd_custom_mem custom_mem)
{
return ZSTD_createCDict_advanced(dict, dict_size, ZSTD_dlm_byRef,
ZSTD_dct_auto, cparams, custom_mem);
}
EXPORT_SYMBOL(zstd_create_cdict_byreference);
size_t zstd_free_cdict(zstd_cdict *cdict)
{
return ZSTD_freeCDict(cdict);
}
EXPORT_SYMBOL(zstd_free_cdict);
/*
 * One-shot compression: reapply @parameters (pledging src_size exactly)
 * then compress @src into @dst. Returns compressed size or a zstd
 * error code.
 */
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters)
{
ZSTD_FORWARD_IF_ERR(zstd_cctx_init(cctx, parameters, src_size));
return ZSTD_compress2(cctx, dst, dst_capacity, src, src_size);
}
EXPORT_SYMBOL(zstd_compress_cctx);
size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
size_t dst_capacity, const void *src, size_t src_size,
const ZSTD_CDict *cdict)
{
return ZSTD_compress_usingCDict(cctx, dst, dst_capacity,
src, src_size, cdict);
}
EXPORT_SYMBOL(zstd_compress_using_cdict);
/* Workspace size needed for a static cstream using @cparams. */
size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCStreamSize_usingCParams(*cparams);
}
EXPORT_SYMBOL(zstd_cstream_workspace_bound);
/*
 * zstd_init_cstream() - place a streaming compression context in
 * caller-provided memory and configure it from @parameters.
 * @parameters: legacy parameter struct applied via zstd_cctx_init()
 * @pledged_src_size: total input size, or 0 for "unknown"
 * @workspace: caller-owned buffer backing the context
 * @workspace_size: size of @workspace in bytes
 *
 * Return: the initialized stream, or NULL on any failure.
 */
zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
	unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
{
	zstd_cstream *stream;

	if (!workspace)
		return NULL;

	stream = ZSTD_initStaticCStream(workspace, workspace_size);
	if (!stream)
		return NULL;

	/* The legacy linux API uses 0 for "unknown"; new zstd needs the sentinel. */
	if (!pledged_src_size)
		pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;

	if (ZSTD_isError(zstd_cctx_init(stream, parameters, pledged_src_size)))
		return NULL;

	return stream;
}
EXPORT_SYMBOL(zstd_init_cstream);
/*
 * zstd_reset_cstream() - restart a stream for a new frame, keeping the
 * previously applied parameters.
 * @cstream: stream to reset
 * @pledged_src_size: total input size for the next frame, 0 = unknown
 *
 * Return: 0 on success, a zstd error code otherwise.
 */
size_t zstd_reset_cstream(zstd_cstream *cstream,
	unsigned long long pledged_src_size)
{
	unsigned long long srcsize = pledged_src_size;

	/* Legacy semantics: 0 means "source size unknown". */
	if (srcsize == 0)
		srcsize = ZSTD_CONTENTSIZE_UNKNOWN;

	ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset(cstream, ZSTD_reset_session_only));
	ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize(cstream, srcsize));
	return 0;
}
EXPORT_SYMBOL(zstd_reset_cstream);
/* Streaming compress/flush/end: direct pass-throughs to upstream zstd.
 * Each returns a size_t that is a zstd error code on failure.
 */
size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
zstd_in_buffer *input)
{
return ZSTD_compressStream(cstream, output, input);
}
EXPORT_SYMBOL(zstd_compress_stream);
size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
{
return ZSTD_flushStream(cstream, output);
}
EXPORT_SYMBOL(zstd_flush_stream);
size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
{
return ZSTD_endStream(cstream, output);
}
EXPORT_SYMBOL(zstd_end_stream);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Compressor");
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* -*- linux-c -*-
*
* (C) 2003 [email protected]
*
* based on arch/arm/kernel/apm.c
* factor out the information needed by architectures to provide
* apm status
*/
#ifndef __LINUX_APM_EMULATION_H
#define __LINUX_APM_EMULATION_H
#include <linux/apm_bios.h>
/*
* This structure gets filled in by the machine specific 'get_power_status'
* implementation. Any fields which are not set default to a safe value.
*/
struct apm_power_info {
/* AC adapter state; one of the APM_AC_* values below. */
unsigned char ac_line_status;
#define APM_AC_OFFLINE 0
#define APM_AC_ONLINE 1
#define APM_AC_BACKUP 2
#define APM_AC_UNKNOWN 0xff
/* Coarse battery state; one of APM_BATTERY_STATUS_*. */
unsigned char battery_status;
#define APM_BATTERY_STATUS_HIGH 0
#define APM_BATTERY_STATUS_LOW 1
#define APM_BATTERY_STATUS_CRITICAL 2
#define APM_BATTERY_STATUS_CHARGING 3
#define APM_BATTERY_STATUS_NOT_PRESENT 4
#define APM_BATTERY_STATUS_UNKNOWN 0xff
/* Bitmask of APM_BATTERY_FLAG_* bits (or the UNKNOWN sentinel). */
unsigned char battery_flag;
#define APM_BATTERY_FLAG_HIGH (1 << 0)
#define APM_BATTERY_FLAG_LOW (1 << 1)
#define APM_BATTERY_FLAG_CRITICAL (1 << 2)
#define APM_BATTERY_FLAG_CHARGING (1 << 3)
#define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7)
#define APM_BATTERY_FLAG_UNKNOWN 0xff
/* Remaining capacity in percent — presumably -1 when unknown; confirm
 * against the per-arch get_power_status implementations.
 */
int battery_life;
/* Remaining time, expressed in @units. */
int time;
/* Unit for @time; one of the APM_UNITS_* values. */
int units;
#define APM_UNITS_MINS 0
#define APM_UNITS_SECS 1
#define APM_UNITS_UNKNOWN -1
};
/*
* This allows machines to provide their own "apm get power status" function.
*/
extern void (*apm_get_power_status)(struct apm_power_info *);
/*
* Queue an event (APM_SYS_SUSPEND or APM_CRITICAL_SUSPEND)
*/
void apm_queue_event(apm_event_t event);
#endif /* __LINUX_APM_EMULATION_H */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Huawei HiNIC PCI Express Linux driver
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/semaphore.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/byteorder.h>
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
/* Each WQS page is carved into fixed-size WQ blocks. */
#define WQS_BLOCKS_PER_PAGE 4
#define WQ_BLOCK_SIZE 4096
#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
/* Hard cap on blocks tracked by the free-block ring (power of 2). */
#define WQS_MAX_NUM_BLOCKS 128
#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
sizeof((wqs)->free_blocks[0]))
/* Total WQ bytes: depth (entries) times WQEBB size. */
#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
/* A WQ block stores one 64-bit page address per WQ page. */
#define WQ_PAGE_ADDR_SIZE sizeof(u64)
#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
#define CMDQ_BLOCK_SIZE 512
#define CMDQ_PAGE_SIZE 4096
#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
/* Address of a WQ's block within its WQS page (virtual/physical/shadow). */
#define WQ_BASE_VADDR(wqs, wq) \
((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
#define WQ_BASE_PADDR(wqs, wq) \
((wqs)->page_paddr[(wq)->page_idx] \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
#define WQ_BASE_ADDR(wqs, wq) \
((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
+ (wq)->block_idx * WQ_BLOCK_SIZE)
/* Same three views for a cmdq block inside the single cmdq page. */
#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
((void *)((cmdq_pages)->page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
((cmdq_pages)->page_paddr \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
((void *)((cmdq_pages)->shadow_page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
/* CPU-visible address of the WQ page that holds WQEBB @idx. */
#define WQ_PAGE_ADDR(wq, idx) \
((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
/* Wrap a producer/consumer index into [0, q_depth). */
#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
#define WQE_IN_RANGE(wqe, start, end) \
(((unsigned long)(wqe) >= (unsigned long)(start)) && \
((unsigned long)(wqe) < (unsigned long)(end)))
#define WQE_SHADOW_PAGE(wq, wqe) \
(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
/ (wq)->max_wqe_size)
/* Byte offset of WQEBB @idx inside its WQ page
 * (num_wqebbs_per_page is enforced to be a power of 2 at alloc time).
 */
static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
{
	u16 wqebb_in_page = idx & (wq->num_wqebbs_per_page - 1);

	return wqebb_in_page << wq->wqebb_size_shift;
}
/* Index of the WQ page holding WQEBB @idx
 * (num_q_pages is enforced to be a power of 2 at alloc time).
 */
static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
{
	u16 page = idx >> wq->wqebbs_per_page_shift;

	return page & (wq->num_q_pages - 1);
}
/**
* queue_alloc_page - allocate page for Queue
* @hwif: HW interface for allocating DMA
* @vaddr: virtual address will be returned in this address
* @paddr: physical address will be returned in this address
* @shadow_vaddr: VM area will be return here for holding WQ page addresses
* @page_sz: page size of each WQ page
*
* Return 0 - Success, negative - Failure
**/
/* Allocates one DMA-coherent queue page plus a vzalloc'd shadow area of
 * the same size; on shadow-allocation failure the DMA page is released
 * before returning -ENOMEM.
 */
static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
void ***shadow_vaddr, size_t page_sz)
{
struct pci_dev *pdev = hwif->pdev;
dma_addr_t dma_addr;
*vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
GFP_KERNEL);
if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
return -ENOMEM;
}
/* paddr is stored widened to u64; freed later via a dma_addr_t cast */
*paddr = (u64)dma_addr;
/* use vzalloc for big mem */
*shadow_vaddr = vzalloc(page_sz);
if (!*shadow_vaddr)
goto err_shadow_vaddr;
return 0;
err_shadow_vaddr:
dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
return -ENOMEM;
}
/**
 * wqs_allocate_page - allocate page for WQ set
 * @wqs: Work Queue Set
 * @page_idx: the page index of the page will be allocated
 *
 * Return 0 - Success, negative - Failure
 **/
static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
{
return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
&wqs->page_paddr[page_idx],
&wqs->shadow_page_vaddr[page_idx],
WQS_PAGE_SIZE);
}
/**
 * wqs_free_page - free page of WQ set
 * @wqs: Work Queue Set
 * @page_idx: the page index of the page will be freed
 **/
static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
/* page_paddr was stored as u64 by queue_alloc_page(); cast back */
dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
wqs->page_vaddr[page_idx],
(dma_addr_t)wqs->page_paddr[page_idx]);
vfree(wqs->shadow_page_vaddr[page_idx]);
}
/**
 * cmdq_allocate_page - allocate page for cmdq
 * @cmdq_pages: the pages of the cmdq queue struct to hold the page
 *
 * Return 0 - Success, negative - Failure
 **/
static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
{
/* cmdqs share a single page, unlike WQ sets which use an array */
return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
&cmdq_pages->page_paddr,
&cmdq_pages->shadow_page_vaddr,
CMDQ_PAGE_SIZE);
}
/**
 * cmdq_free_page - free page from cmdq
 * @cmdq_pages: the pages of the cmdq queue struct that hold the page
 **/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
struct hinic_hwif *hwif = cmdq_pages->hwif;
struct pci_dev *pdev = hwif->pdev;
dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
cmdq_pages->page_vaddr,
(dma_addr_t)cmdq_pages->page_paddr);
vfree(cmdq_pages->shadow_page_vaddr);
}
/* Allocate the three per-page address arrays (paddr, vaddr, shadow) for
 * @wqs->num_pages pages; unwinds partial allocations on failure.
 * Device-managed allocations, so free_page_arrays() is only needed on
 * early teardown paths.
 */
static int alloc_page_arrays(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
sizeof(*wqs->page_paddr), GFP_KERNEL);
if (!wqs->page_paddr)
return -ENOMEM;
wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
sizeof(*wqs->page_vaddr), GFP_KERNEL);
if (!wqs->page_vaddr)
goto err_page_vaddr;
wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
sizeof(*wqs->shadow_page_vaddr),
GFP_KERNEL);
if (!wqs->shadow_page_vaddr)
goto err_page_shadow_vaddr;
return 0;
err_page_shadow_vaddr:
devm_kfree(&pdev->dev, wqs->page_vaddr);
err_page_vaddr:
devm_kfree(&pdev->dev, wqs->page_paddr);
return -ENOMEM;
}
/* Release the arrays allocated by alloc_page_arrays(). */
static void free_page_arrays(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
devm_kfree(&pdev->dev, wqs->page_vaddr);
devm_kfree(&pdev->dev, wqs->page_paddr);
}
/* Pop the next free (page_idx, block_idx) pair from the free-block ring.
 * The count is decremented first and rolled back if it went negative,
 * all under alloc_blocks_lock. Returns -ENOMEM when no block is free.
 */
static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
int *block_idx)
{
int pos;
down(&wqs->alloc_blocks_lock);
wqs->num_free_blks--;
if (wqs->num_free_blks < 0) {
wqs->num_free_blks++;
up(&wqs->alloc_blocks_lock);
return -ENOMEM;
}
pos = wqs->alloc_blk_pos++;
/* ring positions wrap at WQS_MAX_NUM_BLOCKS (power of 2) */
pos &= WQS_MAX_NUM_BLOCKS - 1;
*page_idx = wqs->free_blocks[pos].page_idx;
*block_idx = wqs->free_blocks[pos].block_idx;
/* poison the consumed slot so stale reuse is detectable */
wqs->free_blocks[pos].page_idx = -1;
wqs->free_blocks[pos].block_idx = -1;
up(&wqs->alloc_blocks_lock);
return 0;
}
/* Push a (page_idx, block_idx) pair back onto the free-block ring. */
static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
int block_idx)
{
int pos;
down(&wqs->alloc_blocks_lock);
pos = wqs->return_blk_pos++;
pos &= WQS_MAX_NUM_BLOCKS - 1;
wqs->free_blocks[pos].page_idx = page_idx;
wqs->free_blocks[pos].block_idx = block_idx;
wqs->num_free_blks++;
up(&wqs->alloc_blocks_lock);
}
/* Seed the free-block ring with every block of every page, reset the
 * alloc/return cursors and initialize the protecting semaphore
 * (binary, used as a mutex).
 */
static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
{
int page_idx, blk_idx, pos = 0;
for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
wqs->free_blocks[pos].page_idx = page_idx;
wqs->free_blocks[pos].block_idx = blk_idx;
pos++;
}
}
wqs->alloc_blk_pos = 0;
wqs->return_blk_pos = pos;
wqs->num_free_blks = pos;
sema_init(&wqs->alloc_blocks_lock, 1);
}
/**
 * hinic_wqs_alloc - allocate Work Queues set
 * @wqs: Work Queue Set
 * @max_wqs: maximum wqs to allocate
 * @hwif: HW interface for use for the allocation
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
struct hinic_hwif *hwif)
{
struct pci_dev *pdev = hwif->pdev;
int err, i, page_idx;
/* round up so pages are fully populated with blocks */
max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
if (max_wqs > WQS_MAX_NUM_BLOCKS) {
dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
return -EINVAL;
}
wqs->hwif = hwif;
wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;
if (alloc_page_arrays(wqs)) {
dev_err(&pdev->dev,
"Failed to allocate mem for page addresses\n");
return -ENOMEM;
}
for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
err = wqs_allocate_page(wqs, page_idx);
if (err) {
dev_err(&pdev->dev, "Failed wq page allocation\n");
goto err_wq_allocate_page;
}
}
wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
GFP_KERNEL);
if (!wqs->free_blocks) {
err = -ENOMEM;
goto err_alloc_blocks;
}
init_wqs_blocks_arr(wqs);
return 0;
/* err_alloc_blocks falls through: page_idx == num_pages there, so the
 * loop below frees every successfully allocated page in both paths.
 */
err_alloc_blocks:
err_wq_allocate_page:
for (i = 0; i < page_idx; i++)
wqs_free_page(wqs, i);
free_page_arrays(wqs);
return err;
}
/**
 * hinic_wqs_free - free Work Queues set
 * @wqs: Work Queue Set
 *
 * Releases the free-block ring, every WQS page and the page-address
 * arrays allocated by hinic_wqs_alloc().
 **/
void hinic_wqs_free(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
int page_idx;
devm_kfree(&pdev->dev, wqs->free_blocks);
for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
wqs_free_page(wqs, page_idx);
free_page_arrays(wqs);
}
/**
 * alloc_wqes_shadow - allocate WQE shadows for WQ
 * @wq: WQ to allocate shadows for
 *
 * One shadow WQE buffer (max_wqe_size bytes) plus one saved index per
 * queue page; used for WQEs that straddle a page boundary.
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_wqes_shadow(struct hinic_wq *wq)
{
struct hinic_hwif *hwif = wq->hwif;
struct pci_dev *pdev = hwif->pdev;
wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
wq->max_wqe_size, GFP_KERNEL);
if (!wq->shadow_wqe)
return -ENOMEM;
wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
sizeof(*wq->shadow_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
return 0;
err_shadow_idx:
devm_kfree(&pdev->dev, wq->shadow_wqe);
return -ENOMEM;
}
/**
 * free_wqes_shadow - free WQE shadows of WQ
 * @wq: WQ to free shadows from
 **/
static void free_wqes_shadow(struct hinic_wq *wq)
{
struct hinic_hwif *hwif = wq->hwif;
struct pci_dev *pdev = hwif->pdev;
devm_kfree(&pdev->dev, wq->shadow_idx);
devm_kfree(&pdev->dev, wq->shadow_wqe);
}
/**
 * free_wq_pages - free pages of WQ
 * @hwif: HW interface for releasing dma addresses
 * @wq: WQ to free pages from
 * @num_q_pages: number pages to free
 *
 * Also frees the WQE shadow buffers; callers pass a partial count when
 * unwinding a failed alloc_wq_pages().
 **/
static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
int num_q_pages)
{
struct pci_dev *pdev = hwif->pdev;
int i;
for (i = 0; i < num_q_pages; i++) {
void **vaddr = &wq->shadow_block_vaddr[i];
u64 *paddr = &wq->block_vaddr[i];
dma_addr_t dma_addr;
/* block_vaddr entries hold big-endian DMA addresses for HW */
dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
dma_addr);
}
free_wqes_shadow(wq);
}
/**
 * alloc_wq_pages - alloc pages for WQ
 * @hwif: HW interface for allocating dma addresses
 * @wq: WQ to allocate pages for
 * @max_pages: maximum pages allowed
 *
 * Computes the page count from WQ_SIZE (must be a power of 2 and within
 * @max_pages), allocates the shadow buffers, then one DMA-coherent page
 * per queue page, recording the DMA address big-endian for the HW.
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
int max_pages)
{
struct pci_dev *pdev = hwif->pdev;
int i, err, num_q_pages;
num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
if (num_q_pages > max_pages) {
dev_err(&pdev->dev, "Number wq pages exceeds the limit\n");
return -EINVAL;
}
if (num_q_pages & (num_q_pages - 1)) {
dev_err(&pdev->dev, "Number wq pages must be power of 2\n");
return -EINVAL;
}
wq->num_q_pages = num_q_pages;
err = alloc_wqes_shadow(wq);
if (err) {
dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
return err;
}
for (i = 0; i < num_q_pages; i++) {
void **vaddr = &wq->shadow_block_vaddr[i];
u64 *paddr = &wq->block_vaddr[i];
dma_addr_t dma_addr;
*vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
&dma_addr, GFP_KERNEL);
if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate wq page\n");
goto err_alloc_wq_pages;
}
/* HW uses Big Endian Format */
*paddr = cpu_to_be64(dma_addr);
}
return 0;
err_alloc_wq_pages:
/* i pages were allocated before the failure */
free_wq_pages(wq, hwif, i);
return -ENOMEM;
}
/**
 * hinic_wq_allocate - Allocate the WQ resources from the WQS
 * @wqs: WQ set from which to allocate the WQ resources
 * @wq: WQ to allocate resources for it from the WQ set
 * @wqebb_size: Work Queue Block Byte Size
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * Validates the power-of-2 invariants the index macros rely on, claims
 * a block from the WQS free ring, derives the block addresses and
 * allocates the queue pages. Producer/consumer indices start at 0 with
 * delta == q_depth (all entries free).
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
u16 wqebb_size, u32 wq_page_size, u16 q_depth,
u16 max_wqe_size)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page;
u16 wqebb_size_shift;
int err;
if (!is_power_of_2(wqebb_size)) {
dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
if (wq_page_size == 0) {
dev_err(&pdev->dev, "wq_page_size must be > 0\n");
return -EINVAL;
}
if (q_depth & (q_depth - 1)) {
dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
return -EINVAL;
}
wqebb_size_shift = ilog2(wqebb_size);
num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
>> wqebb_size_shift;
if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
wq->hwif = hwif;
err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
if (err) {
dev_err(&pdev->dev, "Failed to get free wqs next block\n");
return err;
}
wq->wqebb_size = wqebb_size;
wq->wq_page_size = wq_page_size;
wq->q_depth = q_depth;
wq->max_wqe_size = max_wqe_size;
wq->num_wqebbs_per_page = num_wqebbs_per_page;
wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
wq->wqebb_size_shift = wqebb_size_shift;
wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
if (err) {
dev_err(&pdev->dev, "Failed to allocate wq pages\n");
goto err_alloc_wq_pages;
}
atomic_set(&wq->cons_idx, 0);
atomic_set(&wq->prod_idx, 0);
atomic_set(&wq->delta, q_depth);
/* q_depth is a power of 2, so this mask wraps indices correctly */
wq->mask = q_depth - 1;
return 0;
err_alloc_wq_pages:
wqs_return_block(wqs, wq->page_idx, wq->block_idx);
return err;
}
/**
 * hinic_wq_free - Free the WQ resources to the WQS
 * @wqs: WQ set to free the WQ resources to it
 * @wq: WQ to free its resources to the WQ set resources
 *
 * Frees the queue pages and returns the WQ's block to the free ring.
 **/
void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
{
free_wq_pages(wq, wqs->hwif, wq->num_q_pages);
wqs_return_block(wqs, wq->page_idx, wq->block_idx);
}
/**
 * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
 * @cmdq_pages: will hold the pages of the cmdq
 * @wq: returned wqs
 * @hwif: HW interface
 * @cmdq_blocks: number of cmdq blocks/wq to allocate
 * @wqebb_size: Work Queue Block Byte Size
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * All cmdq WQs share one cmdq page; block i of that page backs wq[i].
 * On failure, pages of the first i WQs and the cmdq page are released.
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, struct hinic_hwif *hwif,
int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page_shift;
u16 num_wqebbs_per_page;
u16 wqebb_size_shift;
int i, j, err = -ENOMEM;
/* same power-of-2 invariants as hinic_wq_allocate() */
if (!is_power_of_2(wqebb_size)) {
dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
if (wq_page_size == 0) {
dev_err(&pdev->dev, "wq_page_size must be > 0\n");
return -EINVAL;
}
if (q_depth & (q_depth - 1)) {
dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
return -EINVAL;
}
wqebb_size_shift = ilog2(wqebb_size);
num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
>> wqebb_size_shift;
if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
cmdq_pages->hwif = hwif;
err = cmdq_allocate_page(cmdq_pages);
if (err) {
dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
return err;
}
num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
for (i = 0; i < cmdq_blocks; i++) {
wq[i].hwif = hwif;
wq[i].page_idx = 0;
wq[i].block_idx = i;
wq[i].wqebb_size = wqebb_size;
wq[i].wq_page_size = wq_page_size;
wq[i].q_depth = q_depth;
wq[i].max_wqe_size = max_wqe_size;
wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
wq[i].wqebb_size_shift = wqebb_size_shift;
wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
CMDQ_WQ_MAX_PAGES);
if (err) {
dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
goto err_cmdq_block;
}
atomic_set(&wq[i].cons_idx, 0);
atomic_set(&wq[i].prod_idx, 0);
atomic_set(&wq[i].delta, q_depth);
wq[i].mask = q_depth - 1;
}
return 0;
err_cmdq_block:
for (j = 0; j < i; j++)
free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);
cmdq_free_page(cmdq_pages);
return err;
}
/**
 * hinic_wqs_cmdq_free - Free wqs from cmdqs
 * @cmdq_pages: hold the pages of the cmdq
 * @wq: wqs to free
 * @cmdq_blocks: number of wqs to free
 *
 * Must mirror hinic_wqs_cmdq_alloc(): per-WQ pages first, then the
 * shared cmdq page.
 **/
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, int cmdq_blocks)
{
int i;
for (i = 0; i < cmdq_blocks; i++)
free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);
cmdq_free_page(cmdq_pages);
}
/* copy_wqe_to_shadow - gather the wqebbs of a wqe (which may wrap around
 * the end of the queue) into the contiguous shadow buffer, starting at
 * wqebb index @idx
 */
static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
			       int num_wqebbs, u16 idx)
{
	int wqebb;

	for (wqebb = 0; wqebb < num_wqebbs; wqebb++) {
		void *src;

		/* wrap the index back into the queue before addressing */
		idx = MASKED_WQE_IDX(wq, idx);
		src = WQ_PAGE_ADDR(wq, idx) + WQE_PAGE_OFF(wq, idx);

		memcpy(shadow_addr, src, wq->wqebb_size);

		shadow_addr += wq->wqebb_size;
		idx++;
	}
}
/* copy_wqe_from_shadow - scatter a wqe from the contiguous shadow buffer
 * back into its (possibly wrapping) wqebbs in the queue, starting at
 * wqebb index @idx
 */
static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
				 int num_wqebbs, u16 idx)
{
	int wqebb;

	for (wqebb = 0; wqebb < num_wqebbs; wqebb++) {
		void *dst;

		/* wrap the index back into the queue before addressing */
		idx = MASKED_WQE_IDX(wq, idx);
		dst = WQ_PAGE_ADDR(wq, idx) + WQE_PAGE_OFF(wq, idx);

		memcpy(dst, shadow_addr, wq->wqebb_size);

		shadow_addr += wq->wqebb_size;
		idx++;
	}
}
/**
 * hinic_get_wqe - get wqe ptr in the current pi and update the pi
 * @wq: wq to get wqe from
 * @wqe_size: wqe size
 * @prod_idx: returned pi
 *
 * Return wqe pointer, or ERR_PTR(-EBUSY) when the queue is full.
 * When the wqe wraps a queue page the returned pointer is into the
 * contiguous shadow buffer; hinic_write_wqe() copies it back.
 **/
struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
				   u16 *prod_idx)
{
	int curr_pg, end_pg, num_wqebbs;
	u16 curr_prod_idx, end_prod_idx;

	*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));

	/* round the wqe size up to a whole number of wqebbs */
	num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;

	/* reserve room first: delta tracks the free wqebbs remaining */
	if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
		/* queue full - undo the reservation */
		atomic_add(num_wqebbs, &wq->delta);
		return ERR_PTR(-EBUSY);
	}

	/* advance the producer index past the reserved wqebbs */
	end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);

	end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
	curr_prod_idx = end_prod_idx - num_wqebbs;
	curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);

	/* end prod index points to the next wqebb, therefore minus 1 */
	end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);

	curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
	end_pg = WQE_PAGE_NUM(wq, end_prod_idx);

	*prod_idx = curr_prod_idx;

	/* If we only have one page, still need to get shadow wqe when
	 * wqe rolling-over page
	 */
	if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
		void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

		/* wqe wraps: hand back a contiguous copy and remember the
		 * real index so hinic_write_wqe() can copy it back later
		 */
		copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);

		wq->shadow_idx[curr_pg] = *prod_idx;
		return shadow_addr;
	}

	return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
}
/**
 * hinic_return_wqe - return the wqe when transmit failed
 * @wq: wq to return wqe
 * @wqe_size: wqe size
 *
 * Rolls the producer index back and gives the reserved wqebbs back to
 * the free pool (the inverse of the reservation in hinic_get_wqe()).
 **/
void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
	/* use the precomputed shift, consistent with hinic_get_wqe() /
	 * hinic_put_wqe() (wqebb_size is validated as a power of 2 at
	 * allocation time, so shift and divide are equivalent)
	 */
	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
			 >> wq->wqebb_size_shift;

	atomic_sub(num_wqebbs, &wq->prod_idx);

	atomic_add(num_wqebbs, &wq->delta);
}
/**
 * hinic_put_wqe - return the wqe place to use for a new wqe
 * @wq: wq to return wqe
 * @wqe_size: wqe size
 **/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
	int wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;

	/* advance the consumer index and free the consumed wqebbs */
	atomic_add(wqebbs, &wq->cons_idx);
	atomic_add(wqebbs, &wq->delta);
}
/**
 * hinic_read_wqe - read wqe ptr in the current ci
 * @wq: wq to get read from
 * @wqe_size: wqe size
 * @cons_idx: returned ci
 *
 * Return wqe pointer, or ERR_PTR(-EBUSY) when there is no complete wqe
 * to consume. A wqe that wraps a queue page is returned through the
 * contiguous shadow buffer.
 **/
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
				    u16 *cons_idx)
{
	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
			>> wq->wqebb_size_shift;
	u16 curr_cons_idx, end_cons_idx;
	int curr_pg, end_pg;

	/* delta counts free wqebbs; if fewer than num_wqebbs are in
	 * flight, there is nothing (complete) to read yet
	 */
	if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
		return ERR_PTR(-EBUSY);

	curr_cons_idx = atomic_read(&wq->cons_idx);

	curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
	/* index of the last wqebb of this wqe (inclusive) */
	end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);

	curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
	end_pg = WQE_PAGE_NUM(wq, end_cons_idx);

	*cons_idx = curr_cons_idx;

	/* If we only have one page, still need to get shadow wqe when
	 * wqe rolling-over page
	 */
	if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
		void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

		/* wqe wraps a page boundary: return a contiguous copy */
		copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
		return shadow_addr;
	}

	return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
}
/**
 * hinic_read_wqe_direct - read wqe directly from ci position
 * @wq: wq
 * @cons_idx: ci position
 *
 * Return wqe
 **/
struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
{
	void *page_addr = WQ_PAGE_ADDR(wq, cons_idx);

	return page_addr + WQE_PAGE_OFF(wq, cons_idx);
}
/**
 * wqe_shadow - check if a wqe is shadow
 * @wq: wq of the wqe
 * @wqe: the wqe for shadow checking
 *
 * Return true - shadow, false - Not shadow
 **/
static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
{
	/* total size of the shadow area: one max-size wqe per queue page */
	size_t shadow_area = wq->num_q_pages * wq->max_wqe_size;

	return WQE_IN_RANGE(wqe, wq->shadow_wqe,
			    &wq->shadow_wqe[shadow_area]);
}
/**
 * hinic_write_wqe - write the wqe to the wq
 * @wq: wq to write wqe to
 * @wqe: wqe to write
 * @wqe_size: wqe size
 *
 * Only a shadow wqe (handed out by hinic_get_wqe() on a page roll-over)
 * needs copying back into the real queue pages; a non-shadow wqe was
 * built in place and this is a no-op for it.
 **/
void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
		     unsigned int wqe_size)
{
	int curr_pg, num_wqebbs;
	void *shadow_addr;
	u16 prod_idx;

	if (wqe_shadow(wq, wqe)) {
		curr_pg = WQE_SHADOW_PAGE(wq, wqe);

		/* index recorded by hinic_get_wqe() when it shadowed */
		prod_idx = wq->shadow_idx[curr_pg];

		/* use the precomputed shift, consistent with the rest of
		 * the file (wqebb_size is validated as a power of 2)
		 */
		num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
			     >> wq->wqebb_size_shift;

		shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

		copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
	}
}
|
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_IOB_DEFS_H__
#define __CVMX_IOB_DEFS_H__

/*
 * Addresses of the IOB (I/O Bus) CSRs.  CVMX_ADD_IO_SEG() turns each
 * hardware register offset into an address usable with the cvmx CSR
 * accessors.  Layouts for these registers follow below as unions.
 */
#define CVMX_IOB_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800F00007F8ull))
#define CVMX_IOB_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011800F0000050ull))
#define CVMX_IOB_DWB_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000028ull))
#define CVMX_IOB_FAU_TIMEOUT (CVMX_ADD_IO_SEG(0x00011800F0000000ull))
#define CVMX_IOB_I2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000010ull))
#define CVMX_IOB_INB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000078ull))
#define CVMX_IOB_INB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000088ull))
#define CVMX_IOB_INB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000070ull))
#define CVMX_IOB_INB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000080ull))
#define CVMX_IOB_INT_ENB (CVMX_ADD_IO_SEG(0x00011800F0000060ull))
#define CVMX_IOB_INT_SUM (CVMX_ADD_IO_SEG(0x00011800F0000058ull))
#define CVMX_IOB_N2C_L2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000020ull))
#define CVMX_IOB_N2C_RSP_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000008ull))
#define CVMX_IOB_OUTB_COM_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000040ull))
#define CVMX_IOB_OUTB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000098ull))
#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A8ull))
#define CVMX_IOB_OUTB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000090ull))
#define CVMX_IOB_OUTB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A0ull))
#define CVMX_IOB_OUTB_FPA_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000048ull))
#define CVMX_IOB_OUTB_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000038ull))
#define CVMX_IOB_P2C_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000018ull))
#define CVMX_IOB_PKT_ERR (CVMX_ADD_IO_SEG(0x00011800F0000068ull))
#define CVMX_IOB_TO_CMB_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00000B0ull))
/* per-DID credit registers; only a subset of DIDs is defined here */
#define CVMX_IOB_TO_NCB_DID_00_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000800ull))
#define CVMX_IOB_TO_NCB_DID_111_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B78ull))
#define CVMX_IOB_TO_NCB_DID_223_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000EF8ull))
#define CVMX_IOB_TO_NCB_DID_24_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00008C0ull))
#define CVMX_IOB_TO_NCB_DID_32_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000900ull))
#define CVMX_IOB_TO_NCB_DID_40_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000940ull))
#define CVMX_IOB_TO_NCB_DID_55_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00009B8ull))
#define CVMX_IOB_TO_NCB_DID_64_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000A00ull))
#define CVMX_IOB_TO_NCB_DID_79_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000A78ull))
#define CVMX_IOB_TO_NCB_DID_96_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B00ull))
#define CVMX_IOB_TO_NCB_DID_98_CREDITS (CVMX_ADD_IO_SEG(0x00011800F0000B10ull))
/*
 * IOB_BIST_STATUS register layout.  Name suggests built-in self test
 * result bits for the IOB internal memories (exact per-bit memory
 * mapping per the chip's hardware reference manual).  The "_s" struct
 * is the common subset; the per-chip structs add family-specific bits.
 * Bitfields are declared in both endiannesses, selected at compile time.
 */
union cvmx_iob_bist_status {
	uint64_t u64;	/* raw 64-bit register value */
	struct cvmx_iob_bist_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_2_63:62;
		uint64_t ibd:1;
		uint64_t icd:1;
#else
		uint64_t icd:1;
		uint64_t ibd:1;
		uint64_t reserved_2_63:62;
#endif
	} s;
	struct cvmx_iob_bist_status_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_18_63:46;
		uint64_t icnrcb:1;
		uint64_t icr0:1;
		uint64_t icr1:1;
		uint64_t icnr1:1;
		uint64_t icnr0:1;
		uint64_t ibdr0:1;
		uint64_t ibdr1:1;
		uint64_t ibr0:1;
		uint64_t ibr1:1;
		uint64_t icnrt:1;
		uint64_t ibrq0:1;
		uint64_t ibrq1:1;
		uint64_t icrn0:1;
		uint64_t icrn1:1;
		uint64_t icrp0:1;
		uint64_t icrp1:1;
		uint64_t ibd:1;
		uint64_t icd:1;
#else
		uint64_t icd:1;
		uint64_t ibd:1;
		uint64_t icrp1:1;
		uint64_t icrp0:1;
		uint64_t icrn1:1;
		uint64_t icrn0:1;
		uint64_t ibrq1:1;
		uint64_t ibrq0:1;
		uint64_t icnrt:1;
		uint64_t ibr1:1;
		uint64_t ibr0:1;
		uint64_t ibdr1:1;
		uint64_t ibdr0:1;
		uint64_t icnr0:1;
		uint64_t icnr1:1;
		uint64_t icr1:1;
		uint64_t icr0:1;
		uint64_t icnrcb:1;
		uint64_t reserved_18_63:46;
#endif
	} cn30xx;
	/* cn61xx adds five extra FIFO status bits above the cn30xx set */
	struct cvmx_iob_bist_status_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_23_63:41;
		uint64_t xmdfif:1;
		uint64_t xmcfif:1;
		uint64_t iorfif:1;
		uint64_t rsdfif:1;
		uint64_t iocfif:1;
		uint64_t icnrcb:1;
		uint64_t icr0:1;
		uint64_t icr1:1;
		uint64_t icnr1:1;
		uint64_t icnr0:1;
		uint64_t ibdr0:1;
		uint64_t ibdr1:1;
		uint64_t ibr0:1;
		uint64_t ibr1:1;
		uint64_t icnrt:1;
		uint64_t ibrq0:1;
		uint64_t ibrq1:1;
		uint64_t icrn0:1;
		uint64_t icrn1:1;
		uint64_t icrp0:1;
		uint64_t icrp1:1;
		uint64_t ibd:1;
		uint64_t icd:1;
#else
		uint64_t icd:1;
		uint64_t ibd:1;
		uint64_t icrp1:1;
		uint64_t icrp0:1;
		uint64_t icrn1:1;
		uint64_t icrn0:1;
		uint64_t ibrq1:1;
		uint64_t ibrq0:1;
		uint64_t icnrt:1;
		uint64_t ibr1:1;
		uint64_t ibr0:1;
		uint64_t ibdr1:1;
		uint64_t ibdr0:1;
		uint64_t icnr0:1;
		uint64_t icnr1:1;
		uint64_t icr1:1;
		uint64_t icr0:1;
		uint64_t icnrcb:1;
		uint64_t iocfif:1;
		uint64_t rsdfif:1;
		uint64_t iorfif:1;
		uint64_t xmcfif:1;
		uint64_t xmdfif:1;
		uint64_t reserved_23_63:41;
#endif
	} cn61xx;
	/* cn68xx drops some of the cn61xx bits (18 bits total defined) */
	struct cvmx_iob_bist_status_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_18_63:46;
		uint64_t xmdfif:1;
		uint64_t xmcfif:1;
		uint64_t iorfif:1;
		uint64_t rsdfif:1;
		uint64_t iocfif:1;
		uint64_t icnrcb:1;
		uint64_t icr0:1;
		uint64_t icr1:1;
		uint64_t icnr0:1;
		uint64_t ibr0:1;
		uint64_t ibr1:1;
		uint64_t icnrt:1;
		uint64_t ibrq0:1;
		uint64_t ibrq1:1;
		uint64_t icrn0:1;
		uint64_t icrn1:1;
		uint64_t ibd:1;
		uint64_t icd:1;
#else
		uint64_t icd:1;
		uint64_t ibd:1;
		uint64_t icrn1:1;
		uint64_t icrn0:1;
		uint64_t ibrq1:1;
		uint64_t ibrq0:1;
		uint64_t icnrt:1;
		uint64_t ibr1:1;
		uint64_t ibr0:1;
		uint64_t icnr0:1;
		uint64_t icr1:1;
		uint64_t icr0:1;
		uint64_t icnrcb:1;
		uint64_t iocfif:1;
		uint64_t rsdfif:1;
		uint64_t iorfif:1;
		uint64_t xmcfif:1;
		uint64_t xmdfif:1;
		uint64_t reserved_18_63:46;
#endif
	} cn68xx;
};
/*
 * IOB_CTL_STATUS register layout.  Common control/status bits include
 * enables for PKO/DWB paths and in/outbound match logic; per-chip
 * variants add rr_mode, xmc_per and fif_dly where the hardware has
 * them (semantics per the chip's hardware reference manual).
 */
union cvmx_iob_ctl_status {
	uint64_t u64;	/* raw 64-bit register value */
	struct cvmx_iob_ctl_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_11_63:53;
		uint64_t fif_dly:1;
		uint64_t xmc_per:4;
		uint64_t reserved_5_5:1;
		uint64_t outb_mat:1;
		uint64_t inb_mat:1;
		uint64_t pko_enb:1;
		uint64_t dwb_enb:1;
		uint64_t fau_end:1;
#else
		uint64_t fau_end:1;
		uint64_t dwb_enb:1;
		uint64_t pko_enb:1;
		uint64_t inb_mat:1;
		uint64_t outb_mat:1;
		uint64_t reserved_5_5:1;
		uint64_t xmc_per:4;
		uint64_t fif_dly:1;
		uint64_t reserved_11_63:53;
#endif
	} s;
	struct cvmx_iob_ctl_status_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_5_63:59;
		uint64_t outb_mat:1;
		uint64_t inb_mat:1;
		uint64_t pko_enb:1;
		uint64_t dwb_enb:1;
		uint64_t fau_end:1;
#else
		uint64_t fau_end:1;
		uint64_t dwb_enb:1;
		uint64_t pko_enb:1;
		uint64_t inb_mat:1;
		uint64_t outb_mat:1;
		uint64_t reserved_5_63:59;
#endif
	} cn30xx;
	struct cvmx_iob_ctl_status_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_6_63:58;
		uint64_t rr_mode:1;
		uint64_t outb_mat:1;
		uint64_t inb_mat:1;
		uint64_t pko_enb:1;
		uint64_t dwb_enb:1;
		uint64_t fau_end:1;
#else
		uint64_t fau_end:1;
		uint64_t dwb_enb:1;
		uint64_t pko_enb:1;
		uint64_t inb_mat:1;
		uint64_t outb_mat:1;
		uint64_t rr_mode:1;
		uint64_t reserved_6_63:58;
#endif
	} cn52xx;
	struct cvmx_iob_ctl_status_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_11_63:53;
		uint64_t fif_dly:1;
		uint64_t xmc_per:4;
		uint64_t rr_mode:1;
		uint64_t outb_mat:1;
		uint64_t inb_mat:1;
		uint64_t pko_enb:1;
		uint64_t dwb_enb:1;
		uint64_t fau_end:1;
#else
		uint64_t fau_end:1;
		uint64_t dwb_enb:1;
		uint64_t pko_enb:1;
		uint64_t inb_mat:1;
		uint64_t outb_mat:1;
		uint64_t rr_mode:1;
		uint64_t xmc_per:4;
		uint64_t fif_dly:1;
		uint64_t reserved_11_63:53;
#endif
	} cn61xx;
	struct cvmx_iob_ctl_status_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_10_63:54;
		uint64_t xmc_per:4;
		uint64_t rr_mode:1;
		uint64_t outb_mat:1;
		uint64_t inb_mat:1;
		uint64_t pko_enb:1;
		uint64_t dwb_enb:1;
		uint64_t fau_end:1;
#else
		uint64_t fau_end:1;
		uint64_t dwb_enb:1;
		uint64_t pko_enb:1;
		uint64_t inb_mat:1;
		uint64_t outb_mat:1;
		uint64_t rr_mode:1;
		uint64_t xmc_per:4;
		uint64_t reserved_10_63:54;
#endif
	} cn63xx;
	struct cvmx_iob_ctl_status_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_11_63:53;
		uint64_t fif_dly:1;
		uint64_t xmc_per:4;
		uint64_t rsvr5:1;
		uint64_t outb_mat:1;
		uint64_t inb_mat:1;
		uint64_t pko_enb:1;
		uint64_t dwb_enb:1;
		uint64_t fau_end:1;
#else
		uint64_t fau_end:1;
		uint64_t dwb_enb:1;
		uint64_t pko_enb:1;
		uint64_t inb_mat:1;
		uint64_t outb_mat:1;
		uint64_t rsvr5:1;
		uint64_t xmc_per:4;
		uint64_t fif_dly:1;
		uint64_t reserved_11_63:53;
#endif
	} cn68xx;
};
/*
 * Priority-count registers (all share one shape: a 15-bit count value
 * plus an enable bit) and the inbound control/data match registers.
 */
union cvmx_iob_dwb_pri_cnt {
	uint64_t u64;	/* raw 64-bit register value */
	struct cvmx_iob_dwb_pri_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63:48;
		uint64_t cnt_enb:1;
		uint64_t cnt_val:15;
#else
		uint64_t cnt_val:15;
		uint64_t cnt_enb:1;
		uint64_t reserved_16_63:48;
#endif
	} s;
};

/* FAU timeout: 12-bit timeout value plus an enable bit */
union cvmx_iob_fau_timeout {
	uint64_t u64;
	struct cvmx_iob_fau_timeout_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_13_63:51;
		uint64_t tout_enb:1;
		uint64_t tout_val:12;
#else
		uint64_t tout_val:12;
		uint64_t tout_enb:1;
		uint64_t reserved_13_63:51;
#endif
	} s;
};

union cvmx_iob_i2c_pri_cnt {
	uint64_t u64;
	struct cvmx_iob_i2c_pri_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63:48;
		uint64_t cnt_enb:1;
		uint64_t cnt_val:15;
#else
		uint64_t cnt_val:15;
		uint64_t cnt_enb:1;
		uint64_t reserved_16_63:48;
#endif
	} s;
};

/*
 * Inbound control match and its enable mask share one layout:
 * src/dst/opc fields plus an 8-bit mask.
 */
union cvmx_iob_inb_control_match {
	uint64_t u64;
	struct cvmx_iob_inb_control_match_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_29_63:35;
		uint64_t mask:8;
		uint64_t opc:4;
		uint64_t dst:9;
		uint64_t src:8;
#else
		uint64_t src:8;
		uint64_t dst:9;
		uint64_t opc:4;
		uint64_t mask:8;
		uint64_t reserved_29_63:35;
#endif
	} s;
};

union cvmx_iob_inb_control_match_enb {
	uint64_t u64;
	struct cvmx_iob_inb_control_match_enb_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_29_63:35;
		uint64_t mask:8;
		uint64_t opc:4;
		uint64_t dst:9;
		uint64_t src:8;
#else
		uint64_t src:8;
		uint64_t dst:9;
		uint64_t opc:4;
		uint64_t mask:8;
		uint64_t reserved_29_63:35;
#endif
	} s;
};

/* Inbound data match value and enable mask: full 64-bit data words */
union cvmx_iob_inb_data_match {
	uint64_t u64;
	struct cvmx_iob_inb_data_match_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t data:64;
#else
		uint64_t data:64;
#endif
	} s;
};

union cvmx_iob_inb_data_match_enb {
	uint64_t u64;
	struct cvmx_iob_inb_data_match_enb_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t data:64;
#else
		uint64_t data:64;
#endif
	} s;
};
/*
 * IOB interrupt enable (INT_ENB) and summary (INT_SUM) registers.
 * Both use the same bit layout (SOP/EOP/DAT conditions on the
 * passthrough and non-passthrough ports); cn68xx defines no bits.
 */
union cvmx_iob_int_enb {
	uint64_t u64;	/* raw 64-bit register value */
	struct cvmx_iob_int_enb_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_6_63:58;
		uint64_t p_dat:1;
		uint64_t np_dat:1;
		uint64_t p_eop:1;
		uint64_t p_sop:1;
		uint64_t np_eop:1;
		uint64_t np_sop:1;
#else
		uint64_t np_sop:1;
		uint64_t np_eop:1;
		uint64_t p_sop:1;
		uint64_t p_eop:1;
		uint64_t np_dat:1;
		uint64_t p_dat:1;
		uint64_t reserved_6_63:58;
#endif
	} s;
	struct cvmx_iob_int_enb_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_4_63:60;
		uint64_t p_eop:1;
		uint64_t p_sop:1;
		uint64_t np_eop:1;
		uint64_t np_sop:1;
#else
		uint64_t np_sop:1;
		uint64_t np_eop:1;
		uint64_t p_sop:1;
		uint64_t p_eop:1;
		uint64_t reserved_4_63:60;
#endif
	} cn30xx;
	struct cvmx_iob_int_enb_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_0_63:64;
#else
		uint64_t reserved_0_63:64;
#endif
	} cn68xx;
};

union cvmx_iob_int_sum {
	uint64_t u64;	/* raw 64-bit register value */
	struct cvmx_iob_int_sum_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_6_63:58;
		uint64_t p_dat:1;
		uint64_t np_dat:1;
		uint64_t p_eop:1;
		uint64_t p_sop:1;
		uint64_t np_eop:1;
		uint64_t np_sop:1;
#else
		uint64_t np_sop:1;
		uint64_t np_eop:1;
		uint64_t p_sop:1;
		uint64_t p_eop:1;
		uint64_t np_dat:1;
		uint64_t p_dat:1;
		uint64_t reserved_6_63:58;
#endif
	} s;
	struct cvmx_iob_int_sum_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_4_63:60;
		uint64_t p_eop:1;
		uint64_t p_sop:1;
		uint64_t np_eop:1;
		uint64_t np_sop:1;
#else
		uint64_t np_sop:1;
		uint64_t np_eop:1;
		uint64_t p_sop:1;
		uint64_t p_eop:1;
		uint64_t reserved_4_63:60;
#endif
	} cn30xx;
	struct cvmx_iob_int_sum_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_0_63:64;
#else
		uint64_t reserved_0_63:64;
#endif
	} cn68xx;
};
/*
 * More priority-count registers (same 15-bit count + enable shape)
 * and the outbound control/data match registers.
 */
union cvmx_iob_n2c_l2c_pri_cnt {
	uint64_t u64;	/* raw 64-bit register value */
	struct cvmx_iob_n2c_l2c_pri_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63:48;
		uint64_t cnt_enb:1;
		uint64_t cnt_val:15;
#else
		uint64_t cnt_val:15;
		uint64_t cnt_enb:1;
		uint64_t reserved_16_63:48;
#endif
	} s;
};

union cvmx_iob_n2c_rsp_pri_cnt {
	uint64_t u64;
	struct cvmx_iob_n2c_rsp_pri_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63:48;
		uint64_t cnt_enb:1;
		uint64_t cnt_val:15;
#else
		uint64_t cnt_val:15;
		uint64_t cnt_enb:1;
		uint64_t reserved_16_63:48;
#endif
	} s;
};

union cvmx_iob_outb_com_pri_cnt {
	uint64_t u64;
	struct cvmx_iob_outb_com_pri_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63:48;
		uint64_t cnt_enb:1;
		uint64_t cnt_val:15;
#else
		uint64_t cnt_val:15;
		uint64_t cnt_enb:1;
		uint64_t reserved_16_63:48;
#endif
	} s;
};

/*
 * Outbound control match and enable mask; note src/dst widths are
 * swapped relative to the inbound variant (src:9, dst:8) and an
 * eot bit replaces the opcode field.
 */
union cvmx_iob_outb_control_match {
	uint64_t u64;
	struct cvmx_iob_outb_control_match_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_26_63:38;
		uint64_t mask:8;
		uint64_t eot:1;
		uint64_t dst:8;
		uint64_t src:9;
#else
		uint64_t src:9;
		uint64_t dst:8;
		uint64_t eot:1;
		uint64_t mask:8;
		uint64_t reserved_26_63:38;
#endif
	} s;
};

union cvmx_iob_outb_control_match_enb {
	uint64_t u64;
	struct cvmx_iob_outb_control_match_enb_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_26_63:38;
		uint64_t mask:8;
		uint64_t eot:1;
		uint64_t dst:8;
		uint64_t src:9;
#else
		uint64_t src:9;
		uint64_t dst:8;
		uint64_t eot:1;
		uint64_t mask:8;
		uint64_t reserved_26_63:38;
#endif
	} s;
};

/* Outbound data match value and enable mask: full 64-bit data words */
union cvmx_iob_outb_data_match {
	uint64_t u64;
	struct cvmx_iob_outb_data_match_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t data:64;
#else
		uint64_t data:64;
#endif
	} s;
};

union cvmx_iob_outb_data_match_enb {
	uint64_t u64;
	struct cvmx_iob_outb_data_match_enb_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t data:64;
#else
		uint64_t data:64;
#endif
	} s;
};

union cvmx_iob_outb_fpa_pri_cnt {
	uint64_t u64;
	struct cvmx_iob_outb_fpa_pri_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63:48;
		uint64_t cnt_enb:1;
		uint64_t cnt_val:15;
#else
		uint64_t cnt_val:15;
		uint64_t cnt_enb:1;
		uint64_t reserved_16_63:48;
#endif
	} s;
};

union cvmx_iob_outb_req_pri_cnt {
	uint64_t u64;
	struct cvmx_iob_outb_req_pri_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63:48;
		uint64_t cnt_enb:1;
		uint64_t cnt_val:15;
#else
		uint64_t cnt_val:15;
		uint64_t cnt_enb:1;
		uint64_t reserved_16_63:48;
#endif
	} s;
};

union cvmx_iob_p2c_req_pri_cnt {
	uint64_t u64;
	struct cvmx_iob_p2c_req_pri_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63:48;
		uint64_t cnt_enb:1;
		uint64_t cnt_val:15;
#else
		uint64_t cnt_val:15;
		uint64_t cnt_enb:1;
		uint64_t reserved_16_63:48;
#endif
	} s;
};
/*
 * IOB_PKT_ERR: port (and, on newer chips, virtual port) associated
 * with a packet error.  IOB_TO_CMB_CREDITS: 3-bit credit counts for
 * the coherent memory bus paths; the third field differs per family
 * (pko_rd on cn52xx, dwb on cn68xx).
 */
union cvmx_iob_pkt_err {
	uint64_t u64;	/* raw 64-bit register value */
	struct cvmx_iob_pkt_err_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_12_63:52;
		uint64_t vport:6;
		uint64_t port:6;
#else
		uint64_t port:6;
		uint64_t vport:6;
		uint64_t reserved_12_63:52;
#endif
	} s;
	struct cvmx_iob_pkt_err_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_6_63:58;
		uint64_t port:6;
#else
		uint64_t port:6;
		uint64_t reserved_6_63:58;
#endif
	} cn30xx;
};

union cvmx_iob_to_cmb_credits {
	uint64_t u64;
	struct cvmx_iob_to_cmb_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_6_63:58;
		uint64_t ncb_rd:3;
		uint64_t ncb_wr:3;
#else
		uint64_t ncb_wr:3;
		uint64_t ncb_rd:3;
		uint64_t reserved_6_63:58;
#endif
	} s;
	struct cvmx_iob_to_cmb_credits_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_9_63:55;
		uint64_t pko_rd:3;
		uint64_t ncb_rd:3;
		uint64_t ncb_wr:3;
#else
		uint64_t ncb_wr:3;
		uint64_t ncb_rd:3;
		uint64_t pko_rd:3;
		uint64_t reserved_9_63:55;
#endif
	} cn52xx;
	struct cvmx_iob_to_cmb_credits_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_9_63:55;
		uint64_t dwb:3;
		uint64_t ncb_rd:3;
		uint64_t ncb_wr:3;
#else
		uint64_t ncb_wr:3;
		uint64_t ncb_rd:3;
		uint64_t dwb:3;
		uint64_t reserved_9_63:55;
#endif
	} cn68xx;
};
/*
 * Per-DID NCB credit registers.  All eleven share an identical
 * layout: a single 7-bit credit count in the low bits.
 */
union cvmx_iob_to_ncb_did_00_credits {
	uint64_t u64;	/* raw 64-bit register value */
	struct cvmx_iob_to_ncb_did_00_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_111_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_111_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_223_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_223_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_24_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_24_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_32_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_32_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_40_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_40_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_55_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_55_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_64_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_64_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_79_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_79_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_96_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_96_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

union cvmx_iob_to_ncb_did_98_credits {
	uint64_t u64;
	struct cvmx_iob_to_ncb_did_98_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_7_63:57;
		uint64_t crd:7;
#else
		uint64_t crd:7;
		uint64_t reserved_7_63:57;
#endif
	} s;
};

#endif
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd
* http://www.samsung.com
*/
#ifndef __LINUX_MFD_S2MPA01_H
#define __LINUX_MFD_S2MPA01_H

/* S2MPA01 registers */
/*
 * Register map of the S2MPA01 PMIC.  Enumerator ORDER IS the register
 * address map (values are implicit, starting at 0) - do not reorder
 * or insert entries.
 */
enum s2mpa01_reg {
	S2MPA01_REG_ID,
	S2MPA01_REG_INT1,
	S2MPA01_REG_INT2,
	S2MPA01_REG_INT3,
	S2MPA01_REG_INT1M,
	S2MPA01_REG_INT2M,
	S2MPA01_REG_INT3M,
	S2MPA01_REG_ST1,
	S2MPA01_REG_ST2,
	S2MPA01_REG_PWRONSRC,
	S2MPA01_REG_OFFSRC,
	S2MPA01_REG_RTC_BUF,
	S2MPA01_REG_CTRL1,
	S2MPA01_REG_ETC_TEST,
	S2MPA01_REG_RSVD1,
	S2MPA01_REG_BU_CHG,
	S2MPA01_REG_RAMP1,
	S2MPA01_REG_RAMP2,
	S2MPA01_REG_LDO_DSCH1,
	S2MPA01_REG_LDO_DSCH2,
	S2MPA01_REG_LDO_DSCH3,
	S2MPA01_REG_LDO_DSCH4,
	S2MPA01_REG_OTP_ADRL,
	S2MPA01_REG_OTP_ADRH,
	S2MPA01_REG_OTP_DATA,
	S2MPA01_REG_MON1SEL,
	S2MPA01_REG_MON2SEL,
	S2MPA01_REG_LEE,
	S2MPA01_REG_RSVD2,
	S2MPA01_REG_RSVD3,
	S2MPA01_REG_RSVD4,
	S2MPA01_REG_RSVD5,
	S2MPA01_REG_RSVD6,
	S2MPA01_REG_TOP_RSVD,
	S2MPA01_REG_DVS_SEL,
	S2MPA01_REG_DVS_PTR,
	S2MPA01_REG_DVS_DATA,
	S2MPA01_REG_RSVD_NO,
	S2MPA01_REG_UVLO,
	S2MPA01_REG_LEE_NO,
	/* buck converter control registers */
	S2MPA01_REG_B1CTRL1,
	S2MPA01_REG_B1CTRL2,
	S2MPA01_REG_B2CTRL1,
	S2MPA01_REG_B2CTRL2,
	S2MPA01_REG_B3CTRL1,
	S2MPA01_REG_B3CTRL2,
	S2MPA01_REG_B4CTRL1,
	S2MPA01_REG_B4CTRL2,
	S2MPA01_REG_B5CTRL1,
	S2MPA01_REG_B5CTRL2,
	S2MPA01_REG_B5CTRL3,
	S2MPA01_REG_B5CTRL4,
	S2MPA01_REG_B5CTRL5,
	S2MPA01_REG_B5CTRL6,
	S2MPA01_REG_B6CTRL1,
	S2MPA01_REG_B6CTRL2,
	S2MPA01_REG_B7CTRL1,
	S2MPA01_REG_B7CTRL2,
	S2MPA01_REG_B8CTRL1,
	S2MPA01_REG_B8CTRL2,
	S2MPA01_REG_B9CTRL1,
	S2MPA01_REG_B9CTRL2,
	S2MPA01_REG_B10CTRL1,
	S2MPA01_REG_B10CTRL2,
	/* LDO control registers */
	S2MPA01_REG_L1CTRL,
	S2MPA01_REG_L2CTRL,
	S2MPA01_REG_L3CTRL,
	S2MPA01_REG_L4CTRL,
	S2MPA01_REG_L5CTRL,
	S2MPA01_REG_L6CTRL,
	S2MPA01_REG_L7CTRL,
	S2MPA01_REG_L8CTRL,
	S2MPA01_REG_L9CTRL,
	S2MPA01_REG_L10CTRL,
	S2MPA01_REG_L11CTRL,
	S2MPA01_REG_L12CTRL,
	S2MPA01_REG_L13CTRL,
	S2MPA01_REG_L14CTRL,
	S2MPA01_REG_L15CTRL,
	S2MPA01_REG_L16CTRL,
	S2MPA01_REG_L17CTRL,
	S2MPA01_REG_L18CTRL,
	S2MPA01_REG_L19CTRL,
	S2MPA01_REG_L20CTRL,
	S2MPA01_REG_L21CTRL,
	S2MPA01_REG_L22CTRL,
	S2MPA01_REG_L23CTRL,
	S2MPA01_REG_L24CTRL,
	S2MPA01_REG_L25CTRL,
	S2MPA01_REG_L26CTRL,
	/* LDO over-current detection control */
	S2MPA01_REG_LDO_OVCB1,
	S2MPA01_REG_LDO_OVCB2,
	S2MPA01_REG_LDO_OVCB3,
	S2MPA01_REG_LDO_OVCB4,
};
/* S2MPA01 regulator ids */
/*
 * Logical regulator indices used by the regulator driver: 26 LDOs
 * followed by 10 bucks.  Order is part of the driver's ABI - do not
 * reorder.  S2MPA01_REGULATOR_MAX is the total count.
 */
enum s2mpa01_regulators {
	S2MPA01_LDO1,
	S2MPA01_LDO2,
	S2MPA01_LDO3,
	S2MPA01_LDO4,
	S2MPA01_LDO5,
	S2MPA01_LDO6,
	S2MPA01_LDO7,
	S2MPA01_LDO8,
	S2MPA01_LDO9,
	S2MPA01_LDO10,
	S2MPA01_LDO11,
	S2MPA01_LDO12,
	S2MPA01_LDO13,
	S2MPA01_LDO14,
	S2MPA01_LDO15,
	S2MPA01_LDO16,
	S2MPA01_LDO17,
	S2MPA01_LDO18,
	S2MPA01_LDO19,
	S2MPA01_LDO20,
	S2MPA01_LDO21,
	S2MPA01_LDO22,
	S2MPA01_LDO23,
	S2MPA01_LDO24,
	S2MPA01_LDO25,
	S2MPA01_LDO26,
	S2MPA01_BUCK1,
	S2MPA01_BUCK2,
	S2MPA01_BUCK3,
	S2MPA01_BUCK4,
	S2MPA01_BUCK5,
	S2MPA01_BUCK6,
	S2MPA01_BUCK7,
	S2MPA01_BUCK8,
	S2MPA01_BUCK9,
	S2MPA01_BUCK10,

	S2MPA01_REGULATOR_MAX,
};
/* voltage-selection bitfield masks within the CTRL registers */
#define S2MPA01_LDO_VSEL_MASK	0x3F
#define S2MPA01_BUCK_VSEL_MASK	0xFF
/* enable field: 2 bits at bits [7:6] of each CTRL register.  The MASK
 * macro references the SHIFT macro defined on the next line - legal,
 * since object-like macros expand at use, not at definition.
 */
#define S2MPA01_ENABLE_MASK	(0x03 << S2MPA01_ENABLE_SHIFT)
#define S2MPA01_ENABLE_SHIFT	0x06
/* number of selectable voltage steps = mask value + 1 */
#define S2MPA01_LDO_N_VOLTAGES	(S2MPA01_LDO_VSEL_MASK + 1)
#define S2MPA01_BUCK_N_VOLTAGES (S2MPA01_BUCK_VSEL_MASK + 1)

#define S2MPA01_RAMP_DELAY	12500	/* uV/us */

/* bit positions of the per-buck ramp-rate fields in the RAMP registers */
#define S2MPA01_BUCK16_RAMP_SHIFT	4
#define S2MPA01_BUCK24_RAMP_SHIFT	6
#define S2MPA01_BUCK3_RAMP_SHIFT	4
#define S2MPA01_BUCK5_RAMP_SHIFT	6
#define S2MPA01_BUCK7_RAMP_SHIFT	2
#define S2MPA01_BUCK8910_RAMP_SHIFT	0

/* bit positions of the per-buck ramp-enable flags */
#define S2MPA01_BUCK1_RAMP_EN_SHIFT	3
#define S2MPA01_BUCK2_RAMP_EN_SHIFT	2
#define S2MPA01_BUCK3_RAMP_EN_SHIFT	1
#define S2MPA01_BUCK4_RAMP_EN_SHIFT	0
#define S2MPA01_PMIC_EN_SHIFT	6

#endif /*__LINUX_MFD_S2MPA01_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* wm8850.dtsi - Device tree file for Wondermedia WM8850 SoC
*
* Copyright (C) 2012 Tony Prisk <[email protected]>
*/
/ {
	#address-cells = <1>;
	#size-cells = <1>;
	compatible = "wm,wm8850";

	cpus {
		#address-cells = <1>;
		#size-cells = <0>;

		/* single Cortex-A9 core */
		cpu@0 {
			device_type = "cpu";
			compatible = "arm,cortex-a9";
			reg = <0x0>;
		};
	};

	/* placeholder; base/size are expected to be filled in by the
	 * bootloader or a board-level dts
	 */
	memory {
		device_type = "memory";
		reg = <0x0 0x0>;
	};

	aliases {
		serial0 = &uart0;
		serial1 = &uart1;
		serial2 = &uart2;
		serial3 = &uart3;
	};

	soc {
		#address-cells = <1>;
		#size-cells = <1>;
		compatible = "simple-bus";
		ranges;
		interrupt-parent = <&intc0>;

		intc0: interrupt-controller@d8140000 {
			compatible = "via,vt8500-intc";
			interrupt-controller;
			reg = <0xd8140000 0x10000>;
			#interrupt-cells = <1>;
		};

		/* Secondary IC cascaded to intc0 */
		intc1: interrupt-controller@d8150000 {
			compatible = "via,vt8500-intc";
			interrupt-controller;
			#interrupt-cells = <1>;
			reg = <0xD8150000 0x10000>;
			interrupts = <56 57 58 59 60 61 62 63>;
		};

		/* pin controller doubles as a GPIO and interrupt controller */
		pinctrl: pinctrl@d8110000 {
			compatible = "wm,wm8850-pinctrl";
			reg = <0xd8110000 0x10000>;
			interrupt-controller;
			#interrupt-cells = <2>;
			gpio-controller;
			#gpio-cells = <2>;
		};

		/* power management controller; hosts the clock tree below */
		pmc@d8130000 {
			compatible = "via,vt8500-pmc";
			reg = <0xd8130000 0x1000>;

			clocks {
				#address-cells = <1>;
				#size-cells = <0>;

				/* fixed reference oscillators */
				ref25: ref25M {
					#clock-cells = <0>;
					compatible = "fixed-clock";
					clock-frequency = <25000000>;
				};

				ref24: ref24M {
					#clock-cells = <0>;
					compatible = "fixed-clock";
					clock-frequency = <24000000>;
				};

				/* seven PLLs, all fed from the 24 MHz ref;
				 * reg values are offsets within the PMC
				 */
				plla: plla {
					#clock-cells = <0>;
					compatible = "wm,wm8850-pll-clock";
					clocks = <&ref24>;
					reg = <0x200>;
				};

				pllb: pllb {
					#clock-cells = <0>;
					compatible = "wm,wm8850-pll-clock";
					clocks = <&ref24>;
					reg = <0x204>;
				};

				pllc: pllc {
					#clock-cells = <0>;
					compatible = "wm,wm8850-pll-clock";
					clocks = <&ref24>;
					reg = <0x208>;
				};

				plld: plld {
					#clock-cells = <0>;
					compatible = "wm,wm8850-pll-clock";
					clocks = <&ref24>;
					reg = <0x20c>;
				};

				plle: plle {
					#clock-cells = <0>;
					compatible = "wm,wm8850-pll-clock";
					clocks = <&ref24>;
					reg = <0x210>;
				};

				pllf: pllf {
					#clock-cells = <0>;
					compatible = "wm,wm8850-pll-clock";
					clocks = <&ref24>;
					reg = <0x214>;
				};

				pllg: pllg {
					#clock-cells = <0>;
					compatible = "wm,wm8850-pll-clock";
					clocks = <&ref24>;
					reg = <0x218>;
				};

				/* divided device clocks off the PLLs */
				clkarm: arm {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&plla>;
					divisor-reg = <0x300>;
				};

				clkahb: ahb {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&pllb>;
					divisor-reg = <0x304>;
				};

				clkapb: apb {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&pllb>;
					divisor-reg = <0x320>;
				};

				clkddr: ddr {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&plld>;
					divisor-reg = <0x310>;
				};

				/* UART clocks: gated 24 MHz, one enable bit each */
				clkuart0: uart0 {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&ref24>;
					enable-reg = <0x254>;
					enable-bit = <24>;
				};

				clkuart1: uart1 {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&ref24>;
					enable-reg = <0x254>;
					enable-bit = <25>;
				};

				clkuart2: uart2 {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&ref24>;
					enable-reg = <0x254>;
					enable-bit = <26>;
				};

				clkuart3: uart3 {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&ref24>;
					enable-reg = <0x254>;
					enable-bit = <27>;
				};

				clkpwm: pwm {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&pllb>;
					divisor-reg = <0x350>;
					enable-reg = <0x250>;
					enable-bit = <17>;
				};

				clksdhc: sdhc {
					#clock-cells = <0>;
					compatible = "via,vt8500-device-clock";
					clocks = <&pllb>;
					divisor-reg = <0x330>;
					divisor-mask = <0x3f>;
					enable-reg = <0x250>;
					enable-bit = <0>;
				};
			};
		};

		fb: fb@d8051700 {
			compatible = "wm,wm8505-fb";
			reg = <0xd8051700 0x200>;
		};

		/* 2D graphics raster-op engine */
		ge_rops@d8050400 {
			compatible = "wm,prizm-ge-rops";
			reg = <0xd8050400 0x100>;
		};

		pwm: pwm@d8220000 {
			#pwm-cells = <3>;
			compatible = "via,vt8500-pwm";
			reg = <0xd8220000 0x100>;
			clocks = <&clkpwm>;
		};

		timer@d8130100 {
			compatible = "via,vt8500-timer";
			reg = <0xd8130100 0x28>;
			interrupts = <36>;
		};

		/* USB: one EHCI plus two UHCI companion controllers,
		 * all sharing interrupt 26
		 */
		ehci@d8007900 {
			compatible = "via,vt8500-ehci";
			reg = <0xd8007900 0x200>;
			interrupts = <26>;
		};

		usb@d8007b00 {
			compatible = "platform-uhci";
			reg = <0xd8007b00 0x200>;
			interrupts = <26>;
		};

		usb@d8008d00 {
			compatible = "platform-uhci";
			reg = <0xd8008d00 0x200>;
			interrupts = <26>;
		};

		/* UARTs default to disabled; boards enable what they wire up */
		uart0: serial@d8200000 {
			compatible = "via,vt8500-uart";
			reg = <0xd8200000 0x1040>;
			interrupts = <32>;
			clocks = <&clkuart0>;
			status = "disabled";
		};

		uart1: serial@d82b0000 {
			compatible = "via,vt8500-uart";
			reg = <0xd82b0000 0x1040>;
			interrupts = <33>;
			clocks = <&clkuart1>;
			status = "disabled";
		};

		uart2: serial@d8210000 {
			compatible = "via,vt8500-uart";
			reg = <0xd8210000 0x1040>;
			interrupts = <47>;
			clocks = <&clkuart2>;
			status = "disabled";
		};

		uart3: serial@d82c0000 {
			compatible = "via,vt8500-uart";
			reg = <0xd82c0000 0x1040>;
			interrupts = <50>;
			clocks = <&clkuart3>;
			status = "disabled";
		};

		rtc@d8100000 {
			compatible = "via,vt8500-rtc";
			reg = <0xd8100000 0x10000>;
			interrupts = <48>;
		};

		sdhc@d800a000 {
			compatible = "wm,wm8505-sdhc";
			reg = <0xd800a000 0x1000>;
			interrupts = <20 21>;
			clocks = <&clksdhc>;
			bus-width = <4>;
			sdon-inverted;
		};

		ethernet@d8004000 {
			compatible = "via,vt8500-rhine";
			reg = <0xd8004000 0x100>;
			interrupts = <10>;
		};
	};
};
|
/*
 *  Copy Plug-In (verbatim per-channel PCM copy; no format conversion)
* Copyright (c) 2000 by Abramo Bagnara <[email protected]>
*
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "pcm_plugin.h"
/*
 * copy_transfer - transfer callback of the "copy" plugin.
 * @plugin:       the plugin instance (src/dst formats must agree)
 * @src_channels: per-channel source descriptions
 * @dst_channels: per-channel destination descriptions
 * @frames:       number of frames to handle
 *
 * Copies @frames frames for each of plugin->src_format.channels channels.
 * A disabled source channel is not copied: its destination is silenced
 * (when wanted) and marked disabled instead.
 *
 * Returns the number of frames handled, or -ENXIO on invalid arguments
 * or byte-unaligned channel areas.
 */
static snd_pcm_sframes_t copy_transfer(struct snd_pcm_plugin *plugin,
				       const struct snd_pcm_plugin_channel *src_channels,
				       struct snd_pcm_plugin_channel *dst_channels,
				       snd_pcm_uframes_t frames)
{
	unsigned int channel;
	unsigned int nchannels;

	if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
		return -ENXIO;
	if (frames == 0)
		return 0;
	nchannels = plugin->src_format.channels;
	/*
	 * Advance both channel pointers in the loop header.  The previous
	 * code incremented them at the bottom of the loop body, so the
	 * 'continue' taken for a disabled source channel left both
	 * pointers in place and every remaining iteration re-examined the
	 * same (disabled) channel pair instead of the following channels.
	 */
	for (channel = 0; channel < nchannels;
	     channel++, src_channels++, dst_channels++) {
		/* area offsets/strides are in bits; require byte alignment */
		if (snd_BUG_ON(src_channels->area.first % 8 ||
			       src_channels->area.step % 8))
			return -ENXIO;
		if (snd_BUG_ON(dst_channels->area.first % 8 ||
			       dst_channels->area.step % 8))
			return -ENXIO;
		if (!src_channels->enabled) {
			if (dst_channels->wanted)
				snd_pcm_area_silence(&dst_channels->area, 0, frames, plugin->dst_format.format);
			dst_channels->enabled = 0;
			continue;
		}
		dst_channels->enabled = 1;
		snd_pcm_area_copy(&src_channels->area, 0, &dst_channels->area, 0, frames, plugin->src_format.format);
	}
	return frames;
}
/*
 * snd_pcm_plugin_build_copy - create a "copy" plugin instance.
 * @plug:       owning PCM substream
 * @src_format: source format description
 * @dst_format: destination format description
 * @r_plugin:   receives the new plugin on success (NULL on entry)
 *
 * The copy plugin performs no conversion, so source and destination must
 * already agree in sample format, rate and channel count.
 *
 * Returns 0 on success, -ENXIO on invalid arguments or formats, or the
 * error code from snd_pcm_plugin_build().
 */
int snd_pcm_plugin_build_copy(struct snd_pcm_substream *plug,
			      struct snd_pcm_plugin_format *src_format,
			      struct snd_pcm_plugin_format *dst_format,
			      struct snd_pcm_plugin **r_plugin)
{
	int err;
	struct snd_pcm_plugin *plugin;
	int width;

	if (snd_BUG_ON(!r_plugin))
		return -ENXIO;
	*r_plugin = NULL;

	/* copying cannot change format, rate or channel count */
	if (snd_BUG_ON(src_format->format != dst_format->format))
		return -ENXIO;
	if (snd_BUG_ON(src_format->rate != dst_format->rate))
		return -ENXIO;
	if (snd_BUG_ON(src_format->channels != dst_format->channels))
		return -ENXIO;

	/* reject formats without a defined physical sample width */
	width = snd_pcm_format_physical_width(src_format->format);
	if (snd_BUG_ON(width <= 0))
		return -ENXIO;

	err = snd_pcm_plugin_build(plug, "copy", src_format, dst_format,
				   0, &plugin);
	if (err < 0)
		return err;
	plugin->transfer = copy_transfer;
	*r_plugin = plugin;
	return 0;
}
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Endian 4i Edge 200 Board Description
* Note: Endian UTM Mini is hardware clone of Endian Edge 200
* Copyright 2021-2022 Pawel Dembicki <[email protected]>
*/
/dts-v1/;
#include "kirkwood.dtsi"
#include "kirkwood-6281.dtsi"
#include <dt-bindings/leds/common.h>
/ {
	model = "Endian 4i Edge 200";
	compatible = "endian,4i-edge-200", "marvell,kirkwood-88f6281", "marvell,kirkwood";

	memory {
		device_type = "memory";
		reg = <0x00000000 0x20000000>;	/* 512 MiB at address 0 */
	};

	chosen {
		bootargs = "console=ttyS0,115200n8";
		stdout-path = &uart0;
	};

	/* Front-panel LEDs on GPIOs (pins muxed by pmx_led). */
	leds {
		compatible = "gpio-leds";
		pinctrl-0 = <&pmx_led>;
		pinctrl-names = "default";

		led-1 {
			function = LED_FUNCTION_SD;
			color = <LED_COLOR_ID_AMBER>;
			gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
			linux,default-trigger = "mmc0";	/* blink on SD activity */
		};

		led-2 {
			function = LED_FUNCTION_STATUS;
			color = <LED_COLOR_ID_AMBER>;
			gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
		};

		led-3 {
			function = LED_FUNCTION_STATUS;
			color = <LED_COLOR_ID_GREEN>;
			gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>;
		};
	};
};
/* GbE 0 feeds the internal switch over a fixed 1000/full link. */
&eth0 {
	status = "okay";
};

&eth0port {
	speed = <1000>;
	duplex = <1>;
};

/* GbE 1 is wired to a discrete PHY at MDIO address 0x0b. */
&eth1 {
	status = "okay";
};

&eth1port {
	phy-handle = <&ethphyb>;
};

&mdio {
	status = "okay";

	ethphyb: ethernet-phy@b {
		reg = <0x0b>;
		marvell,reg-init =
			/* link-activity, bi-color mode 4 */
			<3 0x10 0xfff0 0xf>; /* Reg 3,16 <- 0xzzzf */
	};

	/* mv88e6xxx DSA switch: four user ports plus CPU port 5. */
	switch0: switch@11 {
		compatible = "marvell,mv88e6085";
		reg = <0x11>;

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			port@0 {
				reg = <0>;
				label = "port1";
			};

			port@1 {
				reg = <1>;
				label = "port2";
			};

			port@2 {
				reg = <2>;
				label = "port3";
			};

			port@3 {
				reg = <3>;
				label = "port4";
			};

			/* CPU-facing port: RGMII with internal delays */
			port@5 {
				reg = <5>;
				phy-mode = "rgmii-id";
				ethernet = <&eth0port>;

				fixed-link {
					speed = <1000>;
					full-duplex;
				};
			};
		};
	};
};
&nand {
	status = "okay";
	pinctrl-0 = <&pmx_nand>;
	pinctrl-names = "default";

	/* Bootloader areas are write-protected. */
	partition@0 {
		label = "u-boot";
		reg = <0x00000000 0x000a0000>;
		read-only;
	};

	partition@a0000 {
		label = "u-boot-env";
		reg = <0x000a0000 0x00060000>;
		read-only;
	};

	partition@100000 {
		label = "kernel";
		reg = <0x00100000 0x00400000>;
	};

	partition@500000 {
		label = "ubi";
		reg = <0x00500000 0x1fb00000>;
	};
};

&pciec {
	status = "okay";
};

&pcie0 {
	status = "okay";
};

&pinctrl {
	pinctrl-0 = <&pmx_sysrst>;
	pinctrl-names = "default";

	pmx_sysrst: pmx-sysrst {
		marvell,pins = "mpp6";
		marvell,function = "sysrst";
	};

	pmx_sdio_cd: pmx-sdio-cd {
		marvell,pins = "mpp28";
		marvell,function = "gpio";
	};

	pmx_led: pmx-led {
		marvell,pins = "mpp34", "mpp35", "mpp49";
		marvell,function = "gpio";
	};
};

&rtc {
	status = "okay";
};

/* SATA PHYs are unused on this board. */
&sata_phy0 {
	status = "disabled";
};

&sata_phy1 {
	status = "disabled";
};

&sdio {
	pinctrl-0 = <&pmx_sdio_cd>;
	pinctrl-names = "default";
	status = "okay";
	/* NOTE(review): raw GPIO flags value 9 — presumably active-low
	 * card-detect with pull-up; confirm and replace with the
	 * dt-bindings macros.
	 */
	cd-gpios = <&gpio0 28 9>;
};

&uart0 {
	status = "okay";
};

&usb0 {
	status = "okay";
};
|
// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
* Apple iPad Pro (9.7-inch) (Wi-Fi), J127, iPad6,3 (A1673)
* Copyright (c) 2022, Konrad Dybcio <[email protected]>
*/
/dts-v1/;

#include "s8001-pro.dtsi"

/*
 * Board-level identity only; the hardware description lives in the
 * shared s8001-pro.dtsi include.
 */
/ {
	compatible = "apple,j127", "apple,s8001", "apple,arm-platform";
	model = "Apple iPad Pro (9.7-inch) (Wi-Fi)";
};
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2008, Christoph Hellwig
* All Rights Reserved.
*/
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_acl.h"
#include "xfs_trans.h"
#include "xfs_xattr.h"
#include <linux/posix_acl_xattr.h>
/*
* Locking scheme:
* - all ACL updates are protected by inode->i_mutex, which is taken before
* calling into this file.
*/
/*
 * Convert an on-disk (big-endian) ACL extended attribute into an in-core
 * posix_acl.
 *
 * @mp:          mount, used only for corruption reporting
 * @aclp:        raw attribute value as read from disk
 * @len:         length of @aclp in bytes
 * @max_entries: upper bound on the entry count accepted from disk
 *
 * Returns the allocated ACL (released by the caller), -EFSCORRUPTED if the
 * buffer is too short or its entry count is inconsistent with @len,
 * -ENOMEM on allocation failure, or -EINVAL on an unknown tag.
 */
STATIC struct posix_acl *
xfs_acl_from_disk(
	struct xfs_mount	*mp,
	const struct xfs_acl	*aclp,
	int			len,
	int			max_entries)
{
	struct posix_acl_entry	*acl_e;
	struct posix_acl	*acl;
	const struct xfs_acl_entry *ace;
	unsigned int		count, i;

	/* must at least hold the header before acl_cnt can be trusted */
	if (len < sizeof(*aclp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	count = be32_to_cpu(aclp->acl_cnt);
	if (count > max_entries || XFS_ACL_SIZE(count) != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
				len);
		return ERR_PTR(-EFSCORRUPTED);
	}

	acl = posix_acl_alloc(count, GFP_KERNEL);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		acl_e = &acl->a_entries[i];
		ace = &aclp->acl_entry[i];

		/*
		 * The tag is 32 bits on disk and 16 bits in core.
		 *
		 * Because every access to it goes through the core
		 * format first this is not a problem.
		 */
		acl_e->e_tag = be32_to_cpu(ace->ae_tag);
		acl_e->e_perm = be16_to_cpu(ace->ae_perm);

		switch (acl_e->e_tag) {
		case ACL_USER:
			acl_e->e_uid = make_kuid(&init_user_ns,
						 be32_to_cpu(ace->ae_id));
			break;
		case ACL_GROUP:
			acl_e->e_gid = make_kgid(&init_user_ns,
						 be32_to_cpu(ace->ae_id));
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			/* these tags carry no uid/gid qualifier */
			break;
		default:
			goto fail;
		}
	}
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}
STATIC void
xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
{
const struct posix_acl_entry *acl_e;
struct xfs_acl_entry *ace;
int i;
aclp->acl_cnt = cpu_to_be32(acl->a_count);
for (i = 0; i < acl->a_count; i++) {
ace = &aclp->acl_entry[i];
acl_e = &acl->a_entries[i];
ace->ae_tag = cpu_to_be32(acl_e->e_tag);
switch (acl_e->e_tag) {
case ACL_USER:
ace->ae_id = cpu_to_be32(
from_kuid(&init_user_ns, acl_e->e_uid));
break;
case ACL_GROUP:
ace->ae_id = cpu_to_be32(
from_kgid(&init_user_ns, acl_e->e_gid));
break;
default:
ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
break;
}
ace->ae_perm = cpu_to_be16(acl_e->e_perm);
}
}
/*
 * ->get_acl callback: read the ACL of the given @type from the attribute
 * fork.  RCU-walk lookups may not sleep, so those return -ECHILD and the
 * VFS retries in ref-walk mode.
 *
 * Returns the ACL, NULL if the attribute does not exist, or an ERR_PTR.
 */
struct posix_acl *
xfs_get_acl(struct inode *inode, int type, bool rcu)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct posix_acl	*acl = NULL;
	struct xfs_da_args	args = {
		.dp		= ip,
		.attr_filter	= XFS_ATTR_ROOT,
		.valuelen	= XFS_ACL_MAX_SIZE(mp),
	};
	int			error;

	if (rcu)
		return ERR_PTR(-ECHILD);

	trace_xfs_get_acl(ip);

	switch (type) {
	case ACL_TYPE_ACCESS:
		args.name = SGI_ACL_FILE;
		break;
	case ACL_TYPE_DEFAULT:
		args.name = SGI_ACL_DEFAULT;
		break;
	default:
		BUG();		/* only the two POSIX ACL types exist */
	}
	args.namelen = strlen(args.name);

	/*
	 * If the attribute doesn't exist make sure we have a negative cache
	 * entry, for any other error assume it is transient.
	 */
	error = xfs_attr_get(&args);
	if (!error) {
		acl = xfs_acl_from_disk(mp, args.value, args.valuelen,
					XFS_ACL_MAX_ENTRIES(mp));
	} else if (error != -ENOATTR) {
		acl = ERR_PTR(error);
	}

	kvfree(args.value);
	return acl;
}
/*
 * Store (or, when @acl is NULL, remove) the ACL attribute of the given
 * @type and refresh the VFS ACL cache on success.
 *
 * Returns 0 on success or a negative errno.
 */
int
__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_da_args	args = {
		.dp		= ip,
		.attr_filter	= XFS_ATTR_ROOT,
	};
	int			error;

	switch (type) {
	case ACL_TYPE_ACCESS:
		args.name = SGI_ACL_FILE;
		break;
	case ACL_TYPE_DEFAULT:
		/* default ACLs only make sense on directories */
		if (!S_ISDIR(inode->i_mode))
			return acl ? -EACCES : 0;
		args.name = SGI_ACL_DEFAULT;
		break;
	default:
		return -EINVAL;
	}
	args.namelen = strlen(args.name);

	if (acl) {
		args.valuelen = XFS_ACL_SIZE(acl->a_count);
		args.value = kvzalloc(args.valuelen, GFP_KERNEL);
		if (!args.value)
			return -ENOMEM;
		xfs_acl_to_disk(args.value, acl);
		error = xfs_attr_change(&args, XFS_ATTRUPDATE_UPSERT);
		kvfree(args.value);
	} else {
		error = xfs_attr_change(&args, XFS_ATTRUPDATE_REMOVE);
		/*
		 * If the attribute didn't exist to start with that's fine.
		 */
		if (error == -ENOATTR)
			error = 0;
	}

	if (!error)
		set_cached_acl(inode, type, acl);
	return error;
}
/*
 * Log the mode change implied by an access-ACL update: set the new i_mode
 * and ctime in a transaction of its own, separate from the xattr update
 * done in __xfs_set_acl().
 */
static int
xfs_acl_set_mode(
	struct inode		*inode,
	umode_t			mode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* lock ownership passes to the transaction; released at commit */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	inode->i_mode = mode;
	inode_set_ctime_current(inode);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/* honour the "wsync" mount option: commit synchronously */
	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
/*
 * ->set_acl callback: validate the entry count, derive a possible i_mode
 * change via posix_acl_update_mode(), then store the ACL and finally
 * apply the new mode.
 */
int
xfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
	    struct posix_acl *acl, int type)
{
	umode_t mode;
	bool set_mode = false;
	int error = 0;
	struct inode *inode = d_inode(dentry);

	if (!acl)
		goto set_acl;

	error = -E2BIG;
	if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
		return error;

	if (type == ACL_TYPE_ACCESS) {
		/* the access ACL may imply a change of i_mode */
		error = posix_acl_update_mode(idmap, inode, &mode, &acl);
		if (error)
			return error;
		set_mode = true;
	}

 set_acl:
	/*
	 * We set the mode after successfully updating the ACL xattr because the
	 * xattr update can fail at ENOSPC and we don't want to change the mode
	 * if the ACL update hasn't been applied.
	 */
	error = __xfs_set_acl(inode, acl, type);
	if (!error && set_mode && mode != inode->i_mode)
		error = xfs_acl_set_mode(inode, mode);

	return error;
}
/*
 * Invalidate any cached ACLs if the user has bypassed the ACL interface.
 * We don't validate the content whatsoever so it is caller responsibility
 * to provide data in valid format and ensure i_mode is consistent.
 */
void
xfs_forget_acl(
	struct inode		*inode,
	const char		*name)
{
	int			type;

	if (strcmp(name, SGI_ACL_FILE) == 0)
		type = ACL_TYPE_ACCESS;
	else if (strcmp(name, SGI_ACL_DEFAULT) == 0)
		type = ACL_TYPE_DEFAULT;
	else
		return;		/* not an ACL attribute; nothing cached */

	forget_cached_acl(inode, type);
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* SDM845 OnePlus 6(T) (enchilada / fajita) common device tree source
*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
/dts-v1/;
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include <dt-bindings/sound/qcom,q6afe.h>
#include <dt-bindings/sound/qcom,q6asm.h>
#include "sdm845.dtsi"
#include "sdm845-wcd9340.dtsi"
#include "pm8998.dtsi"
#include "pmi8998.dtsi"
/delete-node/ &rmtfs_mem;
/ {
	aliases {
		serial0 = &uart9;	/* debug console (see chosen) */
		serial1 = &uart6;	/* bluetooth UART */
	};

	chosen {
		stdout-path = "serial0:115200n8";
	};

	/* Lid/cover hall-effect switch on TLMM GPIO 124. */
	gpio-hall-sensor {
		compatible = "gpio-keys";
		label = "Hall effect sensor";
		pinctrl-0 = <&hall_sensor_default>;
		pinctrl-names = "default";

		event-hall-sensor {
			gpios = <&tlmm 124 GPIO_ACTIVE_LOW>;
			label = "Hall Effect Sensor";
			linux,input-type = <EV_SW>;
			linux,code = <SW_LID>;
			linux,can-disable;
			wakeup-source;
		};
	};

	/* Volume rocker on PM8998 PMIC GPIOs 5/6, active low. */
	gpio-keys {
		compatible = "gpio-keys";
		label = "Volume keys";
		autorepeat;
		pinctrl-names = "default";
		pinctrl-0 = <&volume_down_gpio &volume_up_gpio>;

		key-vol-down {
			label = "Volume down";
			linux,code = <KEY_VOLUMEDOWN>;
			gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
			debounce-interval = <15>;
		};

		key-vol-up {
			label = "Volume up";
			linux,code = <KEY_VOLUMEUP>;
			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
			debounce-interval = <15>;
		};
	};

	reserved-memory {
		/*
		 * The rmtfs_mem needs to be guarded due to "XPU limitations"
		 * it is otherwise possible for an allocation adjacent to the
		 * rmtfs_mem region to trigger an XPU violation, causing a crash.
		 */
		rmtfs_lower_guard: rmtfs-lower-guard@f5b00000 {
			no-map;
			reg = <0 0xf5b00000 0 0x1000>;
		};

		/*
		 * The rmtfs memory region in downstream is 'dynamically allocated'
		 * but given the same address every time. Hard code it as this address is
		 * where the modem firmware expects it to be.
		 */
		rmtfs_mem: rmtfs-mem@f5b01000 {
			compatible = "qcom,rmtfs-mem";
			reg = <0 0xf5b01000 0 0x200000>;
			no-map;

			qcom,client-id = <1>;
			qcom,vmid = <QCOM_SCM_VMID_MSS_MSA>;
		};

		rmtfs_upper_guard: rmtfs-upper-guard@f5d01000 {
			no-map;
			reg = <0 0xf5d01000 0 0x1000>;
		};

		/*
		 * It seems like reserving the old rmtfs_mem region is also needed to prevent
		 * random crashes which are most likely modem related, more testing needed.
		 */
		removed_region: removed-region@88f00000 {
			no-map;
			reg = <0 0x88f00000 0 0x1c00000>;
		};

		/* Persistent oops/console storage surviving warm reboots. */
		ramoops: ramoops@ac300000 {
			compatible = "ramoops";
			reg = <0 0xac300000 0 0x400000>;
			record-size = <0x40000>;
			console-size = <0x40000>;
			ftrace-size = <0x40000>;
			pmsg-size = <0x200000>;
			ecc-size = <16>;
		};
	};

	/* Battery/system rail feeding the PMIC regulators below. */
	vph_pwr: vph-pwr-regulator {
		compatible = "regulator-fixed";
		regulator-name = "vph_pwr";
		regulator-min-microvolt = <3700000>;
		regulator-max-microvolt = <3700000>;
	};

	/*
	 * Apparently RPMh does not provide support for PM8998 S4 because it
	 * is always-on; model it as a fixed regulator.
	 */
	vreg_s4a_1p8: pm8998-smps4 {
		compatible = "regulator-fixed";
		regulator-name = "vreg_s4a_1p8";

		regulator-min-microvolt = <1800000>;
		regulator-max-microvolt = <1800000>;

		regulator-always-on;
		regulator-boot-on;

		vin-supply = <&vph_pwr>;
	};

	/*
	 * The touchscreen regulator seems to be controlled somehow by a gpio.
	 * Model it as a fixed regulator and keep it on. Without schematics we
	 * don't know how this is actually wired up...
	 */
	ts_1p8_supply: ts-1p8-regulator {
		compatible = "regulator-fixed";
		regulator-name = "ts_1p8_supply";

		regulator-min-microvolt = <1800000>;
		regulator-max-microvolt = <1800000>;

		gpio = <&tlmm 88 0>;
		enable-active-high;

		regulator-boot-on;
	};
};
/* Audio DSP remoteproc with device-specific firmware. */
&adsp_pas {
	status = "okay";
	firmware-name = "qcom/sdm845/oneplus6/adsp.mbn";
};

&apps_rsc {
	/* PM8998 (PMIC "a") regulators controlled through RPMh. */
	regulators-0 {
		compatible = "qcom,pm8998-rpmh-regulators";
		qcom,pmic-id = "a";

		vdd-s1-supply = <&vph_pwr>;
		vdd-s2-supply = <&vph_pwr>;
		vdd-s3-supply = <&vph_pwr>;
		vdd-s4-supply = <&vph_pwr>;
		vdd-s5-supply = <&vph_pwr>;
		vdd-s6-supply = <&vph_pwr>;
		vdd-s7-supply = <&vph_pwr>;
		vdd-s8-supply = <&vph_pwr>;
		vdd-s9-supply = <&vph_pwr>;
		vdd-s10-supply = <&vph_pwr>;
		vdd-s11-supply = <&vph_pwr>;
		vdd-s12-supply = <&vph_pwr>;
		vdd-s13-supply = <&vph_pwr>;
		vdd-l1-l27-supply = <&vreg_s7a_1p025>;
		vdd-l2-l8-l17-supply = <&vreg_s3a_1p35>;
		vdd-l3-l11-supply = <&vreg_s7a_1p025>;
		vdd-l4-l5-supply = <&vreg_s7a_1p025>;
		vdd-l6-supply = <&vph_pwr>;
		vdd-l7-l12-l14-l15-supply = <&vreg_s5a_2p04>;
		vdd-l9-supply = <&vreg_bob>;
		vdd-l10-l23-l25-supply = <&vreg_bob>;
		vdd-l13-l19-l21-supply = <&vreg_bob>;
		vdd-l16-l28-supply = <&vreg_bob>;
		vdd-l18-l22-supply = <&vreg_bob>;
		vdd-l20-l24-supply = <&vreg_bob>;
		vdd-l26-supply = <&vreg_s3a_1p35>;
		vin-lvs-1-2-supply = <&vreg_s4a_1p8>;

		vreg_s3a_1p35: smps3 {
			regulator-min-microvolt = <1352000>;
			regulator-max-microvolt = <1352000>;
		};

		vreg_s5a_2p04: smps5 {
			regulator-min-microvolt = <1904000>;
			regulator-max-microvolt = <2040000>;
		};

		vreg_s7a_1p025: smps7 {
			regulator-min-microvolt = <900000>;
			regulator-max-microvolt = <1028000>;
		};

		/* The extra labels name the loads each rail feeds. */
		vdda_mipi_dsi0_pll:
		vdda_qlink_lv:
		vdda_ufs1_core:
		vdda_usb1_ss_core:
		vreg_l1a_0p875: ldo1 {
			regulator-min-microvolt = <880000>;
			regulator-max-microvolt = <880000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vreg_l2a_1p2: ldo2 {
			regulator-min-microvolt = <1200000>;
			regulator-max-microvolt = <1200000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
			regulator-always-on;
		};

		vreg_l5a_0p8: ldo5 {
			regulator-min-microvolt = <800000>;
			regulator-max-microvolt = <800000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vreg_l7a_1p8: ldo7 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vdda_qusb_hs0_1p8:
		vreg_l12a_1p8: ldo12 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vreg_l14a_1p88: ldo14 {
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <1800000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
			regulator-always-on;
		};

		vreg_l17a_1p3: ldo17 {
			regulator-min-microvolt = <1304000>;
			regulator-max-microvolt = <1304000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vreg_l20a_2p95: ldo20 {
			regulator-min-microvolt = <2704000>;
			regulator-max-microvolt = <2960000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vreg_l23a_3p3: ldo23 {
			regulator-min-microvolt = <3300000>;
			regulator-max-microvolt = <3312000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vdda_qusb_hs0_3p1:
		vreg_l24a_3p075: ldo24 {
			regulator-min-microvolt = <3088000>;
			regulator-max-microvolt = <3088000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vreg_l25a_3p3: ldo25 {
			regulator-min-microvolt = <3300000>;
			regulator-max-microvolt = <3312000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vdda_mipi_dsi0_1p2:
		vdda_ufs1_1p2:
		vreg_l26a_1p2: ldo26 {
			regulator-min-microvolt = <1200000>;
			regulator-max-microvolt = <1200000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};

		vreg_l28a_3p0: ldo28 {
			regulator-min-microvolt = <2856000>;
			regulator-max-microvolt = <3008000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
		};
	};

	/* PMI8998 (PMIC "b"): only the BOB rail is managed here. */
	regulators-1 {
		compatible = "qcom,pmi8998-rpmh-regulators";
		qcom,pmic-id = "b";

		vdd-bob-supply = <&vph_pwr>;

		vreg_bob: bob {
			regulator-min-microvolt = <3312000>;
			regulator-max-microvolt = <3600000>;
			regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
			regulator-allow-bypass;
		};
	};

	/* PM8005 (PMIC "c"). */
	regulators-2 {
		compatible = "qcom,pm8005-rpmh-regulators";
		qcom,pmic-id = "c";

		vdd-s1-supply = <&vph_pwr>;
		vdd-s2-supply = <&vph_pwr>;
		vdd-s3-supply = <&vph_pwr>;
		vdd-s4-supply = <&vph_pwr>;

		vreg_s3c_0p6: smps3 {
			regulator-min-microvolt = <600000>;
			regulator-max-microvolt = <600000>;
		};
	};
};

/* Compute DSP remoteproc. */
&cdsp_pas {
	status = "okay";
	firmware-name = "qcom/sdm845/oneplus6/cdsp.mbn";
};

/* Clocks the OS must not touch (kept under firmware control). */
&gcc {
	protected-clocks = <GCC_QSPI_CORE_CLK>,
			   <GCC_QSPI_CORE_CLK_SRC>,
			   <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
			   <GCC_LPASS_Q6_AXI_CLK>,
			   <GCC_LPASS_SWAY_CLK>;
};
&gmu {
	status = "okay";
};

&gpu {
	status = "okay";

	zap-shader {
		memory-region = <&gpu_mem>;
		firmware-name = "qcom/sdm845/oneplus6/a630_zap.mbn";
	};
};

/* Battery fuel gauge. */
&i2c10 {
	status = "okay";
	clock-frequency = <100000>;

	bq27441_fg: bq27441-battery@55 {
		compatible = "ti,bq27411";
		status = "okay";
		reg = <0x55>;
	};
};

/* Synaptics RMI4 touchscreen. */
&i2c12 {
	status = "okay";
	clock-frequency = <400000>;

	synaptics-rmi4-i2c@20 {
		compatible = "syna,rmi4-i2c";
		reg = <0x20>;
		#address-cells = <1>;
		#size-cells = <0>;
		interrupts-extended = <&tlmm 125 IRQ_TYPE_EDGE_FALLING>;

		pinctrl-names = "default";
		pinctrl-0 = <&ts_default_pins>;

		vdd-supply = <&vreg_l28a_3p0>;
		vio-supply = <&ts_1p8_supply>;

		syna,reset-delay-ms = <200>;
		syna,startup-delay-ms = <200>;

		rmi4-f01@1 {
			reg = <0x01>;
			syna,nosleep-mode = <1>;
		};

		rmi4_f12: rmi4-f12@12 {
			reg = <0x12>;
			touchscreen-x-mm = <68>;
			touchscreen-y-mm = <144>;
			syna,sensor-type = <1>;
			syna,rezero-wait-ms = <200>;
		};
	};
};

&ipa {
	qcom,gsi-loader = "self";
	memory-region = <&ipa_fw_mem>;
	firmware-name = "qcom/sdm845/oneplus6/ipa_fws.mbn";
	status = "okay";
};
&mdss {
	status = "okay";
};

&mdss_dsi0 {
	status = "okay";
	vdda-supply = <&vdda_mipi_dsi0_1p2>;

	/*
	 * Both devices use different panels but all other properties
	 * are common. Compatible line is declared in device dts.
	 */
	display_panel: panel@0 {
		status = "disabled";
		reg = <0>;

		vddio-supply = <&vreg_l14a_1p88>;

		reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;

		pinctrl-names = "default";
		pinctrl-0 = <&panel_reset_pins &panel_te_pin &panel_esd_pin>;

		port {
			panel_in: endpoint {
				remote-endpoint = <&mdss_dsi0_out>;
			};
		};
	};
};

&mdss_dsi0_out {
	remote-endpoint = <&panel_in>;
	data-lanes = <0 1 2 3>;	/* 4-lane DSI */
};

&mdss_dsi0_phy {
	status = "okay";
	vdds-supply = <&vdda_mipi_dsi0_pll>;
};

/* Modem/wifi */
&mss_pil {
	status = "okay";
	firmware-name = "qcom/sdm845/oneplus6/mba.mbn", "qcom/sdm845/oneplus6/modem.mbn";
};
&pm8998_gpios {
	/* Volume keys: PMIC GPIOs 5/6 as pulled-up inputs. */
	volume_down_gpio: pm8998-gpio5-state {
		pinconf {
			pins = "gpio5";
			function = "normal";
			input-enable;
			bias-pull-up;
			qcom,drive-strength = <0>;
		};
	};

	volume_up_gpio: pm8998-gpio6-state {
		pinconf {
			pins = "gpio6";
			function = "normal";
			input-enable;
			bias-pull-up;
			qcom,drive-strength = <0>;
		};
	};
};

&pmi8998_charger {
	status = "okay";
};

/* Dual-emitter camera flash (white + yellow LEDs). */
&pmi8998_flash {
	status = "okay";

	led-0 {
		function = LED_FUNCTION_FLASH;
		color = <LED_COLOR_ID_WHITE>;
		led-sources = <1>;
		led-max-microamp = <500000>;
		flash-max-microamp = <1500000>;
		flash-max-timeout-us = <1280000>;
	};

	led-1 {
		function = LED_FUNCTION_FLASH;
		color = <LED_COLOR_ID_YELLOW>;
		led-sources = <2>;
		led-max-microamp = <500000>;
		flash-max-microamp = <1500000>;
		flash-max-timeout-us = <1280000>;
	};
};
/* Quaternary MI2S: RX on SD line 1, TX on SD line 0. */
&q6afedai {
	dai@22 {
		reg = <QUATERNARY_MI2S_RX>;
		qcom,sd-lines = <1>;
	};

	dai@23 {
		reg = <QUATERNARY_MI2S_TX>;
		qcom,sd-lines = <0>;
	};
};

/* Six ASM front-end DAIs backing the MultiMedia1-6 links of &sound. */
&q6asmdai {
	dai@0 {
		reg = <0>;
	};

	dai@1 {
		reg = <1>;
	};

	dai@2 {
		reg = <2>;
	};

	dai@3 {
		reg = <3>;
	};

	dai@4 {
		reg = <4>;
	};

	dai@5 {
		reg = <5>;
	};
};

&qupv3_id_1 {
	status = "okay";
};

&qupv3_id_0 {
	status = "okay";
};

&qup_i2c10_default {
	drive-strength = <2>;
	bias-disable;
};

&qup_i2c12_default {
	drive-strength = <2>;
	bias-disable;
};

&qup_uart9_rx {
	drive-strength = <2>;
	bias-pull-up;
};

&qup_uart9_tx {
	drive-strength = <2>;
	bias-disable;
};

/* Sensor DSP remoteproc. */
&slpi_pas {
	firmware-name = "qcom/sdm845/oneplus6/slpi.mbn";
	status = "okay";
};
/*
 * Sound card: six MultiMedia front-end links, a quaternary-MI2S speaker
 * link, and SLIMbus links to the WCD9340 codec.
 */
&sound {
	compatible = "qcom,sdm845-sndcard";
	pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active &quat_mi2s_sd1_active>;
	pinctrl-names = "default";
	status = "okay";

	mm1-dai-link {
		link-name = "MultiMedia1";
		cpu {
			sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
		};
	};

	mm2-dai-link {
		link-name = "MultiMedia2";
		cpu {
			sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
		};
	};

	mm3-dai-link {
		link-name = "MultiMedia3";
		cpu {
			sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
		};
	};

	mm4-dai-link {
		link-name = "MultiMedia4";
		cpu {
			sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA4>;
		};
	};

	mm5-dai-link {
		link-name = "MultiMedia5";
		cpu {
			sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA5>;
		};
	};

	mm6-dai-link {
		link-name = "MultiMedia6";
		cpu {
			sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA6>;
		};
	};

	speaker_playback_dai: speaker-dai-link {
		link-name = "Speaker Playback";
		cpu {
			sound-dai = <&q6afedai QUATERNARY_MI2S_RX>;
		};

		platform {
			sound-dai = <&q6routing>;
		};
	};

	slim-dai-link {
		link-name = "SLIM Playback 1";
		cpu {
			sound-dai = <&q6afedai SLIMBUS_0_RX>;
		};

		platform {
			sound-dai = <&q6routing>;
		};

		codec {
			sound-dai = <&wcd9340 0>;
		};
	};

	slimcap-dai-link {
		link-name = "SLIM Capture 1";
		cpu {
			sound-dai = <&q6afedai SLIMBUS_0_TX>;
		};

		platform {
			sound-dai = <&q6routing>;
		};

		codec {
			sound-dai = <&wcd9340 1>;
		};
	};

	slim2-dai-link {
		link-name = "SLIM Playback 2";
		cpu {
			sound-dai = <&q6afedai SLIMBUS_1_RX>;
		};

		platform {
			sound-dai = <&q6routing>;
		};

		codec {
			sound-dai = <&wcd9340 2>; /* AIF2_PB */
		};
	};

	slimcap2-dai-link {
		link-name = "SLIM Capture 2";
		cpu {
			sound-dai = <&q6afedai SLIMBUS_1_TX>;
		};

		platform {
			sound-dai = <&q6routing>;
		};

		codec {
			sound-dai = <&wcd9340 3>; /* AIF2_CAP */
		};
	};

	slimcap3-dai-link {
		link-name = "SLIM Capture 3";
		cpu {
			sound-dai = <&q6afedai SLIMBUS_2_TX>;
		};

		platform {
			sound-dai = <&q6routing>;
		};

		codec {
			sound-dai = <&wcd9340 5>; /* AIF3_CAP */
		};
	};
};
/* Bluetooth HCI UART. */
&uart6 {
	status = "okay";

	pinctrl-0 = <&qup_uart6_4pin>;

	bluetooth {
		compatible = "qcom,wcn3990-bt";

		/*
		 * This path is relative to the qca/
		 * subdir under lib/firmware.
		 */
		firmware-name = "oneplus6/crnv21.bin";
		vddio-supply = <&vreg_s4a_1p8>;
		vddxo-supply = <&vreg_l7a_1p8>;
		vddrf-supply = <&vreg_l17a_1p3>;
		vddch0-supply = <&vreg_l25a_3p3>;
		max-speed = <3200000>;
	};
};

&uart9 {
	label = "LS-UART1";
	status = "okay";
};

&ufs_mem_hc {
	status = "okay";

	reset-gpios = <&tlmm 150 GPIO_ACTIVE_LOW>;

	vcc-supply = <&vreg_l20a_2p95>;
	vcc-max-microamp = <600000>;
};

&ufs_mem_phy {
	status = "okay";

	vdda-phy-supply = <&vdda_ufs1_core>;
	vdda-pll-supply = <&vdda_ufs1_1p2>;
};

&usb_1 {
	status = "okay";

	/*
	 * disable USB3 clock requirement as the device only supports
	 * USB2.
	 */
	qcom,select-utmi-as-pipe-clk;
};

&usb_1_dwc3 {
	/*
	 * We don't have the capability to switch modes yet.
	 */
	dr_mode = "peripheral";

	/* fastest mode for USB 2 */
	maximum-speed = "high-speed";

	/* Remove USB3 phy as it's unused on this device. */
	phys = <&usb_1_hsphy>;
	phy-names = "usb2-phy";
};

&usb_1_hsphy {
	status = "okay";

	vdd-supply = <&vdda_usb1_ss_core>;
	vdda-pll-supply = <&vdda_qusb_hs0_1p8>;
	vdda-phy-dpdm-supply = <&vdda_qusb_hs0_3p1>;

	/* PHY tuning values */
	qcom,imp-res-offset-value = <8>;
	qcom,hstx-trim-value = <QUSB2_V2_HSTX_TRIM_21_6_MA>;
	qcom,preemphasis-level = <QUSB2_V2_PREEMPHASIS_5_PERCENT>;
	qcom,preemphasis-width = <QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT>;
};
&tlmm {
	/* Pins 0-3 and 81-84 are reserved (not usable by the OS). */
	gpio-reserved-ranges = <0 4>, <81 4>;

	hall_sensor_default: hall-sensor-default-state {
		pins = "gpio124";
		function = "gpio";
		drive-strength = <2>;
		bias-disable;
	};

	tri_state_key_default: tri-state-key-default-state {
		pins = "gpio40", "gpio42", "gpio26";
		function = "gpio";
		drive-strength = <2>;
		bias-disable;
	};

	ts_default_pins: ts-int-state {
		pins = "gpio99", "gpio125";
		function = "gpio";
		drive-strength = <16>;
		bias-pull-up;
	};

	panel_reset_pins: panel-reset-state {
		pins = "gpio6", "gpio25", "gpio26";
		function = "gpio";
		drive-strength = <8>;
		bias-disable;
	};

	panel_te_pin: panel-te-state {
		pins = "gpio10";
		function = "mdp_vsync";	/* tearing-effect signal */
		drive-strength = <2>;
		bias-disable;
	};

	panel_esd_pin: panel-esd-state {
		pins = "gpio30";
		function = "gpio";
		drive-strength = <2>;
		bias-pull-down;
	};

	speaker_default: speaker-default-state {
		pins = "gpio69";
		function = "gpio";
		drive-strength = <16>;
		bias-pull-up;
		output-high;
	};
};

/* Video encoder/decoder. */
&venus {
	status = "okay";
	firmware-name = "qcom/sdm845/oneplus6/venus.mbn";
};

/* WCD9340 audio codec; all supplies from the always-on 1.8 V rail. */
&wcd9340 {
	pinctrl-0 = <&wcd_intr_default>;
	pinctrl-names = "default";
	reset-gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>;
	vdd-buck-supply = <&vreg_s4a_1p8>;
	vdd-buck-sido-supply = <&vreg_s4a_1p8>;
	vdd-tx-supply = <&vreg_s4a_1p8>;
	vdd-rx-supply = <&vreg_s4a_1p8>;
	vdd-io-supply = <&vreg_s4a_1p8>;
};

&wifi {
	status = "okay";

	vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
	vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
	vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
	vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
	vdd-3.3-ch1-supply = <&vreg_l23a_3p3>;

	qcom,snoc-host-cap-8bit-quirk;
};
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <[email protected]>,
* Frank Pavlic <[email protected]>,
* Thomas Spatzier <[email protected]>,
* Frank Blaschka <[email protected]>
*/
#ifndef __QETH_L3_H__
#define __QETH_L3_H__
#include "qeth_core.h"
#include <linux/hashtable.h>
/* Flavours of L3 IP addresses tracked per card. */
enum qeth_ip_types {
	QETH_IP_TYPE_NORMAL,
	QETH_IP_TYPE_VIPA,	/* virtual IP */
	QETH_IP_TYPE_RXIP,
};

/* One registered IPv4/IPv6 address (unicast or multicast). */
struct qeth_ipaddr {
	struct hlist_node hnode;	/* hash-list linkage */
	enum qeth_ip_types type;
	u8 is_multicast:1;
	u8 disp_flag:2;			/* pending action, QETH_DISP_ADDR_* */
	u8 ipato:1;			/* ucast only */
	/* ref_counter is changed only for normal ip addresses;
	 * for non-normal addresses it always is 1
	 */
	int ref_counter;
	enum qeth_prot_versions proto;
	union {
		struct {
			__be32 addr;
			__be32 mask;
		} a4;
		struct {
			struct in6_addr addr;
			unsigned int pfxlen;
		} a6;
	} u;
};
/*
 * Zero-initialise @addr and tag it with @type/@proto.  The address starts
 * with a single reference and no pending display action.
 */
static inline void qeth_l3_init_ipaddr(struct qeth_ipaddr *addr,
				       enum qeth_ip_types type,
				       enum qeth_prot_versions proto)
{
	memset(addr, 0, sizeof(*addr));
	addr->type = type;
	addr->proto = proto;
	addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
	addr->ref_counter = 1;
}
/*
 * Protocol-aware comparison of the bare IP address of @a1 and @a2.
 * Type and mask/prefix length are deliberately ignored; see
 * qeth_l3_addr_match_all() for the exact match.
 */
static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
					 struct qeth_ipaddr *a2)
{
	if (a1->proto != a2->proto)
		return false;

	return (a1->proto == QETH_PROT_IPV6) ?
		ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr) :
		a1->u.a4.addr == a2->u.a4.addr;
}
/*
 * Full equality check for two addresses already known to match by IP:
 * additionally compares the address type and netmask/prefix length.
 */
static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
					  struct qeth_ipaddr *a2)
{
	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
	 * so 'proto' and 'addr' match for sure.
	 *
	 * For ucast:
	 * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
	 *   values are required to avoid mixups in takeover eligibility.
	 *
	 * For mcast,
	 * - 'mask'/'pfxlen' is always 0.
	 */
	if (a1->type != a2->type)
		return false;
	if (a1->proto == QETH_PROT_IPV6)
		return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
	return a1->u.a4.mask == a2->u.a4.mask;
}
/* Hash over the raw address, for use as an IP hash-table key. */
static inline u32 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
{
	return (addr->proto == QETH_PROT_IPV6) ?
		ipv6_addr_hash(&addr->u.a6.addr) :
		ipv4_addr_hash(addr->u.a4.addr);
}
/* One IP-address-takeover (IPATO) configuration entry. */
struct qeth_ipato_entry {
	struct list_head entry;
	enum qeth_prot_versions proto;
	char addr[16];			/* sized for IPv6; IPv4 uses 4 bytes */
	unsigned int mask_bits;
};

extern const struct attribute_group *qeth_l3_attr_groups[];

/* L3-discipline entry points implemented in qeth_l3_main.c/qeth_l3_sys.c */
int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
			     char *buf);
int qeth_l3_setrouting_v4(struct qeth_card *);
int qeth_l3_setrouting_v6(struct qeth_card *);
int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
int qeth_l3_del_ipato_entry(struct qeth_card *card,
			    enum qeth_prot_versions proto, u8 *addr,
			    unsigned int mask_bits);
void qeth_l3_update_ipato(struct qeth_card *card);
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add);
int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
			     enum qeth_ip_types type,
			     enum qeth_prot_versions proto);
#endif /* __QETH_L3_H__ */
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Aic94xx SAS/SATA driver sequencer interface.
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <[email protected]>
*
* Parts of this code adapted from David Chaw's adp94xx_seq.c.
*/
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include "aic94xx_reg.h"
#include "aic94xx_hwi.h"
#include "aic94xx_seq.h"
#include "aic94xx_dump.h"
/* It takes no more than 0.05 us for an instruction
 * to complete. So waiting for 1 us should be more than
 * plenty.
 */
#define PAUSE_DELAY 1		/* microseconds between pause/unpause polls */
#define PAUSE_TRIES 1000	/* max polls before giving up */

/* Sequencer firmware image and its parsed layout (entry vectors, code
 * pointers/sizes) -- set up elsewhere in this file from sequencer_fw. */
static const struct firmware *sequencer_fw;

static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
	cseq_idle_loop, lseq_idle_loop;
static const u8 *cseq_code, *lseq_code;
static u32 cseq_code_size, lseq_code_size;

/* Bounds of the built-in free SCB-site list, computed by
 * asd_init_scb_sites() and consumed by the scratch init below. */
static u16 first_scb_site_no = 0xFFFF;
static u16 last_scb_site_no;
/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */
/**
 * asd_pause_cseq - pause the central sequencer
 * @asd_ha: pointer to host adapter structure
 *
 * Sets EPAUSE in CARP2CTL and polls up to PAUSE_TRIES times for the
 * PAUSED bit to appear.
 *
 * Return 0 on success, negative on failure.
 */
static int asd_pause_cseq(struct asd_ha_struct *asd_ha)
{
	u32 ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
	int tries;

	if (ctl & PAUSED)
		return 0;	/* already paused */

	asd_write_reg_dword(asd_ha, CARP2CTL, ctl | EPAUSE);
	for (tries = PAUSE_TRIES; tries > 0; tries--) {
		if (asd_read_reg_dword(asd_ha, CARP2CTL) & PAUSED)
			return 0;
		udelay(PAUSE_DELAY);
	}

	ASD_DPRINTK("couldn't pause CSEQ\n");
	return -1;
}
/**
 * asd_unpause_cseq - unpause the central sequencer.
 * @asd_ha: pointer to host adapter structure.
 *
 * Clears EPAUSE in CARP2CTL and polls up to PAUSE_TRIES times for the
 * PAUSED bit to drop.
 *
 * Return 0 on success, negative on error.
 */
static int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
{
	u32 ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
	int tries;

	if (!(ctl & PAUSED))
		return 0;	/* already running */

	asd_write_reg_dword(asd_ha, CARP2CTL, ctl & ~EPAUSE);
	for (tries = PAUSE_TRIES; tries > 0; tries--) {
		if (!(asd_read_reg_dword(asd_ha, CARP2CTL) & PAUSED))
			return 0;
		udelay(PAUSE_DELAY);
	}

	ASD_DPRINTK("couldn't unpause the CSEQ\n");
	return -1;
}
/**
 * asd_seq_pause_lseq - pause a link sequencer
 * @asd_ha: pointer to a host adapter structure
 * @lseq: link sequencer of interest
 *
 * Sets EPAUSE in the LSEQ's ARP2CTL and polls up to PAUSE_TRIES times
 * for the PAUSED bit to appear.
 *
 * Return 0 on success, negative on error.
 */
static int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
	u32 ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
	int tries;

	if (ctl & PAUSED)
		return 0;	/* already paused */

	asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), ctl | EPAUSE);
	for (tries = PAUSE_TRIES; tries > 0; tries--) {
		if (asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)) & PAUSED)
			return 0;
		udelay(PAUSE_DELAY);
	}

	ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
	return -1;
}
/**
 * asd_pause_lseq - pause the link sequencer(s)
 * @asd_ha: pointer to host adapter structure
 * @lseq_mask: mask of link sequencers of interest
 *
 * Pauses each LSEQ selected by @lseq_mask, stopping at the first
 * failure.
 *
 * Return 0 on success, negative on failure.
 */
static int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
{
	int err = 0;
	int lseq;

	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
		err = asd_seq_pause_lseq(asd_ha, lseq);
		if (err)
			break;	/* report the first failing LSEQ */
	}
	return err;
}
/**
 * asd_seq_unpause_lseq - unpause a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
 *
 * Clears EPAUSE and polls up to PAUSE_TRIES times for the PAUSED bit
 * to drop.
 *
 * Return 0.  NOTE(review): unlike asd_seq_pause_lseq(), a timeout here
 * is only logged and the function still returns 0, so callers cannot
 * detect the failure.
 */
static int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
	u32 arp2ctl;
	int count = PAUSE_TRIES;

	arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
	if (!(arp2ctl & PAUSED))
		return 0;	/* already running */

	asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
	do {
		arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
		if (!(arp2ctl & PAUSED))
			return 0;
		udelay(PAUSE_DELAY);
	} while (--count > 0);

	ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
	return 0;
}
/* ---------- Downloading CSEQ/LSEQ microcode ---------- */
/**
 * asd_verify_cseq - verify downloaded CSEQ microcode
 * @asd_ha: pointer to host adapter structure
 * @_prog: expected microcode image (stream of little-endian 32-bit words)
 * @size: size of the image in bytes
 *
 * Reads the CSEQ RAM back dword by dword and compares it against the
 * downloaded image.
 *
 * Return 0 if the contents match, negative on mismatch.
 */
static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
			   u32 size)
{
	u32 addr = CSEQ_RAM_REG_BASE_ADR;
	const u32 *prog = (const u32 *) _prog;	/* don't cast away const */
	u32 i;

	for (i = 0; i < size; i += 4, prog++, addr += 4) {
		u32 val = asd_read_reg_dword(asd_ha, addr);

		if (le32_to_cpu(*prog) != val) {
			asd_printk("%s: cseq verify failed at %u "
				   "read:0x%x, wanted:0x%x\n",
				   pci_name(asd_ha->pcidev),
				   i, val, le32_to_cpu(*prog));
			return -1;
		}
	}
	ASD_DPRINTK("verified %d bytes, passed\n", size);
	return 0;
}
/**
 * asd_verify_lseq - verify the microcode of a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @_prog: pointer to the microcode
 * @size: size of the microcode in bytes
 * @lseq: link sequencer of interest
 *
 * The link sequencer code is accessed in 4 KB pages, which are selected
 * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
 * The 10 KB LSEQm instruction code is mapped, page at a time, at
 * LmSEQRAM address.
 *
 * Return 0 if the RAM contents match the image, negative on mismatch.
 */
static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
			   u32 size, int lseq)
{
#define LSEQ_CODEPAGE_SIZE 4096
	int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
	u32 page;
	const u32 *prog = (const u32 *) _prog;	/* don't cast away const */

	for (page = 0; page < pages; page++) {
		u32 i;

		/* Select which 4 KB page is mapped at LmSEQRAM. */
		asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
				    page << LmRAMPAGE_LSHIFT);
		for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
		     i += 4, prog++, size-=4) {

			u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);

			if (le32_to_cpu(*prog) != val) {
				asd_printk("%s: LSEQ%d verify failed "
					   "page:%d, offs:%d\n",
					   pci_name(asd_ha->pcidev),
					   lseq, page, i);
				return -1;
			}
		}
	}
	ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
		    (int)((const u8 *)prog-_prog));
	return 0;
}
/**
 * asd_verify_seq -- verify CSEQ/LSEQ microcode
 * @asd_ha: pointer to host adapter structure
 * @prog: pointer to microcode
 * @size: size of the microcode
 * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
 *
 * Return 0 if microcode is correct, negative on mismatch.
 */
static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
			  u32 size, u8 lseq_mask)
{
	int lseq, err;

	/* A zero mask means the central sequencer. */
	if (!lseq_mask)
		return asd_verify_cseq(asd_ha, prog, size);

	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
		err = asd_verify_lseq(asd_ha, prog, size, lseq);
		if (err)
			return err;
	}
	return 0;
}
#define ASD_DMA_MODE_DOWNLOAD
#ifdef ASD_DMA_MODE_DOWNLOAD
/* This is the size of the CSEQ Mapped instruction page */
#define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
/**
 * asd_download_seq - download microcode to the CSEQ or a set of LSEQs via DMA
 * @asd_ha: pointer to host adapter structure
 * @prog: microcode image to download
 * @size: size of the image in bytes; must be a multiple of 4
 * @lseq_mask: 0 to target the CSEQ, else bitmask of LSEQs to load
 *
 * Pauses all sequencers, saves and masks the chip interrupt enables,
 * then feeds the image through the overlay DMA engine in chunks of at
 * most MAX_DMA_OVLY_COUNT bytes.  The download is verified by reading
 * the sequencer RAM back.
 *
 * Return 0 on success, negative on error.
 */
static int asd_download_seq(struct asd_ha_struct *asd_ha,
			    const u8 * const prog, u32 size, u8 lseq_mask)
{
	u32 comstaten;
	u32 reg;
	int page;
	const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
	struct asd_dma_tok *token;
	int err = 0;

	if (size % 4) {
		asd_printk("sequencer program not multiple of 4\n");
		return -1;
	}

	asd_pause_cseq(asd_ha);
	asd_pause_lseq(asd_ha, 0xFF);

	/* save, disable and clear interrupts */
	comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
	asd_write_reg_dword(asd_ha, COMSTATEN, 0);
	asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);

	asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
	asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);

	/* Bounce buffer for one DMA chunk. */
	token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
	if (!token) {
		asd_printk("out of memory for dma SEQ download\n");
		err = -ENOMEM;
		goto out;
	}
	ASD_DPRINTK("dma-ing %d bytes\n", size);

	for (page = 0; page < pages; page++) {
		int i;
		u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
			       (u32)MAX_DMA_OVLY_COUNT);

		memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
		asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
		asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
		/* RESETOVLYDMA only for the first chunk. */
		reg = !page ? RESETOVLYDMA : 0;
		reg |= (STARTOVLYDMA | OVLYHALTERR);
		/* lseq_mask selects LSEQs via bits 8-15, else target CSEQ. */
		reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
		/* Start DMA. */
		asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

		/* Poll until the overlay DMA engine goes idle. */
		for (i = PAUSE_TRIES*100; i > 0; i--) {
			u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
			if (!(dmadone & OVLYDMAACT))
				break;
			udelay(PAUSE_DELAY);
		}
	}

	/* Check final DMA/exception status before declaring success. */
	reg = asd_read_reg_dword(asd_ha, COMSTAT);
	if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
	    || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){
		asd_printk("%s: error DMA-ing sequencer code\n",
			   pci_name(asd_ha->pcidev));
		err = -ENODEV;
	}

	asd_free_coherent(asd_ha, token);
 out:
	/* Restore the saved interrupt enables. */
	asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);

	return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
}
#else /* ASD_DMA_MODE_DOWNLOAD */
/**
 * asd_download_seq - download microcode to the CSEQ or a set of LSEQs via PIO
 * @asd_ha: pointer to host adapter structure
 * @_prog: microcode image to download
 * @size: size of the image in bytes; must be a multiple of 4
 * @lseq_mask: 0 to target the CSEQ, else bitmask of LSEQs to load
 *
 * PIO-mode alternative to the DMA download (selected when
 * ASD_DMA_MODE_DOWNLOAD is not defined): the image is written dword by
 * dword through SPIODATA, then verified by reading it back.
 *
 * Return 0 on success, negative on error.
 */
static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
			    u32 size, u8 lseq_mask)
{
	int i;
	u32 reg = 0;
	const u32 *prog = (const u32 *) _prog;	/* don't cast away const */

	if (size % 4) {
		asd_printk("sequencer program not multiple of 4\n");
		return -1;
	}

	asd_pause_cseq(asd_ha);
	asd_pause_lseq(asd_ha, 0xFF);

	/* lseq_mask selects LSEQs via bits 8-15, else target the CSEQ. */
	reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
	reg |= PIOCMODE;

	asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
	asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

	ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
		    lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");

	for (i = 0; i < size; i += 4, prog++)
		asd_write_reg_dword(asd_ha, SPIODATA, *prog);

	reg = (reg & ~PIOCMODE) | OVLYHALTERR;
	asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

	return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
}
#endif /* ASD_DMA_MODE_DOWNLOAD */
/**
 * asd_seq_download_seqs - download the sequencer microcode
 * @asd_ha: pointer to host adapter structure
 *
 * Download the central sequencer microcode, then the link sequencer
 * microcode for all enabled phys.  If the bulk LSEQ download fails,
 * retry the LSEQs one at a time.
 */
static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
{
	u8 lseq_mask;
	int err;

	if (!asd_ha->hw_prof.enabled_phys) {
		asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
		return -ENODEV;
	}

	/* Central sequencer first. */
	ASD_DPRINTK("downloading CSEQ...\n");
	err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
	if (err) {
		asd_printk("CSEQ download failed:%d\n", err);
		return err;
	}

	/* All enabled link sequencers share one image and can be
	 * loaded together. */
	ASD_DPRINTK("downloading LSEQs...\n");
	lseq_mask = asd_ha->hw_prof.enabled_phys;
	err = asd_download_seq(asd_ha, lseq_code, lseq_code_size, lseq_mask);
	if (err) {
		u8 lseq;

		/* Bulk download failed -- retry one LSEQ at a time. */
		for_each_sequencer(lseq_mask, lseq_mask, lseq) {
			err = asd_download_seq(asd_ha, lseq_code,
					       lseq_code_size, 1<<lseq);
			if (err)
				break;
		}
	}
	if (err)
		asd_printk("LSEQs download failed:%d\n", err);

	return err;
}
/* ---------- Initializing the chip, chip memory, etc. ---------- */
/**
 * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
 * @asd_ha: pointer to host adapter structure
 *
 * Writes the power-on defaults into the central sequencer's
 * mode-independent scratch pages.  Queue head/tail words are set to
 * 0xFFFF, which appears to serve as the "empty/invalid" sentinel
 * throughout this scratch layout.
 */
static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
{
	/* CSEQ Mode Independent, page 4 setup. */
	asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
	asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
	asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
	asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
	{
		/* Count the set bits in CCONEXIST (presumably the number
		 * of existing connections) and pack the count into both
		 * nibbles of MAX_CSEQ_MODE. */
		u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
		u8 val = hweight8(con);
		asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
	}
	asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);

	/* CSEQ Mode independent, page 5 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);

	/* CSEQ Mode independent, page 6 setup. */
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
	asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
	asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
	asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
	asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
	asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
	/* Calculate the free scb mask. */
	{
		u16 cmdctx = asd_get_cmdctx_size(asd_ha);
		cmdctx = (~((cmdctx/128)-1)) >> 8;
		asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
	}
	/* Head/tail of the built-in free SCB list, computed earlier by
	 * asd_init_scb_sites(). */
	asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
			   first_scb_site_no);
	asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
			   last_scb_site_no);
	asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);

	/* CSEQ Mode independent, page 7 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
	asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
	asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
	asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
	asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
	asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
}
/**
 * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
 * @asd_ha: pointer to host adapter structure
 *
 * Invalidates the per-mode queue links for modes 0-7, then sets up the
 * mode 8 scratch pages, including the host-queue pointers handed to
 * the sequencer (first SCB and Done List bus addresses).
 */
static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
{
	int i;
	int moffs;

	/* Each mode's page-0 scratch is two CSEQ pages apart. */
	moffs = CSEQ_PAGE_SIZE * 2;

	/* CSEQ Mode dependent, modes 0-7, page 0 setup. */
	for (i = 0; i < 8; i++) {
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
		asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
		asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
	}

	/* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */

	/* CSEQ Mode dependent, mode 8, page 0 setup. */
	asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
	asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
	asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
	asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
	asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
	asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
	asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
	asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
	asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
	asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
	/* First invalid SCB/DDB site = one past the last valid one. */
	asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
			   (u16)last_scb_site_no+1);
	asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
			   (u16)asd_ha->hw_prof.max_ddbs);

	/* CSEQ Mode dependent, mode 8, page 1 setup. */
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
	asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);

	/* CSEQ Mode dependent, mode 8, page 2 setup. */
	/* Tell the sequencer the bus address of the first SCB. */
	asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
			   asd_ha->seq.next_scb.dma_handle);
	ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
		    (unsigned long long)asd_ha->seq.next_scb.dma_handle);
	/* Tell the sequencer the first Done List entry address. */
	asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
			   asd_ha->seq.actual_dl->dma_handle);
	/* Initialize the Q_DONE_POINTER with the least significant
	 * 4 bytes of the first Done List address. */
	asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
			    ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));
	asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);

	/* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
}
/**
 * asd_init_cseq_scratch - initialize the CSEQ scratch memory
 * @asd_ha: pointer to host adapter structure
 *
 * Brings the central sequencer scratch RAM to its default state:
 * first the mode-independent pages, then the mode-dependent ones.
 */
static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
{
	asd_init_cseq_mip(asd_ha);
	asd_init_cseq_mdp(asd_ha);
}
/**
 * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer
 *
 * Writes the power-on defaults into one link sequencer's
 * mode-independent scratch pages, including the various hardware
 * timer timeout constants on page 3.
 */
static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
{
	int i;

	/* LSEQ Mode independent page 0 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
	asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
			   ASD_NOTIFY_ENABLE_SPINUP);
	/* 0x08000000: initial connection-state value -- exact bit
	 * meaning not visible in this file. */
	asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000);
	asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);

	/* LSEQ Mode independent page 1 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);

	/* LSEQ Mode Independent page 2 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
	/* Clear the three 4-byte ATA SCR register slots. */
	for (i = 0; i < 12; i += 4)
		asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);

	/* LSEQ Mode Independent page 3 setup. */

	/* Device present timer timeout */
	asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
			    ASD_DEV_PRESENT_TIMEOUT);

	/* SATA interlock timer disabled */
	asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
			    ASD_SATA_INTERLOCK_TIMEOUT);

	/* STP shutdown timer timeout constant, IGNORED by the sequencer,
	 * always 0. */
	asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
			    ASD_STP_SHUTDOWN_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
			    ASD_SRST_ASSERT_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
			    ASD_RCV_FIS_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
			    ASD_ONE_MILLISEC_TIMEOUT);

	/* COM_INIT timer */
	asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
			    ASD_TEN_MILLISEC_TIMEOUT);

	asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
			    ASD_SMP_RCV_TIMEOUT);
}
/**
 * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer
 *
 * Sets up the per-mode scratch pages of one link sequencer: the common
 * page-0 header of modes 0/1/2 and 5, then the mode-specific fields on
 * pages 0-2, including the NOTIFY (ENABLE SPINUP) timer values.
 */
static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha,  int lseq)
{
	int    i;
	u32    moffs;
	/* Per-mode return addresses; only mode 2 starts at a real task
	 * entry point (parsed from the firmware header). */
	u16 ret_addr[] = {
		0xFFFF,		  /* mode 0 */
		0xFFFF,		  /* mode 1 */
		mode2_task,	  /* mode 2 */
		0,		  /* mode 3 */
		0xFFFF,		  /* mode 4/5 */
		0xFFFF,		  /* mode 4/5 */
	};

	/*
	 * Mode 0,1,2 and 4/5 have common field on page 0 for the first
	 * 14 bytes.
	 */
	for (i = 0; i < 3; i++) {
		moffs = i * LSEQ_MODE_SCRATCH_SIZE;
		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
				   ret_addr[i]);
		asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
		asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF);
		asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF);
		asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0);
		asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0);
	}
	/*
	 * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
	 */
	asd_write_reg_word(asd_ha,
			   LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
			   ret_addr[5]);
	asd_write_reg_word(asd_ha,
			   LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
	asd_write_reg_word(asd_ha,
			   LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
	asd_write_reg_word(asd_ha,
			   LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
	asd_write_reg_word(asd_ha,
			   LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
	asd_write_reg_byte(asd_ha,
			   LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
	asd_write_reg_word(asd_ha,
			   LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);

	/* LSEQ Mode dependent 0, page 0 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
			   (u16)asd_ha->hw_prof.max_ddbs);
	asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
			   (u16)last_scb_site_no+1);
	/* Interrupt-enable save area: high word first, then low word. */
	asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
			   (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16));
	asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
			   (u16) LmM0INTEN_MASK & 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);

	/* LSEQ mode dependent, mode 1, page 0 setup. */
	asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
	asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);

	/* LSEQ Mode dependent mode 2, page 0 setup */
	asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);

	/* LSEQ Mode dependent, mode 4/5, page 0 setup. */
	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
	/*
	 * Set the desired interval between transmissions of the NOTIFY
	 * (ENABLE SPINUP) primitive.  Must be initialized to val - 1.
	 */
	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
			   ASD_NOTIFY_TIMEOUT - 1);
	/* No delay for the first NOTIFY to be sent to the attached target. */
	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
			   ASD_NOTIFY_DOWN_COUNT);
	asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq),
			   ASD_NOTIFY_DOWN_COUNT);

	/* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
	for (i = 0; i < 2; i++)	{
		int j;
		/* Start from Page 1 of Mode 0 and 1. */
		moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
		/* All the fields of page 1 can be initialized to 0. */
		for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
			asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
	}

	/* LSEQ Mode dependent, mode 2, page 1 setup. */
	asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);

	/* LSEQ Mode dependent, mode 4/5, page 1. */
	/* Zero the whole page, then set the frame/address match masks. */
	for (i = 0; i < LSEQ_PAGE_SIZE; i+=4)
		asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
	asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
	asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
	asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);

	/* LSEQ Mode dependent, mode 0, page 2 setup. */
	asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
	asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
	asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);

	/* LSEQ Mode Dependent 1, page 2 setup. */
	asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
	asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);

	/* LSEQ Mode Dependent 2, page 2 setup. */
	/* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
	 * i.e. always 0. */
	asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0);
	asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0);
	asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);

	/* LSEQ Mode Dependent 4/5, page 2 setup. */
	asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
	asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq),	0);
}
/**
 * asd_init_lseq_scratch -- initialize scratch RAM of all enabled LSEQs
 * @asd_ha: pointer to host adapter struct
 *
 * For each phy enabled in the hardware profile, set up that link
 * sequencer's mode-independent and mode-dependent scratch pages.
 */
static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
{
	u8 lseq_mask = asd_ha->hw_prof.enabled_phys;
	u8 lseq;

	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
		asd_init_lseq_mip(asd_ha, lseq);
		asd_init_lseq_mdp(asd_ha, lseq);
	}
}
/**
 * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
 * @asd_ha: pointer to host adapter structure
 *
 * This should be done before initializing common CSEQ and LSEQ
 * scratch since those areas depend on some computed values here,
 * last_scb_site_no, etc.
 *
 * Walks the sites from the highest number down to 0, wiping each one
 * and chaining the usable sites into a free list through their Q_NEXT
 * (first) field.  On return, first_scb_site_no holds the head (lowest
 * valid site) and last_scb_site_no the tail (highest valid site), and
 * hw_prof.max_scbs is trimmed to the number of usable sites.
 */
static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
{
	u16	site_no;
	u16 max_scbs = 0;

	for (site_no = asd_ha->hw_prof.max_scbs-1;
	     site_no != (u16) -1;
	     site_no--) {
		u16	i;

		/* Initialize all fields in the SCB site to 0. */
		for (i = 0; i < ASD_SCB_SIZE; i += 4)
			asd_scbsite_write_dword(asd_ha, site_no, i, 0);

		/* Initialize SCB Site Opcode field to invalid. */
		asd_scbsite_write_byte(asd_ha, site_no,
				       offsetof(struct scb_header, opcode),
				       0xFF);

		/* Initialize SCB Site Flags field to mean a response
		 * frame has been received.  This means inadvertent
		 * frames received to be dropped. */
		asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);

		/* Workaround needed by SEQ to fix a SATA issue is to exclude
		 * certain SCB sites from the free list. */
		if (!SCB_SITE_VALID(site_no))
			continue;

		/* Highest valid site seen becomes the list tail. */
		if (last_scb_site_no == 0)
			last_scb_site_no = site_no;

		/* For every SCB site, we need to initialize the
		 * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
		 * and SG Element Flag. */

		/* Q_NEXT field of the last SCB is invalidated. */
		asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);

		first_scb_site_no = site_no;
		max_scbs++;
	}
	asd_ha->hw_prof.max_scbs = max_scbs;
	ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
	ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
	ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
}
/**
 * asd_init_cseq_cio - initialize CSEQ CIO registers
 * @asd_ha: pointer to host adapter structure
 *
 * Programs the central sequencer's CIO register space: done-list
 * control, interrupt vectors, scratch page selection, per-mode
 * interrupt enables/mailboxes, and the idle-loop program counter.
 */
static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
{
	int i;

	asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
	asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
	asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
	asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
	asd_ha->seq.scbpro = 0;
	asd_write_reg_dword(asd_ha, SCBPRO, 0);
	asd_write_reg_dword(asd_ha, CSEQCON, 0);

	/* Initialize CSEQ Mode 11 Interrupt Vectors.
	 * The addresses are 16 bit wide and in dword units.
	 * The values of their macros are in byte units.
	 * Thus we have to divide by 4. */
	asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
	asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
	asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);

	/* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
	asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);

	/* Initialize CSEQ Scratch Page to 0x04. */
	asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);

	/* Initialize CSEQ Mode[0-8] Dependent registers. */
	/* Initialize Scratch Page to 0. */
	for (i = 0; i < 9; i++)
		asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);

	/* Reset the ARP2 Program Count. */
	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);

	for (i = 0; i < 8; i++) {
		/* Initialize Mode n Link m Interrupt Enable. */
		asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
		/* Initialize Mode n Request Mailbox. */
		asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
	}
}
/**
 * asd_init_lseq_cio -- initialize LmSEQ CIO registers
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer (phy) index
 *
 * Brings one link sequencer's CIO space to its initial state: enables
 * and acknowledges the per-mode interrupts, programs transfer levels
 * and timers, installs the phy's SAS address and the Mode 3 interrupt
 * vectors, and configures the link LED and ALIGN rates.  The write
 * sequence mirrors the hardware initialization order; do not reorder.
 */
static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
{
        u8  *sas_addr;
        int  i;

        /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
        asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);

        asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);

        /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
        for (i = 0; i < 3; i++)
                asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);

        /* Initialize Mode 5 SCRATCHPAGE to 0. */
        asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);
        asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);

        /* Initialize Mode 0,1,2 and 5 Interrupt Enable and
         * Interrupt registers (writing all-ones acknowledges any
         * pending status). */
        asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
        asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
        /* Mode 1 */
        asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
        asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
        /* Mode 2 */
        asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
        asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
        /* Mode 5 */
        asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
        asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);

        /* Enable HW Timer status. */
        asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);

        /* Enable Primitive Status 0 and 1. */
        asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
        asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);

        /* Enable Frame Error. */
        asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
        asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);

        /* Initialize Mode 0 Transfer Level to 512. */
        asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
        /* Initialize Mode 1 Transfer Level to 256. */
        asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);

        /* Point the program counter at the LSEQ idle loop. */
        asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);

        /* Enable Blind SG Move. */
        asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
        asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
                           ASD_SATA_INTERLOCK_TIMEOUT);

        /* Dummy read to drain the request mailbox; value discarded. */
        (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));

        /* Clear Primitive Status 0 and 1. */
        asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
        asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);

        /* Clear HW Timer status. */
        asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);

        /* Clear DMA Errors for Mode 0 and 1. */
        asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
        asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);

        /* Clear SG DMA Errors for Mode 0 and 1. */
        asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
        asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);

        /* Clear Mode 0 Buffer Parity Error. */
        asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);

        /* Clear Mode 0 Frame Error register. */
        asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);

        /* Reset LSEQ external interrupt arbiter. */
        asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);

        /* Set the Phy SAS address for the LmSEQ WWN, one byte at a time. */
        sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
        for (i = 0; i < SAS_ADDR_SIZE; i++)
                asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);

        /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
        asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);

        /* Set the Bus Inactivity Time Limit Timer. */
        asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);

        /* Enable SATA Port Multiplier. */
        asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);

        /* Initialize Interrupt Vector[0-10] address in Mode 3.
         * See the comment on CM11INTVEC* in asd_init_cseq_cio(). */
        asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
        asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
        asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
        asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
        asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
        asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
        asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
        asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
        asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
        asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
        asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);

        /*
         * Program the Link LED control, applicable only for
         * Chip Rev. B or later.
         */
        asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
                            (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));

        /* Set the Align Rate for SAS and STP mode. */
        asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
        asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
}
/**
 * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
 * @asd_ha: pointer to host adapter struct
 *
 * Acknowledges any pending Mode 0-7 interrupt status, drains the
 * response mailboxes (values discarded) and resets the external
 * interrupt arbiter.
 */
static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
{
        int mode;

        /* Writing all-ones acknowledges any pending Mode n interrupts. */
        for (mode = 0; mode < 8; mode++)
                asd_write_reg_dword(asd_ha, CMnINT(mode), 0xFFFFFFFF);

        /* Drain the Mode n response mailboxes; the values are discarded. */
        for (mode = 0; mode < 8; mode++)
                asd_read_reg_dword(asd_ha, CMnRSPMBX(mode));

        /* Reset the external interrupt arbiter. */
        asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
}
/**
 * asd_init_ddb_0 -- initialize DDB 0
 * @asd_ha: pointer to host adapter structure
 *
 * Initialize DDB site 0 which is used internally by the sequencer.
 * It holds the free/used DDB queues, establish-nexus buffer
 * accounting and the per-phy state shared with the firmware.
 */
static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
{
        int i;

        /* Zero out the DDB explicitly */
        for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4)
                asd_ddbsite_write_dword(asd_ha, 0, i, 0);

        /* Free-DDB queue covers every site after this reserved one. */
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
                               asd_ha->hw_prof.max_ddbs-1);
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
        /* 0xFFFF marks the used-DDB queue as empty. */
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
        /* Threshold: two establish-nexus buffers per phy. */
        asd_ddbsite_write_word(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
                               asd_ha->hw_prof.num_phys * 2);
        asd_ddbsite_write_byte(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0);
        asd_ddbsite_write_byte(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
        /* No phy is up yet; asd_update_port_links() sets bits later. */
        asd_ddbsite_write_byte(asd_ha, 0,
                 offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
        /* DDB 0 is reserved */
        set_bit(0, asd_ha->hw_prof.ddb_bitmap);
}
/* Zero every DDB site, one dword at a time, before any are handed out. */
static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha)
{
        unsigned int site;
        unsigned int off;

        for (site = 0; site < ASD_MAX_DDBS; site++)
                for (off = 0;
                     off < sizeof(struct asd_ddb_ssp_smp_target_port);
                     off += 4)
                        asd_ddbsite_write_dword(asd_ha, site, off, 0);
}
/**
 * asd_seq_setup_seqs -- setup and initialize central and link sequencers
 * @asd_ha: pointer to host adapter structure
 *
 * Runs the full post-download initialization in dependency order:
 * DDB sites, SCB sites (which compute values later steps depend on),
 * CSEQ/LSEQ scratch RAM, CIO registers and finally the post-init
 * interrupt acknowledgement.  Only phys enabled in the hardware
 * profile get their link sequencer initialized.
 */
static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
{
        int             lseq;
        u8              lseq_mask;

        /* Initialize DDB sites */
        asd_seq_init_ddb_sites(asd_ha);

        /* Initialize SCB sites. Done first to compute some values which
         * the rest of the init code depends on. */
        asd_init_scb_sites(asd_ha);

        /* Initialize CSEQ Scratch RAM registers. */
        asd_init_cseq_scratch(asd_ha);

        /* Initialize LmSEQ Scratch RAM registers. */
        asd_init_lseq_scratch(asd_ha);

        /* Initialize CSEQ CIO registers. */
        asd_init_cseq_cio(asd_ha);

        asd_init_ddb_0(asd_ha);

        /* Initialize LmSEQ CIO registers (enabled phys only). */
        lseq_mask = asd_ha->hw_prof.enabled_phys;
        for_each_sequencer(lseq_mask, lseq_mask, lseq)
                asd_init_lseq_cio(asd_ha, lseq);
        asd_post_init_cseq(asd_ha);
}
/**
 * asd_seq_start_cseq -- start the central sequencer, CSEQ
 * @asd_ha: pointer to host adapter structure
 *
 * Returns 0 on success, else the error from asd_unpause_cseq().
 */
static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
{
        /* Point the ARP2 program counter at the idle loop entry. */
        asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);

        /* Unpause the CSEQ  */
        return asd_unpause_cseq(asd_ha);
}
/**
 * asd_seq_start_lseq -- start a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @lseq: the link sequencer of interest
 *
 * Returns 0 on success, else the error from asd_seq_unpause_lseq().
 */
static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
        /* Point the ARP2 program counter at the LSEQ idle loop entry. */
        asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);

        /* Unpause the LmSEQ  */
        return asd_seq_unpause_lseq(asd_ha, lseq);
}
/* Release the cached sequencer firmware image, if one was loaded
 * (release_firmware() accepts a NULL pointer).  Always returns 0. */
int asd_release_firmware(void)
{
	release_firmware(sequencer_fw);
	return 0;
}
/**
 * asd_request_firmware -- load and validate the sequencer firmware image
 * @asd_ha: pointer to host adapter structure
 *
 * Loads SAS_RAZOR_SEQUENCER_FW_FILE (once; subsequent calls are no-ops),
 * verifies its checksum, table sizes and major version, then caches the
 * parsed vector tables, code pointers and idle-loop entry points in the
 * module-level variables used by the init/start paths.
 *
 * Fix over the previous version: on any validation failure the firmware
 * image is released and the cached pointer cleared.  Previously the bad
 * image stayed cached, leaking it and making every later call return 0
 * ("already loaded") despite the data being unusable.
 *
 * Returns 0 on success, -EINVAL on a bad image, or the error from
 * request_firmware().
 */
static int asd_request_firmware(struct asd_ha_struct *asd_ha)
{
        int err, i;
        struct sequencer_file_header header;
        const struct sequencer_file_header *hdr_ptr;
        u32 csum = 0;
        u16 *ptr_cseq_vecs, *ptr_lseq_vecs;

        if (sequencer_fw)
                /* already loaded */
                return 0;

        err = request_firmware(&sequencer_fw,
                               SAS_RAZOR_SEQUENCER_FW_FILE,
                               &asd_ha->pcidev->dev);
        if (err)
                return err;

        hdr_ptr = (const struct sequencer_file_header *)sequencer_fw->data;

        /* Byte-swap the little-endian on-disk header into host order. */
        header.csum = le32_to_cpu(hdr_ptr->csum);
        header.major = le32_to_cpu(hdr_ptr->major);
        header.minor = le32_to_cpu(hdr_ptr->minor);
        header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
        header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
        header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
        header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
        header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
        header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
        header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
        header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
        header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
        header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
        header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);

        /* Checksum covers everything after the csum field itself. */
        for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
                csum += sequencer_fw->data[i];

        err = -EINVAL;
        if (csum != header.csum) {
                asd_printk("Firmware file checksum mismatch\n");
                goto out_release;
        }

        if (header.cseq_table_size != CSEQ_NUM_VECS ||
            header.lseq_table_size != LSEQ_NUM_VECS) {
                asd_printk("Firmware file table size mismatch\n");
                goto out_release;
        }

        asd_printk("Found sequencer Firmware version %d.%d (%s)\n",
                   header.major, header.minor, hdr_ptr->version);

        if (header.major != SAS_RAZOR_SEQUENCER_FW_MAJOR) {
                asd_printk("Firmware Major Version Mismatch;"
                           "driver requires version %d.X",
                           SAS_RAZOR_SEQUENCER_FW_MAJOR);
                goto out_release;
        }

        ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
        ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
        mode2_task = header.mode2_task;
        cseq_idle_loop = header.cseq_idle_loop;
        lseq_idle_loop = header.lseq_idle_loop;

        for (i = 0; i < CSEQ_NUM_VECS; i++)
                cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);

        for (i = 0; i < LSEQ_NUM_VECS; i++)
                lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);

        cseq_code = &sequencer_fw->data[header.cseq_code_offset];
        cseq_code_size = header.cseq_code_size;
        lseq_code = &sequencer_fw->data[header.lseq_code_offset];
        lseq_code_size = header.lseq_code_size;

        return 0;

out_release:
        /* Drop the bad image so a later retry reloads from scratch
         * instead of short-circuiting on the cached pointer. */
        release_firmware(sequencer_fw);
        sequencer_fw = NULL;
        return err;
}
/* Load/validate the firmware, download it to the sequencers and run the
 * full post-download setup.  Returns 0 on success or a negative errno
 * from the firmware-load or download step. */
int asd_init_seqs(struct asd_ha_struct *asd_ha)
{
	int err;

	err = asd_request_firmware(asd_ha);

	if (err) {
		asd_printk("Failed to load sequencer firmware file %s, error %d\n",
			   SAS_RAZOR_SEQUENCER_FW_FILE, err);
		return err;
	}

	err = asd_seq_download_seqs(asd_ha);
	if (err) {
		asd_printk("couldn't download sequencers for %s\n",
			   pci_name(asd_ha->pcidev));
		return err;
	}

	/* Setup cannot fail once the download succeeded. */
	asd_seq_setup_seqs(asd_ha);

	return 0;
}
/* Unpause the central sequencer, then each enabled link sequencer.
 * Stops at the first failure and returns its error; 0 on success. */
int asd_start_seqs(struct asd_ha_struct *asd_ha)
{
	int err;
	u8  lseq_mask;
	int lseq;

	err = asd_seq_start_cseq(asd_ha);
	if (err) {
		asd_printk("couldn't start CSEQ for %s\n",
			   pci_name(asd_ha->pcidev));
		return err;
	}

	/* Only phys enabled in the hardware profile are started. */
	lseq_mask = asd_ha->hw_prof.enabled_phys;
	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
		err = asd_seq_start_lseq(asd_ha, lseq);
		if (err) {
			asd_printk("couldn't start LSEQ %d for %s\n", lseq,
				   pci_name(asd_ha->pcidev));
			return err;
		}
	}

	return 0;
}
/**
 * asd_update_port_links -- update port_map_by_links and phy_is_up
 * @asd_ha: pointer to host adapter structure
 * @phy: pointer to the phy which has been added to a port
 *
 * 1) When a link reset has completed and we got BYTES DMAED with a
 * valid frame we call this function for that phy, to indicate that
 * the phy is up, i.e. we update the phy_is_up in DDB 0.  The
 * sequencer checks phy_is_up when pending SCBs are to be sent, and
 * when an open address frame has been received.
 *
 * 2) When we know of ports, we call this function to update the map
 * of phys participating in that port, i.e. we update the
 * port_map_by_links in DDB 0.  When a HARD_RESET primitive has been
 * received, the sequencer disables all phys in that port.
 * port_map_by_links is also used as the conn_mask byte in the
 * initiator/target port DDB.
 */
void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
{
	const u8 phy_mask = (u8) phy->asd_port->phy_mask;
	u8  phy_is_up;
	u8  mask;
	int i, err;
	unsigned long flags;

	spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
	/* Record the port's phy membership for every phy in its mask. */
	for_each_phy(phy_mask, mask, i)
		asd_ddbsite_write_byte(asd_ha, 0,
				       offsetof(struct asd_ddb_seq_shared,
						port_map_by_links)+i,phy_mask);

	/* OR this port's mask into phy_is_up with a compare-and-update;
	 * retry up to 12 times on contention, bail out on a parity error. */
	for (i = 0; i < 12; i++) {
		phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
			  offsetof(struct asd_ddb_seq_shared, phy_is_up));
		err = asd_ddbsite_update_byte(asd_ha, 0,
				offsetof(struct asd_ddb_seq_shared, phy_is_up),
				phy_is_up,
				phy_is_up | phy_mask);
		if (!err)
			break;
		else if (err == -EFAULT) {
			asd_printk("phy_is_up: parity error in DDB 0\n");
			break;
		}
	}
	spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);

	if (err)
		asd_printk("couldn't update DDB 0:error:%d\n", err);
}
MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Red Hat GmbH
*
* This file is released under the GPL.
*
* Device-mapper target to emulate smaller logical block
* size on backing devices exposing (natively) larger ones.
*
* E.g. 512 byte sector emulation on 4K native disks.
*/
#include "dm.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/dm-bufio.h>
#define DM_MSG_PREFIX "ebs"
/* Forward declaration: the constructor uses ebs_dtr() for cleanup. */
static void ebs_dtr(struct dm_target *ti);

/* Emulated block size context (one per target instance). */
struct ebs_c {
	struct dm_dev *dev;		/* Underlying device to emulate block size on. */
	struct dm_bufio_client *bufio;	/* Use dm-bufio for read and read-modify-write processing. */
	struct workqueue_struct *wq;	/* Workqueue for ^ processing of bios. */
	struct work_struct ws;		/* Work item used for ^. */
	struct bio_list bios_in;	/* Worker bios input list. */
	spinlock_t lock;		/* Guard bios input list above. */
	sector_t start;			/* <start> table line argument, see ebs_ctr below. */
	unsigned int e_bs;		/* Emulated block size in sectors exposed to upper layer. */
	unsigned int u_bs;		/* Underlying block size in sectors retrieved from/set on lower layer device. */
	unsigned char block_shift;	/* bitshift sectors -> blocks used in dm-bufio API. */
	bool u_bs_set:1;		/* Flag to indicate underlying block size is set on table line. */
};
/* Convert a 512-byte sector number to an underlying-block number. */
static inline sector_t __sector_to_block(struct ebs_c *ec, sector_t sector)
{
	unsigned char shift = ec->block_shift;

	return sector >> shift;
}
/* sector % bs for a power-of-two bs, computed by masking. */
static inline sector_t __block_mod(sector_t sector, unsigned int bs)
{
	sector_t mask = bs - 1;

	return sector & mask;
}
/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
{
	sector_t end = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
	unsigned int nr = __sector_to_block(ec, end);

	/* A partially covered trailing block still counts. */
	if (__block_mod(end, ec->u_bs))
		nr++;

	return nr;
}
/* A block size (in sectors) is valid iff it is a non-zero power of two. */
static inline bool __ebs_check_bs(unsigned int bs)
{
	return bs != 0 && (bs & (bs - 1)) == 0;
}
/*
 * READ/WRITE:
 *
 * copy blocks between bufio blocks and bio vector's (partial/overlapping) pages.
 *
 * Walks the bio vector one underlying block at a time starting at
 * iter->bi_sector; reads (or, for fully-overwritten blocks, allocates)
 * the corresponding bufio buffer and copies in the requested direction.
 * On buffer errors it keeps going so all possible data is transferred,
 * but returns the last error seen.
 */
static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,
			 struct bvec_iter *iter)
{
	int r = 0;
	unsigned char *ba, *pa;
	unsigned int cur_len;
	unsigned int bv_len = bv->bv_len;
	/* Byte offset of the I/O start within the first underlying block. */
	unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));
	sector_t block = __sector_to_block(ec, iter->bi_sector);
	struct dm_buffer *b;

	if (unlikely(!bv->bv_page || !bv_len))
		return -EIO;

	pa = bvec_virt(bv);

	/* Handle overlapping page <-> blocks */
	while (bv_len) {
		cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);

		/* Avoid reading for writes in case bio vector's page overwrites block completely. */
		if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
			ba = dm_bufio_read(ec->bufio, block, &b);
		else
			ba = dm_bufio_new(ec->bufio, block, &b);

		if (IS_ERR(ba)) {
			/*
			 * Carry on with next buffer, if any, to issue all possible
			 * data but return error.
			 */
			r = PTR_ERR(ba);
		} else {
			/* Copy data to/from bio to buffer if read/new was successful above. */
			ba += buf_off;
			if (op == REQ_OP_READ) {
				memcpy(pa, ba, cur_len);
				flush_dcache_page(bv->bv_page);
			} else {
				flush_dcache_page(bv->bv_page);
				memcpy(ba, pa, cur_len);
				/* Only the touched byte range is marked dirty. */
				dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
			}

			dm_bufio_release(b);
		}

		pa += cur_len;
		bv_len -= cur_len;
		buf_off = 0;	/* Subsequent blocks start at offset 0. */
		block++;
	}

	return r;
}
/* READ/WRITE: iterate bio vector's copying between (partial) pages and bufio blocks. */
static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	int ret = 0;

	bio_for_each_bvec(bv, bio, iter) {
		int rc = __ebs_rw_bvec(ec, op, &bv, &iter);

		/* Attempt every vector, but remember the last failure. */
		if (rc)
			ret = rc;
	}

	return ret;
}
/*
 * Discard bio's blocks, i.e. pass discards down.
 *
 * Avoid discarding partial blocks at beginning and end;
 * return 0 in case no blocks can be discarded as a result.
 */
static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector;
	sector_t first = __sector_to_block(ec, sector);
	sector_t count = __nr_blocks(ec, bio);

	/*
	 * Skip a partially covered first underlying block
	 * (__nr_blocks() may have resulted in one block).
	 */
	if (__block_mod(sector, ec->u_bs)) {
		first++;
		count--;
	}

	/* Skip a partially covered last underlying block, if any. */
	if (count && __block_mod(bio_end_sector(bio), ec->u_bs))
		count--;

	if (!count)
		return 0;

	return dm_bufio_issue_discard(ec->bufio, first, count);
}
/* Release the bio's blocks from the bufio cache so stale cached data
 * cannot shadow I/O that bypasses the buffer layer. */
static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
{
	sector_t blocks, sector = bio->bi_iter.bi_sector;

	blocks = __nr_blocks(ec, bio);

	dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
}
/* Worker function to process incoming bios.
 *
 * Drains the input list under the lock, prefetches the bufio buffers
 * each bio will need, performs the reads/writes/discards, writes out
 * dirty buffers (so REQ_FUA/REQ_SYNC semantics hold) and finally
 * completes every bio. */
static void __ebs_process_bios(struct work_struct *ws)
{
	int r;
	bool write = false;
	sector_t block1, block2;
	struct ebs_c *ec = container_of(ws, struct ebs_c, ws);
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);

	/* Atomically take ownership of the queued bios. */
	spin_lock_irq(&ec->lock);
	bios = ec->bios_in;
	bio_list_init(&ec->bios_in);
	spin_unlock_irq(&ec->lock);

	/* Prefetch all read and any mis-aligned write buffers */
	bio_list_for_each(bio, &bios) {
		block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
		if (bio_op(bio) == REQ_OP_READ)
			dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
		else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
			block2 = __sector_to_block(ec, bio_end_sector(bio));
			/* Only the partially-overwritten edge blocks need a
			 * read-modify-write, hence only those are prefetched. */
			if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
				dm_bufio_prefetch(ec->bufio, block1, 1);
			if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
				dm_bufio_prefetch(ec->bufio, block2, 1);
		}
	}

	bio_list_for_each(bio, &bios) {
		r = -EIO;	/* Unsupported ops fail with EIO. */
		if (bio_op(bio) == REQ_OP_READ)
			r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
		else if (bio_op(bio) == REQ_OP_WRITE) {
			write = true;
			r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
		} else if (bio_op(bio) == REQ_OP_DISCARD) {
			/* Drop cached buffers before passing the discard down. */
			__ebs_forget_bio(ec, bio);
			r = __ebs_discard_bio(ec, bio);
		}

		if (r < 0)
			bio->bi_status = errno_to_blk_status(r);
	}

	/*
	 * We write dirty buffers after processing I/O on them
	 * but before we endio thus addressing REQ_FUA/REQ_SYNC.
	 */
	r = write ? dm_bufio_write_dirty_buffers(ec->bufio) : 0;

	while ((bio = bio_list_pop(&bios))) {
		/* Any other request is endioed. */
		if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
			bio_io_error(bio);
		else
			bio_endio(bio);
	}
}
/*
 * Construct an emulated block size mapping: <dev_path> <offset> <ebs> [<ubs>]
 *
 * <dev_path>: path of the underlying device
 * <offset>: offset in 512 bytes sectors into <dev_path>
 * <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
 * [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
 *          optional, if not supplied, retrieve logical block size from underlying device
 *
 * On any failure, ebs_dtr() is invoked to release whatever was set up
 * so far (hence partially-initialized members must be NULLed).
 */
static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned short tmp1;
	unsigned long long tmp;
	char dummy;
	struct ebs_c *ec;

	if (argc < 3 || argc > 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = ti->private = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate ebs context";
		return -ENOMEM;
	}

	r = -EINVAL;
	/* <offset>: must fit in sector_t and lie inside the target. */
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 ||
	    tmp != (sector_t)tmp ||
	    (sector_t)tmp >= ti->len) {
		ti->error = "Invalid device offset sector";
		goto bad;
	}
	ec->start = tmp;

	/* <ebs>: power-of-two sector count, at most one page in bytes. */
	if (sscanf(argv[2], "%hu%c", &tmp1, &dummy) != 1 ||
	    !__ebs_check_bs(tmp1) ||
	    to_bytes(tmp1) > PAGE_SIZE) {
		ti->error = "Invalid emulated block size";
		goto bad;
	}
	ec->e_bs = tmp1;

	/* Optional <ubs>: power-of-two sector count. */
	if (argc > 3) {
		if (sscanf(argv[3], "%hu%c", &tmp1, &dummy) != 1 || !__ebs_check_bs(tmp1)) {
			ti->error = "Invalid underlying block size";
			goto bad;
		}
		ec->u_bs = tmp1;
		ec->u_bs_set = true;
	} else
		ec->u_bs_set = false;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
	if (r) {
		ti->error = "Device lookup failed";
		ec->dev = NULL;	/* ebs_dtr() must not dm_put_device(). */
		goto bad;
	}

	r = -EINVAL;
	/* <ubs> not given: take the device's logical block size. */
	if (!ec->u_bs_set) {
		ec->u_bs = to_sector(bdev_logical_block_size(ec->dev->bdev));
		if (!__ebs_check_bs(ec->u_bs)) {
			ti->error = "Invalid retrieved underlying block size";
			goto bad;
		}
	}

	if (!ec->u_bs_set && ec->e_bs == ec->u_bs)
		DMINFO("Emulation superfluous: emulated equal to underlying block size");

	if (__block_mod(ec->start, ec->u_bs)) {
		ti->error = "Device offset must be multiple of underlying block size";
		goto bad;
	}

	ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1,
					   0, NULL, NULL, 0);
	if (IS_ERR(ec->bufio)) {
		ti->error = "Cannot create dm bufio client";
		r = PTR_ERR(ec->bufio);
		ec->bufio = NULL;	/* ebs_dtr() must not destroy it. */
		goto bad;
	}

	ec->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!ec->wq) {
		ti->error = "Cannot create dm-" DM_MSG_PREFIX " workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/* u_bs is a power of two, so __ffs() yields log2(u_bs). */
	ec->block_shift = __ffs(ec->u_bs);
	INIT_WORK(&ec->ws, &__ebs_process_bios);
	bio_list_init(&ec->bios_in);
	spin_lock_init(&ec->lock);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 0;
	ti->num_write_zeroes_bios = 0;
	return 0;
bad:
	ebs_dtr(ti);
	return r;
}
/* Destructor; also used by ebs_ctr() for partial-failure cleanup,
 * so every member may legitimately still be NULL. */
static void ebs_dtr(struct dm_target *ti)
{
	struct ebs_c *ctx = ti->private;

	/* Tear down in reverse order of construction. */
	if (ctx->wq)
		destroy_workqueue(ctx->wq);
	if (ctx->bufio)
		dm_bufio_client_destroy(ctx->bufio);
	if (ctx->dev)
		dm_put_device(ti, ctx->dev);

	kfree(ctx);
}
/* Map callback: remap the bio onto the underlying device, then either
 * pass it straight through or hand it to the worker for buffered
 * (read-modify-write) processing. */
static int ebs_map(struct dm_target *ti, struct bio *bio)
{
	struct ebs_c *ec = ti->private;

	bio_set_dev(bio, ec->dev->bdev);
	bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	/* Flushes carry no data; remap them directly. */
	if (unlikely(bio_op(bio) == REQ_OP_FLUSH))
		return DM_MAPIO_REMAPPED;
	/*
	 * Only queue for bufio processing in case of partial or overlapping buffers
	 * -or-
	 * emulation with ebs == ubs aiming for tests of dm-bufio overhead.
	 */
	if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
		   __block_mod(bio_end_sector(bio), ec->u_bs) ||
		   ec->e_bs == ec->u_bs)) {
		spin_lock_irq(&ec->lock);
		bio_list_add(&ec->bios_in, bio);
		spin_unlock_irq(&ec->lock);

		queue_work(ec->wq, &ec->ws);

		return DM_MAPIO_SUBMITTED;
	}

	/* Forget any buffer content relative to this direct backing device I/O. */
	__ebs_forget_bio(ec, bio);

	return DM_MAPIO_REMAPPED;
}
/* Report target status; only STATUSTYPE_TABLE emits anything. */
static void ebs_status(struct dm_target *ti, status_type_t type,
		       unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct ebs_c *ec = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
	case STATUSTYPE_IMA:
		/* Nothing to report; emit an empty string. */
		*result = '\0';
		break;
	case STATUSTYPE_TABLE:
		/* Echo the table line; <ubs> only if it was given explicitly. */
		snprintf(result, maxlen, ec->u_bs_set ? "%s %llu %u %u" : "%s %llu %u",
			 ec->dev->name, (unsigned long long) ec->start, ec->e_bs, ec->u_bs);
		break;
	}
}
/* Only pass ioctls through if the target maps the whole device
 * one-to-one (no offset, identical length). */
static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct ebs_c *ec = ti->private;

	*bdev = ec->dev->bdev;

	if (ec->start)
		return 1;

	return ti->len != bdev_nr_sectors(ec->dev->bdev) ? 1 : 0;
}
/* Advertise the emulated size as the logical block size and the
 * underlying size as the physical one. */
static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct ebs_c *ec = ti->private;
	unsigned int lbs = to_bytes(ec->e_bs);
	unsigned int pbs = to_bytes(ec->u_bs);

	limits->logical_block_size = lbs;
	limits->physical_block_size = pbs;
	limits->alignment_offset = pbs;
	limits->io_min = lbs;
}
/* Report the single underlying device/range to the device-mapper core. */
static int ebs_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct ebs_c *ec = ti->private;

	return fn(ti, ec->dev, ec->start, ti->len, data);
}
/* Target type registration: wires the callbacks above into dm core. */
static struct target_type ebs_target = {
	.name		 = "ebs",
	.version	 = {1, 0, 1},
	.features	 = DM_TARGET_PASSES_INTEGRITY,
	.module		 = THIS_MODULE,
	.ctr		 = ebs_ctr,
	.dtr		 = ebs_dtr,
	.map		 = ebs_map,
	.status		 = ebs_status,
	.io_hints	 = ebs_io_hints,
	.prepare_ioctl	 = ebs_prepare_ioctl,
	.iterate_devices = ebs_iterate_devices,
};
/* Register the "ebs" target and record module metadata. */
module_dm(ebs);

MODULE_AUTHOR("Heinz Mauelshagen <[email protected]>");
MODULE_DESCRIPTION(DM_NAME " emulated block size target");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_STATS_H
#define __SNIC_STATS_H
/* Per-adapter I/O path counters. */
struct snic_io_stats {
	atomic64_t active;		/* Active IOs */
	atomic64_t max_active;		/* Max # active IOs */
	atomic64_t max_sgl;		/* Max # SGLs for any IO */
	atomic64_t max_time;		/* Max time to process IO */
	atomic64_t max_qtime;		/* Max time to Queue the IO */
	atomic64_t max_cmpl_time;	/* Max time to complete the IO */
	atomic64_t sgl_cnt[SNIC_MAX_SG_DESC_CNT]; /* SGL Counters */
	atomic64_t max_io_sz;		/* Max IO Size */
	atomic64_t compl;		/* IO Completions */
	atomic64_t fail;		/* IO Failures */
	atomic64_t req_null;		/* req or req info is NULL */
	atomic64_t alloc_fail;		/* Alloc Failures */
	atomic64_t sc_null;		/* presumably: scsi command ref is NULL -- verify at call sites */
	atomic64_t io_not_found;	/* IO Not Found */
	atomic64_t num_ios;		/* Number of IOs */
};
/* Abort path counters. */
struct snic_abort_stats {
	atomic64_t num;		/* Abort counter */
	atomic64_t fail;	/* Abort Failure Counter */
	atomic64_t drv_tmo;	/* Abort Driver Timeouts */
	atomic64_t fw_tmo;	/* Abort Firmware Timeouts */
	atomic64_t io_not_found;/* Abort IO Not Found */
	atomic64_t q_fail;	/* Abort Queuing Failed */
};
/* Device-, HBA- and snic-level reset counters. */
struct snic_reset_stats {
	atomic64_t dev_resets;		/* Device Reset Counter */
	atomic64_t dev_reset_fail;	/* Device Reset Failures */
	atomic64_t dev_reset_aborts;	/* Device Reset Aborts */
	atomic64_t dev_reset_tmo;	/* Device Reset Timeout */
	atomic64_t dev_reset_terms;	/* Device Reset terminate */
	atomic64_t hba_resets;		/* hba/firmware resets */
	atomic64_t hba_reset_cmpl;	/* hba/firmware reset completions */
	atomic64_t hba_reset_fail;	/* hba/firmware failures */
	atomic64_t snic_resets;		/* snic resets */
	atomic64_t snic_reset_compl;	/* snic reset completions */
	atomic64_t snic_reset_fail;	/* snic reset failures */
};
/* Firmware interaction counters. */
struct snic_fw_stats {
	atomic64_t actv_reqs;		/* Active Requests */
	atomic64_t max_actv_reqs;	/* Max Active Requests */
	atomic64_t out_of_res;		/* Firmware Out Of Resources */
	atomic64_t io_errs;		/* Firmware IO Firmware Errors */
	atomic64_t scsi_errs;		/* Target hits check condition */
};
/* Miscellaneous counters: ISR activity, queue sizing and error paths. */
struct snic_misc_stats {
	u64 last_isr_time;		/* timestamp of most recent ISR */
	u64 last_ack_time;		/* timestamp of most recent ack */
	atomic64_t ack_isr_cnt;		/* ack interrupts serviced */
	atomic64_t cmpl_isr_cnt;	/* completion interrupts serviced */
	atomic64_t errnotify_isr_cnt;	/* error-notify interrupts serviced */
	atomic64_t max_cq_ents;		/* Max CQ Entries */
	atomic64_t data_cnt_mismat;	/* Data Count Mismatch */
	atomic64_t io_tmo;		/* IO timeouts */
	atomic64_t io_aborted;		/* IOs aborted */
	atomic64_t sgl_inval;		/* SGL Invalid */
	atomic64_t abts_wq_alloc_fail;	/* Abort Path WQ desc alloc failure */
	atomic64_t devrst_wq_alloc_fail;/* Device Reset - WQ desc alloc fail */
	atomic64_t wq_alloc_fail;	/* IO WQ desc alloc failure */
	atomic64_t no_icmnd_itmf_cmpls;	/* completions w/o icmnd/itmf match -- TODO confirm */
	atomic64_t io_under_run;	/* IO data underruns */
	atomic64_t qfull;		/* queue-full events */
	atomic64_t qsz_rampup;		/* queue-depth ramp-ups */
	atomic64_t qsz_rampdown;	/* queue-depth ramp-downs */
	atomic64_t last_qsz;		/* most recent queue depth */
	atomic64_t tgt_not_rdy;		/* target-not-ready events */
};
/* Aggregate per-snic statistics, grouped by subsystem. */
struct snic_stats {
	struct snic_io_stats io;
	struct snic_abort_stats abts;
	struct snic_reset_stats reset;
	struct snic_fw_stats fw;
	struct snic_misc_stats misc;
	atomic64_t io_cmpl_skip;	/* pending completions to skip, see snic_stats_update_io_cmpl() */
};
/* debugfs setup/teardown for the per-snic stats files. */
void snic_stats_debugfs_init(struct snic *);
void snic_stats_debugfs_remove(struct snic *);
/* Auxiliary function to update active IO counter and its high-water mark.
 *
 * Fix: the active counter is 64-bit (atomic64_t); reading it into an
 * `int` truncated large values.  Use a 64-bit local instead.
 *
 * NOTE(review): the read-compare-set on max_active is not atomic, so a
 * concurrent caller can miss a peak; acceptable for statistics.
 */
static inline void
snic_stats_update_active_ios(struct snic_stats *s_stats)
{
	struct snic_io_stats *io = &s_stats->io;
	long long nr_active_ios;

	nr_active_ios = atomic64_read(&io->active);
	if (atomic64_read(&io->max_active) < nr_active_ios)
		atomic64_set(&io->max_active, nr_active_ios);

	atomic64_inc(&io->num_ios);
}
/* Auxiliary function to update IO completion counter.
 *
 * Decrements the active-IO count; then either consumes one pending
 * "skip" token (io_cmpl_skip) or counts a regular completion. */
static inline void
snic_stats_update_io_cmpl(struct snic_stats *s_stats)
{
	atomic64_dec(&s_stats->io.active);
	if (unlikely(atomic64_read(&s_stats->io_cmpl_skip)))
		atomic64_dec(&s_stats->io_cmpl_skip);
	else
		atomic64_inc(&s_stats->io.compl);
}
#endif /* __SNIC_STATS_H */
|
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_COM
#define ENA_COM
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/netdevice.h>
#include "ena_common_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
#include "ena_regs_defs.h"
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))

#define ENA_MAX_HANDLERS 256

#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48

/* Unit in usec */
#define ENA_REG_READ_TIMEOUT 200000

/* Ring sizes in bytes for a given entry count. */
#define ADMIN_SQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aq_entry))
#define ADMIN_CQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aenq_entry))

#define ENA_CUSTOMER_METRICS_BUFFER_SIZE      512

/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 20
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1

#define ENA_HASH_KEY_SIZE 40

#define ENA_HW_HINTS_NO_TIMEOUT	0xFFFF

#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
/* Requested low-latency queue (LLQ) layout parameters. */
struct ena_llq_configurations {
	enum ena_admin_llq_header_location llq_header_location;
	enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
	enum ena_admin_llq_stride_ctrl  llq_stride_ctrl;
	enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
	u16 llq_ring_entry_size_value;	/* entry size in bytes */
};
/* Whether an IO queue carries transmit or receive traffic. */
enum queue_direction {
	ENA_COM_IO_QUEUE_DIRECTION_TX,
	ENA_COM_IO_QUEUE_DIRECTION_RX
};
/* DMA buffer descriptor. */
struct ena_com_buf {
	dma_addr_t paddr; /**< Buffer physical address */
	u16 len; /**< Buffer length in bytes */
};
/* Per-fragment Rx completion info: byte length and request id. */
struct ena_com_rx_buf_info {
	u16 len;
	u16 req_id;
};
/* Descriptor ring addresses: device (LLQ), virtual and DMA views. */
struct ena_com_io_desc_addr {
	u8 __iomem *pbuf_dev_addr; /* LLQ address */
	u8 *virt_addr;
	dma_addr_t phys_addr;
};
/* Tx offload metadata (MSS and header geometry). */
struct ena_com_tx_meta {
	u16 mss;
	u16 l3_hdr_len;
	u16 l3_hdr_offset;
	u16 l4_hdr_len; /* In words */
};
/* Negotiated low-latency queue layout, derived from device caps. */
struct ena_com_llq_info {
	u16 header_location_ctrl;
	u16 desc_stride_ctrl;
	u16 desc_list_entry_size_ctrl;
	u16 desc_list_entry_size;	/* in bytes */
	u16 descs_num_before_header;
	u16 descs_per_entry;
	u16 max_entries_in_tx_burst;
	bool disable_meta_caching;
};
/* IO completion queue state. */
struct ena_com_io_cq {
	struct ena_com_io_desc_addr cdesc_addr;

	/* Interrupt unmask register */
	u32 __iomem *unmask_reg;

	/* numa configuration register (for TPH) */
	u32 __iomem *numa_node_cfg_reg;

	/* The value to write to the above register to unmask
	 * the interrupt of this queue
	 */
	u32 msix_vector ____cacheline_aligned;

	enum queue_direction direction;

	/* holds the number of cdesc of the current packet */
	u16 cur_rx_pkt_cdesc_count;
	/* save the first cdesc idx of the current packet */
	u16 cur_rx_pkt_cdesc_start_idx;

	u16 q_depth;
	/* Caller qid */
	u16 qid;

	/* Device queue index */
	u16 idx;
	u16 head;		/* consumer index */
	u8 phase;		/* expected completion phase bit */
	u8 cdesc_entry_size_in_bytes;

} ____cacheline_aligned;
/* Ring of bounce buffers used to stage LLQ entries before they are
 * copied to device memory. buffers_num must be a power of 2 because
 * the next-to-use index is wrapped by masking
 * (see ena_com_get_next_bounce_buffer()).
 */
struct ena_com_io_bounce_buffer_control {
	u8 *base_buffer;
	u16 next_to_use;
	u16 buffer_size;
	u16 buffers_num; /* Must be a power of 2 */
};
/* This struct is to keep tracking the current location of the next llq entry */
struct ena_com_llq_pkt_ctrl {
	u8 *curr_bounce_buf;
	u16 idx;
	u16 descs_left_in_line;
};
/* IO submission queue state; cacheline-aligned as it is touched on the
 * hot datapath.
 */
struct ena_com_io_sq {
	struct ena_com_io_desc_addr desc_addr;
	/* Doorbell register used to notify the device of new descriptors */
	u32 __iomem *db_addr;
	enum queue_direction direction;
	/* Placement policy: host memory vs. device memory (LLQ) */
	enum ena_admin_placement_policy_type mem_queue_type;
	bool disable_meta_caching;
	u32 msix_vector;
	/* Last meta descriptor sent; used to skip redundant meta descs */
	struct ena_com_tx_meta cached_tx_meta;
	struct ena_com_llq_info llq_info;
	struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
	struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
	u16 q_depth;
	u16 qid;
	/* Device queue index */
	u16 idx;
	u16 tail;
	u16 next_to_comp;
	u16 llq_last_copy_tail;
	u32 tx_max_header_size;
	u8 phase;
	u8 desc_entry_size;
	u8 dma_addr_bits;
	u16 entries_in_tx_burst_left;
} ____cacheline_aligned;
/* Admin completion queue ring */
struct ena_com_admin_cq {
	struct ena_admin_acq_entry *entries;
	dma_addr_t dma_addr;
	u16 head;
	u8 phase;
};
/* Admin submission queue ring */
struct ena_com_admin_sq {
	struct ena_admin_aq_entry *entries;
	dma_addr_t dma_addr;
	u32 __iomem *db_addr;
	u16 head;
	u16 tail;
	u8 phase;
};
/* Admin command accounting (exposed for statistics/debug) */
struct ena_com_stats_admin {
	u64 aborted_cmd;
	u64 submitted_cmd;
	u64 completed_cmd;
	u64 out_of_space;
	u64 no_completion;
};
/* Admin queue pair (SQ + CQ) plus the bookkeeping needed to submit
 * commands and match completions to their contexts.
 */
struct ena_com_admin_queue {
	void *q_dmadev;
	struct ena_com_dev *ena_dev;
	spinlock_t q_lock; /* spinlock for the admin queue */
	/* Per-command completion contexts, indexed by command id */
	struct ena_comp_ctx *comp_ctx;
	u32 completion_timeout;
	u16 q_depth;
	struct ena_com_admin_cq cq;
	struct ena_com_admin_sq sq;
	/* Indicate if the admin queue should poll for completion */
	bool polling;
	u16 curr_cmd_id;
	/* Indicate that the ena was initialized and can
	 * process new admin commands
	 */
	bool running_state;
	/* Count the number of outstanding admin commands */
	atomic_t outstanding_cmds;
	struct ena_com_stats_admin stats;
};
struct ena_aenq_handlers;
/* Asynchronous event notification queue */
struct ena_com_aenq {
	u16 head;
	u8 phase;
	struct ena_admin_aenq_entry *entries;
	dma_addr_t dma_addr;
	u16 q_depth;
	struct ena_aenq_handlers *aenq_handlers;
};
/* State of the (read-less) MMIO register read mechanism: the device DMAs
 * register values into read_resp instead of the host issuing PCI reads.
 */
struct ena_com_mmio_read {
	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
	dma_addr_t read_resp_dma_addr;
	u32 reg_read_to; /* in us */
	/* Sequence number used to match a response to its request */
	u16 seq_num;
	bool readless_supported;
	/* spin lock to ensure a single outstanding read */
	spinlock_t lock;
};
/* RSS configuration: indirection table, hash key and hash control */
struct ena_rss {
	/* Indirect table */
	u16 *host_rss_ind_tbl;
	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
	dma_addr_t rss_ind_tbl_dma_addr;
	u16 tbl_log_size;
	/* Hash key */
	enum ena_admin_hash_functions hash_func;
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	dma_addr_t hash_key_dma_addr;
	u32 hash_init_val;
	/* Flow Control */
	struct ena_admin_feature_rss_hash_control *hash_ctrl;
	dma_addr_t hash_ctrl_dma_addr;
};
/* DMA buffer and support mask for the customer metrics feature */
struct ena_customer_metrics {
	/* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
	 * and ena_admin_customer_metrics_id
	 */
	u64 supported_metrics;
	dma_addr_t buffer_dma_addr;
	void *buffer_virt_addr;
	u32 buffer_len;
};
/* Host-supplied attributes shared with the device */
struct ena_host_attribute {
	/* Debug area */
	u8 *debug_area_virt_addr;
	dma_addr_t debug_area_dma_addr;
	u32 debug_area_size;
	/* Host information */
	struct ena_admin_host_info *host_info;
	dma_addr_t host_info_dma_addr;
};
/* Each ena_dev is a PCI function. */
struct ena_com_dev {
	struct ena_com_admin_queue admin_queue;
	struct ena_com_aenq aenq;
	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
	/* Registers BAR mapping */
	u8 __iomem *reg_bar;
	/* Device memory BAR mapping (used for LLQ) */
	void __iomem *mem_bar;
	void *dmadev;
	struct net_device *net_device;
	enum ena_admin_placement_policy_type tx_mem_queue_type;
	u32 tx_max_header_size;
	u16 stats_func; /* Selected function for extended statistic dump */
	u16 stats_queue; /* Selected queue for extended statistic dump */
	u32 ena_min_poll_delay_us;
	struct ena_com_mmio_read mmio_read;
	struct ena_rss rss;
	/* Bitmaps of device-reported features and capabilities */
	u32 supported_features;
	u32 capabilities;
	u32 dma_addr_bits;
	struct ena_host_attribute host_attr;
	bool adaptive_coalescing;
	u16 intr_delay_resolution;
	/* interrupt moderation intervals are in usec divided by
	 * intr_delay_resolution, which is supplied by the device.
	 */
	u32 intr_moder_tx_interval;
	u32 intr_moder_rx_interval;
	struct ena_intr_moder_entry *intr_moder_tbl;
	struct ena_com_llq_info llq_info;
	struct ena_customer_metrics customer_metrics;
};
/* Aggregated result of ena_com_get_dev_attr_feat(): all the feature
 * descriptors queried from the device during initialization.
 */
struct ena_com_dev_get_features_ctx {
	struct ena_admin_queue_feature_desc max_queues;
	struct ena_admin_queue_ext_feature_desc max_queue_ext;
	struct ena_admin_device_attr_feature_desc dev_attr;
	struct ena_admin_feature_aenq_desc aenq;
	struct ena_admin_feature_offload_desc offload;
	struct ena_admin_ena_hw_hints hw_hints;
	struct ena_admin_feature_llq_desc llq;
};
/* Parameters for creating one IO queue pair
 * (see ena_com_create_io_queue()).
 */
struct ena_com_create_io_ctx {
	enum ena_admin_placement_policy_type mem_queue_type;
	enum queue_direction direction;
	int numa_node;
	u32 msix_vector;
	u16 queue_size;
	u16 qid;
};
/* Callback invoked for an AENQ entry; @data is driver-private context */
typedef void (*ena_aenq_handler)(void *data,
	struct ena_admin_aenq_entry *aenq_e);
/* Holds aenq handlers. Indexed by AENQ event group */
struct ena_aenq_handlers {
	ena_aenq_handler handlers[ENA_MAX_HANDLERS];
	/* Fallback called for event groups without a registered handler */
	ena_aenq_handler unimplemented_handler;
};
/*****************************************************************************/
/*****************************************************************************/
/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism
* @ena_dev: ENA communication layer struct
*
* Initialize the register read mechanism.
*
* @note: This method must be the first stage in the initialization sequence.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
*/
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
bool readless_supported);
/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return
* value physical address.
* @ena_dev: ENA communication layer struct
*/
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev);
/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism
* @ena_dev: ENA communication layer struct
*/
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
/* ena_com_admin_init - Init the admin and the async queues
* @ena_dev: ENA communication layer struct
* @aenq_handlers: Those handlers to be called upon event.
*
* Initialize the admin submission and completion queues.
* Initialize the asynchronous events notification queues.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_admin_init(struct ena_com_dev *ena_dev,
struct ena_aenq_handlers *aenq_handlers);
/* ena_com_admin_destroy - Destroy the admin and the async events queues.
* @ena_dev: ENA communication layer struct
*
* @note: Before calling this method, the caller must validate that the device
* won't send any additional admin completions/aenq.
* To achieve that, a FLR is recommended.
*/
void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
/* ena_com_dev_reset - Perform device FLR to the device.
* @ena_dev: ENA communication layer struct
* @reset_reason: Specify what is the trigger for the reset in case of an error.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct
* @ctx - create context structure
*
* Create the submission and the completion queues.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
struct ena_com_create_io_ctx *ctx);
/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
* @ena_dev: ENA communication layer struct
* @qid - the caller virtual queue id.
*/
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
/* ena_com_get_io_handlers - Return the io queue handlers
* @ena_dev: ENA communication layer struct
* @qid - the caller virtual queue id.
* @io_sq - IO submission queue handler
* @io_cq - IO completion queue handler.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_sq **io_sq,
struct ena_com_io_cq **io_cq);
/* ena_com_admin_aenq_enable - ENAble asynchronous event notifications
* @ena_dev: ENA communication layer struct
*
* After this method, aenq event can be received via AENQ.
*/
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev);
/* ena_com_set_admin_running_state - Set the state of the admin queue
* @ena_dev: ENA communication layer struct
*
* Change the state of the admin queue (enable/disable)
*/
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state);
/* ena_com_get_admin_running_state - Get the admin queue state
* @ena_dev: ENA communication layer struct
*
* Retrieve the state of the admin queue (enable/disable)
*
* @return - current polling mode (enable/disable)
*/
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode
* @ena_dev: ENA communication layer struct
* @polling: ENAble/Disable polling mode
*
* Set the admin completion mode.
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
* This method goes over the admin completion queue and wakes up all the pending
* threads that wait on the commands wait event.
*
* @note: Should be called after MSI-X interrupt.
*/
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
/* ena_com_aenq_intr_handler - AENQ interrupt handler
* @ena_dev: ENA communication layer struct
*
* This method goes over the async event notification queue and calls the proper
* aenq handler.
*/
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
* @ena_dev: ENA communication layer struct
*
* This method aborts all the outstanding admin commands.
* The caller should then call ena_com_wait_for_abort_completion to make sure
* all the commands were completed.
*/
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
* @ena_dev: ENA communication layer struct
*
* This method waits until all the outstanding admin commands are completed.
*/
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
/* ena_com_validate_version - Validate the device parameters
* @ena_dev: ENA communication layer struct
*
* This method verifies the device parameters are the same as the saved
* parameters in ena_dev.
* This method is useful after device reset, to validate the device mac address
* and the device offloads are the same as before the reset.
*
* @return - 0 on success negative value otherwise.
*/
int ena_com_validate_version(struct ena_com_dev *ena_dev);
/* ena_com_get_link_params - Retrieve physical link parameters.
* @ena_dev: ENA communication layer struct
* @resp: Link parameters
*
* Retrieve the physical link parameters,
* like speed, auto-negotiation and full duplex support.
*
* @return - 0 on Success negative value otherwise.
*/
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_resp *resp);
/* ena_com_get_dma_width - Retrieve physical dma address width the device
* supports.
* @ena_dev: ENA communication layer struct
*
* Retrieve the maximum physical address bits the device can handle.
*
* @return: > 0 on Success and negative value otherwise.
*/
int ena_com_get_dma_width(struct ena_com_dev *ena_dev);
/* ena_com_set_aenq_config - Set aenq groups configurations
* @ena_dev: ENA communication layer struct
 * @groups_flag: bit fields flags of enum ena_admin_aenq_group.
*
* Configure which aenq event group the driver would like to receive.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
/* ena_com_get_dev_attr_feat - Get device features
* @ena_dev: ENA communication layer struct
* @get_feat_ctx: returned context that contain the get features.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx);
/* ena_com_get_eni_stats - Get extended network interface statistics
* @ena_dev: ENA communication layer struct
* @stats: stats return value
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
struct ena_admin_eni_stats *stats);
/* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics
* @ena_dev: ENA communication layer struct
* @info: ena srd stats and flags
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
struct ena_admin_ena_srd_info *info);
/* ena_com_get_customer_metrics - Get customer metrics for network interface
* @ena_dev: ENA communication layer struct
* @buffer: buffer for returned customer metrics
* @len: size of the buffer
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len);
/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
* @mtu: mtu value
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
/* ena_com_rss_init - Init RSS
* @ena_dev: ENA communication layer struct
* @log_size: indirection log size
*
* Allocate RSS/RFS resources.
* The caller then can configure rss using ena_com_set_hash_function,
* ena_com_set_hash_ctrl and ena_com_indirect_table_set.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
/* ena_com_rss_destroy - Destroy rss
* @ena_dev: ENA communication layer struct
*
* Free all the RSS/RFS resources.
*/
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
/* ena_com_get_current_hash_function - Get RSS hash function
* @ena_dev: ENA communication layer struct
*
* Return the current hash function.
* @return: 0 or one of the ena_admin_hash_functions values.
*/
int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
/* ena_com_fill_hash_function - Fill RSS hash function
* @ena_dev: ENA communication layer struct
* @func: The hash function (Toeplitz or crc)
* @key: Hash key (for toeplitz hash)
* @key_len: key length (max length 10 DW)
* @init_val: initial value for the hash function
*
 * Fill the ena_dev resources with the desired hash function, hash key, key_len
* and key initial value (if needed by the hash function).
* To flush the key into the device the caller should call
* ena_com_set_hash_function.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
enum ena_admin_hash_functions func,
const u8 *key, u16 key_len, u32 init_val);
/* ena_com_set_hash_function - Flush the hash function and its dependencies to
* the device.
* @ena_dev: ENA communication layer struct
*
 * Flush the hash function and its dependencies (key, key length and
* initial value) if needed.
*
* @note: Prior to this method the caller should call ena_com_fill_hash_function
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
/* ena_com_get_hash_function - Retrieve the hash function from the device.
* @ena_dev: ENA communication layer struct
* @func: hash function
*
* Retrieve the hash function from the device.
*
* @note: If the caller called ena_com_fill_hash_function but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
enum ena_admin_hash_functions *func);
/* ena_com_get_hash_key - Retrieve the hash key
* @ena_dev: ENA communication layer struct
* @key: hash key
*
* Retrieve the hash key.
*
* @note: If the caller called ena_com_fill_hash_key but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
/* ena_com_fill_hash_ctrl - Fill RSS hash control
* @ena_dev: ENA communication layer struct.
* @proto: The protocol to configure.
* @hash_fields: bit mask of ena_admin_flow_hash_fields
*
 * Fill the ena_dev resources with the desired hash control (the ethernet
* fields that take part of the hash) for a specific protocol.
* To flush the hash control to the device, the caller should call
* ena_com_set_hash_ctrl.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
enum ena_admin_flow_hash_proto proto,
u16 hash_fields);
/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
* @ena_dev: ENA communication layer struct
*
* Flush the hash control (the ethernet fields that take part of the hash)
*
* @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
* @ena_dev: ENA communication layer struct
* @proto: The protocol to retrieve.
* @fields: bit mask of ena_admin_flow_hash_fields.
*
* Retrieve the hash control from the device.
*
* @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
enum ena_admin_flow_hash_proto proto,
u16 *fields);
/* ena_com_set_default_hash_ctrl - Set the hash control to a default
* configuration.
* @ena_dev: ENA communication layer struct
*
* Fill the ena_dev resources with the default hash control configuration.
* To flush the hash control to the device, the caller should call
* ena_com_set_hash_ctrl.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);
/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
* indirection table
* @ena_dev: ENA communication layer struct.
* @entry_idx - indirection table entry.
* @entry_value - redirection value
*
* Fill a single entry of the RSS indirection table in the ena_dev resources.
 * To flush the indirection table to the device, the caller should call
* ena_com_indirect_table_set.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
u16 entry_idx, u16 entry_value);
/* ena_com_indirect_table_set - Flush the indirection table to the device.
* @ena_dev: ENA communication layer struct
*
* Flush the indirection hash control to the device.
* Prior to this method the caller should call ena_com_indirect_table_fill_entry
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
* @ena_dev: ENA communication layer struct
* @ind_tbl: indirection table
*
* Retrieve the RSS indirection table from the device.
*
* @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);
/* ena_com_allocate_host_info - Allocate host info resources.
* @ena_dev: ENA communication layer struct
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
/* ena_com_allocate_debug_area - Allocate debug area.
* @ena_dev: ENA communication layer struct
* @debug_area_size - debug area size.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
u32 debug_area_size);
/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
* @ena_dev: ENA communication layer struct
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev);
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
* Free the allocated debug area.
*/
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
/* ena_com_delete_host_info - Free the host info resources.
* @ena_dev: ENA communication layer struct
*
* Free the allocated host info.
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
/* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources.
* @ena_dev: ENA communication layer struct
*
* Free the allocated customer metrics area.
*/
void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev);
/* ena_com_set_host_attributes - Update the device with the host
* attributes (debug area and host info) base address.
* @ena_dev: ENA communication layer struct
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);
/* ena_com_create_io_cq - Create io completion queue.
* @ena_dev: ENA communication layer struct
* @io_cq - io completion queue handler
* Create IO completion queue.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq);
/* ena_com_destroy_io_cq - Destroy io completion queue.
* @ena_dev: ENA communication layer struct
* @io_cq - io completion queue handler
* Destroy IO completion queue.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq);
/* ena_com_execute_admin_command - Execute admin command
* @admin_queue: admin queue.
* @cmd: the admin command to execute.
* @cmd_size: the command size.
* @cmd_completion: command completion return value.
* @cmd_comp_size: command completion size.
* Submit an admin command and then wait until the device returns a
* completion.
* The completion will be copied into cmd_comp.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
struct ena_admin_aq_entry *cmd,
size_t cmd_size,
struct ena_admin_acq_entry *cmd_comp,
size_t cmd_comp_size);
/* ena_com_init_interrupt_moderation - Init interrupt moderation
* @ena_dev: ENA communication layer struct
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
* capability is supported by the device.
*
* @return - supported or not.
*/
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
* non-adaptive interval in Tx direction.
* @ena_dev: ENA communication layer struct
* @tx_coalesce_usecs: Interval in usec.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
u32 tx_coalesce_usecs);
/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
* non-adaptive interval in Rx direction.
* @ena_dev: ENA communication layer struct
* @rx_coalesce_usecs: Interval in usec.
*
* @return - 0 on success, negative value on failure.
*/
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
u32 rx_coalesce_usecs);
/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
* non-adaptive interval in Tx direction.
* @ena_dev: ENA communication layer struct
*
* @return - interval in usec
*/
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
* non-adaptive interval in Rx direction.
* @ena_dev: ENA communication layer struct
*
* @return - interval in usec
*/
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
/* ena_com_config_dev_mode - Configure the placement policy of the device.
* @ena_dev: ENA communication layer struct
* @llq_features: LLQ feature descriptor, retrieve via
* ena_com_get_dev_attr_feat.
* @ena_llq_config: The default driver LLQ parameters configurations
*/
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_config);
/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
 * @io_sq: IO submit queue struct
 *
 * @return - ena_com_dev struct extracted from io_sq
 */
static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
{
	/* io_sq is element io_sq->qid of ena_com_dev.io_sq_queues */
	return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
}
/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
 * @io_cq: IO completion queue struct
 *
 * @return - ena_com_dev struct extracted from io_cq
 */
static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
{
	/* io_cq is element io_cq->qid of ena_com_dev.io_cq_queues */
	return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
}
/* ena_com_get_adaptive_moderation_enabled - Query adaptive interrupt
 * moderation state.
 * @ena_dev: ENA communication layer struct
 *
 * @return - true if adaptive coalescing is currently enabled
 */
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
	return ena_dev->adaptive_coalescing;
}
/* ena_com_enable_adaptive_moderation - Enable adaptive interrupt moderation */
static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = true;
}
/* ena_com_disable_adaptive_moderation - Disable adaptive interrupt moderation */
static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = false;
}
/* ena_com_get_cap - query whether device supports a capability.
 * @ena_dev: ENA communication layer struct
 * @cap_id: enum value representing the capability
 *
 * @return - true if capability is supported or false otherwise
 */
static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
	enum ena_admin_aq_caps_id cap_id)
{
	/* capabilities is a bitmap indexed by ena_admin_aq_caps_id */
	return !!(ena_dev->capabilities & BIT(cap_id));
}
/* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
 * @ena_dev: ENA communication layer struct
 * @metric_id: enum value representing the customer metric
 *
 * @return - true if customer metric is supported or false otherwise
 */
static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
	enum ena_admin_customer_metrics_id metric_id)
{
	/* supported_metrics is a bitmap indexed by ena_admin_customer_metrics_id */
	return !!(ena_dev->customer_metrics.supported_metrics & BIT(metric_id));
}
/* ena_com_get_customer_metric_count - return the number of supported customer metrics.
 * @ena_dev: ENA communication layer struct
 *
 * @return - the number of supported customer metrics
 */
static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
{
	/* Population count of the support bitmap */
	return hweight64(ena_dev->customer_metrics.supported_metrics);
}
/* ena_com_update_intr_reg - Prepare interrupt register
 * @intr_reg: interrupt register to update.
 * @rx_delay_interval: Rx interval in usecs
 * @tx_delay_interval: Tx interval in usecs
 * @unmask: unmask enable/disable
 *
 * Prepare interrupt update register with the supplied parameters.
 */
static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
	u32 rx_delay_interval,
	u32 tx_delay_interval,
	bool unmask)
{
	intr_reg->intr_control = 0;
	/* Rx delay occupies the low bits of the control word */
	intr_reg->intr_control |= rx_delay_interval &
		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
	/* Tx delay is shifted into its field and masked to width */
	intr_reg->intr_control |=
		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
	if (unmask)
		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
/* ena_com_get_next_bounce_buffer - Return the next LLQ bounce buffer and
 * advance the ring.
 * @bounce_buf_ctrl: bounce buffer ring control struct
 *
 * Relies on buffers_num being a power of 2: the next_to_use counter is
 * wrapped by masking with (buffers_num - 1). The buffer after the one
 * returned is prefetched for write, as the caller is expected to fill
 * buffers sequentially.
 *
 * @return - pointer to the next bounce buffer to fill
 */
static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
{
	u16 size, buffers_num;
	u8 *buf;
	size = bounce_buf_ctrl->buffer_size;
	buffers_num = bounce_buf_ctrl->buffers_num;
	/* Claim the current slot and post-increment the ring index */
	buf = bounce_buf_ctrl->base_buffer +
		(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
	/* Warm the cache for the buffer the caller will want next */
	prefetchw(bounce_buf_ctrl->base_buffer +
		(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
	return buf;
}
#endif /* !(ENA_COM) */
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright 2023 Logic PD, Inc dba Beacon EmbeddedWorks
*/
/dts-v1/;
#include <dt-bindings/usb/pd.h>
#include <dt-bindings/phy/phy-imx8-pcie.h>
#include "imx8mp.dtsi"
#include "imx8mp-beacon-som.dtsi"
/* Board-level nodes for the Beacon i.MX8M Plus development kit carrier;
 * the SoM itself is described by imx8mp-beacon-som.dtsi.
 */
/ {
	model = "Beacon EmbeddedWorks i.MX8MPlus Development kit";
	compatible = "beacon,imx8mp-beacon-kit", "fsl,imx8mp";

	aliases {
		ethernet0 = &eqos;
		ethernet1 = &fec;
	};

	chosen {
		stdout-path = &uart2;
	};

	/* 25 MHz crystal feeding the PCIe clock generator on I2C2 */
	clk_xtal25: clock-xtal25 {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <25000000>;
	};

	/* Dual-role USB Type-C connector: HS lines go to the USB3
	 * controller, SS lines through the HD3SS3220 mux.
	 */
	connector {
		compatible = "usb-c-connector";
		label = "USB-C";
		data-role = "dual";

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			port@0 {
				reg = <0>;
				hs_ep: endpoint {
					remote-endpoint = <&usb3_hs_ep>;
				};
			};

			port@1 {
				reg = <1>;
				ss_ep: endpoint {
					remote-endpoint = <&hd3ss3220_in_ep>;
				};
			};
		};
	};

	/* Codec stub for the PDM digital microphone (paired with micfil) */
	dmic_codec: dmic-codec {
		compatible = "dmic-codec";
		num-channels = <1>;
		#sound-dai-cells = <0>;
	};

	/* Four user buttons behind the pca6416_1 I2C GPIO expander */
	gpio-keys {
		compatible = "gpio-keys";
		autorepeat;

		button-0 {
			label = "btn0";
			linux,code = <BTN_0>;
			gpios = <&pca6416_1 12 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
			wakeup-source;
		};

		button-1 {
			label = "btn1";
			linux,code = <BTN_1>;
			gpios = <&pca6416_1 13 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
			wakeup-source;
		};

		button-2 {
			label = "btn2";
			linux,code = <BTN_2>;
			gpios = <&pca6416_1 14 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
			wakeup-source;
		};

		button-3 {
			label = "btn3";
			linux,code = <BTN_3>;
			gpios = <&pca6416_1 15 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
			wakeup-source;
		};
	};

	/* HDMI connector driven by the ADV7535 DSI-to-HDMI bridge */
	bridge-connector {
		compatible = "hdmi-connector";
		type = "a";

		port {
			hdmi_con: endpoint {
				remote-endpoint = <&adv7535_out>;
			};
		};
	};

	/* HDMI connector on the SoC's native HDMI TX controller */
	hdmi-connector {
		compatible = "hdmi-connector";
		type = "a";

		port {
			hdmi_connector: endpoint {
				remote-endpoint = <&hdmi_to_connector>;
			};
		};
	};

	/* Three expander-driven user LEDs plus a heartbeat LED on gpio4 */
	leds {
		compatible = "gpio-leds";
		pinctrl-names = "default";
		pinctrl-0 = <&pinctrl_led3>;

		led-0 {
			label = "gen_led0";
			gpios = <&pca6416_1 4 GPIO_ACTIVE_HIGH>;
			default-state = "off";
		};

		led-1 {
			label = "gen_led1";
			gpios = <&pca6416_1 5 GPIO_ACTIVE_HIGH>;
			default-state = "off";
		};

		led-2 {
			label = "gen_led2";
			gpios = <&pca6416_1 6 GPIO_ACTIVE_HIGH>;
			default-state = "off";
		};

		led-3 {
			label = "heartbeat";
			gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
			linux,default-trigger = "heartbeat";
		};
	};

	/* 3.3 V audio rail for the WM8962, gated via the GPIO expander */
	reg_audio: regulator-wm8962 {
		compatible = "regulator-fixed";
		regulator-name = "3v3_aud";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&pca6416_1 11 GPIO_ACTIVE_HIGH>;
		enable-active-high;
	};

	/* SD card (usdhc2) supply with power-cycle delays */
	reg_usdhc2_vmmc: regulator-usdhc2 {
		compatible = "regulator-fixed";
		regulator-name = "VSD_3V3";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
		enable-active-high;
		startup-delay-us = <100>;
		off-on-delay-us = <20000>;
	};

	/* 5 V VBUS supply for the USB1 host port */
	reg_usb1_host_vbus: regulator-usb1-vbus {
		compatible = "regulator-fixed";
		regulator-name = "usb1_host_vbus";
		regulator-max-microvolt = <5000000>;
		regulator-min-microvolt = <5000000>;
		gpio = <&pca6416_1 0 GPIO_ACTIVE_HIGH>;
		enable-active-high;
	};

	/* HDMI audio out through the ADV7535 bridge (SAI5 as I2S source) */
	sound-adv7535 {
		compatible = "simple-audio-card";
		simple-audio-card,name = "sound-adv7535";
		simple-audio-card,format = "i2s";

		simple-audio-card,cpu {
			sound-dai = <&sai5>;
			system-clock-direction-out;
		};

		simple-audio-card,codec {
			sound-dai = <&adv_bridge>;
		};
	};

	/* PDM microphone capture (MICFIL as clock master) */
	sound-dmic {
		compatible = "simple-audio-card";
		simple-audio-card,name = "sound-pdm";
		simple-audio-card,format = "i2s";
		simple-audio-card,bitclock-master = <&dailink_master>;
		simple-audio-card,frame-master = <&dailink_master>;

		dailink_master: simple-audio-card,cpu {
			sound-dai = <&micfil>;
		};

		simple-audio-card,codec {
			sound-dai = <&dmic_codec>;
		};
	};

	/* Headphone/speaker/headset audio via the WM8962 codec on SAI3 */
	sound-wm8962 {
		compatible = "simple-audio-card";
		simple-audio-card,name = "wm8962";
		simple-audio-card,format = "i2s";
		simple-audio-card,widgets = "Headphone", "Headphones",
					    "Microphone", "Headset Mic",
					    "Speaker", "Speaker";
		simple-audio-card,routing = "Headphones", "HPOUTL",
					    "Headphones", "HPOUTR",
					    "Speaker", "SPKOUTL",
					    "Speaker", "SPKOUTR",
					    "Headset Mic", "MICBIAS",
					    "IN3R", "Headset Mic";

		simple-audio-card,cpu {
			sound-dai = <&sai3>;
			frame-master;
			bitclock-master;
		};

		simple-audio-card,codec {
			sound-dai = <&wm8962>;
		};
	};
};
/* Pin the two audio PLLs to rates suitable for 48 kHz and 44.1 kHz
 * sample-rate families.
 */
&audio_blk_ctrl {
	assigned-clocks = <&clk IMX8MP_AUDIO_PLL1>, <&clk IMX8MP_AUDIO_PLL2>;
	assigned-clock-rates = <393216000>, <135475200>;
};
/* ECSPI2 carries a discrete Infineon SLB9670 TPM (TIS-over-SPI) */
&ecspi2 {
	pinctrl-names = "default";
	pinctrl-0 = <&pinctrl_ecspi2>;
	cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
	status = "okay";

	tpm: tpm@0 {
		compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
		reg = <0>;
		pinctrl-names = "default";
		pinctrl-0 = <&pinctrl_tpm>;
		reset-gpios = <&gpio4 0 GPIO_ACTIVE_LOW>;
		spi-max-frequency = <18500000>;
	};
};
/* Second Ethernet port (FEC) with an RGMII PHY (PHY ID 0022.1640) at
 * MDIO address 3. Wake-on-LAN via magic packet is enabled.
 *
 * Fix: the phy-handle phandle was corrupted by an HTML-entity mangling
 * ("&eth" rendered as a single character); restored to <&ethphy1> so the
 * property references the PHY node defined just below.
 */
&fec {
	pinctrl-names = "default";
	pinctrl-0 = <&pinctrl_fec>;
	phy-mode = "rgmii-id";
	phy-handle = <&ethphy1>;
	fsl,magic-packet;
	status = "okay";

	mdio {
		#address-cells = <1>;
		#size-cells = <0>;

		ethphy1: ethernet-phy@3 {
			compatible = "ethernet-phy-id0022.1640",
				     "ethernet-phy-ieee802.3-c22";
			reg = <3>;
			reset-gpios = <&gpio4 18 GPIO_ACTIVE_LOW>;
			reset-assert-us = <10000>;
			reset-deassert-us = <150000>;
			interrupt-parent = <&gpio4>;
			interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
		};
	};
};
/* CAN bus 1 on the SPDIF pads. */
&flexcan1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flexcan1>;
status = "okay";
};
/* Drive the USB-C mux enable low at boot via a GPIO hog. */
&gpio2 {
usb-mux-hog {
gpio-hog;
gpios = <20 0>;
output-low;
line-name = "USB-C Mux En";
};
};
/* Native HDMI TX; port@1 routes to the board's HDMI connector node. */
&hdmi_tx {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hdmi>;
status = "okay";
ports {
port@1 {
reg = <1>;
hdmi_to_connector:endpoint {
remote-endpoint = <&hdmi_connector>;
};
};
};
};
&hdmi_tx_phy {
status = "okay";
};
/* I2C2: GPIO expander, ADV7535 DSI-to-HDMI bridge, and the PCIe reference
 * clock generator. */
&i2c2 {
clock-frequency = <384000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
pca6416_3: gpio@20 {
compatible = "nxp,pcal6416";
reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&gpio4>;
interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <2>;
};
/* MIPI-DSI in (port@0) -> HDMI out (port@1); also an I2S audio codec. */
adv_bridge: hdmi@3d {
compatible = "adi,adv7535";
reg = <0x3d>;
reg-names = "main";
interrupt-parent = <&gpio4>;
interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
adi,dsi-lanes = <4>;
#sound-dai-cells = <0>;
avdd-supply = <&buck5>;
dvdd-supply = <&buck5>;
pvdd-supply = <&buck5>;
a2vdd-supply = <&buck5>;
v1p2-supply = <&buck5>;
v3p3-supply = <&buck4>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
adv7535_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
port@1 {
reg = <1>;
adv7535_out: endpoint {
remote-endpoint = <&hdmi_con>;
};
};
};
};
/* Renesas 9FGV0241 two-output PCIe clock generator from a 25 MHz xtal. */
pcieclk: clock-generator@68 {
compatible = "renesas,9fgv0241";
reg = <0x68>;
clocks = <&clk_xtal25>;
#clock-cells = <1>;
};
};
&hdmi_pvi {
status = "okay";
};
/* I2C3: PTN5110 USB Type-C port controller feeding the on-board USB hub;
 * port advertises a fixed 5 V / 3 A source PDO. */
&i2c3 {
/* Connected to USB Hub */
usb-typec@52 {
compatible = "nxp,ptn5110", "tcpci";
reg = <0x52>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_typec>;
interrupt-parent = <&gpio4>;
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
connector {
compatible = "usb-c-connector";
label = "USB-C";
power-role = "source";
data-role = "host";
source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
};
};
};
/* I2C4: WM8962 audio codec, two GPIO expanders, and the HD3SS3220 USB-C
 * mux/controller for the OTG port. */
&i2c4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4>;
clock-frequency = <384000>;
status = "okay";
wm8962: audio-codec@1a {
compatible = "wlf,wm8962";
reg = <0x1a>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wm8962>;
/* MCLK comes from CLKO1, reparented to AUDIO_PLL2 for 44.1 kHz family. */
clocks = <&clk IMX8MP_CLK_IPP_DO_CLKO1>;
assigned-clocks = <&clk IMX8MP_CLK_IPP_DO_CLKO1>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-rates = <22576000>;
/* NOTE(review): "®_audio" looks like mojibake for "&reg_audio" — fix encoding */
DCVDD-supply = <®_audio>;
DBVDD-supply = <®_audio>;
AVDD-supply = <®_audio>;
CPVDD-supply = <®_audio>;
MICVDD-supply = <®_audio>;
PLLVDD-supply = <®_audio>;
SPKVDD1-supply = <®_audio>;
SPKVDD2-supply = <®_audio>;
gpio-cfg = <
0x0000 /* 0:Default */
0x0000 /* 1:Default */
0x0000 /* 2:FN_DMICCLK */
0x0000 /* 3:Default */
0x0000 /* 4:FN_DMICCDAT */
0x0000 /* 5:Default */
>;
#sound-dai-cells = <0>;
};
pca6416: gpio@20 {
compatible = "nxp,pcal6416";
reg = <0x20>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcal6414>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&gpio4>;
interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <2>;
};
pca6416_1: gpio@21 {
compatible = "nxp,pcal6416";
reg = <0x21>;
gpio-controller;
#gpio-cells = <2>;
interrupt-parent = <&gpio4>;
interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
interrupt-controller;
#interrupt-cells = <2>;
/* Keep the USB hub enable line driven low at boot. */
usb-hub-hog {
gpio-hog;
gpios = <7 0>;
output-low;
line-name = "USB Hub Enable";
};
};
/* Type-C CC controller: SS lines in on port@0, role-switch out on port@1. */
usb-typec@47 {
compatible = "ti,hd3ss3220";
reg = <0x47>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hd3ss3220>;
interrupt-parent = <&gpio4>;
interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
hd3ss3220_in_ep: endpoint {
remote-endpoint = <&ss_ep>;
};
};
port@1 {
reg = <1>;
hd3ss3220_out_ep: endpoint {
remote-endpoint = <&usb3_role_switch>;
};
};
};
};
};
/* Display controllers: lcdif1 drives MIPI-DSI, lcdif3 the HDMI path. */
&lcdif1 {
status = "okay";
};
&lcdif3 {
status = "okay";
};
/* PDM microphone input clocked from AUDIO_PLL1. */
&micfil {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pdm>;
assigned-clocks = <&clk IMX8MP_CLK_PDM>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
assigned-clock-rates = <49152000>;
status = "okay";
};
/* DSI output wired to the ADV7535 bridge input. */
&mipi_dsi {
samsung,esc-clock-frequency = <10000000>;
status = "okay";
ports {
port@1 {
reg = <1>;
dsi_out: endpoint {
remote-endpoint = <&adv7535_in>;
};
};
};
};
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie>;
reset-gpio = <&gpio4 21 GPIO_ACTIVE_LOW>;
status = "okay";
};
/* PCIe reference clock is fed externally from the 9FGV0241 (output 1). */
&pcie_phy {
fsl,clkreq-unsupported;
fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
clocks = <&pcieclk 1>;
clock-names = "ref";
status = "okay";
};
/* SAI3 (WM8962) and SAI5 (ADV7535) both run 12.288 MHz from AUDIO_PLL1
 * and output their MCLK. */
&sai3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai3>;
assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
assigned-clock-rates = <12288000>;
fsl,sai-mclk-direction-output;
status = "okay";
};
&sai5 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai5>;
assigned-clocks = <&clk IMX8MP_CLK_SAI5>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
assigned-clock-rates = <12288000>;
fsl,sai-mclk-direction-output;
status = "okay";
};
&snvs_pwrkey {
status = "okay";
};
/* UART2: console. UART3: full 4-wire port with hardware flow control. */
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
status = "okay";
};
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
assigned-clocks = <&clk IMX8MP_CLK_UART3>;
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_80M>;
uart-has-rtscts;
status = "okay";
};
/* USB controller 0: dual-role port, role switching driven by the
 * HD3SS3220 Type-C controller (port@1 below). */
&usb3_0 {
status = "okay";
};
&usb_dwc3_0 {
dr_mode = "otg";
hnp-disable;
srp-disable;
adp-disable;
usb-role-switch;
status = "okay";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
usb3_hs_ep: endpoint {
remote-endpoint = <&hs_ep>;
};
};
port@1 {
reg = <1>;
usb3_role_switch: endpoint {
remote-endpoint = <&hd3ss3220_out_ep>;
};
};
};
};
&usb3_phy0 {
/* NOTE(review): "®_usb1_host_vbus" looks like mojibake for
 * "&reg_usb1_host_vbus" — fix encoding */
vbus-supply = <®_usb1_host_vbus>;
status = "okay";
};
/* USB controller 1: host-only, behind the on-board hub. */
&usb3_1 {
status = "okay";
};
&usb_dwc3_1 {
dr_mode = "host";
status = "okay";
};
&usb3_phy1 {
status = "okay";
};
/* microSD slot: 4-bit, card detect on GPIO2_12, switchable VMMC supply. */
&usdhc2 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
vmmc-supply = <®_usdhc2_vmmc>;
bus-width = <4>;
status = "okay";
};
/* Pad mux and electrical configuration for every interface used above.
 * The hex value after each pad is the IOMUXC pad-control word (drive
 * strength, pull, hysteresis, SION via bit 30 where 0x40000000 is set). */
&iomuxc {
pinctrl_ecspi2: ecspi2grp {
fsl,pins = <
MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x82
MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x82
MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x82
MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x40000
>;
};
/* RGMII + MDIO + PHY interrupt (GPIO4_02) and PHY reset (GPIO4_18). */
pinctrl_fec: fecgrp {
fsl,pins = <
MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x2
MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO 0x2
MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x90
MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x90
MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x90
MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x90
MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x90
MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x90
MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x16
MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x16
MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x16
MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x16
MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x16
MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x16
MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02 0x140
MX8MP_IOMUXC_SAI1_TXD6__GPIO4_IO18 0x10
>;
};
pinctrl_flexcan1: flexcan1grp {
fsl,pins = <
MX8MP_IOMUXC_SPDIF_RX__CAN1_RX 0x154
MX8MP_IOMUXC_SPDIF_TX__CAN1_TX 0x154
>;
};
pinctrl_hd3ss3220: hd3ss3220grp {
fsl,pins = <
MX8MP_IOMUXC_SAI1_TXD7__GPIO4_IO19 0x140
>;
};
pinctrl_hdmi: hdmigrp {
fsl,pins = <
MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x400001c2
MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x400001c2
MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x40000010
MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_HDMI_CEC 0x40000010
>;
};
pinctrl_i2c2: i2c2grp {
fsl,pins = <
MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2
MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c2
>;
};
pinctrl_i2c4: i2c4grp {
fsl,pins = <
MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x400001c2
MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA 0x400001c2
>;
};
pinctrl_led3: led3grp {
fsl,pins = <
MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x41
>;
};
pinctrl_pcal6414: pcal6414-gpiogrp {
fsl,pins = <
MX8MP_IOMUXC_SAI2_MCLK__GPIO4_IO27 0x10
>;
};
pinctrl_pcie: pciegrp {
fsl,pins = <
MX8MP_IOMUXC_GPIO1_IO05__GPIO1_IO05 0x10 /* PCIe_nDIS */
MX8MP_IOMUXC_SAI2_RXFS__GPIO4_IO21 0x10 /* PCIe_nRST */
>;
};
pinctrl_pdm: pdmgrp {
fsl,pins = <
MX8MP_IOMUXC_SAI5_RXC__AUDIOMIX_PDM_CLK 0xd6
MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_PDM_BIT_STREAM00 0xd6
>;
};
pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
fsl,pins = <
MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x40
>;
};
pinctrl_sai3: sai3grp {
fsl,pins = <
MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI3_TX_SYNC 0xd6
MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_SAI3_TX_BCLK 0xd6
MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_SAI3_RX_DATA00 0xd6
MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SAI3_TX_DATA00 0xd6
MX8MP_IOMUXC_SAI3_MCLK__AUDIOMIX_SAI3_MCLK 0xd6
>;
};
pinctrl_sai5: sai5grp {
fsl,pins = <
MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI5_TX_DATA00 0xd6
MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI5_TX_BCLK 0xd6
MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI5_TX_SYNC 0xd6
>;
};
pinctrl_tpm: tpmgrp {
fsl,pins = <
MX8MP_IOMUXC_SAI1_RXFS__GPIO4_IO00 0x19 /* Reset */
MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29 0x1d6 /* IRQ */
>;
};
pinctrl_typec: typec1grp {
fsl,pins = <
MX8MP_IOMUXC_SAI1_RXC__GPIO4_IO01 0xc4
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x140
MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x140
>;
};
pinctrl_uart3: uart3grp {
fsl,pins = <
MX8MP_IOMUXC_ECSPI1_SCLK__UART3_DCE_RX 0x140
MX8MP_IOMUXC_ECSPI1_MOSI__UART3_DCE_TX 0x140
MX8MP_IOMUXC_ECSPI1_SS0__UART3_DCE_RTS 0x140
MX8MP_IOMUXC_ECSPI1_MISO__UART3_DCE_CTS 0x140
>;
};
/* usdhc2 pad groups: same pads at three speeds, only drive/slew differ. */
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190
MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0
MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0
MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
>;
};
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins = <
MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194
MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4
MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4
MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
>;
};
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins = <
MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196
MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6
MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6
MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
>;
};
pinctrl_usdhc2_gpio: usdhc2gpiogrp {
fsl,pins = <
MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c4
>;
};
pinctrl_wm8962: wm8962grp {
fsl,pins = <
MX8MP_IOMUXC_GPIO1_IO14__CCM_CLKO1 0x59
>;
};
};
|
// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada 37xx SoC xtal clocks
*
* Copyright (C) 2016 Marvell
*
* Gregory CLEMENT <[email protected]>
*
*/
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* North-bridge GPIO1 latch register; bit 9 latches the xtal strap. */
#define NB_GPIO1_LATCH 0x8
#define XTAL_MODE BIT(9) /* set: 40 MHz crystal, clear: 25 MHz */
/*
 * Probe: read the NB_GPIO1_LATCH strap bit through the parent syscon to
 * decide whether the board has a 40 MHz or 25 MHz crystal, then register a
 * fixed-rate clock (named "xtal" unless clock-output-names overrides it)
 * and expose it as this node's clock provider.
 */
static int armada_3700_xtal_clock_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const char *xtal_name = "xtal";
struct device_node *parent;
struct regmap *regmap;
struct clk_hw *xtal_hw;
unsigned int rate;
u32 reg;
int ret;
xtal_hw = devm_kzalloc(&pdev->dev, sizeof(*xtal_hw), GFP_KERNEL);
if (!xtal_hw)
return -ENOMEM;
/* NOTE(review): drvdata keeps pointing at this devm buffer even after
 * xtal_hw is reassigned by clk_hw_register_fixed_rate() below; nothing
 * in this file reads the drvdata back — confirm before relying on it. */
platform_set_drvdata(pdev, xtal_hw);
parent = np->parent;
if (!parent) {
dev_err(&pdev->dev, "no parent\n");
return -ENODEV;
}
regmap = syscon_node_to_regmap(parent);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "cannot get regmap\n");
return PTR_ERR(regmap);
}
/* NOTE(review): "®" below looks like mojibake for "&reg" — fix encoding */
ret = regmap_read(regmap, NB_GPIO1_LATCH, ®);
if (ret) {
dev_err(&pdev->dev, "cannot read from regmap\n");
return ret;
}
if (reg & XTAL_MODE)
rate = 40000000;
else
rate = 25000000;
/* Optional DT override of the clock name; keep "xtal" on failure. */
of_property_read_string_index(np, "clock-output-names", 0, &xtal_name);
xtal_hw = clk_hw_register_fixed_rate(NULL, xtal_name, NULL, 0, rate);
if (IS_ERR(xtal_hw))
return PTR_ERR(xtal_hw);
ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, xtal_hw);
return ret;
}
/* Remove: drop the OF clock provider registered at probe time.
 * NOTE(review): the clk_hw itself is not unregistered here — presumably
 * acceptable for a builtin driver that never unbinds; verify. */
static void armada_3700_xtal_clock_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
}
/* DT match table: one compatible, no driver data needed. */
static const struct of_device_id armada_3700_xtal_clock_of_match[] = {
{ .compatible = "marvell,armada-3700-xtal-clock", },
{ }
};
static struct platform_driver armada_3700_xtal_clock_driver = {
.probe = armada_3700_xtal_clock_probe,
.remove = armada_3700_xtal_clock_remove,
.driver = {
.name = "marvell-armada-3700-xtal-clock",
.of_match_table = armada_3700_xtal_clock_of_match,
},
};
/* Builtin-only registration: no module unload path is provided. */
builtin_platform_driver(armada_3700_xtal_clock_driver);
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* SCSI Media Changer device driver for Linux 2.6
*
* (c) 1996-2003 Gerd Knorr <[email protected]>
*
*/
#define VERSION "0.25" /* driver version string (historical, unused below) */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/compat.h>
#include <linux/chio.h> /* here are all the ioctls */
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#define CH_DT_MAX 16 /* max data-transfer elements configurable via params */
#define CH_TYPES 8 /* 4 standard element types + 4 vendor-specific */
#define CH_MAX_DEVS 128 /* max changer minor devices */
MODULE_DESCRIPTION("device driver for scsi media changer devices");
MODULE_AUTHOR("Gerd Knorr <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER);
/* Module parameters: behavior knobs plus optional manual mapping of
 * data-transfer elements to SCSI id/lun pairs. */
static int init = 1;
module_param(init, int, 0444);
MODULE_PARM_DESC(init, \
"initialize element status on driver load (default: on)");
static int timeout_move = 300;
module_param(timeout_move, int, 0644);
MODULE_PARM_DESC(timeout_move,"timeout for move commands "
"(default: 300 seconds)");
static int timeout_init = 3600;
module_param(timeout_init, int, 0644);
MODULE_PARM_DESC(timeout_init,"timeout for INITIALIZE ELEMENT STATUS "
"(default: 3600 seconds)");
static int verbose = 1;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose,"be verbose (default: on)");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more "
"detailed sense codes on scsi errors (default: off)");
/* dt_id[i] == -1 means "probe element i instead of trusting the option". */
static int dt_id[CH_DT_MAX] = { [ 0 ... (CH_DT_MAX-1) ] = -1 };
static int dt_lun[CH_DT_MAX];
module_param_array(dt_id, int, NULL, 0444);
module_param_array(dt_lun, int, NULL, 0444);
/* tell the driver about vendor-specific slots */
static int vendor_firsts[CH_TYPES-4];
static int vendor_counts[CH_TYPES-4];
module_param_array(vendor_firsts, int, NULL, 0444);
module_param_array(vendor_counts, int, NULL, 0444);
static const char * vendor_labels[CH_TYPES-4] = {
"v0", "v1", "v2", "v3"
};
// module_param_string_array(vendor_labels, NULL, 0444);
/* Logging helpers: DPRINTK gated by 'debug', VPRINTK by 'verbose'; both
 * expect a local variable named 'ch' in scope. */
#define ch_printk(prefix, ch, fmt, a...) \
sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a)
#define DPRINTK(fmt, arg...) \
do { \
if (debug) \
ch_printk(KERN_DEBUG, ch, fmt, ##arg); \
} while (0)
#define VPRINTK(level, fmt, arg...) \
do { \
if (verbose) \
ch_printk(level, ch, fmt, ##arg); \
} while (0)
/* ------------------------------------------------------------------- */
/* One automatic retry is handled by the scsi_failures table instead. */
#define MAX_RETRIES 1
static const struct class ch_sysfs_class = {
.name = "scsi_changer",
};
/* Per-changer state; looked up by minor through ch_index_idr. */
typedef struct {
struct kref ref; /* lifetime; released via ch_destroy() */
struct list_head list;
int minor;
char name[8]; /* "chN" device name for logging */
struct scsi_device *device;
struct scsi_device **dt; /* ptrs to data transfer elements */
u_int firsts[CH_TYPES]; /* first element address per type */
u_int counts[CH_TYPES]; /* number of elements per type */
u_int voltags; /* non-zero: device supports volume tags */
struct mutex lock; /* serializes SCSI command sequences */
} scsi_changer;
static DEFINE_IDR(ch_index_idr);
static DEFINE_SPINLOCK(ch_index_lock); /* protects ch_index_idr lookups */
/* Table mapping (sense key, asc, ascq) triples to errno values; scanned by
 * ch_find_errno(). Terminated by the all-zero entry. */
static const struct {
unsigned char sense;
unsigned char asc;
unsigned char ascq;
int errno;
} ch_err[] = {
/* Just filled in what looks right. Haven't checked any standard paper for
these errno assignments, so they may be wrong... */
{
.sense = ILLEGAL_REQUEST,
.asc = 0x21,
.ascq = 0x01,
.errno = EBADSLT, /* Invalid element address */
},{
.sense = ILLEGAL_REQUEST,
.asc = 0x28,
.ascq = 0x01,
.errno = EBADE, /* Import or export element accessed */
},{
.sense = ILLEGAL_REQUEST,
.asc = 0x3B,
.ascq = 0x0D,
.errno = EXFULL, /* Medium destination element full */
},{
.sense = ILLEGAL_REQUEST,
.asc = 0x3B,
.ascq = 0x0E,
.errno = EBADE, /* Medium source element empty */
},{
.sense = ILLEGAL_REQUEST,
.asc = 0x20,
.ascq = 0x00,
.errno = EBADRQC, /* Invalid command operation code */
},{
/* end of list */
}
};
/* ------------------------------------------------------------------- */
/*
 * ch_find_errno() - translate decoded sense data into a negative errno.
 * @sshdr: decoded sense header from the last command.
 *
 * Scans the ch_err[] table for a matching (sense key, asc, ascq) triple.
 * Returns the table's -errno on a match, -EIO otherwise (including when no
 * valid additional sense information is available).
 */
static int ch_find_errno(struct scsi_sense_hdr *sshdr)
{
	int idx;

	/* Without valid additional sense data there is nothing to map. */
	if (!scsi_sense_valid(sshdr) || sshdr->asc == 0)
		return -EIO;

	for (idx = 0; ch_err[idx].errno != 0; idx++) {
		if (ch_err[idx].sense == sshdr->sense_key &&
		    ch_err[idx].asc == sshdr->asc &&
		    ch_err[idx].ascq == sshdr->ascq)
			return -ch_err[idx].errno;
	}
	return -EIO;
}
/*
 * ch_do_scsi() - issue one SCSI command to the changer.
 * @ch: changer state.
 * @cmd/@cmd_len: CDB and its length.
 * @buffer/@buflength: data buffer for the transfer (may be NULL/0).
 * @op: REQ_OP_DRV_IN or REQ_OP_DRV_OUT, i.e. transfer direction.
 *
 * UNIT ATTENTION conditions are retried up to 3 times via the scsi_failures
 * mechanism. Uses the long timeout_init for INITIALIZE ELEMENT STATUS and
 * timeout_move for everything else. Returns 0 on success, a negative errno
 * mapped from sense data otherwise.
 */
static int
ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
void *buffer, unsigned int buflength, enum req_op op)
{
int errno = 0, timeout, result;
struct scsi_sense_hdr sshdr;
struct scsi_failure failure_defs[] = {
{
.sense = UNIT_ATTENTION,
.asc = SCMD_FAILURE_ASC_ANY,
.ascq = SCMD_FAILURE_ASCQ_ANY,
.allowed = 3,
.result = SAM_STAT_CHECK_CONDITION,
},
{}
};
struct scsi_failures failures = {
.failure_definitions = failure_defs,
};
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
.failures = &failures,
};
timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
? timeout_init : timeout_move;
result = scsi_execute_cmd(ch->device, cmd, op, buffer, buflength,
timeout * HZ, MAX_RETRIES, &exec_args);
if (result < 0)
return result;
if (scsi_sense_valid(&sshdr)) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
}
return errno;
}
/* ------------------------------------------------------------------------ */
/*
 * ch_elem_to_typecode() - classify an element address.
 * @ch: changer state.
 * @elem: absolute element address.
 *
 * Returns the 1-based element type code whose [first, first+count) range
 * contains @elem, or 0 if the address falls in no known range.
 */
static int
ch_elem_to_typecode(scsi_changer *ch, u_int elem)
{
	int type;

	for (type = 0; type < CH_TYPES; type++) {
		u_int first = ch->firsts[type];

		if (elem >= first && elem < first + ch->counts[type])
			return type + 1;
	}
	return 0;
}
/*
 * ch_read_element_status() - fetch one element's 16-byte status descriptor.
 * @ch: changer state.
 * @elem: absolute element address.
 * @data: destination for the 16-byte descriptor.
 *
 * Issues READ ELEMENT STATUS for a single element. On the first failure
 * with volume tags enabled, disables voltags and retries once without them
 * (some devices reject the voltag bit). Returns 0 on success, -EIO if the
 * device answered for a different element, or the command's errno.
 */
static int
ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
{
u_char cmd[12];
u_char *buffer;
int result;
buffer = kmalloc(512, GFP_KERNEL);
if(!buffer)
return -ENOMEM;
retry:
memset(cmd,0,sizeof(cmd));
cmd[0] = READ_ELEMENT_STATUS;
cmd[1] = ((ch->device->lun & 0x7) << 5) |
(ch->voltags ? 0x10 : 0) |
ch_elem_to_typecode(ch,elem);
cmd[2] = (elem >> 8) & 0xff;
cmd[3] = elem & 0xff;
cmd[5] = 1;
cmd[9] = 255;
if (0 == (result = ch_do_scsi(ch, cmd, 12,
buffer, 256, REQ_OP_DRV_IN))) {
/* bytes 16..17 echo the element address; sanity-check it */
if (((buffer[16] << 8) | buffer[17]) != elem) {
DPRINTK("asked for element 0x%02x, got 0x%02x\n",
elem,(buffer[16] << 8) | buffer[17]);
kfree(buffer);
return -EIO;
}
memcpy(data,buffer+16,16);
} else {
if (ch->voltags) {
ch->voltags = 0;
VPRINTK(KERN_INFO, "device has no volume tag support\n");
goto retry;
}
DPRINTK("READ ELEMENT STATUS for element 0x%x failed\n",elem);
}
kfree(buffer);
return result;
}
/*
 * ch_init_elem() - ask the changer to (re)scan its element inventory.
 *
 * Sends INITIALIZE ELEMENT STATUS; this can take a long time on large
 * libraries, hence the separate timeout_init. Returns the command result.
 */
static int
ch_init_elem(scsi_changer *ch)
{
int err;
u_char cmd[6];
VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n");
memset(cmd,0,sizeof(cmd));
cmd[0] = INITIALIZE_ELEMENT_STATUS;
cmd[1] = (ch->device->lun & 0x7) << 5;
err = ch_do_scsi(ch, cmd, 6, NULL, 0, REQ_OP_DRV_IN);
VPRINTK(KERN_INFO, "... finished\n");
return err;
}
/*
 * ch_readconfig() - read the element address assignment page and build the
 * per-type first/count tables, then locate the scsi_device behind each
 * data-transfer element.
 *
 * Uses MODE SENSE page 0x1d; retries once with the DBD bit set if the
 * first attempt fails. Vendor element ranges come from module parameters.
 * Leaves voltags enabled; it is turned off lazily on the first failing
 * voltag command. Returns 0 or -ENOMEM.
 */
static int
ch_readconfig(scsi_changer *ch)
{
u_char cmd[10], data[16];
u_char *buffer;
int result,id,lun,i;
u_int elem;
buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
memset(cmd,0,sizeof(cmd));
cmd[0] = MODE_SENSE;
cmd[1] = (ch->device->lun & 0x7) << 5;
cmd[2] = 0x1d;
cmd[4] = 255;
result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN);
if (0 != result) {
/* retry with DBD set (no block descriptors) */
cmd[1] |= (1<<3);
result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN);
}
if (0 == result) {
/* buffer[3] is the block-descriptor length; page data follows it */
ch->firsts[CHET_MT] =
(buffer[buffer[3]+ 6] << 8) | buffer[buffer[3]+ 7];
ch->counts[CHET_MT] =
(buffer[buffer[3]+ 8] << 8) | buffer[buffer[3]+ 9];
ch->firsts[CHET_ST] =
(buffer[buffer[3]+10] << 8) | buffer[buffer[3]+11];
ch->counts[CHET_ST] =
(buffer[buffer[3]+12] << 8) | buffer[buffer[3]+13];
ch->firsts[CHET_IE] =
(buffer[buffer[3]+14] << 8) | buffer[buffer[3]+15];
ch->counts[CHET_IE] =
(buffer[buffer[3]+16] << 8) | buffer[buffer[3]+17];
ch->firsts[CHET_DT] =
(buffer[buffer[3]+18] << 8) | buffer[buffer[3]+19];
ch->counts[CHET_DT] =
(buffer[buffer[3]+20] << 8) | buffer[buffer[3]+21];
VPRINTK(KERN_INFO, "type #1 (mt): 0x%x+%d [medium transport]\n",
ch->firsts[CHET_MT],
ch->counts[CHET_MT]);
VPRINTK(KERN_INFO, "type #2 (st): 0x%x+%d [storage]\n",
ch->firsts[CHET_ST],
ch->counts[CHET_ST]);
VPRINTK(KERN_INFO, "type #3 (ie): 0x%x+%d [import/export]\n",
ch->firsts[CHET_IE],
ch->counts[CHET_IE]);
VPRINTK(KERN_INFO, "type #4 (dt): 0x%x+%d [data transfer]\n",
ch->firsts[CHET_DT],
ch->counts[CHET_DT]);
} else {
VPRINTK(KERN_INFO, "reading element address assignment page failed!\n");
}
/* vendor specific element types */
for (i = 0; i < 4; i++) {
if (0 == vendor_counts[i])
continue;
if (NULL == vendor_labels[i])
continue;
ch->firsts[CHET_V1+i] = vendor_firsts[i];
ch->counts[CHET_V1+i] = vendor_counts[i];
VPRINTK(KERN_INFO, "type #%d (v%d): 0x%x+%d [%s, vendor specific]\n",
i+5,i+1,vendor_firsts[i],vendor_counts[i],
vendor_labels[i]);
}
/* look up the devices of the data transfer elements */
ch->dt = kcalloc(ch->counts[CHET_DT], sizeof(*ch->dt),
GFP_KERNEL);
if (!ch->dt) {
kfree(buffer);
return -ENOMEM;
}
for (elem = 0; elem < ch->counts[CHET_DT]; elem++) {
id = -1;
lun = 0;
/* module parameters take precedence over device-reported id/lun */
if (elem < CH_DT_MAX && -1 != dt_id[elem]) {
id = dt_id[elem];
lun = dt_lun[elem];
VPRINTK(KERN_INFO, "dt 0x%x: [insmod option] ",
elem+ch->firsts[CHET_DT]);
} else if (0 != ch_read_element_status
(ch,elem+ch->firsts[CHET_DT],data)) {
VPRINTK(KERN_INFO, "dt 0x%x: READ ELEMENT STATUS failed\n",
elem+ch->firsts[CHET_DT]);
} else {
VPRINTK(KERN_INFO, "dt 0x%x: ",elem+ch->firsts[CHET_DT]);
/* descriptor byte 6: 0x80 = other bus, 0x20/0x10 = id/lun valid */
if (data[6] & 0x80) {
VPRINTK(KERN_CONT, "not this SCSI bus\n");
ch->dt[elem] = NULL;
} else if (0 == (data[6] & 0x30)) {
VPRINTK(KERN_CONT, "ID/LUN unknown\n");
ch->dt[elem] = NULL;
} else {
id = ch->device->id;
lun = 0;
if (data[6] & 0x20) id = data[7];
if (data[6] & 0x10) lun = data[6] & 7;
}
}
if (-1 != id) {
VPRINTK(KERN_CONT, "ID %i, LUN %i, ",id,lun);
ch->dt[elem] =
scsi_device_lookup(ch->device->host,
ch->device->channel,
id,lun);
if (!ch->dt[elem]) {
/* should not happen */
VPRINTK(KERN_CONT, "Huh? device not found!\n");
} else {
VPRINTK(KERN_CONT, "name: %8.8s %16.16s %4.4s\n",
ch->dt[elem]->vendor,
ch->dt[elem]->model,
ch->dt[elem]->rev);
}
}
}
ch->voltags = 1;
kfree(buffer);
return 0;
}
/* ------------------------------------------------------------------------ */
/*
 * ch_position() - POSITION TO ELEMENT: move the picker in front of @elem.
 * @trans: transport element to use; 0 selects the first medium transport.
 * @rotate: non-zero to invert the medium holder.
 */
static int
ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
{
	u_char scb[10] = { 0 };

	DPRINTK("position: 0x%x\n", elem);

	if (trans == 0)
		trans = ch->firsts[CHET_MT];

	scb[0] = POSITION_TO_ELEMENT;
	scb[1] = (ch->device->lun & 0x7) << 5;
	scb[2] = (trans >> 8) & 0xff;
	scb[3] = trans & 0xff;
	scb[4] = (elem >> 8) & 0xff;
	scb[5] = elem & 0xff;
	scb[8] = rotate ? 1 : 0;

	return ch_do_scsi(ch, scb, 10, NULL, 0, REQ_OP_DRV_IN);
}
/*
 * ch_move() - MOVE MEDIUM from @src to @dest.
 * @trans: transport element to use; 0 selects the first medium transport.
 * @rotate: non-zero to invert the medium holder.
 */
static int
ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
{
	u_char scb[12] = { 0 };

	DPRINTK("move: 0x%x => 0x%x\n", src, dest);

	if (trans == 0)
		trans = ch->firsts[CHET_MT];

	scb[0] = MOVE_MEDIUM;
	scb[1] = (ch->device->lun & 0x7) << 5;
	scb[2] = (trans >> 8) & 0xff;
	scb[3] = trans & 0xff;
	scb[4] = (src >> 8) & 0xff;
	scb[5] = src & 0xff;
	scb[6] = (dest >> 8) & 0xff;
	scb[7] = dest & 0xff;
	scb[10] = rotate ? 1 : 0;

	return ch_do_scsi(ch, scb, 12, NULL, 0, REQ_OP_DRV_IN);
}
/*
 * ch_exchange() - EXCHANGE MEDIUM: src -> dest1 while dest1's former
 * contents go to dest2.
 * @trans: transport element to use; 0 selects the first medium transport.
 * @rotate1/@rotate2: invert flags for the two movements.
 */
static int
ch_exchange(scsi_changer *ch, u_int trans, u_int src,
u_int dest1, u_int dest2, int rotate1, int rotate2)
{
	u_char scb[12] = { 0 };

	DPRINTK("exchange: 0x%x => 0x%x => 0x%x\n",
		src, dest1, dest2);

	if (trans == 0)
		trans = ch->firsts[CHET_MT];

	scb[0] = EXCHANGE_MEDIUM;
	scb[1] = (ch->device->lun & 0x7) << 5;
	scb[2] = (trans >> 8) & 0xff;
	scb[3] = trans & 0xff;
	scb[4] = (src >> 8) & 0xff;
	scb[5] = src & 0xff;
	scb[6] = (dest1 >> 8) & 0xff;
	scb[7] = dest1 & 0xff;
	scb[8] = (dest2 >> 8) & 0xff;
	scb[9] = dest2 & 0xff;
	scb[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0);

	return ch_do_scsi(ch, scb, 12, NULL, 0, REQ_OP_DRV_IN);
}
/*
 * ch_check_voltag() - sanitize a 32-byte volume tag in place.
 * @tag: buffer of exactly 32 bytes (not NUL-terminated).
 *
 * Replaces non-printable ASCII and the search wildcards '?' and '*' with
 * spaces so the tag is safe to send to the device.
 */
static void
ch_check_voltag(char *tag)
{
	int i;

	for (i = 0; i < 32; i++) {
		char c = tag[i];

		/* blank out non-printable ascii and search wildcards */
		if (c >= 0x7f || c < 0x20 || c == '?' || c == '*')
			tag[i] = ' ';
	}
}
/*
 * ch_set_voltag() - SEND VOLUME TAG: set or clear an element's tag.
 * @elem: absolute element address.
 * @alternate: operate on the alternate tag instead of the primary one.
 * @clear: clear instead of set.
 * @tag: 32-byte tag payload (sanitized before sending).
 *
 * Returns the command result from ch_do_scsi().
 */
static int
ch_set_voltag(scsi_changer *ch, u_int elem,
int alternate, int clear, u_char *tag)
{
u_char cmd[12];
u_char *buffer;
int result;
buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
DPRINTK("%s %s voltag: 0x%x => \"%s\"\n",
clear ? "clear" : "set",
alternate ? "alternate" : "primary",
elem, tag);
memset(cmd,0,sizeof(cmd));
cmd[0] = SEND_VOLUME_TAG;
cmd[1] = ((ch->device->lun & 0x7) << 5) |
ch_elem_to_typecode(ch,elem);
cmd[2] = (elem >> 8) & 0xff;
cmd[3] = elem & 0xff;
/* 0x0a..0x0d select set/clear x primary/alternate */
cmd[5] = clear
? (alternate ? 0x0d : 0x0c)
: (alternate ? 0x0b : 0x0a);
cmd[9] = 255;
memcpy(buffer,tag,32);
ch_check_voltag(buffer);
result = ch_do_scsi(ch, cmd, 12, buffer, 256, REQ_OP_DRV_OUT);
kfree(buffer);
return result;
}
/*
 * ch_gstatus() - copy the status byte of every element of @type to
 * userspace (CHIOGSTATUS backend).
 * @ch: changer state.
 * @type: element type index (caller validates against CH_TYPES).
 * @dest: user buffer receiving one status byte per element.
 *
 * Fix: the loop issued READ ELEMENT STATUS a second time for every element
 * after the status byte had already been copied out; the duplicate call
 * doubled the SCSI traffic, overwrote the data already delivered to the
 * user, and clobbered retval for no benefit. One read per element is
 * sufficient. Returns 0 on success, -EIO on the first failing read.
 */
static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
{
	int retval = 0;
	u_char data[16];
	unsigned int i;

	mutex_lock(&ch->lock);
	for (i = 0; i < ch->counts[type]; i++) {
		/* one READ ELEMENT STATUS per element is all we need */
		if (0 != ch_read_element_status
		    (ch, ch->firsts[type]+i,data)) {
			retval = -EIO;
			break;
		}
		put_user(data[2], dest+i);
		if (data[2] & CESTATUS_EXCEPT)
			VPRINTK(KERN_INFO, "element 0x%x: asc=0x%x, ascq=0x%x\n",
				ch->firsts[type]+i,
				(int)data[4],(int)data[5]);
	}
	mutex_unlock(&ch->lock);

	return retval;
}
/* ------------------------------------------------------------------------ */
/* Final kref release: free the changer state. The scsi_device reference
 * itself is managed by the open/release paths, not here. */
static void ch_destroy(struct kref *ref)
{
scsi_changer *ch = container_of(ref, scsi_changer, ref);
ch->device = NULL;
kfree(ch->dt);
kfree(ch);
}
/* Character-device release: drop the scsi_device and kref references taken
 * in ch_open(). Always succeeds. */
static int
ch_release(struct inode *inode, struct file *file)
{
scsi_changer *ch = file->private_data;
scsi_device_put(ch->device);
file->private_data = NULL;
kref_put(&ch->ref, ch_destroy);
return 0;
}
/*
 * Character-device open: look up the changer by minor under ch_index_lock,
 * take a kref (guarding against a concurrent teardown via
 * kref_get_unless_zero) plus a scsi_device reference, and stash the
 * changer in file->private_data. Returns -ENXIO if the minor is gone or
 * the scsi_device cannot be pinned.
 */
static int
ch_open(struct inode *inode, struct file *file)
{
scsi_changer *ch;
int minor = iminor(inode);
spin_lock(&ch_index_lock);
ch = idr_find(&ch_index_idr, minor);
if (ch == NULL || !kref_get_unless_zero(&ch->ref)) {
spin_unlock(&ch_index_lock);
return -ENXIO;
}
spin_unlock(&ch_index_lock);
if (scsi_device_get(ch->device)) {
kref_put(&ch->ref, ch_destroy);
return -ENXIO;
}
/* Synchronize with ch_probe() */
mutex_lock(&ch->lock);
file->private_data = ch;
mutex_unlock(&ch->lock);
return 0;
}
/*
 * ch_checkrange() - validate a (type, unit) element reference.
 *
 * Returns 0 when @type is a known element type and @unit indexes an
 * existing element of that type, -1 otherwise.
 */
static int
ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
{
	return (type < CH_TYPES && unit < ch->counts[type]) ? 0 : -1;
}
/* 32-bit compat layout of struct changer_element_status: the user pointer
 * is carried as a compat_uptr_t and widened with compat_ptr() in ch_ioctl. */
struct changer_element_status32 {
int ces_type;
compat_uptr_t ces_data;
};
#define CHIOGSTATUS32 _IOW('c', 8, struct changer_element_status32)
static long ch_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
scsi_changer *ch = file->private_data;
int retval;
void __user *argp = (void __user *)arg;
retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
file->f_flags & O_NDELAY);
if (retval)
return retval;
switch (cmd) {
case CHIOGPARAMS:
{
struct changer_params params;
params.cp_curpicker = 0;
params.cp_npickers = ch->counts[CHET_MT];
params.cp_nslots = ch->counts[CHET_ST];
params.cp_nportals = ch->counts[CHET_IE];
params.cp_ndrives = ch->counts[CHET_DT];
if (copy_to_user(argp, ¶ms, sizeof(params)))
return -EFAULT;
return 0;
}
case CHIOGVPARAMS:
{
struct changer_vendor_params vparams;
memset(&vparams,0,sizeof(vparams));
if (ch->counts[CHET_V1]) {
vparams.cvp_n1 = ch->counts[CHET_V1];
strscpy(vparams.cvp_label1, vendor_labels[0],
sizeof(vparams.cvp_label1));
}
if (ch->counts[CHET_V2]) {
vparams.cvp_n2 = ch->counts[CHET_V2];
strscpy(vparams.cvp_label2, vendor_labels[1],
sizeof(vparams.cvp_label2));
}
if (ch->counts[CHET_V3]) {
vparams.cvp_n3 = ch->counts[CHET_V3];
strscpy(vparams.cvp_label3, vendor_labels[2],
sizeof(vparams.cvp_label3));
}
if (ch->counts[CHET_V4]) {
vparams.cvp_n4 = ch->counts[CHET_V4];
strscpy(vparams.cvp_label4, vendor_labels[3],
sizeof(vparams.cvp_label4));
}
if (copy_to_user(argp, &vparams, sizeof(vparams)))
return -EFAULT;
return 0;
}
case CHIOPOSITION:
{
struct changer_position pos;
if (copy_from_user(&pos, argp, sizeof (pos)))
return -EFAULT;
if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) {
DPRINTK("CHIOPOSITION: invalid parameter\n");
return -EBADSLT;
}
mutex_lock(&ch->lock);
retval = ch_position(ch,0,
ch->firsts[pos.cp_type] + pos.cp_unit,
pos.cp_flags & CP_INVERT);
mutex_unlock(&ch->lock);
return retval;
}
case CHIOMOVE:
{
struct changer_move mv;
if (copy_from_user(&mv, argp, sizeof (mv)))
return -EFAULT;
if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) ||
0 != ch_checkrange(ch, mv.cm_totype, mv.cm_tounit )) {
DPRINTK("CHIOMOVE: invalid parameter\n");
return -EBADSLT;
}
mutex_lock(&ch->lock);
retval = ch_move(ch,0,
ch->firsts[mv.cm_fromtype] + mv.cm_fromunit,
ch->firsts[mv.cm_totype] + mv.cm_tounit,
mv.cm_flags & CM_INVERT);
mutex_unlock(&ch->lock);
return retval;
}
case CHIOEXCHANGE:
{
struct changer_exchange mv;
if (copy_from_user(&mv, argp, sizeof (mv)))
return -EFAULT;
if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit ) ||
0 != ch_checkrange(ch, mv.ce_fdsttype, mv.ce_fdstunit) ||
0 != ch_checkrange(ch, mv.ce_sdsttype, mv.ce_sdstunit)) {
DPRINTK("CHIOEXCHANGE: invalid parameter\n");
return -EBADSLT;
}
mutex_lock(&ch->lock);
retval = ch_exchange
(ch,0,
ch->firsts[mv.ce_srctype] + mv.ce_srcunit,
ch->firsts[mv.ce_fdsttype] + mv.ce_fdstunit,
ch->firsts[mv.ce_sdsttype] + mv.ce_sdstunit,
mv.ce_flags & CE_INVERT1, mv.ce_flags & CE_INVERT2);
mutex_unlock(&ch->lock);
return retval;
}
case CHIOGSTATUS:
{
struct changer_element_status ces;
if (copy_from_user(&ces, argp, sizeof (ces)))
return -EFAULT;
if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES)
return -EINVAL;
return ch_gstatus(ch, ces.ces_type, ces.ces_data);
}
#ifdef CONFIG_COMPAT
case CHIOGSTATUS32:
{
struct changer_element_status32 ces32;
if (copy_from_user(&ces32, argp, sizeof(ces32)))
return -EFAULT;
if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
return -EINVAL;
return ch_gstatus(ch, ces32.ces_type,
compat_ptr(ces32.ces_data));
}
#endif
case CHIOGELEM:
{
struct changer_get_element cge;
u_char ch_cmd[12];
u_char *buffer;
unsigned int elem;
int result,i;
if (copy_from_user(&cge, argp, sizeof (cge)))
return -EFAULT;
if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit))
return -EINVAL;
elem = ch->firsts[cge.cge_type] + cge.cge_unit;
buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
mutex_lock(&ch->lock);
voltag_retry:
memset(ch_cmd, 0, sizeof(ch_cmd));
ch_cmd[0] = READ_ELEMENT_STATUS;
ch_cmd[1] = ((ch->device->lun & 0x7) << 5) |
(ch->voltags ? 0x10 : 0) |
ch_elem_to_typecode(ch,elem);
ch_cmd[2] = (elem >> 8) & 0xff;
ch_cmd[3] = elem & 0xff;
ch_cmd[5] = 1;
ch_cmd[9] = 255;
result = ch_do_scsi(ch, ch_cmd, 12, buffer, 256, REQ_OP_DRV_IN);
if (!result) {
cge.cge_status = buffer[18];
cge.cge_flags = 0;
if (buffer[18] & CESTATUS_EXCEPT) {
cge.cge_errno = EIO;
}
if (buffer[25] & 0x80) {
cge.cge_flags |= CGE_SRC;
if (buffer[25] & 0x40)
cge.cge_flags |= CGE_INVERT;
elem = (buffer[26]<<8) | buffer[27];
for (i = 0; i < 4; i++) {
if (elem >= ch->firsts[i] &&
elem < ch->firsts[i] + ch->counts[i]) {
cge.cge_srctype = i;
cge.cge_srcunit = elem-ch->firsts[i];
}
}
}
if ((buffer[22] & 0x30) == 0x30) {
cge.cge_flags |= CGE_IDLUN;
cge.cge_id = buffer[23];
cge.cge_lun = buffer[22] & 7;
}
if (buffer[9] & 0x80) {
cge.cge_flags |= CGE_PVOLTAG;
memcpy(cge.cge_pvoltag,buffer+28,36);
}
if (buffer[9] & 0x40) {
cge.cge_flags |= CGE_AVOLTAG;
memcpy(cge.cge_avoltag,buffer+64,36);
}
} else if (ch->voltags) {
ch->voltags = 0;
VPRINTK(KERN_INFO, "device has no volume tag support\n");
goto voltag_retry;
}
kfree(buffer);
mutex_unlock(&ch->lock);
if (copy_to_user(argp, &cge, sizeof (cge)))
return -EFAULT;
return result;
}
case CHIOINITELEM:
{
mutex_lock(&ch->lock);
retval = ch_init_elem(ch);
mutex_unlock(&ch->lock);
return retval;
}
case CHIOSVOLTAG:
{
struct changer_set_voltag csv;
int elem;
if (copy_from_user(&csv, argp, sizeof(csv)))
return -EFAULT;
if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) {
DPRINTK("CHIOSVOLTAG: invalid parameter\n");
return -EBADSLT;
}
elem = ch->firsts[csv.csv_type] + csv.csv_unit;
mutex_lock(&ch->lock);
retval = ch_set_voltag(ch, elem,
csv.csv_flags & CSV_AVOLTAG,
csv.csv_flags & CSV_CLEARTAG,
csv.csv_voltag);
mutex_unlock(&ch->lock);
return retval;
}
default:
return scsi_ioctl(ch->device, file->f_mode & FMODE_WRITE, cmd,
argp);
}
}
/* ------------------------------------------------------------------------ */
/*
 * ch_probe() - driver-core probe callback for SCSI medium changers.
 *
 * Allocates a scsi_changer instance, reserves a minor number from
 * ch_index_idr, takes a reference on the scsi_device, creates the
 * "sch<minor>" class device and reads the changer configuration.
 * Returns 0 on success or a negative errno; on failure every
 * partially acquired resource is released in reverse order through
 * the goto chain at the bottom.
 */
static int ch_probe(struct device *dev)
{
	struct scsi_device *sd = to_scsi_device(dev);
	struct device *class_dev;
	int ret;
	scsi_changer *ch;
	/* Only bind to medium-changer devices. */
	if (sd->type != TYPE_MEDIUM_CHANGER)
		return -ENODEV;
	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	if (NULL == ch)
		return -ENOMEM;
	/*
	 * Reserve a minor in [0, CH_MAX_DEVS]; idr_preload() lets the
	 * allocation under the spinlock run with GFP_NOWAIT.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&ch_index_lock);
	ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT);
	spin_unlock(&ch_index_lock);
	idr_preload_end();
	if (ret < 0) {
		if (ret == -ENOSPC)
			ret = -ENODEV;	/* no free minor left */
		goto free_ch;
	}
	ch->minor = ret;
	sprintf(ch->name,"ch%d",ch->minor);
	ret = scsi_device_get(sd);
	if (ret) {
		sdev_printk(KERN_WARNING, sd, "ch%d: failed to get device\n",
			    ch->minor);
		goto remove_idr;
	}
	mutex_init(&ch->lock);
	kref_init(&ch->ref);
	ch->device = sd;
	/* "s%s" with name "ch<N>" yields device nodes named "sch<N>". */
	class_dev = device_create(&ch_sysfs_class, dev,
				  MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
				  "s%s", ch->name);
	if (IS_ERR(class_dev)) {
		sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n",
			    ch->minor);
		ret = PTR_ERR(class_dev);
		goto put_device;
	}
	mutex_lock(&ch->lock);
	ret = ch_readconfig(ch);
	if (ret) {
		mutex_unlock(&ch->lock);
		goto destroy_dev;
	}
	/* 'init' is a file-level flag (module parameter, defined above). */
	if (init)
		ch_init_elem(ch);
	mutex_unlock(&ch->lock);
	dev_set_drvdata(dev, ch);
	sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
	return 0;
destroy_dev:
	device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
put_device:
	scsi_device_put(sd);
remove_idr:
	idr_remove(&ch_index_idr, ch->minor);
free_ch:
	kfree(ch);
	return ret;
}
/*
 * ch_remove() - driver-core remove callback; undoes ch_probe().
 *
 * The minor is dropped from the idr under ch_index_lock so a
 * concurrent probe cannot reuse it while teardown is in progress.
 * The scsi_changer itself is freed via the kref once the last
 * reference is gone (ch_destroy).
 */
static int ch_remove(struct device *dev)
{
	scsi_changer *ch = dev_get_drvdata(dev);
	spin_lock(&ch_index_lock);
	idr_remove(&ch_index_idr, ch->minor);
	dev_set_drvdata(dev, NULL);
	spin_unlock(&ch_index_lock);
	device_destroy(&ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
	scsi_device_put(ch->device);
	kref_put(&ch->ref, ch_destroy);	/* may free ch here */
	return 0;
}
/* SCSI upper-level driver glue: binds ch_probe/ch_remove to changer LUNs. */
static struct scsi_driver ch_template = {
	.gendrv     = {
		.name	= "ch",
		.owner	= THIS_MODULE,
		.probe	= ch_probe,
		.remove	= ch_remove,
	},
};
/* Character-device entry points for /dev/sch* nodes. */
static const struct file_operations changer_fops = {
	.owner		= THIS_MODULE,
	.open		= ch_open,
	.release	= ch_release,
	.unlocked_ioctl	= ch_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,	/* ioctl ABI is compat-clean */
	.llseek		= noop_llseek,
};
/*
 * init_ch_module() - module entry point.
 *
 * Registers, in order: the sysfs class, the "ch" character device
 * major, and the SCSI upper-level driver.  On failure each step is
 * unwound in reverse order.  Returns 0 or a negative errno.
 *
 * Fix: the failure printk lacked a log level; error reports must use
 * KERN_ERR so they are not emitted at the default (implicit) level.
 */
static int __init init_ch_module(void)
{
	int rc;

	printk(KERN_INFO "SCSI Media Changer driver v" VERSION " \n");
	rc = class_register(&ch_sysfs_class);
	if (rc)
		return rc;
	rc = register_chrdev(SCSI_CHANGER_MAJOR, "ch", &changer_fops);
	if (rc < 0) {
		printk(KERN_ERR "Unable to get major %d for SCSI-Changer\n",
		       SCSI_CHANGER_MAJOR);
		goto fail1;
	}
	rc = scsi_register_driver(&ch_template.gendrv);
	if (rc < 0)
		goto fail2;
	return 0;

 fail2:
	unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
 fail1:
	class_unregister(&ch_sysfs_class);
	return rc;
}
/*
 * exit_ch_module() - module exit; unregisters in reverse order of
 * init_ch_module() and releases the minor-number idr.
 */
static void __exit exit_ch_module(void)
{
	scsi_unregister_driver(&ch_template.gendrv);
	unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
	class_unregister(&ch_sysfs_class);
	idr_destroy(&ch_index_idr);	/* all minors released by now */
}
/* Hook the entry/exit functions into the module loader. */
module_init(init_ch_module);
module_exit(exit_ch_module);
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2012 QLogic Corporation
*/
/*
 * Driver debug definitions.
 *
 * Enable one of the QL_DEBUG_LEVEL_* defines below to compile in the
 * corresponding DEBUGn() trace statements.
 *
 * Fix: every macro body is wrapped in "do { ... } while (0)" WITHOUT a
 * trailing semicolon, so that a use such as
 *	if (cond) DEBUG4(x); else ...
 * expands to exactly one statement.  The previous definitions carried a
 * trailing ';' which produced an empty extra statement and broke
 * if/else nesting around the macros (DEBUG7 was already correct).
 */
/* #define QL_DEBUG  */		/* DEBUG messages */
/* #define QL_DEBUG_LEVEL_3  */	/* Output function tracing */
/* #define QL_DEBUG_LEVEL_4  */
/* #define QL_DEBUG_LEVEL_5  */
/* #define QL_DEBUG_LEVEL_7  */
/* #define QL_DEBUG_LEVEL_9  */

#define QL_DEBUG_LEVEL_2	/* Always enable error messages */

#if defined(QL_DEBUG)
#define DEBUG(x)	do {x;} while (0)
#else
#define DEBUG(x)	do {} while (0)
#endif

#if defined(QL_DEBUG_LEVEL_2)
#define DEBUG2(x)	do {if (ql4xextended_error_logging == 2) x;} while (0)
#define DEBUG2_3(x)	do {x;} while (0)
#else				/* */
#define DEBUG2(x)	do {} while (0)
#endif				/* */

#if defined(QL_DEBUG_LEVEL_3)
#define DEBUG3(x)	do {if (ql4xextended_error_logging == 3) x;} while (0)
#else				/* */
#define DEBUG3(x)	do {} while (0)
#if !defined(QL_DEBUG_LEVEL_2)
#define DEBUG2_3(x)	do {} while (0)
#endif				/* */
#endif				/* */

#if defined(QL_DEBUG_LEVEL_4)
#define DEBUG4(x)	do {x;} while (0)
#else				/* */
#define DEBUG4(x)	do {} while (0)
#endif				/* */

#if defined(QL_DEBUG_LEVEL_5)
#define DEBUG5(x)	do {x;} while (0)
#else				/* */
#define DEBUG5(x)	do {} while (0)
#endif				/* */

#if defined(QL_DEBUG_LEVEL_7)
#define DEBUG7(x)	do {x; } while (0)
#else				/* */
#define DEBUG7(x)	do {} while (0)
#endif				/* */

#if defined(QL_DEBUG_LEVEL_9)
#define DEBUG9(x)	do {x;} while (0)
#else				/* */
#define DEBUG9(x)	do {} while (0)
#endif				/* */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* adm1026.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (C) 2002, 2003 Philip Pokorny <[email protected]>
* Copyright (C) 2004 Justin Thiessen <[email protected]>
*
* Chip details at:
*
* <https://www.onsemi.com/PowerSolutions/product.do?id=ADM1026>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/mutex.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };

/*
 * GPIO pin configuration module parameters.  Each array lists pin
 * numbers; -1 means "entry unused".  They are applied at probe time
 * (by code elsewhere in this file) to set pin direction and polarity.
 */
static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1 };
static int gpio_output[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1 };
static int gpio_inverted[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1 };
static int gpio_normal[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1 };
static int gpio_fan[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };

module_param_array(gpio_input, int, NULL, 0);
MODULE_PARM_DESC(gpio_input, "List of GPIO pins (0-16) to program as inputs");
module_param_array(gpio_output, int, NULL, 0);
MODULE_PARM_DESC(gpio_output,
		 "List of GPIO pins (0-16) to program as outputs");
module_param_array(gpio_inverted, int, NULL, 0);
MODULE_PARM_DESC(gpio_inverted,
		 "List of GPIO pins (0-16) to program as inverted");
module_param_array(gpio_normal, int, NULL, 0);
MODULE_PARM_DESC(gpio_normal,
		 "List of GPIO pins (0-16) to program as normal/non-inverted");
module_param_array(gpio_fan, int, NULL, 0);
MODULE_PARM_DESC(gpio_fan, "List of GPIO pins (0-7) to program as fan tachs");
/* Many ADM1026 constants specified below */

/* The ADM1026 registers */
#define ADM1026_REG_CONFIG1	0x00
/* CONFIG1 bit masks */
#define CFG1_MONITOR		0x01
#define CFG1_INT_ENABLE		0x02
#define CFG1_INT_CLEAR		0x04
#define CFG1_AIN8_9		0x08
#define CFG1_THERM_HOT		0x10
#define CFG1_DAC_AFC		0x20
#define CFG1_PWM_AFC		0x40	/* automatic fan control via PWM */
#define CFG1_RESET		0x80
#define ADM1026_REG_CONFIG2	0x01
/* CONFIG2 controls FAN0/GPIO0 through FAN7/GPIO7 */
#define ADM1026_REG_CONFIG3	0x07
/* CONFIG3 bit masks */
#define CFG3_GPIO16_ENABLE	0x01
#define CFG3_CI_CLEAR	0x02
#define CFG3_VREF_250	0x04
#define CFG3_GPIO16_DIR	0x40
#define CFG3_GPIO16_POL	0x80
#define ADM1026_REG_E2CONFIG	0x13
/* E2CONFIG (EEPROM control) bit masks */
#define E2CFG_READ	0x01
#define E2CFG_WRITE	0x02
#define E2CFG_ERASE	0x04
#define E2CFG_ROM	0x08
#define E2CFG_CLK_EXT	0x80

/*
 * There are 10 general analog inputs and 7 dedicated inputs
 * They are:
 *	0 - 9  =  AIN0 - AIN9
 *	10     =  Vbat
 *	11     =  3.3V Standby
 *	12     =  3.3V Main
 *	13     =  +5V
 *	14     =  Vccp (CPU core voltage)
 *	15     =  +12V
 *	16     =  -12V
 */
/* Value, low-limit and high-limit register per input, indexed as above. */
static u16 ADM1026_REG_IN[] = {
		0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
		0x36, 0x37, 0x27, 0x29, 0x26, 0x2a,
		0x2b, 0x2c, 0x2d, 0x2e, 0x2f
	};
static u16 ADM1026_REG_IN_MIN[] = {
		0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d,
		0x5e, 0x5f, 0x6d, 0x49, 0x6b, 0x4a,
		0x4b, 0x4c, 0x4d, 0x4e, 0x4f
	};
static u16 ADM1026_REG_IN_MAX[] = {
		0x50, 0x51, 0x52, 0x53, 0x54, 0x55,
		0x56, 0x57, 0x6c, 0x41, 0x6a, 0x42,
		0x43, 0x44, 0x45, 0x46, 0x47
	};

/*
 * Temperatures are:
 *    0 - Internal
 *    1 - External 1
 *    2 - External 2
 */
static u16 ADM1026_REG_TEMP[] = { 0x1f, 0x28, 0x29 };
static u16 ADM1026_REG_TEMP_MIN[] = { 0x69, 0x48, 0x49 };
static u16 ADM1026_REG_TEMP_MAX[] = { 0x68, 0x40, 0x41 };
static u16 ADM1026_REG_TEMP_TMIN[] = { 0x10, 0x11, 0x12 };
static u16 ADM1026_REG_TEMP_THERM[] = { 0x0d, 0x0e, 0x0f };
static u16 ADM1026_REG_TEMP_OFFSET[] = { 0x1e, 0x6e, 0x6f };
/* Fan tach value and low-limit registers (nr in 0..7). */
#define ADM1026_REG_FAN(nr) (0x38 + (nr))
#define ADM1026_REG_FAN_MIN(nr) (0x60 + (nr))
/* Fan divisors: 2 bits per fan, packed four to a register. */
#define ADM1026_REG_FAN_DIV_0_3 0x02
#define ADM1026_REG_FAN_DIV_4_7 0x03
#define ADM1026_REG_DAC 0x04
#define ADM1026_REG_PWM 0x05
/* GPIO configuration: 2 bits (dir/polarity) per pin, four pins per reg. */
#define ADM1026_REG_GPIO_CFG_0_3 0x08
#define ADM1026_REG_GPIO_CFG_4_7 0x09
#define ADM1026_REG_GPIO_CFG_8_11 0x0a
#define ADM1026_REG_GPIO_CFG_12_15 0x0b
/* CFG_16 in REG_CFG3 */
#define ADM1026_REG_GPIO_STATUS_0_7 0x24
#define ADM1026_REG_GPIO_STATUS_8_15 0x25
/* STATUS_16 in REG_STATUS4 */
#define ADM1026_REG_GPIO_MASK_0_7 0x1c
#define ADM1026_REG_GPIO_MASK_8_15 0x1d
/* MASK_16 in REG_MASK4 */
#define ADM1026_REG_COMPANY 0x16
#define ADM1026_REG_VERSTEP 0x17
/* These are the recognized values for the above regs */
#define ADM1026_COMPANY_ANALOG_DEV 0x41
#define ADM1026_VERSTEP_GENERIC 0x40
#define ADM1026_VERSTEP_ADM1026 0x44
/* Interrupt mask and status registers, one per status byte. */
#define ADM1026_REG_MASK1 0x18
#define ADM1026_REG_MASK2 0x19
#define ADM1026_REG_MASK3 0x1a
#define ADM1026_REG_MASK4 0x1b
#define ADM1026_REG_STATUS1 0x20
#define ADM1026_REG_STATUS2 0x21
#define ADM1026_REG_STATUS3 0x22
#define ADM1026_REG_STATUS4 0x23
#define ADM1026_FAN_ACTIVATION_TEMP_HYST -6
#define ADM1026_FAN_CONTROL_TEMP_RANGE 20
#define ADM1026_PWM_MAX 255
/*
* Conversions. Rounding and limit checking is only done on the TO_REG
* variants. Note that you should be a bit careful with which arguments
* these macros are called: arguments may be evaluated more than once.
*/
/*
* IN are scaled according to built-in resistors. These are the
* voltages corresponding to 3/4 of full scale (192 or 0xc0)
* NOTE: The -12V input needs an additional factor to account
* for the Vref pullup resistor.
* NEG12_OFFSET = SCALE * Vref / V-192 - Vref
* = 13875 * 2.50 / 1.875 - 2500
* = 16000
*
* The values in this table are based on Table II, page 15 of the
* datasheet.
*/
static int adm1026_scaling[] = { /* .001 Volts */
2250, 2250, 2250, 2250, 2250, 2250,
1875, 1875, 1875, 1875, 3000, 3330,
3330, 4995, 2250, 12000, 13875
};
#define NEG12_OFFSET 16000
#define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
#define INS_TO_REG(n, val) \
SCALE(clamp_val(val, 0, 255 * adm1026_scaling[n] / 192), \
adm1026_scaling[n], 192)
#define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
/*
* FAN speed is measured using 22.5kHz clock and counts for 2 pulses
* and we assume a 2 pulse-per-rev fan tach signal
* 22500 kHz * 60 (sec/min) * 2 (pulse) / 2 (pulse/rev) == 1350000
*/
#define FAN_TO_REG(val, div) ((val) <= 0 ? 0xff : \
clamp_val(1350000 / ((val) * (div)), \
1, 254))
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 0xff ? 0 : \
1350000 / ((val) * (div)))
#define DIV_FROM_REG(val) (1 << (val))
#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
/* Temperature is reported in 1 degC increments */
#define TEMP_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
1000)
#define TEMP_FROM_REG(val) ((val) * 1000)
#define OFFSET_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
1000)
#define OFFSET_FROM_REG(val) ((val) * 1000)
#define PWM_TO_REG(val) (clamp_val(val, 0, 255))
#define PWM_FROM_REG(val) (val)
#define PWM_MIN_TO_REG(val) ((val) & 0xf0)
#define PWM_MIN_FROM_REG(val) (((val) & 0xf0) + ((val) >> 4))
/*
* Analog output is a voltage, and scaled to millivolts. The datasheet
* indicates that the DAC could be used to drive the fans, but in our
* example board (Arima HDAMA) it isn't connected to the fans at all.
*/
#define DAC_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, 0, 2500) * 255, \
2500)
#define DAC_FROM_REG(val) (((val) * 2500) / 255)
/*
* Chip sampling rates
*
* Some sensors are not updated more frequently than once per second
* so it doesn't make sense to read them more often than that.
* We cache the results and return the saved data if the driver
* is called again before a second has elapsed.
*
* Also, there is significant configuration data for this chip
* So, we keep the config data up to date in the cache
* when it is written and only sample it once every 5 *minutes*
*/
#define ADM1026_DATA_INTERVAL (1 * HZ)
#define ADM1026_CONFIG_INTERVAL (5 * 60 * HZ)
/*
 * We allow for multiple chips in a single system.
 *
 * For each registered ADM1026, we need to keep state information
 * at client->data. The adm1026_data structure is dynamically
 * allocated, when a new client structure is allocated.
 */

/* Cached PWM fan-control state (register values / decoded mode). */
struct pwm_data {
	u8 pwm;			/* current duty cycle register value */
	u8 enable;		/* decoded mode; 2 = automatic fan control */
	u8 auto_pwm_min;	/* minimum duty in automatic mode */
};
/*
 * Per-client driver state.  Raw register bytes are cached here and
 * refreshed by adm1026_update_device(); the "combined" long fields
 * pack several 8-bit status registers (plus GPIO16) into one value.
 */
struct adm1026_data {
	struct i2c_client *client;
	const struct attribute_group *groups[3];

	struct mutex update_lock;
	bool valid;		/* true if following fields are valid */
	unsigned long last_reading;	/* In jiffies */
	unsigned long last_config;	/* In jiffies */

	u8 in[17];		/* Register value */
	u8 in_max[17];		/* Register value */
	u8 in_min[17];		/* Register value */
	s8 temp[3];		/* Register value */
	s8 temp_min[3];		/* Register value */
	s8 temp_max[3];		/* Register value */
	s8 temp_tmin[3];	/* Register value */
	s8 temp_crit[3];	/* Register value */
	s8 temp_offset[3];	/* Register value */
	u8 fan[8];		/* Register value */
	u8 fan_min[8];		/* Register value */
	u8 fan_div[8];		/* Decoded value */
	struct pwm_data pwm1;	/* Pwm control values */
	u8 vrm;			/* VRM version */
	u8 analog_out;		/* Register value (DAC) */
	long alarms;		/* Register encoding, combined */
	long alarm_mask;	/* Register encoding, combined */
	long gpio;		/* Register encoding, combined */
	long gpio_mask;		/* Register encoding, combined */
	u8 gpio_config[17];	/* Decoded value */
	u8 config1;		/* Register value */
	u8 config2;		/* Register value */
	u8 config3;		/* Register value */
};
/*
 * Read one register byte from the chip.
 *
 * Only the "RAM" register window (reg < 0x80) is supported; the EEPROM
 * range (reg >= 0x80) is not implemented and always reads back 0.
 * Note: masking with 0xff also hides SMBus errors — TODO confirm this
 * is intentional (the caller treats every result as a raw byte).
 */
static int adm1026_read_value(struct i2c_client *client, u8 reg)
{
	if (reg >= 0x80)
		return 0;	/* EEPROM: do nothing */

	return i2c_smbus_read_byte_data(client, reg) & 0xff;
}
/*
 * Write one register byte to the chip.
 *
 * Only the "RAM" register window (reg < 0x80) is supported; writes to
 * the EEPROM range (reg >= 0x80) are silently ignored and report 0.
 * Returns the i2c_smbus_write_byte_data() result otherwise.
 */
static int adm1026_write_value(struct i2c_client *client, u8 reg, int value)
{
	if (reg >= 0x80)
		return 0;	/* EEPROM: do nothing */

	return i2c_smbus_write_byte_data(client, reg, value);
}
/*
 * adm1026_update_device() - refresh the cached register state.
 *
 * Fast-changing values (inputs, fans, temps, alarms, GPIO state) are
 * re-read at most once per ADM1026_DATA_INTERVAL; configuration data
 * (limits, divisors, GPIO config) at most once per
 * ADM1026_CONFIG_INTERVAL.  Runs under data->update_lock and returns
 * the (now valid) per-client state.
 */
static struct adm1026_data *adm1026_update_device(struct device *dev)
{
	struct adm1026_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	int i;
	long value, alarms, gpio;
	mutex_lock(&data->update_lock);
	if (!data->valid
	    || time_after(jiffies,
			  data->last_reading + ADM1026_DATA_INTERVAL)) {
		/* Things that change quickly */
		dev_dbg(&client->dev, "Reading sensor values\n");
		for (i = 0; i <= 16; ++i) {
			data->in[i] =
			    adm1026_read_value(client, ADM1026_REG_IN[i]);
		}
		for (i = 0; i <= 7; ++i) {
			data->fan[i] =
			    adm1026_read_value(client, ADM1026_REG_FAN(i));
		}
		for (i = 0; i <= 2; ++i) {
			/*
			 * NOTE: temp[] is s8 and we assume 2's complement
			 * "conversion" in the assignment
			 */
			data->temp[i] =
			    adm1026_read_value(client, ADM1026_REG_TEMP[i]);
		}
		data->pwm1.pwm = adm1026_read_value(client,
			ADM1026_REG_PWM);
		data->analog_out = adm1026_read_value(client,
			ADM1026_REG_DAC);
		/* GPIO16 is MSbit of alarms, move it to gpio */
		alarms = adm1026_read_value(client, ADM1026_REG_STATUS4);
		gpio = alarms & 0x80 ? 0x0100 : 0; /* GPIO16 */
		alarms &= 0x7f;
		/* Pack STATUS4..STATUS1 into one long, STATUS1 lowest. */
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_STATUS3);
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_STATUS2);
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_STATUS1);
		data->alarms = alarms;
		/* Read the GPIO values */
		gpio |= adm1026_read_value(client,
			ADM1026_REG_GPIO_STATUS_8_15);
		gpio <<= 8;
		gpio |= adm1026_read_value(client,
			ADM1026_REG_GPIO_STATUS_0_7);
		data->gpio = gpio;
		data->last_reading = jiffies;
	}	/* last_reading */
	if (!data->valid ||
	    time_after(jiffies, data->last_config + ADM1026_CONFIG_INTERVAL)) {
		/* Things that don't change often */
		dev_dbg(&client->dev, "Reading config values\n");
		for (i = 0; i <= 16; ++i) {
			data->in_min[i] = adm1026_read_value(client,
				ADM1026_REG_IN_MIN[i]);
			data->in_max[i] = adm1026_read_value(client,
				ADM1026_REG_IN_MAX[i]);
		}
		/* 16 bits of fan divisors: 2 bits per fan, fan0 lowest. */
		value = adm1026_read_value(client, ADM1026_REG_FAN_DIV_0_3)
			| (adm1026_read_value(client, ADM1026_REG_FAN_DIV_4_7)
			<< 8);
		for (i = 0; i <= 7; ++i) {
			data->fan_min[i] = adm1026_read_value(client,
				ADM1026_REG_FAN_MIN(i));
			data->fan_div[i] = DIV_FROM_REG(value & 0x03);
			value >>= 2;
		}
		for (i = 0; i <= 2; ++i) {
			/*
			 * NOTE: temp_xxx[] are s8 and we assume 2's
			 * complement "conversion" in the assignment
			 */
			data->temp_min[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_MIN[i]);
			data->temp_max[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_MAX[i]);
			data->temp_tmin[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_TMIN[i]);
			data->temp_crit[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_THERM[i]);
			data->temp_offset[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_OFFSET[i]);
		}
		/* Read the STATUS/alarm masks */
		alarms = adm1026_read_value(client, ADM1026_REG_MASK4);
		gpio = alarms & 0x80 ? 0x0100 : 0; /* GPIO16 */
		alarms = (alarms & 0x7f) << 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_MASK3);
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_MASK2);
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_MASK1);
		data->alarm_mask = alarms;
		/* Read the GPIO values */
		gpio |= adm1026_read_value(client,
			ADM1026_REG_GPIO_MASK_8_15);
		gpio <<= 8;
		gpio |= adm1026_read_value(client, ADM1026_REG_GPIO_MASK_0_7);
		data->gpio_mask = gpio;
		/* Read various values from CONFIG1 */
		data->config1 = adm1026_read_value(client,
			ADM1026_REG_CONFIG1);
		if (data->config1 & CFG1_PWM_AFC) {
			/* Automatic fan control is active. */
			data->pwm1.enable = 2;
			data->pwm1.auto_pwm_min =
				PWM_MIN_FROM_REG(data->pwm1.pwm);
		}
		/* Read the GPIO config */
		data->config2 = adm1026_read_value(client,
			ADM1026_REG_CONFIG2);
		data->config3 = adm1026_read_value(client,
			ADM1026_REG_CONFIG3);
		data->gpio_config[16] = (data->config3 >> 6) & 0x03;
		value = 0;
		/* 2 config bits per GPIO pin, four pins per register. */
		for (i = 0; i <= 15; ++i) {
			if ((i & 0x03) == 0) {
				value = adm1026_read_value(client,
					ADM1026_REG_GPIO_CFG_0_3 + i/4);
			}
			data->gpio_config[i] = value & 0x03;
			value >>= 2;
		}
		data->last_config = jiffies;
	}	/* last_config */
	data->valid = true;
	mutex_unlock(&data->update_lock);
	return data;
}
static ssize_t in_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in[nr]));
}
static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
}
/* Set the low limit of one voltage input; value parsed as mV. */
static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct adm1026_data *data = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	long mv;
	int ret;

	ret = kstrtol(buf, 10, &mv);
	if (ret)
		return ret;

	mutex_lock(&data->update_lock);
	data->in_min[channel] = INS_TO_REG(channel, mv);
	adm1026_write_value(data->client, ADM1026_REG_IN_MIN[channel],
			    data->in_min[channel]);
	mutex_unlock(&data->update_lock);
	return count;
}
static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
}
/* Set the high limit of one voltage input; value parsed as mV. */
static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct adm1026_data *data = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	long mv;
	int ret;

	ret = kstrtol(buf, 10, &mv);
	if (ret)
		return ret;

	mutex_lock(&data->update_lock);
	data->in_max[channel] = INS_TO_REG(channel, mv);
	adm1026_write_value(data->client, ADM1026_REG_IN_MAX[channel],
			    data->in_max[channel]);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
 * sysfs attributes for voltage channels 0-15: inN_input (RO),
 * inN_min (RW), inN_max (RW).  The trailing number is the channel
 * index handed to the in_show()/in_min_*()/in_max_*() callbacks.
 */
static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
static SENSOR_DEVICE_ATTR_RO(in8_input, in, 8);
static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
static SENSOR_DEVICE_ATTR_RO(in9_input, in, 9);
static SENSOR_DEVICE_ATTR_RW(in9_min, in_min, 9);
static SENSOR_DEVICE_ATTR_RW(in9_max, in_max, 9);
static SENSOR_DEVICE_ATTR_RO(in10_input, in, 10);
static SENSOR_DEVICE_ATTR_RW(in10_min, in_min, 10);
static SENSOR_DEVICE_ATTR_RW(in10_max, in_max, 10);
static SENSOR_DEVICE_ATTR_RO(in11_input, in, 11);
static SENSOR_DEVICE_ATTR_RW(in11_min, in_min, 11);
static SENSOR_DEVICE_ATTR_RW(in11_max, in_max, 11);
static SENSOR_DEVICE_ATTR_RO(in12_input, in, 12);
static SENSOR_DEVICE_ATTR_RW(in12_min, in_min, 12);
static SENSOR_DEVICE_ATTR_RW(in12_max, in_max, 12);
static SENSOR_DEVICE_ATTR_RO(in13_input, in, 13);
static SENSOR_DEVICE_ATTR_RW(in13_min, in_min, 13);
static SENSOR_DEVICE_ATTR_RW(in13_max, in_max, 13);
static SENSOR_DEVICE_ATTR_RO(in14_input, in, 14);
static SENSOR_DEVICE_ATTR_RW(in14_min, in_min, 14);
static SENSOR_DEVICE_ATTR_RW(in14_max, in_max, 14);
static SENSOR_DEVICE_ATTR_RO(in15_input, in, 15);
static SENSOR_DEVICE_ATTR_RW(in15_min, in_min, 15);
static SENSOR_DEVICE_ATTR_RW(in15_max, in_max, 15);
/* Channel 16 is -12V: shift by NEG12_OFFSET to report a negative mV. */
static ssize_t in16_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct adm1026_data *data = adm1026_update_device(dev);
	int mv = INS_FROM_REG(16, data->in[16]) - NEG12_OFFSET;

	return sprintf(buf, "%d\n", mv);
}
/* Low limit of the -12V channel, offset-corrected to mV. */
static ssize_t in16_min_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct adm1026_data *data = adm1026_update_device(dev);
	int mv = INS_FROM_REG(16, data->in_min[16]) - NEG12_OFFSET;

	return sprintf(buf, "%d\n", mv);
}
/* Set the -12V channel low limit; input is mV, shifted into chip scale. */
static ssize_t in16_min_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t count)
{
	struct adm1026_data *data = dev_get_drvdata(dev);
	long mv;
	int ret;

	ret = kstrtol(buf, 10, &mv);
	if (ret)
		return ret;

	mutex_lock(&data->update_lock);
	/* Clamp before adding the offset so the sum cannot overflow. */
	mv = clamp_val(mv, INT_MIN, INT_MAX - NEG12_OFFSET) + NEG12_OFFSET;
	data->in_min[16] = INS_TO_REG(16, mv);
	adm1026_write_value(data->client, ADM1026_REG_IN_MIN[16],
			    data->in_min[16]);
	mutex_unlock(&data->update_lock);
	return count;
}
/* High limit of the -12V channel, offset-corrected to mV. */
static ssize_t in16_max_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct adm1026_data *data = adm1026_update_device(dev);
	int mv = INS_FROM_REG(16, data->in_max[16]) - NEG12_OFFSET;

	return sprintf(buf, "%d\n", mv);
}
/* Set the -12V channel high limit; input is mV, shifted into chip scale. */
static ssize_t in16_max_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t count)
{
	struct adm1026_data *data = dev_get_drvdata(dev);
	long mv;
	int ret;

	ret = kstrtol(buf, 10, &mv);
	if (ret)
		return ret;

	mutex_lock(&data->update_lock);
	/* Clamp before adding the offset so the sum cannot overflow. */
	mv = clamp_val(mv, INT_MIN, INT_MAX - NEG12_OFFSET) + NEG12_OFFSET;
	data->in_max[16] = INS_TO_REG(16, mv);
	adm1026_write_value(data->client, ADM1026_REG_IN_MAX[16],
			    data->in_max[16]);
	mutex_unlock(&data->update_lock);
	return count;
}
/* -12V channel attributes use the dedicated in16_* callbacks. */
static SENSOR_DEVICE_ATTR_RO(in16_input, in16, 16);
static SENSOR_DEVICE_ATTR_RW(in16_min, in16_min, 16);
static SENSOR_DEVICE_ATTR_RW(in16_max, in16_max, 16);
/* Now add fan read/write functions */
static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
data->fan_div[nr]));
}
static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
data->fan_div[nr]));
}
/* Set one fan's low-speed alarm limit; value parsed as RPM. */
static ssize_t fan_min_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct adm1026_data *data = dev_get_drvdata(dev);
	int fan = to_sensor_dev_attr(attr)->index;
	long rpm;
	int ret;

	ret = kstrtol(buf, 10, &rpm);
	if (ret)
		return ret;

	mutex_lock(&data->update_lock);
	data->fan_min[fan] = FAN_TO_REG(rpm, data->fan_div[fan]);
	adm1026_write_value(data->client, ADM1026_REG_FAN_MIN(fan),
			    data->fan_min[fan]);
	mutex_unlock(&data->update_lock);
	return count;
}
/* fanN_input (RO) and fanN_min (RW); index = fan number - 1. */
static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
static SENSOR_DEVICE_ATTR_RO(fan5_input, fan, 4);
static SENSOR_DEVICE_ATTR_RW(fan5_min, fan_min, 4);
static SENSOR_DEVICE_ATTR_RO(fan6_input, fan, 5);
static SENSOR_DEVICE_ATTR_RW(fan6_min, fan_min, 5);
static SENSOR_DEVICE_ATTR_RO(fan7_input, fan, 6);
static SENSOR_DEVICE_ATTR_RW(fan7_min, fan_min, 6);
static SENSOR_DEVICE_ATTR_RO(fan8_input, fan, 7);
static SENSOR_DEVICE_ATTR_RW(fan8_min, fan_min, 7);
/* Adjust fan_min to account for new fan divisor */
static void fixup_fan_min(struct device *dev, int fan, int old_div)
{
	struct adm1026_data *data = dev_get_drvdata(dev);
	int div_now = data->fan_div[fan];
	int scaled;

	/* 0 and 0xff are special. Don't adjust them */
	if (data->fan_min[fan] == 0 || data->fan_min[fan] == 0xff)
		return;

	/* Rescale the stored limit so the RPM threshold stays the same. */
	scaled = clamp_val(data->fan_min[fan] * old_div / div_now, 1, 254);
	data->fan_min[fan] = scaled;
	adm1026_write_value(data->client, ADM1026_REG_FAN_MIN(fan), scaled);
}
/* Now add fan_div read/write functions */
static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", data->fan_div[nr]);
}
/*
 * fan_div_store() - set the tach divisor for one fan.
 *
 * The chip packs four 2-bit divisor fields per register, so the whole
 * register covering this fan's bank (0-3 or 4-7) is rewritten from the
 * cached decoded values.  If the divisor actually changed, the fan's
 * min limit register is rescaled so the RPM threshold is preserved.
 */
static ssize_t fan_div_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct adm1026_data *data = dev_get_drvdata(dev);
	struct i2c_client *client = data->client;
	long val;
	int orig_div, new_div;
	int err;
	err = kstrtol(buf, 10, &val);
	if (err)
		return err;
	new_div = DIV_TO_REG(val);	/* round requested divisor down to 1/2/4/8 */
	mutex_lock(&data->update_lock);
	orig_div = data->fan_div[nr];
	data->fan_div[nr] = DIV_FROM_REG(new_div);
	if (nr < 4) { /* 0 <= nr < 4 */
		adm1026_write_value(client, ADM1026_REG_FAN_DIV_0_3,
				    (DIV_TO_REG(data->fan_div[0]) << 0) |
				    (DIV_TO_REG(data->fan_div[1]) << 2) |
				    (DIV_TO_REG(data->fan_div[2]) << 4) |
				    (DIV_TO_REG(data->fan_div[3]) << 6));
	} else { /* 3 < nr < 8 */
		adm1026_write_value(client, ADM1026_REG_FAN_DIV_4_7,
				    (DIV_TO_REG(data->fan_div[4]) << 0) |
				    (DIV_TO_REG(data->fan_div[5]) << 2) |
				    (DIV_TO_REG(data->fan_div[6]) << 4) |
				    (DIV_TO_REG(data->fan_div[7]) << 6));
	}
	if (data->fan_div[nr] != orig_div)
		fixup_fan_min(dev, nr, orig_div);
	mutex_unlock(&data->update_lock);
	return count;
}
/* fanN_div (RW); index = fan number - 1. */
static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
static SENSOR_DEVICE_ATTR_RW(fan4_div, fan_div, 3);
static SENSOR_DEVICE_ATTR_RW(fan5_div, fan_div, 4);
static SENSOR_DEVICE_ATTR_RW(fan6_div, fan_div, 5);
static SENSOR_DEVICE_ATTR_RW(fan7_div, fan_div, 6);
static SENSOR_DEVICE_ATTR_RW(fan8_div, fan_div, 7);
/* Temps */
static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
static ssize_t temp_min_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
/* Set one temperature channel's low limit; value parsed as mdegC. */
static ssize_t temp_min_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t count)
{
	struct adm1026_data *data = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	long mdeg;
	int ret;

	ret = kstrtol(buf, 10, &mdeg);
	if (ret)
		return ret;

	mutex_lock(&data->update_lock);
	data->temp_min[channel] = TEMP_TO_REG(mdeg);
	adm1026_write_value(data->client, ADM1026_REG_TEMP_MIN[channel],
			    data->temp_min[channel]);
	mutex_unlock(&data->update_lock);
	return count;
}
/* Show the high temperature limit of channel <index> in millidegrees C. */
static ssize_t temp_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
/*
 * Store a new high temperature limit for channel <index>
 * (millidegrees C in, converted to register format).
 */
static ssize_t temp_max_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
/* update_lock keeps the cache and the register write coherent */
mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
adm1026_write_value(client, ADM1026_REG_TEMP_MAX[nr],
data->temp_max[nr]);
mutex_unlock(&data->update_lock);
return count;
}
/* temp1-3: input (RO) plus min/max limits (RW); index is channel 0-2 */
static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
/* Show the temperature offset of channel <index> in millidegrees C. */
static ssize_t temp_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}
/* Store a new temperature offset for channel <index> (millidegrees C). */
static ssize_t temp_offset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
/* update_lock keeps the cache and the register write coherent */
mutex_lock(&data->update_lock);
data->temp_offset[nr] = TEMP_TO_REG(val);
adm1026_write_value(client, ADM1026_REG_TEMP_OFFSET[nr],
data->temp_offset[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
/*
 * Auto-fan trip point 1 hysteresis: tmin plus the chip's fixed
 * fan-activation hysteresis, reported in millidegrees C.
 */
static ssize_t temp_auto_point1_temp_hyst_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(
ADM1026_FAN_ACTIVATION_TEMP_HYST + data->temp_tmin[nr]));
}
/*
 * Auto-fan trip point 2: tmin plus the chip's fixed control range
 * (the temperature at which the fan reaches full speed).
 */
static ssize_t temp_auto_point2_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr] +
ADM1026_FAN_CONTROL_TEMP_RANGE));
}
/* Auto-fan trip point 1: the cached tmin value, in millidegrees C. */
static ssize_t temp_auto_point1_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr]));
}
/*
 * Store tmin for channel <index> (millidegrees C). The other auto points
 * are derived from tmin and fixed chip constants, so only this one is RW.
 */
static ssize_t temp_auto_point1_temp_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
/* update_lock keeps the cache and the register write coherent */
mutex_lock(&data->update_lock);
data->temp_tmin[nr] = TEMP_TO_REG(val);
adm1026_write_value(client, ADM1026_REG_TEMP_TMIN[nr],
data->temp_tmin[nr]);
mutex_unlock(&data->update_lock);
return count;
}
/* auto_point1 temp is RW; the hyst and point2 values are derived, so RO */
static SENSOR_DEVICE_ATTR_RW(temp1_auto_point1_temp, temp_auto_point1_temp, 0);
static SENSOR_DEVICE_ATTR_RO(temp1_auto_point1_temp_hyst,
temp_auto_point1_temp_hyst, 0);
static SENSOR_DEVICE_ATTR_RO(temp1_auto_point2_temp, temp_auto_point2_temp, 0);
static SENSOR_DEVICE_ATTR_RW(temp2_auto_point1_temp, temp_auto_point1_temp, 1);
static SENSOR_DEVICE_ATTR_RO(temp2_auto_point1_temp_hyst,
temp_auto_point1_temp_hyst, 1);
static SENSOR_DEVICE_ATTR_RO(temp2_auto_point2_temp, temp_auto_point2_temp, 1);
static SENSOR_DEVICE_ATTR_RW(temp3_auto_point1_temp, temp_auto_point1_temp, 2);
static SENSOR_DEVICE_ATTR_RO(temp3_auto_point1_temp_hyst,
temp_auto_point1_temp_hyst, 2);
static SENSOR_DEVICE_ATTR_RO(temp3_auto_point2_temp, temp_auto_point2_temp, 2);
/*
 * Show the THERM/critical automatic shutdown enable bit (0 or 1).
 * CFG1_THERM_HOT lives at bit 4 of CONFIG1 (see the <<4 in the store path).
 */
static ssize_t show_temp_crit_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", (data->config1 & CFG1_THERM_HOT) >> 4);
}
/* Store the THERM enable bit; only 0 and 1 are accepted. */
static ssize_t set_temp_crit_enable(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val > 1)
return -EINVAL;
/* update_lock protects the read-modify-write of the cached config1 */
mutex_lock(&data->update_lock);
data->config1 = (data->config1 & ~CFG1_THERM_HOT) | (val << 4);
adm1026_write_value(client, ADM1026_REG_CONFIG1, data->config1);
mutex_unlock(&data->update_lock);
return count;
}
/* One CONFIG1 bit controls all three channels, hence the shared callbacks */
static DEVICE_ATTR(temp1_crit_enable, 0644, show_temp_crit_enable,
set_temp_crit_enable);
static DEVICE_ATTR(temp2_crit_enable, 0644, show_temp_crit_enable,
set_temp_crit_enable);
static DEVICE_ATTR(temp3_crit_enable, 0644, show_temp_crit_enable,
set_temp_crit_enable);
/* Show the critical (THERM) temperature limit of channel <index>. */
static ssize_t temp_crit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr]));
}
/* Store a new critical (THERM) limit for channel <index> (millideg C). */
static ssize_t temp_crit_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
/* update_lock keeps the cache and the register write coherent */
mutex_lock(&data->update_lock);
data->temp_crit[nr] = TEMP_TO_REG(val);
adm1026_write_value(client, ADM1026_REG_TEMP_THERM[nr],
data->temp_crit[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp_crit, 2);
/* Show the DAC (analog output) value, converted from register format. */
static ssize_t analog_out_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", DAC_FROM_REG(data->analog_out));
}
/* Store a new DAC value; DAC_TO_REG() converts/clamps to register format. */
static ssize_t analog_out_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
/* update_lock keeps the cache and the register write coherent */
mutex_lock(&data->update_lock);
data->analog_out = DAC_TO_REG(val);
adm1026_write_value(client, ADM1026_REG_DAC, data->analog_out);
mutex_unlock(&data->update_lock);
return count;
}
static DEVICE_ATTR_RW(analog_out);
/*
 * Report the CPU VID decoded from GPIO pins 11-15 (5 bits) using the
 * VRM version stored in data->vrm.
 */
static ssize_t cpu0_vid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
int vid = (data->gpio >> 11) & 0x1f;
/* NOTE(review): message says "Setting" but this path only reads GPIO */
dev_dbg(dev, "Setting VID from GPIO11-15.\n");
return sprintf(buf, "%d\n", vid_from_reg(vid, data->vrm));
}
static DEVICE_ATTR_RO(cpu0_vid);
/* Show the VRM version used for VID decoding (driver state, not chip). */
static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
/*
 * Store a new VRM version (0-255). Purely driver-side state used by
 * cpu0_vid_show(); no chip register is written, so no locking is needed.
 */
static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val > 255)
return -EINVAL;
data->vrm = val;
return count;
}
static DEVICE_ATTR_RW(vrm);
/* Show the raw alarm word (all status bits packed into one long). */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%ld\n", data->alarms);
}
static DEVICE_ATTR_RO(alarms);
/* Show a single alarm bit; <index> is the bit number within data->alarms. */
static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%ld\n", (data->alarms >> bitnr) & 1);
}
/*
 * Alarm bit map. Some inputs share status bits with temperature channels
 * (e.g. temp3/in9 both use bit 1) because the pins are multiplexed on the
 * chip -- presumably per the ADM1026 datasheet; bit order below is the
 * authoritative mapping used by this driver.
 */
static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 0);
static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 1);
static SENSOR_DEVICE_ATTR_RO(in9_alarm, alarm, 1);
static SENSOR_DEVICE_ATTR_RO(in11_alarm, alarm, 2);
static SENSOR_DEVICE_ATTR_RO(in12_alarm, alarm, 3);
static SENSOR_DEVICE_ATTR_RO(in13_alarm, alarm, 4);
static SENSOR_DEVICE_ATTR_RO(in14_alarm, alarm, 5);
static SENSOR_DEVICE_ATTR_RO(in15_alarm, alarm, 6);
static SENSOR_DEVICE_ATTR_RO(in16_alarm, alarm, 7);
static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 8);
static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 9);
static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 10);
static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 11);
static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 12);
static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 13);
static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 14);
static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 15);
static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 16);
static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 17);
static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 18);
static SENSOR_DEVICE_ATTR_RO(fan4_alarm, alarm, 19);
static SENSOR_DEVICE_ATTR_RO(fan5_alarm, alarm, 20);
static SENSOR_DEVICE_ATTR_RO(fan6_alarm, alarm, 21);
static SENSOR_DEVICE_ATTR_RO(fan7_alarm, alarm, 22);
static SENSOR_DEVICE_ATTR_RO(fan8_alarm, alarm, 23);
static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 24);
static SENSOR_DEVICE_ATTR_RO(in10_alarm, alarm, 25);
static SENSOR_DEVICE_ATTR_RO(in8_alarm, alarm, 26);
/* Show the 31-bit alarm interrupt mask. */
static ssize_t alarm_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%ld\n", data->alarm_mask);
}
/*
 * Store a new alarm mask. The top bit of MASK4 doubles as the GPIO16
 * mask bit, so it is re-synthesized from gpio_mask before the four
 * 8-bit mask registers are written low-to-high.
 */
static ssize_t alarm_mask_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
unsigned long mask;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->alarm_mask = val & 0x7fffffff;
/* preserve the GPIO16 mask bit owned by gpio_mask in bit 31 */
mask = data->alarm_mask
| (data->gpio_mask & 0x10000 ? 0x80000000 : 0);
adm1026_write_value(client, ADM1026_REG_MASK1,
mask & 0xff);
mask >>= 8;
adm1026_write_value(client, ADM1026_REG_MASK2,
mask & 0xff);
mask >>= 8;
adm1026_write_value(client, ADM1026_REG_MASK3,
mask & 0xff);
mask >>= 8;
adm1026_write_value(client, ADM1026_REG_MASK4,
mask & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
static DEVICE_ATTR_RW(alarm_mask);
/* Show the 17-bit GPIO pin state word. */
static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%ld\n", data->gpio);
}
/*
 * Store new GPIO output levels. GPIO0-15 map onto two status registers;
 * GPIO16 shares STATUS4 with alarm bits 24-30, so those alarm bits are
 * merged back in when STATUS4 is written.
 */
static ssize_t gpio_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long gpio;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->gpio = val & 0x1ffff;
gpio = data->gpio;
adm1026_write_value(client, ADM1026_REG_GPIO_STATUS_0_7, gpio & 0xff);
gpio >>= 8;
adm1026_write_value(client, ADM1026_REG_GPIO_STATUS_8_15, gpio & 0xff);
/* bit 7 = GPIO16; bits 0-6 = cached alarms 24-30 */
gpio = ((gpio >> 1) & 0x80) | (data->alarms >> 24 & 0x7f);
adm1026_write_value(client, ADM1026_REG_STATUS4, gpio & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
static DEVICE_ATTR_RW(gpio);
/* Show the 17-bit GPIO interrupt mask. */
static ssize_t gpio_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%ld\n", data->gpio_mask);
}
/*
 * Store a new GPIO mask. GPIO16's mask bit shares MASK1 with alarm mask
 * bits 24-30, so those are merged back in for the final register write.
 */
static ssize_t gpio_mask_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
long mask;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->gpio_mask = val & 0x1ffff;
mask = data->gpio_mask;
adm1026_write_value(client, ADM1026_REG_GPIO_MASK_0_7, mask & 0xff);
mask >>= 8;
adm1026_write_value(client, ADM1026_REG_GPIO_MASK_8_15, mask & 0xff);
/* bit 7 = GPIO16 mask; bits 0-6 = cached alarm_mask 24-30 */
mask = ((mask >> 1) & 0x80) | (data->alarm_mask >> 24 & 0x7f);
adm1026_write_value(client, ADM1026_REG_MASK1, mask & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
static DEVICE_ATTR_RW(gpio_mask);
/* Show the current PWM duty cycle (0-255 scale). */
static ssize_t pwm1_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm1.pwm));
}
/*
 * Store a new PWM duty cycle. Only honored in manual mode (enable == 1);
 * in other modes the write is silently accepted but ignored, so sysfs
 * callers still get `count` back.
 */
static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
if (data->pwm1.enable == 1) {
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->pwm1.pwm = PWM_TO_REG(val);
adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm);
mutex_unlock(&data->update_lock);
}
return count;
}
/* Show the minimum PWM duty used by automatic fan control (0-255). */
static ssize_t temp1_auto_point1_pwm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", data->pwm1.auto_pwm_min);
}
/*
 * Store a new automatic-mode minimum PWM (clamped to 0-255). If the
 * chip is already in automatic mode (enable == 2) the new minimum is
 * folded into the PWM register immediately.
 */
static ssize_t temp1_auto_point1_pwm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->pwm1.auto_pwm_min = clamp_val(val, 0, 255);
if (data->pwm1.enable == 2) { /* apply immediately */
data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) |
PWM_MIN_TO_REG(data->pwm1.auto_pwm_min));
adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm);
}
mutex_unlock(&data->update_lock);
return count;
}
/*
 * The upper auto-fan point always drives the fan at full scale, so this
 * attribute reports the fixed chip maximum and is read-only.
 */
static ssize_t temp1_auto_point2_pwm_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	const int pwm_full_scale = ADM1026_PWM_MAX;

	return sprintf(buf, "%d\n", pwm_full_scale);
}
/* Show the fan-control mode: 0 = off, 1 = manual, 2 = automatic. */
static ssize_t pwm1_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", data->pwm1.enable);
}
/*
 * Store a new fan-control mode (0, 1 or 2). Entering automatic mode
 * applies the configured auto_pwm_min; leaving manual mode (or entering
 * mode 0) forces the PWM to full speed as a safe default.
 */
static ssize_t pwm1_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
int old_enable;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val >= 3)
return -EINVAL;
mutex_lock(&data->update_lock);
old_enable = data->pwm1.enable;
data->pwm1.enable = val;
/* CFG1_PWM_AFC selects the chip's automatic fan control */
data->config1 = (data->config1 & ~CFG1_PWM_AFC)
| ((val == 2) ? CFG1_PWM_AFC : 0);
adm1026_write_value(client, ADM1026_REG_CONFIG1, data->config1);
if (val == 2) { /* apply pwm1_auto_pwm_min to pwm1 */
data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) |
PWM_MIN_TO_REG(data->pwm1.auto_pwm_min));
adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm);
} else if (!((old_enable == 1) && (val == 1))) {
/* set pwm to safe value */
data->pwm1.pwm = 255;
adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm);
}
mutex_unlock(&data->update_lock);
return count;
}
/* enable PWM fan control */
/*
 * The ADM1026 has a single PWM engine; pwm2/pwm3 and the temp2/temp3
 * auto-point attributes are aliases driven by the same pwm1 callbacks.
 */
static DEVICE_ATTR_RW(pwm1);
static DEVICE_ATTR(pwm2, 0644, pwm1_show, pwm1_store);
static DEVICE_ATTR(pwm3, 0644, pwm1_show, pwm1_store);
static DEVICE_ATTR_RW(pwm1_enable);
static DEVICE_ATTR(pwm2_enable, 0644, pwm1_enable_show,
pwm1_enable_store);
static DEVICE_ATTR(pwm3_enable, 0644, pwm1_enable_show,
pwm1_enable_store);
static DEVICE_ATTR_RW(temp1_auto_point1_pwm);
static DEVICE_ATTR(temp2_auto_point1_pwm, 0644,
temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
static DEVICE_ATTR(temp3_auto_point1_pwm, 0644,
temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
static DEVICE_ATTR_RO(temp1_auto_point2_pwm);
static DEVICE_ATTR(temp2_auto_point2_pwm, 0444, temp1_auto_point2_pwm_show,
NULL);
static DEVICE_ATTR(temp3_auto_point2_pwm, 0444, temp1_auto_point2_pwm_show,
NULL);
/*
 * Attributes that exist regardless of pin configuration. Attributes for
 * in8/in9 vs temp3 (mutually exclusive pins) live in separate groups
 * selected at probe time.
 */
static struct attribute *adm1026_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in6_max.dev_attr.attr,
&sensor_dev_attr_in6_min.dev_attr.attr,
&sensor_dev_attr_in6_alarm.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
&sensor_dev_attr_in7_max.dev_attr.attr,
&sensor_dev_attr_in7_min.dev_attr.attr,
&sensor_dev_attr_in7_alarm.dev_attr.attr,
&sensor_dev_attr_in10_input.dev_attr.attr,
&sensor_dev_attr_in10_max.dev_attr.attr,
&sensor_dev_attr_in10_min.dev_attr.attr,
&sensor_dev_attr_in10_alarm.dev_attr.attr,
&sensor_dev_attr_in11_input.dev_attr.attr,
&sensor_dev_attr_in11_max.dev_attr.attr,
&sensor_dev_attr_in11_min.dev_attr.attr,
&sensor_dev_attr_in11_alarm.dev_attr.attr,
&sensor_dev_attr_in12_input.dev_attr.attr,
&sensor_dev_attr_in12_max.dev_attr.attr,
&sensor_dev_attr_in12_min.dev_attr.attr,
&sensor_dev_attr_in12_alarm.dev_attr.attr,
&sensor_dev_attr_in13_input.dev_attr.attr,
&sensor_dev_attr_in13_max.dev_attr.attr,
&sensor_dev_attr_in13_min.dev_attr.attr,
&sensor_dev_attr_in13_alarm.dev_attr.attr,
&sensor_dev_attr_in14_input.dev_attr.attr,
&sensor_dev_attr_in14_max.dev_attr.attr,
&sensor_dev_attr_in14_min.dev_attr.attr,
&sensor_dev_attr_in14_alarm.dev_attr.attr,
&sensor_dev_attr_in15_input.dev_attr.attr,
&sensor_dev_attr_in15_max.dev_attr.attr,
&sensor_dev_attr_in15_min.dev_attr.attr,
&sensor_dev_attr_in15_alarm.dev_attr.attr,
&sensor_dev_attr_in16_input.dev_attr.attr,
&sensor_dev_attr_in16_max.dev_attr.attr,
&sensor_dev_attr_in16_min.dev_attr.attr,
&sensor_dev_attr_in16_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan3_div.dev_attr.attr,
&sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
&sensor_dev_attr_fan4_input.dev_attr.attr,
&sensor_dev_attr_fan4_div.dev_attr.attr,
&sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
&sensor_dev_attr_fan5_input.dev_attr.attr,
&sensor_dev_attr_fan5_div.dev_attr.attr,
&sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
&sensor_dev_attr_fan6_input.dev_attr.attr,
&sensor_dev_attr_fan6_div.dev_attr.attr,
&sensor_dev_attr_fan6_min.dev_attr.attr,
&sensor_dev_attr_fan6_alarm.dev_attr.attr,
&sensor_dev_attr_fan7_input.dev_attr.attr,
&sensor_dev_attr_fan7_div.dev_attr.attr,
&sensor_dev_attr_fan7_min.dev_attr.attr,
&sensor_dev_attr_fan7_alarm.dev_attr.attr,
&sensor_dev_attr_fan8_input.dev_attr.attr,
&sensor_dev_attr_fan8_div.dev_attr.attr,
&sensor_dev_attr_fan8_min.dev_attr.attr,
&sensor_dev_attr_fan8_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_offset.dev_attr.attr,
&sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point1_temp_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&dev_attr_temp1_crit_enable.attr,
&dev_attr_temp2_crit_enable.attr,
&dev_attr_cpu0_vid.attr,
&dev_attr_vrm.attr,
&dev_attr_alarms.attr,
&dev_attr_alarm_mask.attr,
&dev_attr_gpio.attr,
&dev_attr_gpio_mask.attr,
&dev_attr_pwm1.attr,
&dev_attr_pwm2.attr,
&dev_attr_pwm3.attr,
&dev_attr_pwm1_enable.attr,
&dev_attr_pwm2_enable.attr,
&dev_attr_pwm3_enable.attr,
&dev_attr_temp1_auto_point1_pwm.attr,
&dev_attr_temp2_auto_point1_pwm.attr,
&dev_attr_temp1_auto_point2_pwm.attr,
&dev_attr_temp2_auto_point2_pwm.attr,
&dev_attr_analog_out.attr,
NULL
};
static const struct attribute_group adm1026_group = {
.attrs = adm1026_attributes,
};
/* Attributes registered only when the pins are configured as temp3 */
static struct attribute *adm1026_attributes_temp3[] = {
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_offset.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point1_temp_hyst.dev_attr.attr,
&sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_temp3_crit.dev_attr.attr,
&dev_attr_temp3_crit_enable.attr,
&dev_attr_temp3_auto_point1_pwm.attr,
&dev_attr_temp3_auto_point2_pwm.attr,
NULL
};
static const struct attribute_group adm1026_group_temp3 = {
.attrs = adm1026_attributes_temp3,
};
/* Attributes registered only when the pins are configured as in8/in9 */
static struct attribute *adm1026_attributes_in8_9[] = {
&sensor_dev_attr_in8_input.dev_attr.attr,
&sensor_dev_attr_in8_max.dev_attr.attr,
&sensor_dev_attr_in8_min.dev_attr.attr,
&sensor_dev_attr_in8_alarm.dev_attr.attr,
&sensor_dev_attr_in9_input.dev_attr.attr,
&sensor_dev_attr_in9_max.dev_attr.attr,
&sensor_dev_attr_in9_min.dev_attr.attr,
&sensor_dev_attr_in9_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group adm1026_group_in8_9 = {
.attrs = adm1026_attributes_in8_9,
};
/*
 * adm1026_detect() - i2c detect callback.
 *
 * Reads the manufacturer-ID and version/stepping registers and decides
 * whether the device at @client looks like an ADM1026.
 *
 * Return: 0 if detection is successful, -ENODEV otherwise.
 *
 * Fix: the first dev_dbg() re-derived the adapter id and address from
 * @client even though the locals `adapter` and `address` already hold
 * exactly those values; use the locals consistently with the second
 * dev_dbg() below.
 */
static int adm1026_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int address = client->addr;
	int company, verstep;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		/* We need to be able to do byte I/O */
		return -ENODEV;
	}

	/* Now, we do the remaining detection. */
	company = adm1026_read_value(client, ADM1026_REG_COMPANY);
	verstep = adm1026_read_value(client, ADM1026_REG_VERSTEP);

	dev_dbg(&adapter->dev,
		"Detecting device at %d,0x%02x with COMPANY: 0x%02x and VERSTEP: 0x%02x\n",
		i2c_adapter_id(adapter), address,
		company, verstep);

	/* Determine the chip type. */
	dev_dbg(&adapter->dev, "Autodetecting device at %d,0x%02x...\n",
		i2c_adapter_id(adapter), address);
	if (company == ADM1026_COMPANY_ANALOG_DEV
	    && verstep == ADM1026_VERSTEP_ADM1026) {
		/* Analog Devices ADM1026 */
	} else if (company == ADM1026_COMPANY_ANALOG_DEV
		&& (verstep & 0xf0) == ADM1026_VERSTEP_GENERIC) {
		dev_err(&adapter->dev,
			"Unrecognized stepping 0x%02x. Defaulting to ADM1026.\n",
			verstep);
	} else if ((verstep & 0xf0) == ADM1026_VERSTEP_GENERIC) {
		dev_err(&adapter->dev,
			"Found version/stepping 0x%02x. Assuming generic ADM1026.\n",
			verstep);
	} else {
		dev_dbg(&adapter->dev, "Autodetection failed\n");
		/* Not an ADM1026... */
		return -ENODEV;
	}
	strscpy(info->type, "adm1026", I2C_NAME_SIZE);
	return 0;
}
/*
 * Log the current pin configuration: GPIO0-7 double as fan tach inputs
 * (selected per-bit by config2), GPIO8-15 are always GPIO, and GPIO16
 * doubles as the THERM pin (selected by config3).
 * gpio_config[i] bit 0 = direction (1 = OUT), bit 1 = polarity (0 = inverted).
 */
static void adm1026_print_gpio(struct i2c_client *client)
{
struct adm1026_data *data = i2c_get_clientdata(client);
int i;
dev_dbg(&client->dev, "GPIO config is:\n");
for (i = 0; i <= 7; ++i) {
if (data->config2 & (1 << i)) {
dev_dbg(&client->dev, "\t%sGP%s%d\n",
data->gpio_config[i] & 0x02 ? "" : "!",
data->gpio_config[i] & 0x01 ? "OUT" : "IN",
i);
} else {
dev_dbg(&client->dev, "\tFAN%d\n", i);
}
}
for (i = 8; i <= 15; ++i) {
dev_dbg(&client->dev, "\t%sGP%s%d\n",
data->gpio_config[i] & 0x02 ? "" : "!",
data->gpio_config[i] & 0x01 ? "OUT" : "IN",
i);
}
if (data->config3 & CFG3_GPIO16_ENABLE) {
dev_dbg(&client->dev, "\t%sGP%s16\n",
data->gpio_config[16] & 0x02 ? "" : "!",
data->gpio_config[16] & 0x01 ? "OUT" : "IN");
} else {
/* GPIO16 is THERM */
dev_dbg(&client->dev, "\tTHERM\n");
}
}
/*
 * Apply the module-parameter GPIO overrides (gpio_input/output/inverted/
 * normal/fan arrays of pin numbers, -1 terminated conceptually) to the
 * cached gpio_config[], then write the packed configuration back to the
 * chip. Precedence: input overrides output, normal overrides inverted,
 * fan overrides both directions for GPIO0-7.
 */
static void adm1026_fixup_gpio(struct i2c_client *client)
{
struct adm1026_data *data = i2c_get_clientdata(client);
int i;
int value;
/* Make the changes requested. */
/*
 * We may need to unlock/stop monitoring or soft-reset the
 * chip before we can make changes. This hasn't been
 * tested much. FIXME
 */
/* Make outputs */
for (i = 0; i <= 16; ++i) {
if (gpio_output[i] >= 0 && gpio_output[i] <= 16)
data->gpio_config[gpio_output[i]] |= 0x01;
/* if GPIO0-7 is output, it isn't a FAN tach */
if (gpio_output[i] >= 0 && gpio_output[i] <= 7)
data->config2 |= 1 << gpio_output[i];
}
/* Input overrides output */
for (i = 0; i <= 16; ++i) {
if (gpio_input[i] >= 0 && gpio_input[i] <= 16)
data->gpio_config[gpio_input[i]] &= ~0x01;
/* if GPIO0-7 is input, it isn't a FAN tach */
if (gpio_input[i] >= 0 && gpio_input[i] <= 7)
data->config2 |= 1 << gpio_input[i];
}
/* Inverted */
for (i = 0; i <= 16; ++i) {
if (gpio_inverted[i] >= 0 && gpio_inverted[i] <= 16)
data->gpio_config[gpio_inverted[i]] &= ~0x02;
}
/* Normal overrides inverted */
for (i = 0; i <= 16; ++i) {
if (gpio_normal[i] >= 0 && gpio_normal[i] <= 16)
data->gpio_config[gpio_normal[i]] |= 0x02;
}
/* Fan overrides input and output */
for (i = 0; i <= 7; ++i) {
if (gpio_fan[i] >= 0 && gpio_fan[i] <= 7)
data->config2 &= ~(1 << gpio_fan[i]);
}
/* Write new configs to registers */
adm1026_write_value(client, ADM1026_REG_CONFIG2, data->config2);
/* GPIO16's 2 config bits live in the top of CONFIG3 */
data->config3 = (data->config3 & 0x3f)
| ((data->gpio_config[16] & 0x03) << 6);
adm1026_write_value(client, ADM1026_REG_CONFIG3, data->config3);
/* Pack GPIO0-15 four pins per register, 2 bits each, high pin first */
for (i = 15, value = 0; i >= 0; --i) {
value <<= 2;
value |= data->gpio_config[i] & 0x03;
if ((i & 0x03) == 0) {
adm1026_write_value(client,
ADM1026_REG_GPIO_CFG_0_3 + i/4,
value);
value = 0;
}
}
/* Print the new config */
adm1026_print_gpio(client);
}
/*
 * One-time chip setup at probe: read and report the configuration
 * registers, unpack the GPIO configuration, apply any module-parameter
 * GPIO overrides, enable monitoring, and seed the fan divisor cache.
 * Deliberately leaves limits/offsets/pwm/zones untouched (see below).
 */
static void adm1026_init_client(struct i2c_client *client)
{
int value, i;
struct adm1026_data *data = i2c_get_clientdata(client);
dev_dbg(&client->dev, "Initializing device\n");
/* Read chip config */
data->config1 = adm1026_read_value(client, ADM1026_REG_CONFIG1);
data->config2 = adm1026_read_value(client, ADM1026_REG_CONFIG2);
data->config3 = adm1026_read_value(client, ADM1026_REG_CONFIG3);
/* Inform user of chip config */
dev_dbg(&client->dev, "ADM1026_REG_CONFIG1 is: 0x%02x\n",
data->config1);
if ((data->config1 & CFG1_MONITOR) == 0) {
dev_dbg(&client->dev,
"Monitoring not currently enabled.\n");
}
if (data->config1 & CFG1_INT_ENABLE) {
dev_dbg(&client->dev,
"SMBALERT interrupts are enabled.\n");
}
/* CFG1_AIN8_9 selects in8/in9 vs temp3 on the shared pins */
if (data->config1 & CFG1_AIN8_9) {
dev_dbg(&client->dev,
"in8 and in9 enabled. temp3 disabled.\n");
} else {
dev_dbg(&client->dev,
"temp3 enabled. in8 and in9 disabled.\n");
}
if (data->config1 & CFG1_THERM_HOT) {
dev_dbg(&client->dev,
"Automatic THERM, PWM, and temp limits enabled.\n");
}
if (data->config3 & CFG3_GPIO16_ENABLE) {
dev_dbg(&client->dev,
"GPIO16 enabled. THERM pin disabled.\n");
} else {
dev_dbg(&client->dev,
"THERM pin enabled. GPIO16 disabled.\n");
}
if (data->config3 & CFG3_VREF_250)
dev_dbg(&client->dev, "Vref is 2.50 Volts.\n");
else
dev_dbg(&client->dev, "Vref is 1.82 Volts.\n");
/* Read and pick apart the existing GPIO configuration */
/* 2 bits per pin, 4 pins per config register */
value = 0;
for (i = 0; i <= 15; ++i) {
if ((i & 0x03) == 0) {
value = adm1026_read_value(client,
ADM1026_REG_GPIO_CFG_0_3 + i / 4);
}
data->gpio_config[i] = value & 0x03;
value >>= 2;
}
/* GPIO16's 2 config bits are the top of CONFIG3 */
data->gpio_config[16] = (data->config3 >> 6) & 0x03;
/* ... and then print it */
adm1026_print_gpio(client);
/*
 * If the user asks us to reprogram the GPIO config, then
 * do it now.
 */
if (gpio_input[0] != -1 || gpio_output[0] != -1
|| gpio_inverted[0] != -1 || gpio_normal[0] != -1
|| gpio_fan[0] != -1) {
adm1026_fixup_gpio(client);
}
/*
 * WE INTENTIONALLY make no changes to the limits,
 * offsets, pwms, fans and zones. If they were
 * configured, we don't want to mess with them.
 * If they weren't, the default is 100% PWM, no
 * control and will suffice until 'sensors -s'
 * can be run by the user. We DO set the default
 * value for pwm1.auto_pwm_min to its maximum
 * so that enabling automatic pwm fan control
 * without first setting a value for pwm1.auto_pwm_min
 * will not result in potentially dangerous fan speed decrease.
 */
data->pwm1.auto_pwm_min = 255;
/* Start monitoring */
value = adm1026_read_value(client, ADM1026_REG_CONFIG1);
/* Set MONITOR, clear interrupt acknowledge and s/w reset */
value = (value | CFG1_MONITOR) & (~CFG1_INT_CLEAR & ~CFG1_RESET);
dev_dbg(&client->dev, "Setting CONFIG to: 0x%02x\n", value);
data->config1 = value;
adm1026_write_value(client, ADM1026_REG_CONFIG1, value);
/* initialize fan_div[] to hardware defaults */
/* 2 bits per fan, packed across two registers */
value = adm1026_read_value(client, ADM1026_REG_FAN_DIV_0_3) |
(adm1026_read_value(client, ADM1026_REG_FAN_DIV_4_7) << 8);
for (i = 0; i <= 7; ++i) {
data->fan_div[i] = DIV_FROM_REG(value & 0x03);
value >>= 2;
}
}
/*
 * Probe: allocate per-device state, initialize the chip, pick the
 * pin-dependent attribute group (in8/in9 vs temp3 based on CFG1_AIN8_9
 * read during init), and register the hwmon device. All resources are
 * devm-managed, so there is no remove callback.
 */
static int adm1026_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct adm1026_data *data;
data = devm_kzalloc(dev, sizeof(struct adm1026_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
data->client = client;
mutex_init(&data->update_lock);
/* Set the VRM version */
data->vrm = vid_which_vrm();
/* Initialize the ADM1026 chip */
adm1026_init_client(client);
/* sysfs hooks */
data->groups[0] = &adm1026_group;
if (data->config1 & CFG1_AIN8_9)
data->groups[1] = &adm1026_group_in8_9;
else
data->groups[1] = &adm1026_group_temp3;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id adm1026_id[] = {
{ "adm1026" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1026_id);
/* Driver glue: hwmon-class I2C driver with auto-detection at normal_i2c. */
static struct i2c_driver adm1026_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name = "adm1026",
	},
	.probe = adm1026_probe,
	.id_table = adm1026_id,
	.detect = adm1026_detect,
	.address_list = normal_i2c,
};

module_i2c_driver(adm1026_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Philip Pokorny <[email protected]>, "
	      "Justin Thiessen <[email protected]>");
MODULE_DESCRIPTION("ADM1026 driver");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2023 Google LLC
*/
#include <kunit/test.h>
#include <linux/unaligned.h>
#include <scsi/scsi_proto.h>
/*
 * Layout check for structures declared in <scsi/scsi_proto.h>: overlay
 * each structure on a fixed byte pattern and verify that every bitfield
 * and multi-byte member reads back the expected value.  The "+ 0" in the
 * expectations promotes bitfields to int so the comparison macro sees
 * matching operand types.
 */
static void test_scsi_proto(struct kunit *test)
{
	/* struct scsi_io_group_descriptor overlaid on known bytes. */
	static const union {
		struct scsi_io_group_descriptor desc;
		u8 arr[sizeof(struct scsi_io_group_descriptor)];
	} d = { .arr = { 0x45, 0, 0, 0, 0xb0, 0xe4, 0xe3 } };
	KUNIT_EXPECT_EQ(test, d.desc.io_advice_hints_mode + 0, 1);
	KUNIT_EXPECT_EQ(test, d.desc.st_enble + 0, 1);
	KUNIT_EXPECT_EQ(test, d.desc.cs_enble + 0, 0);
	KUNIT_EXPECT_EQ(test, d.desc.ic_enable + 0, 1);
	KUNIT_EXPECT_EQ(test, d.desc.acdlu + 0, 1);
	KUNIT_EXPECT_EQ(test, d.desc.rlbsr + 0, 3);
	KUNIT_EXPECT_EQ(test, d.desc.lbm_descriptor_type + 0, 0);
	KUNIT_EXPECT_EQ(test, d.desc.params[0] + 0, 0xe4);
	KUNIT_EXPECT_EQ(test, d.desc.params[1] + 0, 0xe3);

	/* struct scsi_stream_status: perm bit, BE16 id, rel_lifetime. */
	static const union {
		struct scsi_stream_status s;
		u8 arr[sizeof(struct scsi_stream_status)];
	} ss = { .arr = { 0x80, 0, 0x12, 0x34, 0x3f } };
	KUNIT_EXPECT_EQ(test, ss.s.perm + 0, 1);
	KUNIT_EXPECT_EQ(test, get_unaligned_be16(&ss.s.stream_identifier),
			0x1234);
	KUNIT_EXPECT_EQ(test, ss.s.rel_lifetime + 0, 0x3f);

	/* struct scsi_stream_status_header: BE32 length, BE16 stream count. */
	static const union {
		struct scsi_stream_status_header h;
		u8 arr[sizeof(struct scsi_stream_status_header)];
	} sh = { .arr = { 1, 2, 3, 4, 0, 0, 5, 6 } };
	KUNIT_EXPECT_EQ(test, get_unaligned_be32(&sh.h.len), 0x1020304);
	KUNIT_EXPECT_EQ(test, get_unaligned_be16(&sh.h.number_of_open_streams),
			0x506);
}
/* All test cases in this suite. */
static struct kunit_case scsi_proto_test_cases[] = {
	KUNIT_CASE(test_scsi_proto),
	{}
};
/* Register the suite with the KUnit framework. */
static struct kunit_suite scsi_proto_test_suite = {
	.name = "scsi_proto",
	.test_cases = scsi_proto_test_cases,
};
kunit_test_suite(scsi_proto_test_suite);

MODULE_DESCRIPTION("<scsi/scsi_proto.h> unit tests");
MODULE_AUTHOR("Bart Van Assche");
MODULE_LICENSE("GPL");
|
/*
* Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef USNIC_IB_VERBS_H_
#define USNIC_IB_VERBS_H_
#include "usnic_ib.h"
/* Device and port queries */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
					      u32 port_num);
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw);
int usnic_ib_query_port(struct ib_device *ibdev, u32 port,
			struct ib_port_attr *props);
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask,
		      struct ib_qp_init_attr *qp_init_attr);
int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		       union ib_gid *gid);

/* Protection domains */
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

/* Queue pairs */
int usnic_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata);
int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);

/* Completion queues */
int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		       struct uverbs_attr_bundle *attrs);
int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);

/* Memory regions */
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int access_flags,
			      struct ib_udata *udata);
int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);

/* User contexts and mmap */
int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int usnic_ib_mmap(struct ib_ucontext *context,
		  struct vm_area_struct *vma);
#endif /* USNIC_IB_VERBS_H_ */
|
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/return_address.c
*
* Copyright (C) 2009 Matt Fleming
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/dwarf.h>
#include <asm/ftrace.h>
#ifdef CONFIG_DWARF_UNWINDER

/*
 * Walk the DWARF unwind tables to find the return address @depth frames
 * up the call stack.  Returns the address, or NULL if the stack could
 * not be unwound that far.
 */
void *return_address(unsigned int depth)
{
	struct dwarf_frame *frame;
	unsigned long ra;
	int i;

	/*
	 * Unwind one frame per iteration; frame N's return address seeds
	 * the lookup of frame N+1, and each consumed frame is freed as
	 * soon as its successor exists.
	 */
	for (i = 0, frame = NULL, ra = 0; i <= depth; i++) {
		struct dwarf_frame *tmp;

		tmp = dwarf_unwind_stack(ra, frame);
		if (!tmp)
			return NULL;

		if (frame)
			dwarf_free_frame(frame);

		frame = tmp;

		/* Stop early if there is no further return address. */
		if (!frame || !frame->return_addr)
			break;

		ra = frame->return_addr;
	}

	/* Failed to unwind the stack to the specified depth. */
	WARN_ON(i != depth + 1);

	if (frame)
		dwarf_free_frame(frame);

	return (void *)ra;
}
#else
/* Without the DWARF unwinder there is no way to walk arbitrary frames. */
void *return_address(unsigned int depth)
{
	return NULL;
}
#endif

EXPORT_SYMBOL_GPL(return_address);
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the LID cover switch of the Surface 3
*
* Copyright (c) 2016 Red Hat Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
MODULE_AUTHOR("Benjamin Tissoires <[email protected]>");
MODULE_DESCRIPTION("Surface 3 platform driver");
MODULE_LICENSE("GPL");

#define ACPI_BUTTON_HID_LID	"PNP0C0D"
#define SPI_CTL_OBJ_NAME	"SPI"
#define SPI_TS_OBJ_NAME		"NTRG"

#define SURFACE3_LID_GUID	"F7CC25EC-D20B-404C-8903-0ED4359C18AE"

MODULE_ALIAS("wmi:" SURFACE3_LID_GUID);

/* Restrict loading to the Surface 3; the WMI GUID alone is not specific. */
static const struct dmi_system_id surface3_dmi_table[] = {
#if defined(CONFIG_X86)
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
		},
	},
#endif
	{ }
};

/* Driver-wide state; this driver is effectively a singleton. */
struct surface3_wmi {
	struct acpi_device *touchscreen_adev;	/* NTRG touchscreen ACPI node */
	struct acpi_device *pnp0c0d_adev;	/* stock ACPI lid (PNP0C0D) device */
	struct acpi_hotplug_context hp;		/* hotplug ctx hooked on touchscreen */
	struct input_dev *input;		/* SW_LID input device */
};

static struct platform_device *s3_wmi_pdev;

static struct surface3_wmi s3_wmi;
static DEFINE_MUTEX(s3_wmi_lock);
/*
 * Evaluate WMI data block @guid/@instance and return its integer value
 * in *@ret.  Serialised by s3_wmi_lock.  Returns 0 on success, -EIO on a
 * WMI failure, or -EINVAL when the returned object is not an integer.
 */
static int s3_wmi_query_block(const char *guid, int instance, int *ret)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj = NULL;
	acpi_status status;
	int error = 0;

	mutex_lock(&s3_wmi_lock);
	status = wmi_query_block(guid, instance, &output);
	if (ACPI_FAILURE(status)) {
		error = -EIO;
		goto out_free_unlock;
	}

	obj = output.pointer;
	if (!obj || obj->type != ACPI_TYPE_INTEGER) {
		if (obj) {
			pr_err("query block returned object type: %d - buffer length:%d\n",
			       obj->type,
			       obj->type == ACPI_TYPE_BUFFER ?
					obj->buffer.length : 0);
		}
		error = -EINVAL;
		goto out_free_unlock;
	}
	*ret = obj->integer.value;

out_free_unlock:
	/* kfree(NULL) is a no-op, so this is safe on the -EIO path too. */
	kfree(obj);
	mutex_unlock(&s3_wmi_lock);
	return error;
}
/* Fetch the integer lid state from instance 0 of the lid WMI block. */
static inline int s3_wmi_query_lid(int *ret)
{
	return s3_wmi_query_block(SURFACE3_LID_GUID, 0, ret);
}
static int s3_wmi_send_lid_state(void)
{
int ret, lid_sw;
ret = s3_wmi_query_lid(&lid_sw);
if (ret)
return ret;
input_report_switch(s3_wmi.input, SW_LID, lid_sw);
input_sync(s3_wmi.input);
return 0;
}
/* ACPI hotplug notify hook: any event re-reads and reports the lid state. */
static int s3_wmi_hp_notify(struct acpi_device *adev, u32 value)
{
	return s3_wmi_send_lid_state();
}
/*
 * acpi_walk_namespace() callback: store the first device whose bus id
 * matches the NTRG touchscreen into *@data (an acpi_device pointer).
 * Always returns AE_OK so the walk visits every node.
 */
static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
					    u32 level,
					    void *data,
					    void **return_value)
{
	struct acpi_device **ts_adev = data;
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);

	if (!adev)
		return AE_OK;

	if (strncmp(acpi_device_bid(adev), SPI_TS_OBJ_NAME,
		    strlen(SPI_TS_OBJ_NAME)))
		return AE_OK;

	if (*ts_adev)
		pr_err("duplicate entry %s\n", SPI_TS_OBJ_NAME);
	else
		*ts_adev = adev;

	return AE_OK;
}
/*
 * bus_for_each_dev() callback: record the ACPI lid device (PNP0C0D) and,
 * for SPI controllers, walk their namespace to find the NTRG touchscreen.
 * Always returns 0 so iteration covers every platform device.
 */
static int s3_wmi_check_platform_device(struct device *dev, void *data)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct acpi_device *ts_adev = NULL;
	acpi_status status;

	/* ignore non ACPI devices */
	if (!adev)
		return 0;

	/* check for LID ACPI switch */
	if (!strcmp(ACPI_BUTTON_HID_LID, acpi_device_hid(adev))) {
		s3_wmi.pnp0c0d_adev = adev;
		return 0;
	}

	/* ignore non SPI controllers */
	if (strncmp(acpi_device_bid(adev), SPI_CTL_OBJ_NAME,
		    strlen(SPI_CTL_OBJ_NAME)))
		return 0;

	/* depth 1: only look at the controller's immediate children */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, adev->handle, 1,
				     s3_wmi_attach_spi_device, NULL,
				     &ts_adev, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(dev, "failed to enumerate SPI slaves\n");

	if (!ts_adev)
		return 0;

	s3_wmi.touchscreen_adev = ts_adev;

	return 0;
}
/*
 * Allocate and register the SW_LID input device used to report lid
 * state.  Returns 0 on success or a negative errno.
 */
static int s3_wmi_create_and_register_input(struct platform_device *pdev)
{
	struct input_dev *input = devm_input_allocate_device(&pdev->dev);
	int ret;

	if (!input)
		return -ENOMEM;

	input->name = "Lid Switch";
	input->phys = "button/input0";
	input->id.bustype = BUS_HOST;
	input->id.product = 0x0005;

	input_set_capability(input, EV_SW, SW_LID);

	ret = input_register_device(input);
	if (ret)
		return ret;

	s3_wmi.input = input;
	return 0;
}
/*
 * Probe: verify the machine via DMI, locate the touchscreen and lid ACPI
 * devices, take over lid reporting from the stock PNP0C0D driver, and
 * publish the initial lid state.
 */
static int __init s3_wmi_probe(struct platform_device *pdev)
{
	int error;

	if (!dmi_check_system(surface3_dmi_table))
		return -ENODEV;

	memset(&s3_wmi, 0, sizeof(s3_wmi));

	/* Find the NTRG touchscreen and the PNP0C0D lid device. */
	bus_for_each_dev(&platform_bus_type, NULL, NULL,
			 s3_wmi_check_platform_device);

	if (!s3_wmi.touchscreen_adev)
		return -ENODEV;

	/*
	 * Remove the generic ACPI lid device; the lid is reported through
	 * our own input device instead.
	 * NOTE(review): pnp0c0d_adev stays NULL when no PNP0C0D device was
	 * found — confirm acpi_bus_trim() and the error-path acpi_bus_scan()
	 * tolerate that.
	 */
	acpi_bus_trim(s3_wmi.pnp0c0d_adev);

	error = s3_wmi_create_and_register_input(pdev);
	if (error)
		goto restore_acpi_lid;

	/* Receive lid-change notifications via the touchscreen hotplug hook. */
	acpi_initialize_hp_context(s3_wmi.touchscreen_adev, &s3_wmi.hp,
				   s3_wmi_hp_notify, NULL);

	/* Report the initial state. */
	s3_wmi_send_lid_state();

	return 0;

restore_acpi_lid:
	acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
	return error;
}
/*
 * Undo the probe-time takeover: drop our hotplug context and rescan so
 * the stock ACPI lid device is re-created.
 */
static void s3_wmi_remove(struct platform_device *device)
{
	/* remove the hotplug context from the acpi device */
	s3_wmi.touchscreen_adev->hp = NULL;

	/* reinstall the actual PNPC0C0D LID default handle */
	acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
}
/* Re-report the lid state on resume; it may have changed while asleep. */
static int __maybe_unused s3_wmi_resume(struct device *dev)
{
	s3_wmi_send_lid_state();
	return 0;
}
/* Only a resume hook is needed; nothing to quiesce on suspend. */
static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);

static struct platform_driver s3_wmi_driver = {
	.driver = {
		.name = "surface3-wmi",
		.pm = &s3_wmi_pm,
	},
	.remove = s3_wmi_remove,
};
/*
 * Create the platform device/driver pair by hand; probe runs exactly
 * once via platform_driver_probe() and init fails if probing fails.
 */
static int __init s3_wmi_init(void)
{
	int error;

	s3_wmi_pdev = platform_device_alloc("surface3-wmi", -1);
	if (!s3_wmi_pdev)
		return -ENOMEM;

	error = platform_device_add(s3_wmi_pdev);
	if (error)
		goto err_device_put;

	error = platform_driver_probe(&s3_wmi_driver, s3_wmi_probe);
	if (error)
		goto err_device_del;

	pr_info("Surface 3 WMI Extras loaded\n");

	return 0;

err_device_del:
	platform_device_del(s3_wmi_pdev);
err_device_put:
	platform_device_put(s3_wmi_pdev);
	return error;
}
/* Tear down in reverse order of s3_wmi_init(). */
static void __exit s3_wmi_exit(void)
{
	platform_device_unregister(s3_wmi_pdev);
	platform_driver_unregister(&s3_wmi_driver);
}

module_init(s3_wmi_init);
module_exit(s3_wmi_exit);
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Voltage and current regulation for AD5398 and AD5821
*
* Copyright 2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#define AD5398_CURRENT_EN_MASK	0x8000

/* Per-device state and register-conversion parameters. */
struct ad5398_chip_info {
	struct i2c_client *client;
	int min_uA;				/* lowest programmable current */
	int max_uA;				/* highest programmable current */
	unsigned int current_level;		/* number of selector steps */
	unsigned int current_mask;		/* selector field mask in register */
	unsigned int current_offset;		/* selector field bit offset */
	struct regulator_dev *rdev;
};

/*
 * Convert a register selector to a current in uA by linear interpolation
 * over [min_uA, max_uA] in current_level steps (integer arithmetic).
 */
static int ad5398_calc_current(struct ad5398_chip_info *chip,
			       unsigned selector)
{
	unsigned span_uA = chip->max_uA - chip->min_uA;

	return chip->min_uA + (selector * span_uA / chip->current_level);
}
/*
 * Read the chip's single 16-bit register (big-endian on the wire) into
 * *data in host order.  On success returns the positive byte count from
 * i2c_master_recv(); callers only check for ret < 0.
 * NOTE(review): a short read (0 <= ret < 2) is treated as success and
 * would convert partially-filled storage — confirm it cannot occur here.
 */
static int ad5398_read_reg(struct i2c_client *client, unsigned short *data)
{
	unsigned short val;
	int ret;

	ret = i2c_master_recv(client, (char *)&val, 2);
	if (ret < 0) {
		dev_err(&client->dev, "I2C read error\n");
		return ret;
	}

	*data = be16_to_cpu(val);

	return ret;
}
/*
 * Write the chip's single 16-bit register (big-endian on the wire).
 * Returns 0 on success, a negative errno on failure (-EIO when the
 * transfer completed but was short).
 */
static int ad5398_write_reg(struct i2c_client *client, const unsigned short data)
{
	unsigned short val;
	int ret;

	val = cpu_to_be16(data);
	ret = i2c_master_send(client, (char *)&val, 2);
	if (ret != 2) {
		dev_err(&client->dev, "I2C write error\n");
		return ret < 0 ? ret : -EIO;
	}

	return 0;
}
/* regulator_ops.get_current_limit: report the programmed limit in uA. */
static int ad5398_get_current_limit(struct regulator_dev *rdev)
{
	struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
	struct i2c_client *client = chip->client;
	unsigned short data;
	int ret;

	ret = ad5398_read_reg(client, &data);
	if (ret < 0)
		return ret;

	/* Extract the selector field, then convert it to microamps. */
	ret = (data & chip->current_mask) >> chip->current_offset;

	return ad5398_calc_current(chip, ret);
}
/*
 * regulator_ops.set_current_limit: program a limit inside the requested
 * [min_uA, max_uA] window.  The selector is rounded up from min_uA and
 * then verified against max_uA, so the programmed current always falls
 * within the window.  The register's enable bit is preserved.
 */
static int ad5398_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA)
{
	struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
	struct i2c_client *client = chip->client;
	unsigned range_uA = chip->max_uA - chip->min_uA;
	unsigned selector;
	unsigned short data;
	int ret;

	/* Clamp the request to the chip's supported range. */
	if (min_uA < chip->min_uA)
		min_uA = chip->min_uA;
	if (max_uA > chip->max_uA)
		max_uA = chip->max_uA;

	/* Reject windows lying entirely outside the supported range. */
	if (min_uA > chip->max_uA || max_uA < chip->min_uA)
		return -EINVAL;

	selector = DIV_ROUND_UP((min_uA - chip->min_uA) * chip->current_level,
				range_uA);
	if (ad5398_calc_current(chip, selector) > max_uA)
		return -EINVAL;

	dev_dbg(&client->dev, "changing current %duA\n",
		ad5398_calc_current(chip, selector));

	/* read chip enable bit */
	ret = ad5398_read_reg(client, &data);
	if (ret < 0)
		return ret;

	/* prepare register data */
	selector = (selector << chip->current_offset) & chip->current_mask;
	data = (unsigned short)selector | (data & AD5398_CURRENT_EN_MASK);

	/* write the new current value back as well as enable bit */
	ret = ad5398_write_reg(client, data);

	return ret;
}
/* regulator_ops.is_enabled: 1 when the enable bit is set, 0 otherwise. */
static int ad5398_is_enabled(struct regulator_dev *rdev)
{
	struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
	struct i2c_client *client = chip->client;
	unsigned short data;
	int ret;

	ret = ad5398_read_reg(client, &data);
	if (ret < 0)
		return ret;

	return (data & AD5398_CURRENT_EN_MASK) ? 1 : 0;
}
/* regulator_ops.enable: set the enable bit, skipping the write if set. */
static int ad5398_enable(struct regulator_dev *rdev)
{
	struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
	struct i2c_client *client = chip->client;
	unsigned short data;
	int ret;

	ret = ad5398_read_reg(client, &data);
	if (ret < 0)
		return ret;

	/* Already enabled: avoid a redundant I2C write. */
	if (data & AD5398_CURRENT_EN_MASK)
		return 0;

	data |= AD5398_CURRENT_EN_MASK;

	ret = ad5398_write_reg(client, data);

	return ret;
}
/* regulator_ops.disable: clear the enable bit, skipping the write if clear. */
static int ad5398_disable(struct regulator_dev *rdev)
{
	struct ad5398_chip_info *chip = rdev_get_drvdata(rdev);
	struct i2c_client *client = chip->client;
	unsigned short data;
	int ret;

	ret = ad5398_read_reg(client, &data);
	if (ret < 0)
		return ret;

	/* Already disabled: avoid a redundant I2C write. */
	if (!(data & AD5398_CURRENT_EN_MASK))
		return 0;

	data &= ~AD5398_CURRENT_EN_MASK;

	ret = ad5398_write_reg(client, data);

	return ret;
}
/* Operations exposed to the regulator core. */
static const struct regulator_ops ad5398_ops = {
	.get_current_limit = ad5398_get_current_limit,
	.set_current_limit = ad5398_set_current_limit,
	.enable = ad5398_enable,
	.disable = ad5398_disable,
	.is_enabled = ad5398_is_enabled,
};

/* The chip provides a single current-sink output. */
static const struct regulator_desc ad5398_reg = {
	.name = "isink",
	.id = 0,
	.ops = &ad5398_ops,
	.type = REGULATOR_CURRENT,
	.owner = THIS_MODULE,
};
/* Register-layout parameters: selector width and offset, current range. */
struct ad5398_current_data_format {
	int current_bits;
	int current_offset;
	int min_uA;
	int max_uA;
};

/* 10 selector bits at offset 4, 0..120 mA: shared by AD5398 and AD5821. */
static const struct ad5398_current_data_format df_10_4_120 = {10, 4, 0, 120000};

static const struct i2c_device_id ad5398_id[] = {
	{ "ad5398", (kernel_ulong_t)&df_10_4_120 },
	{ "ad5821", (kernel_ulong_t)&df_10_4_120 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ad5398_id);
/*
 * Instantiate the regulator for one chip.  Platform data (regulator
 * init_data) is mandatory; conversion parameters come from the matched
 * id entry's driver_data.
 */
static int ad5398_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);
	struct regulator_init_data *init_data = dev_get_platdata(&client->dev);
	struct regulator_config config = { };
	struct ad5398_chip_info *chip;
	const struct ad5398_current_data_format *df =
			(struct ad5398_current_data_format *)id->driver_data;

	if (!init_data)
		return -EINVAL;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	config.dev = &client->dev;
	config.init_data = init_data;
	config.driver_data = chip;

	chip->client = client;

	chip->min_uA = df->min_uA;
	chip->max_uA = df->max_uA;
	/* current_level = number of selector steps (2^current_bits). */
	chip->current_level = 1 << df->current_bits;
	chip->current_offset = df->current_offset;
	chip->current_mask = (chip->current_level - 1) << chip->current_offset;

	chip->rdev = devm_regulator_register(&client->dev, &ad5398_reg,
					     &config);
	if (IS_ERR(chip->rdev)) {
		dev_err(&client->dev, "failed to register %s %s\n",
			id->name, ad5398_reg.name);
		return PTR_ERR(chip->rdev);
	}

	i2c_set_clientdata(client, chip);
	dev_dbg(&client->dev, "%s regulator driver is registered.\n", id->name);

	return 0;
}
static struct i2c_driver ad5398_driver = {
	.probe = ad5398_probe,
	.driver = {
		.name = "ad5398",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.id_table = ad5398_id,
};

/* Registered at subsys_initcall time (earlier than normal module init). */
static int __init ad5398_init(void)
{
	return i2c_add_driver(&ad5398_driver);
}
subsys_initcall(ad5398_init);

static void __exit ad5398_exit(void)
{
	i2c_del_driver(&ad5398_driver);
}
module_exit(ad5398_exit);

MODULE_DESCRIPTION("AD5398 and AD5821 current regulator driver");
MODULE_AUTHOR("Sonic Zhang");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* at91-tse850-3.dts - Device Tree file for the Axentia TSE-850 3.0 board
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <[email protected]>
*/
/dts-v1/;
#include <dt-bindings/pwm/pwm.h>
#include "at91-linea.dtsi"
/ {
	model = "Axentia TSE-850 3.0";
	compatible = "axentia,tse850v3", "axentia,linea",
		     "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";

	sck: oscillator {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <16000000>;
		clock-output-names = "sck";
	};

	reg_3v3: regulator {
		compatible = "regulator-fixed";
		regulator-name = "3v3-supply";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
	};

	ana: reg-ana {
		compatible = "pwm-regulator";
		regulator-name = "ANA";

		pwms = <&pwm0 2 1000 PWM_POLARITY_INVERTED>;
		pwm-dutycycle-unit = <1000>;
		pwm-dutycycle-range = <100 1000>;

		regulator-min-microvolt = <2000000>;
		regulator-max-microvolt = <20000000>;
		regulator-ramp-delay = <1000>;
	};

	sound {
		compatible = "axentia,tse850-pcm5142";

		axentia,cpu-dai = <&ssc0>;
		axentia,audio-codec = <&pcm5142>;

		axentia,add-gpios = <&pioA 8 GPIO_ACTIVE_LOW>;
		axentia,loop1-gpios = <&pioA 10 GPIO_ACTIVE_LOW>;
		axentia,loop2-gpios = <&pioA 11 GPIO_ACTIVE_LOW>;

		axentia,ana-supply = <&ana>;
	};

	dac: dpot-dac {
		compatible = "dpot-dac";
		/* fixed: "<®_3v3>" was a mis-encoded "&reg_3v3" phandle */
		vref-supply = <&reg_3v3>;
		io-channels = <&dpot 0>;
		io-channel-names = "dpot";
		#io-channel-cells = <1>;
	};

	env_det: envelope-detector {
		compatible = "axentia,tse850-envelope-detector";
		io-channels = <&dac 0>;
		io-channel-names = "dac";
		#io-channel-cells = <1>;

		interrupt-parent = <&pioA>;
		interrupts = <3 IRQ_TYPE_EDGE_RISING>;
		interrupt-names = "comp";
	};

	mux: mux-controller {
		compatible = "gpio-mux";
		#mux-control-cells = <0>;

		mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
			    <&pioA 1 GPIO_ACTIVE_HIGH>,
			    <&pioA 2 GPIO_ACTIVE_HIGH>;
		idle-state = <0>;
	};

	envelope-detector-mux {
		compatible = "io-channel-mux";
		io-channels = <&env_det 0>;
		io-channel-names = "parent";

		mux-controls = <&mux>;

		channels = "", "",
			   "sync-1",
			   "in",
			   "out",
			   "sync-2",
			   "sys-reg",
			   "ana-reg";
	};

	leds {
		compatible = "gpio-leds";

		led-ch1-red {
			label = "ch-1:red";
			gpios = <&pioA 23 GPIO_ACTIVE_LOW>;
		};
		led-ch1-green {
			label = "ch-1:green";
			gpios = <&pioA 22 GPIO_ACTIVE_LOW>;
		};
		led-ch2-red {
			label = "ch-2:red";
			gpios = <&pioA 21 GPIO_ACTIVE_LOW>;
		};
		led-ch2-green {
			label = "ch-2:green";
			gpios = <&pioA 20 GPIO_ACTIVE_LOW>;
		};
		led-data-red {
			label = "data:red";
			gpios = <&pioA 19 GPIO_ACTIVE_LOW>;
		};
		led-data-green {
			label = "data:green";
			gpios = <&pioA 18 GPIO_ACTIVE_LOW>;
		};
		led-alarm-red {
			label = "alarm:red";
			gpios = <&pioA 17 GPIO_ACTIVE_LOW>;
		};
		led-alarm-green {
			label = "alarm:green";
			gpios = <&pioA 16 GPIO_ACTIVE_LOW>;
		};
	};
};
/* Raw NAND flash layout. */
&nand {
	partitions {
		compatible = "fixed-partitions";
		#address-cells = <1>;
		#size-cells = <1>;

		at91bootstrap@0 {
			label = "at91bootstrap";
			reg = <0x0 0x40000>;
		};

		barebox@40000 {
			label = "bootloader";
			reg = <0x40000 0x60000>;
		};

		bareboxenv@c0000 {
			label = "bareboxenv";
			reg = <0xc0000 0x40000>;
		};

		bareboxenv2@100000 {
			label = "bareboxenv2";
			reg = <0x100000 0x40000>;
		};

		oftree@180000 {
			label = "oftree";
			reg = <0x180000 0x20000>;
		};

		kernel@200000 {
			label = "kernel";
			reg = <0x200000 0x500000>;
		};

		rootfs@800000 {
			label = "rootfs";
			reg = <0x800000 0x0f800000>;
		};

		ovlfs@10000000 {
			label = "ovlfs";
			reg = <0x10000000 0x10000000>;
		};
	};
};

/* SSC0 is the CPU DAI referenced by the sound node. */
&ssc0 {
	#sound-dai-cells = <0>;
	status = "okay";
};
&i2c0 {
	status = "okay";

	jc42@18 {
		compatible = "nxp,se97b", "jedec,jc-42.4-temp";
		reg = <0x18>;
		smbus-timeout-disable;
	};

	dpot: mcp4651-104@28 {
		compatible = "microchip,mcp4651-104";
		reg = <0x28>;
		#io-channel-cells = <1>;
	};

	pcm5142: pcm5142@4c {
		compatible = "ti,pcm5142";
		reg = <0x4c>;

		#sound-dai-cells = <0>;

		/* fixed: "<®_3v3>" was a mis-encoded "&reg_3v3" phandle */
		AVDD-supply = <&reg_3v3>;
		DVDD-supply = <&reg_3v3>;
		CPVDD-supply = <&reg_3v3>;

		clocks = <&sck>;

		pll-in = <3>;
		pll-out = <6>;
	};

	eeprom@50 {
		compatible = "nxp,se97b", "atmel,24c02";
		reg = <0x50>;
		pagesize = <16>;
	};
};
&pinctrl {
	tse850 {
		pinctrl_usba_vbus: usba-vbus {
			atmel,pins = <AT91_PIOC 31 AT91_PERIPH_GPIO
				      AT91_PINCTRL_DEGLITCH>;
		};
	};
};

&watchdog {
	status = "okay";
};

&usart0 {
	status = "okay";
	atmel,use-dma-rx;
};

/* PWM channel 2 drives the "ana" pwm-regulator above. */
&pwm0 {
	status = "okay";
	pinctrl-0 = <&pinctrl_pwm0_pwml2_1>;
	pinctrl-names = "default";
};

&macb1 {
	status = "okay";
	phy-mode = "rmii";

	#address-cells = <1>;
	#size-cells = <0>;
	phy0: ethernet-phy@3 {
		reg = <3>;
		interrupt-parent = <&pioE>;
		interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
	};
};

&usb0 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&pinctrl_usba_vbus>;
	atmel,vbus-gpio = <&pioC 31 GPIO_ACTIVE_HIGH>;
};

&usb1 {
	status = "okay";
	num-ports = <1>;
	atmel,vbus-gpio = <&pioD 29 GPIO_ACTIVE_HIGH>;
	atmel,oc-gpio = <&pioC 15 GPIO_ACTIVE_LOW>;
};

&usb2 {
	status = "okay";
};

&dbgu {
	status = "okay";
	dmas = <0>, <0>;	/* Do not use DMA for dbgu */
};
/* Signal names of the GPIO lines, as labelled on the board schematics. */
&pioA {
	gpio-line-names =
		/*  0 */ "SUP-A", "SUP-B", "SUP-C", "SIG<LEV",
		/*  4 */ "", "/RFRST", "", "",
		/*  8 */ "/ADD", "", "/LOOP1", "/LOOP2",
		/* 12 */ "", "", "", "",
		/* 16 */ "LED1GREEN", "LED1RED", "LED2GREEN", "LED2RED",
		/* 20 */ "LED3GREEN", "LED3RED", "LED4GREEN", "LED4RED",
		/* 24 */ "", "", "", "",
		/* 28 */ "", "", "SDA", "SCL";
};

&pioB {
	gpio-line-names =
		/*  0 */ "", "", "", "",
		/*  4 */ "", "", "", "",
		/*  8 */ "", "", "", "",
		/* 12 */ "", "", "", "",
		/* 16 */ "", "", "", "",
		/* 20 */ "", "", "", "",
		/* 24 */ "", "", "SIG<LIN", "SIG>LIN",
		/* 28 */ "RXD", "TXD", "BRX", "BTX";
};

&pioC {
	gpio-line-names =
		/*  0 */ "ETX0", "ETX1", "ERX0", "ERX1",
		/*  4 */ "ETXEN", "ECRSDV", "ERXER", "EREFCK",
		/*  8 */ "EMDC", "EMDIO", "", "",
		/* 12 */ "", "", "", "/ILIM",
		/* 16 */ "BCK", "LRCK", "DIN", "",
		/* 20 */ "", "", "", "",
		/* 24 */ "", "", "", "",
		/* 28 */ "", "", "", "VBUS";
};

&pioD {
	gpio-line-names =
		/*  0 */ "I1", "I2", "O1", "EXTVEN",
		/*  4 */ "", "456KHZ", "VCTRL", "SYNCSEL",
		/*  8 */ "STEREO", "", "", "",
		/* 12 */ "", "", "", "",
		/* 16 */ "", ">LIN", "LIN>", "",
		/* 20 */ "VREFEN", "", "", "",
		/* 24 */ "", "", "VINOK", "",
		/* 28 */ "POEOK", "USBON", "POELOAD", "";
};

&pioE {
	gpio-line-names =
		/*  0 */ "", "", "", "",
		/*  4 */ "", "", "", "",
		/*  8 */ "", "", "", "",
		/* 12 */ "", "", "", "",
		/* 16 */ "", "", "", "",
		/* 20 */ "", "ALE", "CLE", "",
		/* 24 */ "", "", "", "",
		/* 28 */ "", "", "", "/ETHINT";
};
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <[email protected]> for STMicroelectronics.
*/
#ifndef _STI_PLANE_H_
#define _STI_PLANE_H_
#include <drm/drm_atomic_helper.h>
#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)

/* A plane descriptor packs the type in the high bits, the id in the low. */
#define STI_PLANE_TYPE_SHIFT 8
#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))

/* Hardware plane categories. */
enum sti_plane_type {
	STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
	STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
	STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
	STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
};

/* Instance index within a plane type. */
enum sti_plane_id_of_type {
	STI_ID_0 = 0,
	STI_ID_1 = 1,
	STI_ID_2 = 2,
	STI_ID_3 = 3
};

/* Fully-qualified plane identifiers (type | id). */
enum sti_plane_desc {
	STI_GDP_0 = STI_GDP | STI_ID_0,
	STI_GDP_1 = STI_GDP | STI_ID_1,
	STI_GDP_2 = STI_GDP | STI_ID_2,
	STI_GDP_3 = STI_GDP | STI_ID_3,
	STI_HQVDP_0 = STI_VDP | STI_ID_0,
	STI_CURSOR = STI_CUR,
	STI_BACK = STI_BCK
};

/* Plane lifecycle states. */
enum sti_plane_status {
	STI_PLANE_READY,
	STI_PLANE_UPDATED,
	STI_PLANE_DISABLING,
	STI_PLANE_FLUSHING,
	STI_PLANE_DISABLED,
};
#define FPS_LENGTH 128

/* Frame/field rate bookkeeping used for FPS reporting. */
struct sti_fps_info {
	bool output;				/* enable FPS output -- TODO confirm semantics */
	unsigned int curr_frame_counter;
	unsigned int last_frame_counter;
	unsigned int curr_field_counter;
	unsigned int last_field_counter;
	ktime_t last_timestamp;
	char fps_str[FPS_LENGTH];		/* frames-per-second string */
	char fips_str[FPS_LENGTH];		/* presumably fields-per-second string -- TODO confirm */
};
/**
 * STI plane structure
 *
 * @drm_plane: drm plane it is bound to (if any)
 * @desc:      plane type & id
 * @status:    to know the status of the plane
 * @fps_info:  frame per second info
 */
struct sti_plane {
	struct drm_plane drm_plane;
	enum sti_plane_desc desc;
	enum sti_plane_status status;
	struct sti_fps_info fps_info;
};

/* Human-readable name for the plane's descriptor. */
const char *sti_plane_to_str(struct sti_plane *plane);
void sti_plane_update_fps(struct sti_plane *plane,
			  bool new_frame,
			  bool new_field);
void sti_plane_init_property(struct sti_plane *plane,
			     enum drm_plane_type type);
#endif
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Qualcomm SM7150 interconnect IDs
*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Danila Tikhonov <[email protected]>
*/
#ifndef __DRIVERS_INTERCONNECT_QCOM_SM7150_H
#define __DRIVERS_INTERCONNECT_QCOM_SM7150_H

/* Internal NoC-to-NoC link endpoints */
#define SM7150_A1NOC_SNOC_MAS			0
#define SM7150_A1NOC_SNOC_SLV			1
#define SM7150_A2NOC_SNOC_MAS			2
#define SM7150_A2NOC_SNOC_SLV			3

/* Master ports */
#define SM7150_MASTER_A1NOC_CFG			4
#define SM7150_MASTER_A2NOC_CFG			5
#define SM7150_MASTER_AMPSS_M0			6
#define SM7150_MASTER_CAMNOC_HF0		7
#define SM7150_MASTER_CAMNOC_HF0_UNCOMP		8
#define SM7150_MASTER_CAMNOC_NRT		9
#define SM7150_MASTER_CAMNOC_NRT_UNCOMP		10
#define SM7150_MASTER_CAMNOC_RT			11
#define SM7150_MASTER_CAMNOC_RT_UNCOMP		12
#define SM7150_MASTER_CAMNOC_SF			13
#define SM7150_MASTER_CAMNOC_SF_UNCOMP		14
#define SM7150_MASTER_CNOC_A2NOC		15
#define SM7150_MASTER_CNOC_DC_NOC		16
#define SM7150_MASTER_CNOC_MNOC_CFG		17
#define SM7150_MASTER_COMPUTE_NOC		18
#define SM7150_MASTER_CRYPTO_CORE_0		19
#define SM7150_MASTER_EMMC			20
#define SM7150_MASTER_GEM_NOC_CFG		21
#define SM7150_MASTER_GEM_NOC_PCIE_SNOC		22
#define SM7150_MASTER_GEM_NOC_SNOC		23
#define SM7150_MASTER_GIC			24
#define SM7150_MASTER_GRAPHICS_3D		25
#define SM7150_MASTER_IPA			26
#define SM7150_MASTER_LLCC			27
#define SM7150_MASTER_MDP_PORT0			28
#define SM7150_MASTER_MDP_PORT1			29
#define SM7150_MASTER_MNOC_HF_MEM_NOC		30
#define SM7150_MASTER_MNOC_SF_MEM_NOC		31
#define SM7150_MASTER_NPU			32
#define SM7150_MASTER_PCIE			33
#define SM7150_MASTER_PIMEM			34
#define SM7150_MASTER_QDSS_BAM			35
#define SM7150_MASTER_QDSS_DAP			36
#define SM7150_MASTER_QDSS_ETR			37
#define SM7150_MASTER_QUP_0			38
#define SM7150_MASTER_QUP_1			39
#define SM7150_MASTER_ROTATOR			40
#define SM7150_MASTER_SDCC_2			41
#define SM7150_MASTER_SDCC_4			42
#define SM7150_MASTER_SNOC_CFG			43
#define SM7150_MASTER_SNOC_GC_MEM_NOC		44
#define SM7150_MASTER_SNOC_SF_MEM_NOC		45
#define SM7150_MASTER_SPDM			46
#define SM7150_MASTER_SYS_TCU			47
#define SM7150_MASTER_TSIF			48
#define SM7150_MASTER_UFS_MEM			49
#define SM7150_MASTER_USB3			50
#define SM7150_MASTER_VIDEO_P0			51
#define SM7150_MASTER_VIDEO_P1			52
#define SM7150_MASTER_VIDEO_PROC		53

/* Slave ports */
#define SM7150_SLAVE_A1NOC_CFG			54
#define SM7150_SLAVE_A2NOC_CFG			55
#define SM7150_SLAVE_AHB2PHY_NORTH		56
#define SM7150_SLAVE_AHB2PHY_SOUTH		57
#define SM7150_SLAVE_AHB2PHY_WEST		58
#define SM7150_SLAVE_ANOC_PCIE_GEM_NOC		59
#define SM7150_SLAVE_AOP			60
#define SM7150_SLAVE_AOSS			61
#define SM7150_SLAVE_APPSS			62
#define SM7150_SLAVE_CAMERA_CFG			63
#define SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG	64
#define SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG	65
#define SM7150_SLAVE_CAMNOC_UNCOMP		66
#define SM7150_SLAVE_CDSP_CFG			67
#define SM7150_SLAVE_CDSP_GEM_NOC		68
#define SM7150_SLAVE_CLK_CTL			69
#define SM7150_SLAVE_CNOC_A2NOC			70
#define SM7150_SLAVE_CNOC_DDRSS			71
#define SM7150_SLAVE_CNOC_MNOC_CFG		72
#define SM7150_SLAVE_CRYPTO_0_CFG		73
#define SM7150_SLAVE_DISPLAY_CFG		74
#define SM7150_SLAVE_DISPLAY_THROTTLE_CFG	75
#define SM7150_SLAVE_EBI_CH0			76
#define SM7150_SLAVE_EMMC_CFG			77
#define SM7150_SLAVE_GEM_NOC_CFG		78
#define SM7150_SLAVE_GEM_NOC_SNOC		79
#define SM7150_SLAVE_GLM			80
#define SM7150_SLAVE_GRAPHICS_3D_CFG		81
#define SM7150_SLAVE_IMEM_CFG			82
#define SM7150_SLAVE_IPA_CFG			83
#define SM7150_SLAVE_LLCC			84
#define SM7150_SLAVE_LLCC_CFG			85
#define SM7150_SLAVE_MNOC_HF_MEM_NOC		86
#define SM7150_SLAVE_MNOC_SF_MEM_NOC		87
#define SM7150_SLAVE_MSS_PROC_MS_MPU_CFG	88
#define SM7150_SLAVE_OCIMEM			89
#define SM7150_SLAVE_PCIE_CFG			90
#define SM7150_SLAVE_PDM			91
#define SM7150_SLAVE_PIMEM			92
#define SM7150_SLAVE_PIMEM_CFG			93
#define SM7150_SLAVE_PRNG			94
#define SM7150_SLAVE_QDSS_CFG			95
#define SM7150_SLAVE_QDSS_STM			96
#define SM7150_SLAVE_QUP_0			97
#define SM7150_SLAVE_QUP_1			98
#define SM7150_SLAVE_RBCPR_CX_CFG		99
#define SM7150_SLAVE_RBCPR_MX_CFG		100
#define SM7150_SLAVE_SDCC_2			101
#define SM7150_SLAVE_SDCC_4			102
#define SM7150_SLAVE_SERVICE_A1NOC		103
#define SM7150_SLAVE_SERVICE_A2NOC		104
#define SM7150_SLAVE_SERVICE_CNOC		105
#define SM7150_SLAVE_SERVICE_GEM_NOC		106
#define SM7150_SLAVE_SERVICE_MNOC		107
#define SM7150_SLAVE_SERVICE_SNOC		108
#define SM7150_SLAVE_SNOC_CFG			109
#define SM7150_SLAVE_SNOC_GEM_NOC_GC		110
#define SM7150_SLAVE_SNOC_GEM_NOC_SF		111
#define SM7150_SLAVE_SPDM_WRAPPER		112
#define SM7150_SLAVE_TCSR			113
#define SM7150_SLAVE_TCU			114
#define SM7150_SLAVE_TLMM_NORTH			115
#define SM7150_SLAVE_TLMM_SOUTH			116
#define SM7150_SLAVE_TLMM_WEST			117
#define SM7150_SLAVE_TSIF			118
#define SM7150_SLAVE_UFS_MEM_CFG		119
#define SM7150_SLAVE_USB3			120
#define SM7150_SLAVE_VENUS_CFG			121
#define SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG	122
#define SM7150_SLAVE_VENUS_THROTTLE_CFG		123
#define SM7150_SLAVE_VSENSE_CTRL_CFG		124

/* SNOC-to-CNOC link endpoints */
#define SM7150_SNOC_CNOC_MAS			125
#define SM7150_SNOC_CNOC_SLV			126

#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/wakeup.c - System wakeup events framework
*
* Copyright (c) 2010 Rafael J. Wysocki <[email protected]>, Novell Inc.
*/
#define pr_fmt(fmt) "PM: " fmt
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>
#include "power.h"
#define list_for_each_entry_rcu_locked(pos, head, member) \
list_for_each_entry_rcu(pos, head, member, \
srcu_read_lock_held(&wakeup_srcu))
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
*/
bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
static unsigned int wakeup_irq[2] __read_mostly;
static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
/*
* Combined counters of registered wakeup events and wakeup events in progress.
* They need to be modified together atomically, so it's better to use one
* atomic variable to hold them both.
*/
static atomic_t combined_event_count = ATOMIC_INIT(0);
#define IN_PROGRESS_BITS (sizeof(int) * 4)
#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
/*
 * split_counters - Decompose combined_event_count into its two halves.
 * @cnt:  filled with the count of registered wakeup events.
 * @inpr: filled with the count of wakeup events in progress.
 *
 * Both values come from a single atomic snapshot, so they are always
 * consistent with each other.
 */
static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int snapshot = atomic_read(&combined_event_count);

	*inpr = snapshot & MAX_IN_PROGRESS;
	*cnt = snapshot >> IN_PROGRESS_BITS;
}
/* A preserved old value of the events counter. */
static unsigned int saved_count;
static DEFINE_RAW_SPINLOCK(events_lock);
static void pm_wakeup_timer_fn(struct timer_list *t);
static LIST_HEAD(wakeup_sources);
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
DEFINE_STATIC_SRCU(wakeup_srcu);
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};
static DEFINE_IDA(wakeup_ida);
/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 *
 * Return: the newly allocated object, or NULL if any allocation fails.
 */
struct wakeup_source *wakeup_source_create(const char *name)
{
	struct wakeup_source *ws;
	const char *ws_name;
	int id;
	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		goto err_ws;
	/* kstrdup_const() avoids a copy when @name lives in .rodata. */
	ws_name = kstrdup_const(name, GFP_KERNEL);
	if (!ws_name)
		goto err_name;
	ws->name = ws_name;
	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
	if (id < 0)
		goto err_id;
	ws->id = id;
	return ws;
	/* Error unwinding: release resources in reverse acquisition order. */
err_id:
	kfree_const(ws->name);
err_name:
	kfree(ws);
err_ws:
	return NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
/*
 * Record wakeup_source statistics being deleted into a dummy wakeup_source.
 */
static void wakeup_source_record(struct wakeup_source *ws)
{
	unsigned long flags;
	spin_lock_irqsave(&deleted_ws.lock, flags);
	/* Only fold in sources that ever reported at least one event. */
	if (ws->event_count) {
		deleted_ws.total_time =
			ktime_add(deleted_ws.total_time, ws->total_time);
		deleted_ws.prevent_sleep_time =
			ktime_add(deleted_ws.prevent_sleep_time,
				  ws->prevent_sleep_time);
		/* Keep the maximum single active period seen by any deleted source. */
		deleted_ws.max_time =
			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
				deleted_ws.max_time : ws->max_time;
		deleted_ws.event_count += ws->event_count;
		deleted_ws.active_count += ws->active_count;
		deleted_ws.relax_count += ws->relax_count;
		deleted_ws.expire_count += ws->expire_count;
		deleted_ws.wakeup_count += ws->wakeup_count;
	}
	spin_unlock_irqrestore(&deleted_ws.lock, flags);
}
/*
 * Release the ID, the name and the memory of a wakeup source.  The caller
 * is responsible for having removed it from the wakeup sources list first.
 */
static void wakeup_source_free(struct wakeup_source *ws)
{
	ida_free(&wakeup_ida, ws->id);
	kfree_const(ws->name);
	kfree(ws);
}
/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.  NULL is silently ignored.
 *
 * Use only for wakeup source objects created with wakeup_source_create().
 */
void wakeup_source_destroy(struct wakeup_source *ws)
{
	if (!ws)
		return;
	/* Deactivate first so the statistics recorded below are final. */
	__pm_relax(ws);
	wakeup_source_record(ws);
	wakeup_source_free(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);
/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
void wakeup_source_add(struct wakeup_source *ws)
{
	unsigned long flags;
	if (WARN_ON(!ws))
		return;
	spin_lock_init(&ws->lock);
	/* Setting the timer function also marks @ws as registered. */
	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
	ws->active = false;
	raw_spin_lock_irqsave(&events_lock, flags);
	list_add_rcu(&ws->entry, &wakeup_sources);
	raw_spin_unlock_irqrestore(&events_lock, flags);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);
/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
void wakeup_source_remove(struct wakeup_source *ws)
{
	unsigned long flags;
	if (WARN_ON(!ws))
		return;
	raw_spin_lock_irqsave(&events_lock, flags);
	list_del_rcu(&ws->entry);
	raw_spin_unlock_irqrestore(&events_lock, flags);
	/* Wait for all current SRCU readers of the list to finish. */
	synchronize_srcu(&wakeup_srcu);
	del_timer_sync(&ws->timer);
	/*
	 * Clear timer.function to make wakeup_source_not_registered() treat
	 * this wakeup source as not registered.
	 */
	ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @dev: Device this wakeup source is associated with (or NULL if virtual).
 * @name: Name of the wakeup source to register.
 *
 * Return: the new wakeup source, or NULL on allocation or sysfs failure.
 */
struct wakeup_source *wakeup_source_register(struct device *dev,
					const char *name)
{
	struct wakeup_source *ws;
	int ret;
	ws = wakeup_source_create(name);
	if (ws) {
		/* Sysfs entries need @dev (when given) to be registered. */
		if (!dev || device_is_registered(dev)) {
			ret = wakeup_source_sysfs_add(dev, ws);
			if (ret) {
				wakeup_source_free(ws);
				return NULL;
			}
		}
		wakeup_source_add(ws);
	}
	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);
/**
 * wakeup_source_unregister - Remove wakeup source from the list and drop it.
 * @ws: Wakeup source object to unregister.  NULL is silently ignored.
 *
 * Counterpart of wakeup_source_register(): takes the source off the list,
 * removes its sysfs representation (if it has one) and frees the object.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
	if (!ws)
		return;

	wakeup_source_remove(ws);
	if (ws->dev)
		wakeup_source_sysfs_remove(ws);

	wakeup_source_destroy(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
/**
 * wakeup_sources_read_lock - Lock wakeup source list for read.
 *
 * Returns an index of srcu lock for struct wakeup_srcu.
 * This index must be passed to the matching wakeup_sources_read_unlock().
 */
int wakeup_sources_read_lock(void)
{
	return srcu_read_lock(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
/**
 * wakeup_sources_read_unlock - Unlock wakeup source list.
 * @idx: return value from corresponding wakeup_sources_read_lock()
 */
void wakeup_sources_read_unlock(int idx)
{
	srcu_read_unlock(&wakeup_srcu, idx);
}
EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
/**
 * wakeup_sources_walk_start - Begin a walk on wakeup source list
 *
 * Return: the first entry of the wakeup sources list.
 *
 * Note that to be safe, wakeup sources list needs to be locked by calling
 * wakeup_source_read_lock() for this.
 */
struct wakeup_source *wakeup_sources_walk_start(void)
{
	/* The list head itself is not an entry; hand back its successor. */
	return list_entry_rcu(wakeup_sources.next, struct wakeup_source, entry);
}
EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
/**
 * wakeup_sources_walk_next - Get next wakeup source from the list
 * @ws: Previous wakeup source object
 *
 * Return: the entry following @ws, or NULL when @ws is the last one.
 *
 * Note that to be safe, wakeup sources list needs to be locked by calling
 * wakeup_source_read_lock() for this.
 */
struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
{
	return list_next_or_null_rcu(&wakeup_sources, &ws->entry,
				     struct wakeup_source, entry);
}
EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 *
 * Return: 0 on success, -EEXIST if @dev already has a wakeup source.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		spin_unlock_irq(&dev->power.lock);
		return -EEXIST;
	}
	dev->power.wakeup = ws;
	/* Hook up a wake IRQ that was requested before this attach. */
	if (dev->power.wakeirq)
		device_wakeup_attach_irq(dev, dev->power.wakeirq);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 *
 * Return: 0 on success, -EINVAL for a device that cannot wake up,
 * -ENOMEM on allocation failure, or the device_wakeup_attach() error.
 */
int device_wakeup_enable(struct device *dev)
{
	struct wakeup_source *ws;
	int ret;
	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;
	/* Enabling wakeup during a system transition is likely a caller bug. */
	if (pm_suspend_target_state != PM_SUSPEND_ON)
		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
	ws = wakeup_source_register(dev, dev_name(dev));
	if (!ws)
		return -ENOMEM;
	ret = device_wakeup_attach(dev, ws);
	if (ret)
		wakeup_source_unregister(ws);
	return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);
/**
 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 * @dev: Device to handle
 * @wakeirq: Device specific wakeirq entry
 *
 * Attach a device wakeirq to the wakeup source so the device
 * wake IRQ can be configured automatically for suspend and
 * resume.
 *
 * Call under the device's power.lock lock.
 */
void device_wakeup_attach_irq(struct device *dev,
			      struct wake_irq *wakeirq)
{
	struct wakeup_source *ws = dev->power.wakeup;

	if (!ws)
		return;

	/* A previous wakeirq should have been detached before this point. */
	if (ws->wakeirq)
		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");

	ws->wakeirq = wakeirq;
}
/**
 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 * @dev: Device to handle
 *
 * Removes a device wakeirq from the wakeup source.
 *
 * Call under the device's power.lock lock.
 */
void device_wakeup_detach_irq(struct device *dev)
{
	struct wakeup_source *ws = dev->power.wakeup;

	if (ws)
		ws->wakeirq = NULL;
}
/**
 * device_wakeup_arm_wake_irqs -
 *
 * Iterates over the list of device wakeirqs to arm them.
 */
void device_wakeup_arm_wake_irqs(void)
{
	struct wakeup_source *ws;
	int srcuidx;
	/* Readers of the wakeup sources list are protected by SRCU. */
	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
		dev_pm_arm_wake_irq(ws->wakeirq);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
 * device_wakeup_disarm_wake_irqs -
 *
 * Iterates over the list of device wakeirqs to disarm them.
 */
void device_wakeup_disarm_wake_irqs(void)
{
	struct wakeup_source *ws;
	int srcuidx;
	/* Readers of the wakeup sources list are protected by SRCU. */
	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
		dev_pm_disarm_wake_irq(ws->wakeirq);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 *
 * Return: the wakeup source that was attached to @dev (may be NULL).
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
	struct wakeup_source *old;

	spin_lock_irq(&dev->power.lock);
	old = dev->power.wakeup;
	dev->power.wakeup = NULL;
	spin_unlock_irq(&dev->power.lock);

	return old;
}
/**
 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 * object and destroy it.
 */
void device_wakeup_disable(struct device *dev)
{
	if (!dev || !dev->power.can_wakeup)
		return;

	/* Detach under the power lock, then drop the source outside it. */
	wakeup_source_unregister(device_wakeup_detach(dev));
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
/**
 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 * @dev: Device to handle.
 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 *
 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 * wakeup-related attributes to sysfs. Otherwise, unset the @dev's
 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 *
 * This function may sleep and it can't be called from any context where
 * sleeping is not allowed.
 */
void device_set_wakeup_capable(struct device *dev, bool capable)
{
	/* No state change requested - nothing to do. */
	if (!!dev->power.can_wakeup == !!capable)
		return;
	dev->power.can_wakeup = capable;
	/* Only touch sysfs for devices already added to the PM list. */
	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
		if (capable) {
			int ret = wakeup_sysfs_add(dev);
			if (ret)
				dev_info(dev, "Wakeup sysfs attributes not added\n");
		} else {
			wakeup_sysfs_remove(dev);
		}
	}
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 * @enable: enable/disable flag
 *
 * Return: 0 when disabling, otherwise the result of device_wakeup_enable().
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
	if (!enable) {
		device_wakeup_disable(dev);
		return 0;
	}

	return device_wakeup_enable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
/**
 * wakeup_source_not_registered - validate the given wakeup source.
 * @ws: Wakeup source to be validated.
 *
 * Return: true if @ws has not been through wakeup_source_add() (or has been
 * removed again by wakeup_source_remove()).
 */
static bool wakeup_source_not_registered(struct wakeup_source *ws)
{
	/*
	 * Use timer struct to check if the given source is initialized
	 * by wakeup_source_add.
	 */
	return ws->timer.function != pm_wakeup_timer_fn;
}
/*
* The functions below use the observation that each wakeup event starts a
* period in which the system should not be suspended. The moment this period
* will end depends on how the wakeup event is going to be processed after being
* detected and all of the possible cases can be divided into two distinct
* groups.
*
* First, a wakeup event may be detected by the same functional unit that will
* carry out the entire processing of it and possibly will pass it to user space
* for further processing. In that case the functional unit that has detected
* the event may later "close" the "no suspend" period associated with it
* directly as soon as it has been dealt with. The pair of pm_stay_awake() and
* pm_relax(), balanced with each other, is supposed to be used in such
* situations.
*
* Second, a wakeup event may be detected by one functional unit and processed
* by another one. In that case the unit that has detected it cannot really
* "close" the "no suspend" period associated with it, unless it knows in
* advance what's going to happen to the event during processing. This
* knowledge, however, may not be available to it, so it can simply specify time
* to wait before the system can be suspended and pass it as the second
* argument of pm_wakeup_event().
*
* It is valid to call pm_relax() after pm_wakeup_event(), in which case the
* "no suspend" period will be ended either by the pm_relax(), or by the timer
* function executed when the timer expires, whichever comes first.
*/
/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 * core of the event by incrementing the counter of the wakeup events being
 * processed.  Caller must hold @ws->lock.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
	unsigned int cec;
	if (WARN_ONCE(wakeup_source_not_registered(ws),
			"unregistered wakeup source\n"))
		return;
	ws->active = true;
	ws->active_count++;
	ws->last_time = ktime_get();
	if (ws->autosleep_enabled)
		ws->start_prevent_time = ws->last_time;
	/* Increment the counter of events in progress. */
	cec = atomic_inc_return(&combined_event_count);
	trace_wakeup_source_activate(ws->name, cec);
}
/**
 * wakeup_source_report_event - Report wakeup event using the given source.
 * @ws: Wakeup source to report the event for.
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 */
static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
{
	ws->event_count++;
	/* This is racy, but the counter is approximate anyway. */
	if (events_check_enabled)
		ws->wakeup_count++;
	/* Only the first event of a burst activates the source. */
	if (!ws->active)
		wakeup_source_activate(ws);
	if (hard)
		pm_system_wakeup();
}
/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 * NULL is silently ignored.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
	unsigned long flags;
	if (!ws)
		return;
	spin_lock_irqsave(&ws->lock, flags);
	wakeup_source_report_event(ws, false);
	/* A pending timed deactivation is superseded by this event. */
	del_timer(&ws->timer);
	ws->timer_expires = 0;
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);
/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake for the @dev's wakeup source object.
 *
 * Call this function after detecting of a wakeup event if pm_relax() is going
 * to be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long irqflags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	__pm_stay_awake(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);
#ifdef CONFIG_PM_AUTOSLEEP
/* Accumulate the time @ws has spent preventing autosleep since
 * start_prevent_time. */
static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
{
	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
}
#else
/* Without CONFIG_PM_AUTOSLEEP, prevent-sleep time is not accounted. */
static inline void update_prevent_sleep_time(struct wakeup_source *ws,
					     ktime_t now) {}
#endif
/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 * Caller must hold @ws->lock.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
	unsigned int cnt, inpr, cec;
	ktime_t duration;
	ktime_t now;
	ws->relax_count++;
	/*
	 * __pm_relax() may be called directly or from a timer function.
	 * If it is called directly right after the timer function has been
	 * started, but before the timer function calls __pm_relax(), it is
	 * possible that __pm_stay_awake() will be called in the meantime and
	 * will set ws->active. Then, ws->active may be cleared immediately
	 * by the __pm_relax() called from the timer function, but in such a
	 * case ws->relax_count will be different from ws->active_count.
	 */
	if (ws->relax_count != ws->active_count) {
		ws->relax_count--;
		return;
	}
	ws->active = false;
	/* Account the just-finished active period into the statistics. */
	now = ktime_get();
	duration = ktime_sub(now, ws->last_time);
	ws->total_time = ktime_add(ws->total_time, duration);
	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
		ws->max_time = duration;
	ws->last_time = now;
	del_timer(&ws->timer);
	ws->timer_expires = 0;
	if (ws->autosleep_enabled)
		update_prevent_sleep_time(ws, now);
	/*
	 * Increment the counter of registered wakeup events and decrement the
	 * counter of wakeup events in progress simultaneously.
	 */
	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
	trace_wakeup_source_deactivate(ws->name, cec);
	split_counters(&cnt, &inpr);
	/* Wake up pm_get_wakeup_count() waiters once nothing is in progress. */
	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
		wake_up(&wakeup_count_wait_queue);
}
/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 * NULL is silently ignored.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
	unsigned long flags;
	if (!ws)
		return;
	spin_lock_irqsave(&ws->lock, flags);
	if (ws->active)
		wakeup_source_deactivate(ws);
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);
/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
	unsigned long irqflags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	__pm_relax(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);
}
EXPORT_SYMBOL_GPL(pm_relax);
/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @t: timer list
 *
 * Call wakeup_source_deactivate() for the wakeup source whose address is stored
 * in @data if it is currently active and its timer has not been canceled and
 * the expiration time of the timer is not in future.
 */
static void pm_wakeup_timer_fn(struct timer_list *t)
{
	struct wakeup_source *ws = from_timer(ws, t, timer);
	unsigned long flags;
	spin_lock_irqsave(&ws->lock, flags);
	/* timer_expires == 0 means the timer was canceled concurrently. */
	if (ws->active && ws->timer_expires
	    && time_after_eq(jiffies, ws->timer_expires)) {
		wakeup_source_deactivate(ws);
		ws->expire_count++;
	}
	spin_unlock_irqrestore(&ws->lock, flags);
}
/**
 * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * NULL is silently ignored.
 * @msec: Anticipated event processing time (in milliseconds).
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel. If @ws is
 * not active, activate it. If @msec is nonzero, set up the @ws' timer to
 * execute pm_wakeup_timer_fn() in future.
 *
 * It is safe to call this function from interrupt context.
 */
void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
{
	unsigned long flags;
	unsigned long expires;
	if (!ws)
		return;
	spin_lock_irqsave(&ws->lock, flags);
	wakeup_source_report_event(ws, hard);
	/* @msec == 0 means the event is finished right away. */
	if (!msec) {
		wakeup_source_deactivate(ws);
		goto unlock;
	}
	expires = jiffies + msecs_to_jiffies(msec);
	/* 0 is reserved for "no timer pending"; avoid it as a deadline. */
	if (!expires)
		expires = 1;
	/* Only ever extend the deadline, never shorten it. */
	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
		mod_timer(&ws->timer, expires);
		ws->timer_expires = expires;
	}
 unlock:
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
/**
 * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 *
 * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
 */
void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
{
	unsigned long irqflags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
/*
 * pm_print_active_wakeup_sources - Log every currently active wakeup source.
 *
 * If no source is active, log the one whose last_time is most recent instead.
 */
void pm_print_active_wakeup_sources(void)
{
	struct wakeup_source *ws;
	int srcuidx, active = 0;
	struct wakeup_source *last_activity_ws = NULL;
	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		if (ws->active) {
			pm_pr_dbg("active wakeup source: %s\n", ws->name);
			active = 1;
		} else if (!active &&
			   (!last_activity_ws ||
			    ktime_to_ns(ws->last_time) >
			    ktime_to_ns(last_activity_ws->last_time))) {
			last_activity_ws = ws;
		}
	}
	if (!active && last_activity_ws)
		pm_pr_dbg("last active wakeup source: %s\n",
			  last_activity_ws->name);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
/**
 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past and return true if new wakeup events have been registered
 * since the old value was stored. Also return true if the current number of
 * wakeup events being processed is different from zero.
 */
bool pm_wakeup_pending(void)
{
	unsigned long flags;
	bool ret = false;
	raw_spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		unsigned int cnt, inpr;
		split_counters(&cnt, &inpr);
		ret = (cnt != saved_count || inpr > 0);
		/* Once a wakeup is pending, stop checking until re-armed. */
		events_check_enabled = !ret;
	}
	raw_spin_unlock_irqrestore(&events_lock, flags);
	if (ret) {
		pm_pr_dbg("Wakeup pending, aborting suspend\n");
		pm_print_active_wakeup_sources();
	}
	/* Also honor explicit aborts requested via pm_system_wakeup(). */
	return ret || atomic_read(&pm_abort_suspend) > 0;
}
EXPORT_SYMBOL_GPL(pm_wakeup_pending);
/* Request aborting any suspend in progress and wake from suspend-to-idle. */
void pm_system_wakeup(void)
{
	atomic_inc(&pm_abort_suspend);
	s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
/* Revert one pm_system_wakeup() request; the counter never goes negative. */
void pm_system_cancel_wakeup(void)
{
	atomic_dec_if_positive(&pm_abort_suspend);
}
/*
 * pm_wakeup_clear - Forget recorded wakeup IRQs.
 * @irq_number: If nonzero and equal to the first recorded wakeup IRQ, the
 * second recorded IRQ is promoted to the first slot; otherwise both slots
 * are cleared.  A zero @irq_number also resets the suspend-abort counter.
 */
void pm_wakeup_clear(unsigned int irq_number)
{
	raw_spin_lock_irq(&wakeup_irq_lock);
	if (irq_number && wakeup_irq[0] == irq_number)
		wakeup_irq[0] = wakeup_irq[1];
	else
		wakeup_irq[0] = 0;
	wakeup_irq[1] = 0;
	raw_spin_unlock_irq(&wakeup_irq_lock);
	if (!irq_number)
		atomic_set(&pm_abort_suspend, 0);
}
/*
 * pm_system_irq_wakeup - Record an IRQ as a wakeup reason and trigger wakeup.
 * @irq_number: Number of the interrupt that fired during system transition.
 *
 * Up to two wakeup IRQs are recorded per cycle; any further ones are dropped
 * (and logged as 0) without triggering another system wakeup.
 */
void pm_system_irq_wakeup(unsigned int irq_number)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
	if (wakeup_irq[0] == 0)
		wakeup_irq[0] = irq_number;
	else if (wakeup_irq[1] == 0)
		wakeup_irq[1] = irq_number;
	else
		irq_number = 0;
	/* %u, not %d: irq_number is unsigned (see printk-formats). */
	pm_pr_dbg("Triggering wakeup from IRQ %u\n", irq_number);
	raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
	if (irq_number)
		pm_system_wakeup();
}
/* Return the first wakeup IRQ recorded in the last cycle (0 if none). */
unsigned int pm_wakeup_irq(void)
{
	return wakeup_irq[0];
}
/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 * @block: Whether or not to block.
 *
 * Store the number of registered wakeup events at the address in @count. If
 * @block is set, block until the current number of wakeup events being
 * processed is zero.
 *
 * Return 'false' if the current number of wakeup events being processed is
 * nonzero. Otherwise return 'true'.
 */
bool pm_get_wakeup_count(unsigned int *count, bool block)
{
	unsigned int cnt, inpr;
	if (block) {
		DEFINE_WAIT(wait);
		/* Wait until no wakeup events are in progress or a signal
		 * arrives; wakeup_source_deactivate() wakes this queue. */
		for (;;) {
			prepare_to_wait(&wakeup_count_wait_queue, &wait,
					TASK_INTERRUPTIBLE);
			split_counters(&cnt, &inpr);
			if (inpr == 0 || signal_pending(current))
				break;
			pm_print_active_wakeup_sources();
			schedule();
		}
		finish_wait(&wakeup_count_wait_queue, &wait);
	}
	/* Re-read so the reported values are as fresh as possible. */
	split_counters(&cnt, &inpr);
	*count = cnt;
	return !inpr;
}
/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events for pm_check_wakeup_events(), enable
 * wakeup events detection and return 'true'. Otherwise disable wakeup events
 * detection and return 'false'.
 */
bool pm_save_wakeup_count(unsigned int count)
{
	unsigned int cnt, inpr;
	unsigned long flags;
	events_check_enabled = false;
	raw_spin_lock_irqsave(&events_lock, flags);
	split_counters(&cnt, &inpr);
	/* Arm checking only if no events occurred since @count was read. */
	if (cnt == count && inpr == 0) {
		saved_count = count;
		events_check_enabled = true;
	}
	raw_spin_unlock_irqrestore(&events_lock, flags);
	return events_check_enabled;
}
#ifdef CONFIG_PM_AUTOSLEEP
/**
 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
 * @set: Whether to set or to clear the autosleep_enabled flags.
 */
void pm_wakep_autosleep_enabled(bool set)
{
	struct wakeup_source *ws;
	ktime_t now = ktime_get();
	int srcuidx;
	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		spin_lock_irq(&ws->lock);
		if (ws->autosleep_enabled != set) {
			ws->autosleep_enabled = set;
			/*
			 * For an active source, either start the prevent-sleep
			 * accounting period or close the one in progress.
			 */
			if (ws->active) {
				if (set)
					ws->start_prevent_time = now;
				else
					update_prevent_sleep_time(ws, now);
			}
		}
		spin_unlock_irq(&ws->lock);
	}
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */
/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 *
 * Return: always 0.
 */
static int print_wakeup_source_stats(struct seq_file *m,
				     struct wakeup_source *ws)
{
	unsigned long flags;
	ktime_t total_time;
	ktime_t max_time;
	unsigned long active_count;
	ktime_t active_time;
	ktime_t prevent_sleep_time;
	spin_lock_irqsave(&ws->lock, flags);
	total_time = ws->total_time;
	max_time = ws->max_time;
	prevent_sleep_time = ws->prevent_sleep_time;
	active_count = ws->active_count;
	/* Fold the still-open active period into the reported totals. */
	if (ws->active) {
		ktime_t now = ktime_get();
		active_time = ktime_sub(now, ws->last_time);
		total_time = ktime_add(total_time, active_time);
		if (active_time > max_time)
			max_time = active_time;
		if (ws->autosleep_enabled)
			prevent_sleep_time = ktime_add(prevent_sleep_time,
				ktime_sub(now, ws->start_prevent_time));
	} else {
		active_time = 0;
	}
	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
		   ws->name, active_count, ws->event_count,
		   ws->wakeup_count, ws->expire_count,
		   ktime_to_ms(active_time), ktime_to_ms(total_time),
		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
		   ktime_to_ms(prevent_sleep_time));
	spin_unlock_irqrestore(&ws->lock, flags);
	return 0;
}
/*
 * seq_file start callback: print the column header on the first call,
 * take the SRCU read lock (index kept in the seq_file private area until
 * the matching stop callback) and skip to the @pos-th wakeup source.
 */
static void *wakeup_sources_stats_seq_start(struct seq_file *m,
					    loff_t *pos)
{
	struct wakeup_source *ws;
	loff_t n = *pos;
	int *srcuidx = m->private;
	if (n == 0) {
		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
			"expire_count\tactive_since\ttotal_time\tmax_time\t"
			"last_change\tprevent_suspend_time\n");
	}
	*srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		if (n-- <= 0)
			return ws;
	}
	return NULL;
}
/*
 * seq_file next callback: advance to the next wakeup source; when the list
 * is exhausted, append the aggregate stats of deleted sources and stop.
 */
static void *wakeup_sources_stats_seq_next(struct seq_file *m,
					   void *v, loff_t *pos)
{
	struct wakeup_source *ws = v;
	struct wakeup_source *next_ws = NULL;
	++(*pos);
	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
		next_ws = ws;
		break;
	}
	if (!next_ws)
		print_wakeup_source_stats(m, &deleted_ws);
	return next_ws;
}
/* seq_file stop callback: drop the SRCU read lock taken in ->start(). */
static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
{
	int *srcuidx = m->private;
	srcu_read_unlock(&wakeup_srcu, *srcuidx);
}
/**
 * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
 * @m: seq_file to print the statistics into.
 * @v: wakeup_source of each iteration
 *
 * Return: always 0 (propagated from print_wakeup_source_stats()).
 */
static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
{
	return print_wakeup_source_stats(m, v);
}
/* seq_file iterator over all registered wakeup sources (plus deleted_ws). */
static const struct seq_operations wakeup_sources_stats_seq_ops = {
	.start = wakeup_sources_stats_seq_start,
	.next  = wakeup_sources_stats_seq_next,
	.stop  = wakeup_sources_stats_seq_stop,
	.show  = wakeup_sources_stats_seq_show,
};
/* Open callback: allocate per-open storage for the SRCU read-lock index. */
static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
}
/* File operations for the debugfs "wakeup_sources" statistics file. */
static const struct file_operations wakeup_sources_stats_fops = {
	.owner = THIS_MODULE,
	.open = wakeup_sources_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
/* Create the read-only "wakeup_sources" file at the debugfs root. */
static int __init wakeup_sources_debugfs_init(void)
{
	debugfs_create_file("wakeup_sources", 0444, NULL, NULL,
			    &wakeup_sources_stats_fops);
	return 0;
}
postcore_initcall(wakeup_sources_debugfs_init);
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Driver for Freescale MC44S803 Low Power CMOS Broadband Tuner
*
* Copyright (c) 2009 Jochen Friedrich <[email protected]>
*/
#ifndef MC44S803_PRIV_H
#define MC44S803_PRIV_H
/* This driver is based on the information available in the datasheet
http://www.freescale.com/files/rf_if/doc/data_sheet/MC44S803.pdf
SPI or I2C Address : 0xc0-0xc6
Reg.No | Function
-------------------------------------------
00 | Power Down
 01 | Reference Oscillator
02 | Reference Dividers
03 | Mixer and Reference Buffer
04 | Reset/Serial Out
05 | LO 1
06 | LO 2
07 | Circuit Adjust
08 | Test
09 | Digital Tune
0A | LNA AGC
0B | Data Register Address
0C | Regulator Test
0D | VCO Test
0E | LNA Gain/Input Power
0F | ID Bits
*/
#define MC44S803_OSC 26000000 /* 26 MHz */
#define MC44S803_IF1 1086000000 /* 1086 MHz */
#define MC44S803_IF2 36125000 /* 36.125 MHz */
#define MC44S803_REG_POWER 0
#define MC44S803_REG_REFOSC 1
#define MC44S803_REG_REFDIV 2
#define MC44S803_REG_MIXER 3
#define MC44S803_REG_RESET 4
#define MC44S803_REG_LO1 5
#define MC44S803_REG_LO2 6
#define MC44S803_REG_CIRCADJ 7
#define MC44S803_REG_TEST 8
#define MC44S803_REG_DIGTUNE 9
#define MC44S803_REG_LNAAGC 0x0A
#define MC44S803_REG_DATAREG 0x0B
#define MC44S803_REG_REGTEST 0x0C
#define MC44S803_REG_VCOTEST 0x0D
#define MC44S803_REG_LNAGAIN 0x0E
#define MC44S803_REG_ID 0x0F
/* Register definitions */
#define MC44S803_ADDR 0x0F
#define MC44S803_ADDR_S 0
/* REG_POWER */
#define MC44S803_POWER 0xFFFFF0
#define MC44S803_POWER_S 4
/* REG_REFOSC */
#define MC44S803_REFOSC 0x1FF0
#define MC44S803_REFOSC_S 4
#define MC44S803_OSCSEL 0x2000
#define MC44S803_OSCSEL_S 13
/* REG_REFDIV */
#define MC44S803_R2 0x1FF0
#define MC44S803_R2_S 4
#define MC44S803_REFBUF_EN 0x2000
#define MC44S803_REFBUF_EN_S 13
#define MC44S803_R1 0x7C000
#define MC44S803_R1_S 14
/* REG_MIXER */
#define MC44S803_R3 0x70
#define MC44S803_R3_S 4
#define MC44S803_MUX3 0x80
#define MC44S803_MUX3_S 7
#define MC44S803_MUX4 0x100
#define MC44S803_MUX4_S 8
#define MC44S803_OSC_SCR 0x200
#define MC44S803_OSC_SCR_S 9
#define MC44S803_TRI_STATE 0x400
#define MC44S803_TRI_STATE_S 10
#define MC44S803_BUF_GAIN 0x800
#define MC44S803_BUF_GAIN_S 11
#define MC44S803_BUF_IO 0x1000
#define MC44S803_BUF_IO_S 12
#define MC44S803_MIXER_RES 0xFE000
#define MC44S803_MIXER_RES_S 13
/* REG_RESET */
#define MC44S803_RS 0x10
#define MC44S803_RS_S 4
#define MC44S803_SO 0x20
#define MC44S803_SO_S 5
/* REG_LO1 */
#define MC44S803_LO1 0xFFF0
#define MC44S803_LO1_S 4
/* REG_LO2 */
#define MC44S803_LO2 0x7FFF0
#define MC44S803_LO2_S 4
/* REG_CIRCADJ */
#define MC44S803_G1 0x20
#define MC44S803_G1_S 5
#define MC44S803_G3 0x80
#define MC44S803_G3_S 7
#define MC44S803_CIRCADJ_RES 0x300
#define MC44S803_CIRCADJ_RES_S 8
#define MC44S803_G6 0x400
#define MC44S803_G6_S 10
#define MC44S803_G7 0x800
#define MC44S803_G7_S 11
#define MC44S803_S1 0x1000
#define MC44S803_S1_S 12
#define MC44S803_LP 0x7E000
#define MC44S803_LP_S 13
#define MC44S803_CLRF 0x80000
#define MC44S803_CLRF_S 19
#define MC44S803_CLIF 0x100000
#define MC44S803_CLIF_S 20
/* REG_TEST */
/* REG_DIGTUNE */
#define MC44S803_DA 0xF0
#define MC44S803_DA_S 4
#define MC44S803_XOD 0x300
#define MC44S803_XOD_S 8
#define MC44S803_RST 0x10000
#define MC44S803_RST_S 16
#define MC44S803_LO_REF 0x1FFF00
#define MC44S803_LO_REF_S 8
#define MC44S803_AT 0x200000
#define MC44S803_AT_S 21
#define MC44S803_MT 0x400000
#define MC44S803_MT_S 22
/* REG_LNAAGC */
#define MC44S803_G 0x3F0
#define MC44S803_G_S 4
#define MC44S803_AT1 0x400
#define MC44S803_AT1_S 10
#define MC44S803_AT2 0x800
#define MC44S803_AT2_S 11
#define MC44S803_HL_GR_EN 0x8000
#define MC44S803_HL_GR_EN_S 15
#define MC44S803_AGC_AN_DIG 0x10000
#define MC44S803_AGC_AN_DIG_S 16
#define MC44S803_ATTEN_EN 0x20000
#define MC44S803_ATTEN_EN_S 17
#define MC44S803_AGC_READ_EN 0x40000
#define MC44S803_AGC_READ_EN_S 18
#define MC44S803_LNA0 0x80000
#define MC44S803_LNA0_S 19
#define MC44S803_AGC_SEL 0x100000
#define MC44S803_AGC_SEL_S 20
#define MC44S803_AT0 0x200000
#define MC44S803_AT0_S 21
#define MC44S803_B 0xC00000
#define MC44S803_B_S 22
/* REG_DATAREG */
#define MC44S803_D 0xF0
#define MC44S803_D_S 4
/* REG_REGTEST */
/* REG_VCOTEST */
/* REG_LNAGAIN */
#define MC44S803_IF_PWR 0x700
#define MC44S803_IF_PWR_S 8
#define MC44S803_RF_PWR 0x3800
#define MC44S803_RF_PWR_S 11
#define MC44S803_LNA_GAIN 0xFC000
#define MC44S803_LNA_GAIN_S 14
/* REG_ID */
#define MC44S803_ID 0x3E00
#define MC44S803_ID_S 9
/* Some macros to read/write fields */
/* First shift, then mask */
#define MC44S803_REG_SM(_val, _reg) \
	(((_val) << _reg##_S) & (_reg))

/* First mask, then shift */
#define MC44S803_REG_MS(_val, _reg) \
	(((_val) & (_reg)) >> _reg##_S)

/* Driver-private state for one MC44S803 tuner instance. */
struct mc44s803_priv {
	struct mc44s803_config *cfg;	/* board-specific configuration */
	struct i2c_adapter *i2c;	/* I2C bus the tuner sits on */
	struct dvb_frontend *fe;	/* owning DVB frontend */
	u32 frequency;	/* last tuned frequency — presumably Hz; confirm against driver users */
};
#endif
|
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _IPT_CLUSTERIP_H_target
#define _IPT_CLUSTERIP_H_target
#include <linux/types.h>
#include <linux/if_ether.h>
/* Which packet fields feed the node-selection hash. */
enum clusterip_hashmode {
	CLUSTERIP_HASHMODE_SIP = 0,		/* source IP only */
	CLUSTERIP_HASHMODE_SIP_SPT,		/* source IP + source port */
	CLUSTERIP_HASHMODE_SIP_SPT_DPT,		/* source IP + source port + dest port */
};

#define CLUSTERIP_HASHMODE_MAX CLUSTERIP_HASHMODE_SIP_SPT_DPT

#define CLUSTERIP_MAX_NODES 16

#define CLUSTERIP_FLAG_NEW 0x00000001	/* a new clusterip_config is being created */

struct clusterip_config;

/* Userspace->kernel target parameters for the CLUSTERIP iptables target. */
struct ipt_clusterip_tgt_info {

	__u32 flags;	/* CLUSTERIP_FLAG_* */

	/* only relevant for new ones */
	__u8 clustermac[ETH_ALEN];	/* MAC shared by all cluster nodes */
	__u16 num_total_nodes;		/* nodes in the whole cluster */
	__u16 num_local_nodes;		/* of those, how many this host handles */
	__u16 local_nodes[CLUSTERIP_MAX_NODES];	/* node numbers handled locally */
	__u32 hash_mode;		/* enum clusterip_hashmode */
	__u32 hash_initval;		/* initial value for the hash — TODO confirm semantics */

	/* Used internally by the kernel */
	struct clusterip_config *config;
};
#endif /*_IPT_CLUSTERIP_H_target*/
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2007-2008 Pierre Ossman
*/
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
/* Testcase result codes (also used as internal error markers) */
#define RESULT_OK 0
#define RESULT_FAIL 1
#define RESULT_UNSUP_HOST 2	/* host lacks a required capability */
#define RESULT_UNSUP_CARD 3	/* card lacks a required capability */

/* Size of the kernel transfer/verify buffer: 2^BUFFER_ORDER pages */
#define BUFFER_ORDER 2
#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)

/* Alignment tests try byte offsets 1 .. TEST_ALIGN_END-1 */
#define TEST_ALIGN_END 8

/*
 * Limit the test area size to the maximum MMC HC erase group size. Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;	/* allocation covers 2^order contiguous pages */
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 * @sg_areq: scatterlist for non-blocking request
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
	struct scatterlist *sg_areq;	/* second mapping used for double-buffered requests */
};
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec64 ts;
	unsigned int rate;	/* bytes per second */
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;	/* 0, RESULT_* or negative errno */
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card *card;

	u8 scratch[BUFFER_SIZE];
	u8 *buffer;	/* kmalloc'd BUFFER_SIZE buffer — confirm at allocation site */
#ifdef CONFIG_HIGHMEM
	struct page *highmem;
#endif
	struct mmc_test_area area;
	struct mmc_test_general_result *gr;	/* NULL when not recording results */
};

/* How the test area must be prepared before a multiple-rw test run */
enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

/* Parameter table for the multiple read/write performance tests */
struct mmc_test_multiple_rw {
	unsigned int *sg_len;	/* per-run scatterlist lengths — confirm against users */
	unsigned int *bs;	/* per-run block sizes — confirm against users */
	unsigned int len;	/* number of entries in the tables */
	unsigned int size;	/* total transfer size */
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
/*
* Configure correct block size in card
*/
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	/* CMD16: ask the card to use @size-byte blocks */
	return mmc_set_blocklen(test->card, size);
}
static bool mmc_test_card_cmd23(struct mmc_card *card)
{
return mmc_card_mmc(card) ||
(mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}
/*
 * Prepare mrq->sbc as CMD23 (SET_BLOCK_COUNT) for @blocks blocks, or drop
 * it (mrq->sbc = NULL) when host, card, command type or quirks rule it out.
 */
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
	struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}
/*
* Fill in the mmc_request structure given a set of transfer parameters.
*/
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	/* Multi-block transfers use the MULTIPLE_BLOCK command variants */
	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	/* Byte-addressed cards take a byte offset, not a sector number */
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Single-block transfers need no STOP_TRANSMISSION */
	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	/* Use CMD23 instead of CMD12 where host and card support it */
	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}
static int mmc_test_busy(struct mmc_command *cmd)
{
return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
/*
* Wait for the card to finish the busy state
*/
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		/* CMD13: poll the card's status register */
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		/*
		 * Warn once if the host claims to wait out busy itself
		 * (MMC_CAP_WAIT_WHILE_BUSY) yet the card is still busy here.
		 */
		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}
/*
* Transfer a single sector of kernel addressable data
*/
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	/* One segment covering the whole (kernel-addressable) buffer */
	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	/* Wait out any post-transfer programming time */
	return mmc_test_wait_busy(test);
}
/* Release every page allocation tracked by @mem, then @mem itself. NULL ok. */
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;

	while (mem->cnt) {
		mem->cnt--;
		__free_pages(mem->arr[mem->cnt].page, mem->arr[mem->cnt].order);
	}

	kfree(mem->arr);
	kfree(mem);
}
/*
* Allocate a lot of memory, preferably max_sz but at least min_sz. In case
* there isn't much memory do not exceed 1/16th total lowmem pages. Also do
* not exceed a maximum number of segments and try not to make segments much
* bigger than maximum segment size.
*/
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	/* Cap at 1/16th of free lowmem so the test cannot starve the system */
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		/* Try the largest useful order first, halving until it fits */
		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			/* Out of memory: only fail if the minimum wasn't reached */
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
/*
* Map memory into a scatterlist. Optionally allow the same memory to be
* mapped more than once.
*/
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			/* Cap segment length so at least min_sg_len segments result */
			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);	/* with @repeat, wrap around the allocations */

	/* Could not cover @size with the available segments */
	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
/*
* Map memory into a scatterlist so that no pages are contiguous. Allow the
* same memory to be mapped more than once.
*/
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		/* Walk allocations from the end, and each allocation backwards */
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			/* Skip pages contiguous with the previous segment */
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		/* Wrap around and reuse the same memory if sz remains */
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
/*
* Calculate transfer rate in bytes per second.
*/
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
	uint64_t ns;

	ns = timespec64_to_ns(ts);
	bytes *= 1000000000;	/* so bytes/ns yields bytes per second */

	/*
	 * Scale both operands down together until the divisor fits in the
	 * 32 bits required by do_div().
	 */
	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
/*
* Save transfer results for future usage
*/
/* Record one transfer measurement on the current testcase's result list. */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec64 ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	/* Results are only collected while a testcase is being run */
	if (!test->gr)
		return;

	tr = kmalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return;	/* best effort: dropping a sample is harmless */

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}
/*
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec64 *ts1, struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
			 (u32)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	/* Also record the measurement for debugfs consumers */
	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
/*
* Print the average transfer rate.
*/
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec64 *ts1,
				    struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;	/* total bytes over all @count transfers */
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%llu.%09u seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS, sg_len %d)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (u64)ts.tv_sec, (u32)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100,
			 test->area.sg_len);

	/* Also record the measurement for debugfs consumers */
	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
* Return the card size in sectors.
*/
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	/* Block-addressed (e)MMC reports its size via EXT_CSD */
	if (mmc_card_blockaddr(card) && !mmc_card_sd(card))
		return card->ext_csd.sectors;

	/* Otherwise derive sector count from the CSD capacity fields */
	return card->csd.capacity << (card->csd.read_blkbits - 9);
}
/*******************************************************************/
/* Test preparation and cleanup */
/*******************************************************************/
/*
* Fill the first couple of sectors of the card with known data
* so that bad reads/writes can be detected
*/
static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Fill pattern: constant @val for writes, 0..255 ramp otherwise */
	if (write)
		memset(test->buffer, val, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	/* Stamp the pattern over the first BUFFER_SIZE bytes of the card */
	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/* Pre-fill the test sectors with 0xDF so stale data cannot pass verification */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1, 0xDF);
}
/* Write the 0..255 ramp that the read tests verify against */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0, 0);
}
/* Zero the test sectors after a testcase */
static int mmc_test_cleanup(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1, 0);
}
/*******************************************************************/
/* Test execution helpers */
/*******************************************************************/
/*
* Modifies the mmc_request to perform the "short transfer" tests
*/
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		/*
		 * Multi-block data paired with a single-block command: the
		 * card will transfer fewer blocks than the host expects.
		 */
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		/* Data phase attached to a no-data command: no data at all */
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
/*
* Checks that a normal transfer didn't have any errors
*/
static int mmc_test_check_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	/* Report the first error in command order: sbc, cmd, data, stop */
	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	/* A short transfer without an error code is still a failure */
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	/* -EINVAL from the host driver means "not supported" here */
	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
/*
* Checks that a "short transfer" behaved as expected
*/
static int mmc_test_check_broken_result(struct mmc_test_card *test,
struct mmc_request *mrq)
{
int ret;
if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
return -EINVAL;
ret = 0;
if (!ret && mrq->cmd->error)
ret = mrq->cmd->error;
if (!ret && mrq->data->error == 0)
ret = RESULT_FAIL;
if (!ret && mrq->data->error != -ETIMEDOUT)
ret = mrq->data->error;
if (!ret && mrq->stop && mrq->stop->error)
ret = mrq->stop->error;
if (mrq->data->blocks > 1) {
if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
ret = RESULT_FAIL;
} else {
if (!ret && mrq->data->bytes_xfered > 0)
ret = RESULT_FAIL;
}
if (ret == -EINVAL)
ret = RESULT_UNSUP_HOST;
return ret;
}
/*
 * All pieces of one test request allocated together, so a single memset
 * can reset the lot (see mmc_test_req_reset()).
 */
struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;		/* CMD23 slot */
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};
/*
* Tests nonblock transfer with certain parameters
*/
/* Zero @rq and rewire the embedded request's cmd/data/stop pointers. */
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
	memset(rq, 0, sizeof(*rq));

	rq->mrq.cmd = &rq->cmd;
	rq->mrq.data = &rq->data;
	rq->mrq.stop = &rq->stop;
}
static struct mmc_test_req *mmc_test_req_alloc(void)
{
struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);
if (rq)
mmc_test_req_reset(rq);
return rq;
}
/* mrq->done callback: wake the waiter in mmc_test_start_areq() */
static void mmc_test_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
/*
 * One pipeline step for non-blocking transfers: prepare and start @mrq
 * while waiting for and checking the previously started @prev_mrq.
 * Either pointer may be NULL (first and last iterations).
 */
static int mmc_test_start_areq(struct mmc_test_card *test,
			       struct mmc_request *mrq,
			       struct mmc_request *prev_mrq)
{
	struct mmc_host *host = test->card->host;
	int err = 0;

	if (mrq) {
		init_completion(&mrq->completion);
		mrq->done = mmc_test_wait_done;
		mmc_pre_req(host, mrq);	/* prepare (e.g. map) while prev runs */
	}

	if (prev_mrq) {
		wait_for_completion(&prev_mrq->completion);
		err = mmc_test_wait_busy(test);
		if (!err)
			err = mmc_test_check_result(test, prev_mrq);
	}

	if (!err && mrq) {
		err = mmc_start_request(host, mrq);
		if (err)
			mmc_retune_release(host);
	}

	if (prev_mrq)
		mmc_post_req(host, prev_mrq, 0);

	if (err && mrq)
		mmc_post_req(host, mrq, err);	/* undo mmc_pre_req() on failure */

	return err;
}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      unsigned int dev_addr, int write,
				      int count)
{
	struct mmc_test_req *rq1, *rq2;
	struct mmc_request *mrq, *prev_mrq;
	int i;
	int ret = RESULT_OK;
	struct mmc_test_area *t = &test->area;
	struct scatterlist *sg = t->sg;
	struct scatterlist *sg_areq = t->sg_areq;

	/* Double buffering: one request runs while the next is prepared */
	rq1 = mmc_test_req_alloc();
	rq2 = mmc_test_req_alloc();
	if (!rq1 || !rq2) {
		ret = RESULT_FAIL;
		goto err;
	}

	mrq = &rq1->mrq;
	prev_mrq = NULL;

	for (i = 0; i < count; i++) {
		mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
		mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
				     t->blocks, 512, write);
		ret = mmc_test_start_areq(test, mrq, prev_mrq);
		if (ret)
			goto err;

		if (!prev_mrq)
			prev_mrq = &rq2->mrq;

		/* Alternate requests and scatterlists each iteration */
		swap(mrq, prev_mrq);
		swap(sg, sg_areq);
		dev_addr += t->blocks;
	}

	/* Drain the pipeline: wait for and check the last request */
	ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
	kfree(rq1);
	kfree(rq2);
	return ret;
}
/*
* Tests a basic transfer with certain parameters
*/
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	/* Let the card finish programming before checking the outcome */
	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}
/*
* Tests a transfer where the card will fail completely or partly
*/
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	/* Deliberately mismatch command and data phase (short transfer) */
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	/* Expect the specific failure pattern, not success */
	return mmc_test_check_broken_result(test, &mrq);
}
/*
* Does a complete transfer test where data is also validated
*
* Note: mmc_test_prepare() must have been done before this call
*/
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;

	/* Writes send a 0..255 ramp; reads start from a zeroed scratch buffer */
	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		/* Read everything back in 512-byte sectors and verify */
		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		/*
		 * Read one extra sector so the 0xDF filler written by
		 * mmc_test_prepare_write() directly after the data is
		 * verified too.
		 */
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		/* Data region must hold the ramp... */
		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/* ...and the remainder must still be the 0xDF filler */
		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		/* Reads: verify the ramp written by mmc_test_prepare_read() */
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
/*******************************************************************/
/* Tests */
/*******************************************************************/
/* One entry in the testcase table; prepare/cleanup may be NULL. */
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);	/* returns 0 or RESULT_* */
	int (*cleanup)(struct mmc_test_card *);
};
/* Write a single 512-byte block at address 0; data is not verified. */
static int mmc_test_basic_write(struct mmc_test_card *test)
{
	struct scatterlist sg;
	int err;

	err = mmc_test_set_blksize(test, 512);
	if (err)
		return err;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}
/* Read a single 512-byte block at address 0; data is not verified. */
static int mmc_test_basic_read(struct mmc_test_card *test)
{
	struct scatterlist sg;
	int err;

	err = mmc_test_set_blksize(test, 512);
	if (err)
		return err;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}
/* Write one 512-byte block at address 0 and verify it by reading back */
static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}
/* Read one 512-byte block at address 0 and verify the expected pattern */
static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Aim for two pages, clamped to what the host can do in one request */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}
static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Aim for two pages, clamped to what the host can do in one request */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}
/* Verified writes of every power-of-two block size below 512 */
static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	/* Partial-block writes need explicit card support */
	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/* Verified reads of every power-of-two block size below 512 */
static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	/* Partial-block reads need explicit card support */
	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}
/* Verified writes of odd block sizes (3, 10, 17, ... stepping by 7) */
static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	/* Partial-block writes need explicit card support */
	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/* Verified reads of odd block sizes (3, 10, 17, ... stepping by 7) */
static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	/* Partial-block reads need explicit card support */
	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}
/* Single-block writes from buffers misaligned by 1..TEST_ALIGN_END-1 bytes */
static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/* Single-block reads into buffers misaligned by 1..TEST_ALIGN_END-1 bytes */
static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
/* Multi-block writes from buffers misaligned by 1..TEST_ALIGN_END-1 bytes */
static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Aim for two pages, clamped to what the host can do in one request */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/* Multi-block reads into buffers misaligned by 1..TEST_ALIGN_END-1 bytes */
static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Aim for two pages, clamped to what the host can do in one request */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
/* Deliberately broken single-block write (short transfer handling) */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
/* Deliberately broken single-block read (short transfer handling) */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
/* Deliberately broken multi-block write (short transfer handling) */
static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}
/* Deliberately broken multi-block read (short transfer handling) */
static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}
#ifdef CONFIG_HIGHMEM

/* Verified single-block write from a highmem page */
static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

/* Verified single-block read into a highmem page */
static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

/* Verified multi-block write from a highmem page */
static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Aim for two pages, clamped to what the host can do in one request */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

/* Verified multi-block read into a highmem page */
static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Aim for two pages, clamped to what the host can do in one request */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

/* Stand-in for all highmem tests when CONFIG_HIGHMEM is disabled */
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */
/*
* Map sz bytes so that it can be transferred.
*/
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len, bool nonblock)
{
	struct mmc_test_area *t = &test->area;
	int err;
	unsigned int sg_len = 0;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
				       &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}

	if (err || !nonblock)
		goto err;

	/* Non-blocking transfers need a second, identically sized mapping */
	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
						  t->max_segs, t->max_seg_sz,
						  &sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
				      t->max_seg_sz, &sg_len, min_sg_len);
	}
	if (!err && sg_len != t->sg_len)
		err = -EINVAL;

err:
	if (err)
		pr_info("%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}
/*
 * Transfer the bytes previously mapped by mmc_test_area_map(), using
 * 512-byte blocks.
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *area = &test->area;
	int ret;

	ret = mmc_test_simple_transfer(test, area->sg, area->sg_len, dev_addr,
				       area->blocks, 512, write);

	return ret;
}
/*
 * Map and transfer bytes for multiple transfers.
 *
 * Performs "count" transfers of sz bytes starting at dev_addr, either
 * non-blocking (queued) or blocking (sequential, each sz bytes further on).
 * When "timed", the average rate over all transfers is printed.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec64 ts1, ts2;
	int ret = 0;
	int i;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;	/* advance by sz bytes in sectors */
		}

	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}
/* Single (count == 1), blocking transfer with no minimum sg length. */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	int err;

	err = mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				   timed, 1, false, 0);

	return err;
}
/*
 * Write the test area entirely, as one maximal-size untimed transfer.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *area = &test->area;

	return mmc_test_area_io(test, area->max_tfr, area->dev_addr,
				1, 0, 0);
}
/*
 * Erase the test area entirely.  A no-op (success) if the card does not
 * support erase.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	/* max_sz bytes expressed as 512-byte sectors */
	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}
/*
 * Cleanup struct mmc_test_area: release both sg lists and the backing
 * test memory.  Always succeeds.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *area = &test->area;

	mmc_test_free_mem(area->mem);
	kfree(area->sg_areq);
	kfree(area->sg);

	return 0;
}
/*
 * Initialize an area for testing large transfers. The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization). Optionally, the area is erased
 * (if the card supports it) which may improve write performance. Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	/* Round up to at least 4 MiB in pref_erase-sized steps... */
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	/* ...then back down if that overshot the configured maximum */
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;	/* whole 512-byte blocks only */

	/* Clamp the largest single transfer to all relevant host limits */
	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer. Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once. Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
				   GFP_KERNEL);
	if (!t->sg_areq) {
		ret = -ENOMEM;
		goto out_free;
	}

	/* Middle of the card, aligned to a whole number of test areas */
	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
/*
 * Prepare for large transfers. Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_area_init(test, 0, 0);
	return err;
}
/*
 * Prepare for large transfers. Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_area_init(test, 1, 0);
	return err;
}
/*
 * Prepare for large transfers. Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_area_init(test, 1, 1);
	return err;
}
/*
 * Test best-case performance. Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list. This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *area = &test->area;

	/* One maximal-size, timed transfer */
	return mmc_test_area_io(test, area->max_tfr, area->dev_addr, write,
				max_scatter, 1);
}
/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_best_performance(test, 0, 0);
	return err;
}
/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_best_performance(test, 1, 0);
	return err;
}
/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_best_performance(test, 0, 1);
	return err;
}
/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_best_performance(test, 1, 1);
	return err;
}
/*
 * Single read performance by transfer size.
 *
 * Times one read at each power-of-two size from 512 bytes up to, and
 * finally including, the maximum transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/* Offset by sz so successive sizes use fresh sectors */
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	/* Final iteration at exactly max_tfr (may not be a power of two) */
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}
/*
 * Single write performance by transfer size.
 *
 * The area is erased before the loop and again before the final
 * maximal-size write, since writes to erased sectors may be faster.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/* Offset by sz so successive sizes use fresh sectors */
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	/* Final iteration at exactly max_tfr (may not be a power of two) */
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
/*
 * Single trim performance by transfer size.
 *
 * Times a TRIM at each power-of-two size from 512 bytes up to max_sz.
 * Requires both TRIM and erase support.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ktime_get_ts64(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		ktime_get_ts64(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	/* Final trim: sz has now reached (or passed) max_sz */
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	ktime_get_ts64(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
/*
 * Read the whole test area consecutively in sz-byte chunks and print the
 * average rate over all chunks.
 */
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;	/* number of chunks covering the area */
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
/*
 * Consecutive read performance by transfer size.
 *
 * Runs mmc_test_seq_read_perf() at each power-of-two size up to, and
 * finally including, the maximum transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}
/*
 * Erase the test area, then write it consecutively in sz-byte chunks and
 * print the average rate over all chunks.
 */
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	/* Writes to erased sectors may be faster; erase is not timed */
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;	/* number of chunks covering the area */
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
/*
 * Consecutive write performance by transfer size.
 *
 * Runs mmc_test_seq_write_perf() at each power-of-two size up to, and
 * finally including, the maximum transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}
/*
 * Consecutive trim performance by transfer size.
 *
 * For each power-of-two size: erase and refill the area (so there is real
 * data to trim), then trim it chunk by chunk, timing only the trims.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	/* Note: inclusive bound here (sz <= max_sz), unlike the read/write loops */
	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		ktime_get_ts64(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		ktime_get_ts64(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
/* Seed/state for the deterministic pseudo-random generator below. */
static unsigned int rnd_next = 1;

/*
 * Return a pseudo-random number in the range [0, rnd_cnt).  Uses the
 * classic rand() LCG constants; deterministic from run to run because
 * the seed is fixed (it is saved/restored by callers that want repeats).
 */
static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t scaled;

	rnd_next = rnd_next * 1103515245 + 12345;
	/* Take 15 bits of state, then scale into [0, rnd_cnt) */
	scaled = (uint64_t)((rnd_next >> 16) & 0x7fff) * rnd_cnt;
	return scaled >> 15;
}
/*
 * Random I/O of sz bytes for "secs" seconds.  The address space used is
 * the second quarter of the card, addressed as (erase-aligned chunk,
 * offset within chunk) pairs so accesses are spread across erase units.
 * When "print", the average rate over all transfers is reported.
 * force_retuning marks the host as needing retune before every request.
 */
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz, int secs, int force_retuning)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec64 ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;	/* transfer size in 512-byte sectors */

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;	/* # of erase chunks */
	range2 = range1 / ssz;				/* offsets per chunk */

	ktime_get_ts64(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		ktime_get_ts64(&ts2);
		ts = timespec64_sub(ts2, ts1);
		if (ts.tv_sec >= secs)
			break;
		ea = mmc_test_rnd_num(range1);
		/* Avoid hitting the same erase chunk twice in a row */
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		if (force_retuning)
			mmc_retune_needed(test->card->host);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
/*
 * Random I/O performance at each power-of-two size up to, and finally
 * including, the maximum transfer size; 10 seconds per size.
 */
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;	/* save PRNG state for replay */
			ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0);
			if (ret)
				return ret;
			rnd_next = next;	/* replay identical addresses */
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz, 10, 0);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz, 10, 0);
}
/* Re-tuning reliability: random reads with retuning forced every request. */
static int mmc_test_retuning(struct mmc_test_card *test)
{
	struct mmc_host *host = test->card->host;

	if (!mmc_can_retune(host)) {
		pr_info("%s: No retuning - test skipped\n",
			mmc_hostname(host));
		return RESULT_UNSUP_HOST;
	}

	/* 30 seconds of random 8 KiB reads, no rate printing */
	return mmc_test_rnd_perf(test, 0, 0, 8192, 30, 1);
}
/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_random_perf(test, 0);
	return err;
}
/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_random_perf(test, 1);
	return err;
}
/*
 * Transfer about tot_sz bytes sequentially in maximal-size chunks,
 * starting at a 64MiB-aligned address in the second quarter of the card,
 * and print the average rate.
 */
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec64 ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;	/* chunk size in 512-byte sectors */
	dev_addr = mmc_test_capacity(test->card) / 4;
	/* Do not run past the card capacity */
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}

	ktime_get_ts64(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
/*
 * Large sequential transfers into/from scattered pages: 10 runs of 10 MiB,
 * 5 runs of 100 MiB, then 3 runs of ~1000 MiB.
 */
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	/* All loops run at least once, so ret is 0 here on success */
	return ret;
}
/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_large_seq_perf(test, 0);
	return err;
}
/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_large_seq_perf(test, 1);
	return err;
}
/*
 * Transfer "size" bytes as a sequence of reqsize-byte requests, per the
 * options in tdata (direction, blocking mode, preparation).  Returns 0 on
 * success, a negative errno or test RESULT_* code otherwise; note the err
 * label only logs - it returns whatever is in ret at that point.
 */
static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;	/* card too small; ret is still 0 here */

	/* Need at least one whole request within size */
	if (reqsize > size)
		return 0;

	/* prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, test->card->erase_arg);
		/* Fall back to the generic erase argument if that failed */
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);

	if (ret)
		goto err;

	return ret;
err:
	pr_info("[%s] error\n", __func__);
	return ret;
}
/* Run mmc_test_rw_multiple() once per request size in rw->bs[]. */
static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	/* Non-blocking requests need the host to supply both hooks or neither */
	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}
/*
 * Run mmc_test_rw_multiple() once per sg length in rw->sg_len[], with a
 * fixed 512 KiB request size.
 */
static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}
/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	/* Request sizes: 4k, 8k, ..., 1M, then 4M (no 2M entry) */
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	/*
	 * Fix: dropped the stray ';' after the closing brace (an empty
	 * file-scope declaration that -Wpedantic warns about), matching
	 * the sibling profile functions.
	 */
	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	/* Request sizes: 4k, 8k, ..., 1M, then 4M (no 2M entry) */
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	/* Request sizes: 4k, 8k, ..., 1M, then 4M (no 2M entry) */
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	/* Request sizes: 4k, 8k, ..., 1M, then 4M (no 2M entry) */
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	/* sg list lengths: 1, then 8, 16, ..., 512 */
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	/*
	 * Fix: dropped the stray ';' after the closing brace (an empty
	 * file-scope declaration that -Wpedantic warns about), matching
	 * the sibling profile functions.
	 */
	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	/* sg list lengths: 1, then 8, 16, ..., 512 */
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	/* sg list lengths: 1, then 8, 16, ..., 512 */
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	/* sg list lengths: 1, then 8, 16, ..., 512 */
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * eMMC hardware reset.
 *
 * Returns RESULT_OK on success, RESULT_UNSUP_HOST if the host cannot
 * reset the card, RESULT_FAIL otherwise.
 */
static int mmc_test_reset(struct mmc_test_card *test)
{
	struct mmc_card *card = test->card;
	int err;

	err = mmc_hw_reset(card);
	if (!err) {
		/*
		 * Reset will re-enable the card's command queue, but tests
		 * expect it to be disabled.
		 */
		if (card->ext_csd.cmdq_en)
			mmc_cmdq_disable(card);
		return RESULT_OK;
	} else if (err == -EOPNOTSUPP) {
		return RESULT_UNSUP_HOST;
	}

	return RESULT_FAIL;
}
/*
 * Issue CMD13 (SEND_STATUS) and leave the response in *cmd.
 * SPI hosts do not address the card by RCA, so the argument stays 0.
 */
static int mmc_test_send_status(struct mmc_test_card *test,
				struct mmc_command *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(test->card->host))
		cmd->arg = test->card->rca << 16;
	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(test->card->host, cmd, 0);
}
/*
 * Start a data transfer flagged cap_cmd_during_tfr and, while it is in
 * flight, issue SEND_STATUS commands.  With repeat_cmd, keep polling until
 * the card returns to Tran state (or a 3 second timeout expires); otherwise
 * send just one status command.  use_sbc selects CMD23; use_areq uses the
 * non-blocking (queued) request path.
 */
static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
				     unsigned int dev_addr, int use_sbc,
				     int repeat_cmd, int write, int use_areq)
{
	struct mmc_test_req *rq = mmc_test_req_alloc();
	struct mmc_host *host = test->card->host;
	struct mmc_test_area *t = &test->area;
	struct mmc_request *mrq;
	unsigned long timeout;
	bool expired = false;
	int ret = 0, cmd_ret;
	u32 status = 0;
	int count = 0;

	if (!rq)
		return -ENOMEM;

	mrq = &rq->mrq;
	if (use_sbc)
		mrq->sbc = &rq->sbc;
	mrq->cap_cmd_during_tfr = true;

	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
			     512, write);

	/* Multi-block with CMD23 requested but not prepared: unsupported */
	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
		ret = mmc_host_cmd23(host) ?
		      RESULT_UNSUP_CARD :
		      RESULT_UNSUP_HOST;
		goto out_free;
	}

	/* Start ongoing data request */
	if (use_areq) {
		ret = mmc_test_start_areq(test, mrq, NULL);
		if (ret)
			goto out_free;
	} else {
		mmc_wait_for_req(host, mrq);
	}

	timeout = jiffies + msecs_to_jiffies(3000);
	do {
		count += 1;

		/* Send status command while data transfer in progress */
		cmd_ret = mmc_test_send_status(test, &rq->status);
		if (cmd_ret)
			break;

		status = rq->status.resp[0];
		if (status & R1_ERROR) {
			cmd_ret = -EIO;
			break;
		}

		if (mmc_is_req_done(host, mrq))
			break;

		expired = time_after(jiffies, timeout);
		if (expired) {
			pr_info("%s: timeout waiting for Tran state status %#x\n",
				mmc_hostname(host), status);
			cmd_ret = -ETIMEDOUT;
			break;
		}
	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);

	/* Wait for data request to complete */
	if (use_areq) {
		ret = mmc_test_start_areq(test, NULL, mrq);
	} else {
		mmc_wait_for_req_done(test->card->host, mrq);
	}

	/*
	 * For cap_cmd_during_tfr request, upper layer must send stop if
	 * required.
	 */
	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
		if (ret)
			mmc_wait_for_cmd(host, mrq->data->stop, 0);
		else
			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
	}

	if (ret)
		goto out_free;

	if (cmd_ret) {
		pr_info("%s: Send Status failed: status %#x, error %d\n",
			mmc_hostname(test->card->host), status, cmd_ret);
	}

	ret = mmc_test_check_result(test, mrq);
	if (ret)
		goto out_free;

	ret = mmc_test_wait_busy(test);
	if (ret)
		goto out_free;

	/* For near-maximal transfers, report how many commands fitted in */
	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
		pr_info("%s: %d commands completed during transfer of %u blocks\n",
			mmc_hostname(test->card->host), count, t->blocks);

	if (cmd_ret)
		ret = cmd_ret;
out_free:
	kfree(rq);

	return ret;
}
/*
 * For one transfer size: map the test area, then run the ongoing-transfer
 * test twice - once sending a single status command, once repeating status
 * commands until Tran state.
 */
static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
				      unsigned long sz, int use_sbc, int write,
				      int use_areq)
{
	struct mmc_test_area *t = &test->area;
	int ret;

	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
		return RESULT_UNSUP_HOST;

	ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
	if (ret)
		return ret;

	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
					use_areq);
	if (ret)
		return ret;

	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
					 use_areq);
}
/*
 * Run the commands-during-transfer test at every 512-byte size step up to
 * and including the maximum transfer size.
 */
static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
				    int write, int use_areq)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz <= t->max_tfr; sz += 512) {
		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
						 use_areq);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * Commands during read - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_cmds_during_tfr(test, 0, 0, 0);
	return err;
}
/*
 * Commands during write - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_cmds_during_tfr(test, 0, 1, 0);
	return err;
}
/*
 * Commands during read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_cmds_during_tfr(test, 1, 0, 0);
	return err;
}
/*
 * Commands during write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_cmds_during_tfr(test, 1, 1, 0);
	return err;
}
/*
 * Commands during non-blocking read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_cmds_during_tfr(test, 1, 0, 1);
	return err;
}
/*
 * Commands during non-blocking write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
{
	int err;

	err = mmc_test_cmds_during_tfr(test, 1, 1, 1);
	return err;
}
/*
 * Table of all test cases.  Entry order is the user-visible test number
 * (1-based index into this array), so append new tests rather than
 * inserting.  Each entry may supply optional prepare/cleanup hooks run
 * around the mandatory run hook.
 */
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Proper xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Proper xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Proper xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Proper xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	/* Same slot count as above so test numbering stays stable */
	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Reset test",
		.run = mmc_test_reset,
	},

	{
		.name = "Commands during read - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Re-tuning reliability",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_retuning,
		.cleanup = mmc_test_area_cleanup,
	},
};
/* Serialises test runs and protects the global result/file lists below */
static DEFINE_MUTEX(mmc_test_lock);
/* Results of completed runs; one mmc_test_general_result per executed case */
static LIST_HEAD(mmc_test_result);
/*
 * Run one test case (1-based @testcase) or, when @testcase is 0, every
 * entry of mmc_test_cases[] against @test->card.
 *
 * The host is claimed for the whole run.  For each executed case a
 * struct mmc_test_general_result is allocated and appended to the global
 * mmc_test_result list so mtf_test_show() can report it later; the
 * caller (mtf_test_write) holds mmc_test_lock around this function.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;
	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));
	mmc_claim_host(test->card->host);
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;
		/* @testcase is 1-based; 0 means "run everything" */
		if (testcase && ((i + 1) != testcase))
			continue;
		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);
		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				/* Prepare failure skips the case; no cleanup is run */
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}
		/*
		 * Allocation failure is tolerated: the case still runs, only
		 * its result is not recorded.  NOTE(review): on failure
		 * test->gr keeps pointing at the previous case's container —
		 * transfer results would then land on the wrong entry; verify
		 * whether that is acceptable.
		 */
		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);
			/* Record what we already know */
			gr->card = test->card;
			gr->testcase = i;
			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);
			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}
		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}
		/* Save the result */
		if (gr)
			gr->result = ret;
		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				/* Cleanup failure is reported but does not change gr->result */
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}
	mmc_release_host(test->card->host);
	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
/*
 * Drop every recorded result for @card, or for all cards when @card is
 * NULL (module-unload path).  Each general-result entry also owns a list
 * of per-transfer records which is freed first.
 */
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *result, *res_tmp;

	mutex_lock(&mmc_test_lock);
	list_for_each_entry_safe(result, res_tmp, &mmc_test_result, link) {
		struct mmc_test_transfer_result *xfer, *xfer_tmp;

		if (card && result->card != card)
			continue;

		/* Free the transfer records hanging off this entry first */
		list_for_each_entry_safe(xfer, xfer_tmp, &result->tr_lst, link) {
			list_del(&xfer->link);
			kfree(xfer);
		}

		list_del(&result->link);
		kfree(result);
	}
	mutex_unlock(&mmc_test_lock);
}
/* Debugfs files created by this driver; one mmc_test_dbgfs_file per file */
static LIST_HEAD(mmc_test_file_test);
/*
 * debugfs "test" read side: dump every recorded result belonging to the
 * card stashed in sf->private, one "Test <n>: <result>" line per case
 * followed by its raw transfer records.
 */
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = sf->private;
	struct mmc_test_general_result *result;

	mutex_lock(&mmc_test_lock);
	list_for_each_entry(result, &mmc_test_result, link) {
		struct mmc_test_transfer_result *xfer;

		/* The global list mixes results of all cards */
		if (result->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", result->testcase + 1,
			   result->result);

		list_for_each_entry(xfer, &result->tr_lst, link)
			seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
				   xfer->count, xfer->sectors,
				   (u64)xfer->ts.tv_sec, (u32)xfer->ts.tv_nsec,
				   xfer->rate,
				   xfer->iops / 100, xfer->iops % 100);
	}
	mutex_unlock(&mmc_test_lock);

	return 0;
}
/* debugfs open: bind mtf_test_show() to the card stored in i_private */
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
/*
 * debugfs "test" write side: parse the requested test number from user
 * space and run it (0 = all tests) against the card bound to this file.
 *
 * Returns @count on success, -ENOMEM on allocation failure, or the
 * kstrtol_from_user() error for unparsable input.
 *
 * Fix vs. original: a failed BUFFER_SIZE allocation used to be silently
 * swallowed — the tests were skipped yet @count was returned as success.
 * It now returns -ENOMEM like the highmem path already did.
 */
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = file->private_data;
	struct mmc_card *card = sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;
	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;
	test = kzalloc(sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;
	/*
	 * Remove all test cases associated with given card. Thus we have only
	 * actual data of the last run.
	 */
	mmc_test_free_result(card);
	test->card = card;
	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
	if (!test->buffer) {
		count = -ENOMEM;
		goto free_test;
	}
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
	if (!test->highmem) {
		count = -ENOMEM;
		goto free_test_buffer;
	}
#endif
	/* mmc_test_lock serialises runs and guards the global result list */
	mutex_lock(&mmc_test_lock);
	mmc_test_run(test, testcase);
	mutex_unlock(&mmc_test_lock);
#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
free_test_buffer:
#endif
	kfree(test->buffer);
free_test:
	kfree(test);
	return count;
}
/* debugfs "test" file: read = show recorded results, write = run test N */
static const struct file_operations mmc_test_fops_test = {
	.open = mtf_test_open,
	.read = seq_read,
	.write = mtf_test_write,
	.llseek = seq_lseek,
	.release = single_release,
};
/* debugfs "testlist": print the 1-based menu of available test cases */
static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int idx;

	mutex_lock(&mmc_test_lock);

	seq_puts(sf, "0:\tRun all tests\n");
	for (idx = 0; idx < ARRAY_SIZE(mmc_test_cases); idx++)
		seq_printf(sf, "%d:\t%s\n", idx + 1, mmc_test_cases[idx].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}
/* Expands to mtf_testlist_fops wrapping mtf_testlist_show() */
DEFINE_SHOW_ATTRIBUTE(mtf_testlist);
/*
 * Remove the debugfs files registered for @card (all cards when @card is
 * NULL) and free their bookkeeping entries.
 */
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *entry, *tmp;

	mutex_lock(&mmc_test_lock);
	list_for_each_entry_safe(entry, tmp, &mmc_test_file_test, link) {
		if (card && entry->card != card)
			continue;

		debugfs_remove(entry->file);
		list_del(&entry->link);
		kfree(entry);
	}
	mutex_unlock(&mmc_test_lock);
}
/*
 * Create one debugfs file under the card's debugfs directory (when that
 * directory exists) and remember it on mmc_test_file_test so it can be
 * removed later.  Caller holds mmc_test_lock.
 *
 * Returns 0 on success, -ENOMEM if the bookkeeping entry cannot be
 * allocated (any created file is removed again in that case).
 */
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct mmc_test_dbgfs_file *entry;
	struct dentry *file = NULL;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
					   card, fops);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		debugfs_remove(file);
		return -ENOMEM;
	}

	entry->card = card;
	entry->file = file;
	list_add(&entry->link, &mmc_test_file_test);

	return 0;
}
/*
 * Register both debugfs entries for @card: writable "test" (run tests)
 * and read-only "testlist" (list available cases).  Returns 0 or the
 * first registration error.
 */
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (!ret)
		ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
			&mtf_testlist_fops);

	mutex_unlock(&mmc_test_lock);

	return ret;
}
/*
 * Bus probe: claim (e)MMC and SD cards for testing.  Non-MMC/SD devices
 * and SDUC cards are rejected.  Command queueing is disabled while the
 * test driver owns the card (re-enabled again in mmc_test_remove()).
 */
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!(mmc_card_mmc(card) || mmc_card_sd(card)))
		return -ENODEV;

	if (mmc_card_ult_capacity(card)) {
		pr_info("%s: mmc-test currently UNSUPPORTED for SDUC\n",
			mmc_hostname(card->host));
		return -EOPNOTSUPP;
	}

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	if (card->ext_csd.cmdq_en) {
		/* Tests drive the card directly, so CQE must be off */
		mmc_claim_host(card->host);
		ret = mmc_cmdq_disable(card);
		mmc_release_host(card->host);
		if (ret)
			return ret;
	}

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}
/*
 * Bus remove: re-enable command queueing if probe turned it off, then
 * drop all recorded results and debugfs files for this card.
 */
static void mmc_test_remove(struct mmc_card *card)
{
	if (card->reenable_cmdq) {
		mmc_claim_host(card->host);
		mmc_cmdq_enable(card);
		mmc_release_host(card->host);
	}
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}
/* MMC bus driver registration record for the test driver */
static struct mmc_driver mmc_driver = {
	.drv = {
		.name = "mmc_test",
	},
	.probe = mmc_test_probe,
	.remove = mmc_test_remove,
};
/* Module init: register with the MMC bus; probe does the real setup */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
/* Module exit: free global state, then unregister from the MMC bus */
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);
	mmc_unregister_driver(&mmc_driver);
}
/* Module entry points and metadata */
module_init(mmc_test_init);
module_exit(mmc_test_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bytedance */
#include "bench.h"
#include "bpf_hashmap_full_update_bench.skel.h"
#include "bpf_util.h"
/* BPF triggering benchmarks */
static struct ctx {
	struct bpf_hashmap_full_update_bench *skel;	/* loaded BPF skeleton */
} ctx;
/* Number of map updates the BPF program performs per trigger */
#define MAX_LOOP_NUM 10000
/* This benchmark is producer-only; reject runs configured with consumers */
static void validate(void)
{
	if (env.consumer_cnt != 0) {
		fprintf(stderr, "benchmark doesn't support consumer!\n");
		exit(1);
	}
}
/*
 * Producer thread: issue a cheap syscall in a tight loop; the BPF
 * program attached in setup() runs on each getpgid() entry.
 */
static void *producer(void *input)
{
	while (true) {
		/* trigger the bpf program */
		syscall(__NR_getpgid);
	}
	return NULL;
}
/* No per-interval stats; everything is reported in hashmap_report_final() */
static void measure(struct bench_res *res)
{
}
/*
 * Load and attach the benchmark skeleton, then pre-populate the hash map
 * to capacity so every benchmarked update operates on a full map.
 * Any failure aborts the benchmark run.
 */
static void setup(void)
{
	struct bpf_link *prog_link;
	int fd, key, nr_entries;

	setup_libbpf();

	ctx.skel = bpf_hashmap_full_update_bench__open_and_load();
	if (!ctx.skel) {
		fprintf(stderr, "failed to open skeleton\n");
		exit(1);
	}

	ctx.skel->bss->nr_loops = MAX_LOOP_NUM;

	prog_link = bpf_program__attach(ctx.skel->progs.benchmark);
	if (!prog_link) {
		fprintf(stderr, "failed to attach program!\n");
		exit(1);
	}

	/* fill hash_map: key == value for every possible slot */
	fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
	nr_entries = bpf_map__max_entries(ctx.skel->maps.hash_map_bench);
	for (key = 0; key < nr_entries; key++)
		bpf_map_update_elem(fd, &key, &key, BPF_ANY);
}
/*
 * Final report: for every CPU that ran the BPF program, convert its
 * recorded total time into an events-per-second figure.
 */
static void hashmap_report_final(struct bench_res res[], int res_cnt)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		u64 time = ctx.skel->bss->percpu_time[cpu];

		if (!time)
			continue;	/* this CPU never ran the program */

		printf("%d:hash_map_full_perf %lld events per sec\n",
		       cpu, ctx.skel->bss->nr_loops * 1000000000ll / time);
	}
}
/* Registration record consumed by the bench harness */
const struct bench bench_bpf_hashmap_full_update = {
	.name = "bpf-hashmap-full-update",
	.validate = validate,
	.setup = setup,
	.producer_thread = producer,
	.measure = measure,		/* no per-interval sampling */
	.report_progress = NULL,
	.report_final = hashmap_report_final,
};
|
// SPDX-License-Identifier: GPL-2.0+
// fusionhdtv-mce.h - Keytable for fusionhdtv_mce Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/* DViCO FUSION HDTV MCE remote */
/* Scancode -> keycode table; protocol is legacy (RC_PROTO_UNKNOWN) */
static struct rc_map_table fusionhdtv_mce[] = {
	/* numeric keypad */
	{ 0x0b, KEY_NUMERIC_1 },
	{ 0x17, KEY_NUMERIC_2 },
	{ 0x1b, KEY_NUMERIC_3 },
	{ 0x07, KEY_NUMERIC_4 },
	{ 0x50, KEY_NUMERIC_5 },
	{ 0x54, KEY_NUMERIC_6 },
	{ 0x48, KEY_NUMERIC_7 },
	{ 0x4c, KEY_NUMERIC_8 },
	{ 0x58, KEY_NUMERIC_9 },
	{ 0x03, KEY_NUMERIC_0 },
	/* navigation cluster */
	{ 0x5e, KEY_OK },
	{ 0x51, KEY_UP },
	{ 0x53, KEY_DOWN },
	{ 0x5b, KEY_LEFT },
	{ 0x5f, KEY_RIGHT },
	{ 0x02, KEY_TV }, /* Labeled DTV on remote */
	{ 0x0e, KEY_MP3 },
	{ 0x1a, KEY_DVD },
	{ 0x1e, KEY_FAVORITES }, /* Labeled CPF on remote */
	{ 0x16, KEY_SETUP },
	{ 0x46, KEY_POWER2 }, /* TV On/Off button on remote */
	{ 0x0a, KEY_EPG }, /* Labeled Guide on remote */
	{ 0x49, KEY_BACK },
	{ 0x59, KEY_INFO }, /* Labeled MORE on remote */
	{ 0x4d, KEY_MENU }, /* Labeled DVDMENU on remote */
	{ 0x55, KEY_CYCLEWINDOWS }, /* Labeled ALT-TAB on remote */
	/* transport / playback */
	{ 0x0f, KEY_PREVIOUSSONG }, /* Labeled |<< REPLAY on remote */
	{ 0x12, KEY_NEXTSONG }, /* Labeled >>| SKIP on remote */
	{ 0x42, KEY_ENTER }, /* Labeled START with a green
				MS windows logo on remote */
	{ 0x15, KEY_VOLUMEUP },
	{ 0x05, KEY_VOLUMEDOWN },
	{ 0x11, KEY_CHANNELUP },
	{ 0x09, KEY_CHANNELDOWN },
	{ 0x52, KEY_CAMERA },
	{ 0x5a, KEY_TUNER },
	{ 0x19, KEY_OPEN },
	{ 0x13, KEY_MODE }, /* 4:3 16:9 select */
	{ 0x1f, KEY_ZOOM },
	{ 0x43, KEY_REWIND },
	{ 0x47, KEY_PLAYPAUSE },
	{ 0x4f, KEY_FASTFORWARD },
	{ 0x57, KEY_MUTE },
	{ 0x0d, KEY_STOP },
	{ 0x01, KEY_RECORD },
	{ 0x4e, KEY_POWER },
};
/* rc-core registration wrapper for the table above */
static struct rc_map_list fusionhdtv_mce_map = {
	.map = {
		.scan = fusionhdtv_mce,
		.size = ARRAY_SIZE(fusionhdtv_mce),
		.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
		.name = RC_MAP_FUSIONHDTV_MCE,
	}
};
/* Register the keymap with rc-core on module load */
static int __init init_rc_map_fusionhdtv_mce(void)
{
	return rc_map_register(&fusionhdtv_mce_map);
}
/* Unregister the keymap on module unload */
static void __exit exit_rc_map_fusionhdtv_mce(void)
{
	rc_map_unregister(&fusionhdtv_mce_map);
}
/* Module entry points and metadata */
module_init(init_rc_map_fusionhdtv_mce)
module_exit(exit_rc_map_fusionhdtv_mce)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_DESCRIPTION("DViCO FUSION HDTV MCE remote controller keytable");
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*/
#ifndef __IA_CSS_ANR_PARAM_H
#define __IA_CSS_ANR_PARAM_H
#include "type_support.h"
/* ANR (Advanced Noise Reduction) */
/* ISP-side parameter layout for the ANR kernel */
struct sh_css_isp_anr_params {
	s32 threshold;	/* noise-reduction threshold; units not defined here — see the ANR host code */
};
#endif /* __IA_CSS_ANR_PARAM_H */
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/******************************************************************************
* QLOGIC LINUX SOFTWARE
*
* QLogic ISP1280 (Ultra2) /12160 (Ultra3) SCSI driver
* Copyright (C) 2000 Qlogic Corporation
* (www.qlogic.com)
*
******************************************************************************/
#ifndef _QLA1280_H
#define _QLA1280_H
/*
 * Data bit definitions.
 */
#define BIT_0 0x1
#define BIT_1 0x2
#define BIT_2 0x4
#define BIT_3 0x8
#define BIT_4 0x10
#define BIT_5 0x20
#define BIT_6 0x40
#define BIT_7 0x80
#define BIT_8 0x100
#define BIT_9 0x200
#define BIT_10 0x400
#define BIT_11 0x800
#define BIT_12 0x1000
#define BIT_13 0x2000
#define BIT_14 0x4000
#define BIT_15 0x8000
#define BIT_16 0x10000
#define BIT_17 0x20000
#define BIT_18 0x40000
#define BIT_19 0x80000
#define BIT_20 0x100000
#define BIT_21 0x200000
#define BIT_22 0x400000
#define BIT_23 0x800000
#define BIT_24 0x1000000
#define BIT_25 0x2000000
#define BIT_26 0x4000000
#define BIT_27 0x8000000
#define BIT_28 0x10000000
#define BIT_29 0x20000000
#define BIT_30 0x40000000
#define BIT_31 0x80000000
/*
 * 16-bit register accessors.  Memory-mapped I/O uses relaxed reads,
 * except the _dmasync variant which uses an ordered readw() where
 * ordering against DMA matters; the fallback is port I/O.
 */
#if MEMORY_MAPPED_IO
#define RD_REG_WORD(addr) readw_relaxed(addr)
#define RD_REG_WORD_dmasync(addr) readw(addr)
#define WRT_REG_WORD(addr, data) writew(data, addr)
#else /* MEMORY_MAPPED_IO */
#define RD_REG_WORD(addr) inw((unsigned long)addr)
#define RD_REG_WORD_dmasync(addr) RD_REG_WORD(addr)
#define WRT_REG_WORD(addr, data) outw(data, (unsigned long)addr)
#endif /* MEMORY_MAPPED_IO */
/*
 * Host adapter default definitions.
 */
#define MAX_BUSES 2 /* 2 */
#define MAX_B_BITS 1
#define MAX_TARGETS 16 /* 16 */
#define MAX_T_BITS 4 /* 4 */
#define MAX_LUNS 8 /* 32 */
#define MAX_L_BITS 3 /* 5 */
/*
 * Watchdog time quantum
 */
#define QLA1280_WDG_TIME_QUANTUM 5 /* In seconds */
/* Command retry count (0-65535) */
#define COMMAND_RETRY_COUNT 255
/* Maximum outstanding commands in ISP queues */
#define MAX_OUTSTANDING_COMMANDS 512
/* Sentinel handle value marking an already-completed command slot */
#define COMPLETED_HANDLE ((unsigned char *) \
					(MAX_OUTSTANDING_COMMANDS + 2))
/* ISP request and response entry counts (37-65535) */
#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT 63 /* Number of response entries. */
/*
 * SCSI Request Block structure (sp) that occurs after each struct scsi_cmnd.
 */
struct srb {
	struct list_head list; /* (8/16) LU queue */
	struct scsi_cmnd *cmd; /* (4/8) SCSI command block */
	/* NOTE: the sp->cmd will be NULL when this completion is
	 * called, so you should know the scsi_cmnd when using this */
	struct completion *wait;
	dma_addr_t saved_dma_handle; /* for unmap of single transfers */
	uint8_t flags; /* (1) Status flags. */
	uint8_t dir; /* direction of transfer */
};
/*
 * SRB flag definitions
 */
#define SRB_TIMEOUT (1 << 0) /* Command timed out */
#define SRB_SENT (1 << 1) /* Command sent to ISP */
#define SRB_ABORT_PENDING (1 << 2) /* Command abort sent to device */
#define SRB_ABORTED (1 << 3) /* Command aborted command already */
/*
 * ISP I/O Register Set structure definitions.
 *
 * Fixes vs. original: ISP_CFG1_F64 is parenthesised so the multi-bit
 * mask cannot be split by operator precedence at use sites (e.g.
 * "x & ISP_CFG1_F64" previously expanded to "(x & BIT_4) | BIT_5"),
 * and the copy-pasted "128-byte" comments on the F64/F32/F16 FIFO
 * threshold values are corrected.
 */
struct device_reg {
	uint16_t id_l; /* ID low */
	uint16_t id_h; /* ID high */
	uint16_t cfg_0; /* Configuration 0 */
#define ISP_CFG0_HWMSK 0x000f /* Hardware revision mask */
#define ISP_CFG0_1020 1 /* ISP1020 */
#define ISP_CFG0_1020A 2 /* ISP1020A */
#define ISP_CFG0_1040 3 /* ISP1040 */
#define ISP_CFG0_1040A 4 /* ISP1040A */
#define ISP_CFG0_1040B 5 /* ISP1040B */
#define ISP_CFG0_1040C 6 /* ISP1040C */
	uint16_t cfg_1; /* Configuration 1 */
#define ISP_CFG1_F128 BIT_6 /* 128-byte FIFO threshold */
#define ISP_CFG1_F64 (BIT_4|BIT_5) /* 64-byte FIFO threshold */
#define ISP_CFG1_F32 BIT_5 /* 32-byte FIFO threshold */
#define ISP_CFG1_F16 BIT_4 /* 16-byte FIFO threshold */
#define ISP_CFG1_BENAB BIT_2 /* Global Bus burst enable */
#define ISP_CFG1_SXP BIT_0 /* SXP register select */
	uint16_t ictrl; /* Interface control */
#define ISP_RESET BIT_0 /* ISP soft reset */
#define ISP_EN_INT BIT_1 /* ISP enable interrupts. */
#define ISP_EN_RISC BIT_2 /* ISP enable RISC interrupts. */
#define ISP_FLASH_ENABLE BIT_8 /* Flash BIOS Read/Write enable */
#define ISP_FLASH_UPPER BIT_9 /* Flash upper bank select */
	uint16_t istatus; /* Interface status */
#define PCI_64BIT_SLOT BIT_14 /* PCI 64-bit slot indicator. */
#define RISC_INT BIT_2 /* RISC interrupt */
#define PCI_INT BIT_1 /* PCI interrupt */
	uint16_t semaphore; /* Semaphore */
	uint16_t nvram; /* NVRAM register. */
#define NV_DESELECT 0
#define NV_CLOCK BIT_0
#define NV_SELECT BIT_1
#define NV_DATA_OUT BIT_2
#define NV_DATA_IN BIT_3
	uint16_t flash_data; /* Flash BIOS data */
	uint16_t flash_address; /* Flash BIOS address */
	uint16_t unused_1[0x06];
	/* cdma_* and ddma_* are 1040 only */
	uint16_t cdma_cfg;
#define CDMA_CONF_SENAB BIT_3 /* SXP to DMA Data enable */
#define CDMA_CONF_RIRQ BIT_2 /* RISC interrupt enable */
#define CDMA_CONF_BENAB BIT_1 /* Bus burst enable */
#define CDMA_CONF_DIR BIT_0 /* DMA direction (0=fifo->host 1=host->fifo) */
	uint16_t cdma_ctrl;
	uint16_t cdma_status;
	uint16_t cdma_fifo_status;
	uint16_t cdma_count;
	uint16_t cdma_reserved;
	uint16_t cdma_address_count_0;
	uint16_t cdma_address_count_1;
	uint16_t cdma_address_count_2;
	uint16_t cdma_address_count_3;
	uint16_t unused_2[0x06];
	uint16_t ddma_cfg;
#define DDMA_CONF_SENAB BIT_3 /* SXP to DMA Data enable */
#define DDMA_CONF_RIRQ BIT_2 /* RISC interrupt enable */
#define DDMA_CONF_BENAB BIT_1 /* Bus burst enable */
#define DDMA_CONF_DIR BIT_0 /* DMA direction (0=fifo->host 1=host->fifo) */
	uint16_t ddma_ctrl;
	uint16_t ddma_status;
	uint16_t ddma_fifo_status;
	uint16_t ddma_xfer_count_low;
	uint16_t ddma_xfer_count_high;
	uint16_t ddma_addr_count_0;
	uint16_t ddma_addr_count_1;
	uint16_t ddma_addr_count_2;
	uint16_t ddma_addr_count_3;
	uint16_t unused_3[0x0e];
	uint16_t mailbox0; /* Mailbox 0 */
	uint16_t mailbox1; /* Mailbox 1 */
	uint16_t mailbox2; /* Mailbox 2 */
	uint16_t mailbox3; /* Mailbox 3 */
	uint16_t mailbox4; /* Mailbox 4 */
	uint16_t mailbox5; /* Mailbox 5 */
	uint16_t mailbox6; /* Mailbox 6 */
	uint16_t mailbox7; /* Mailbox 7 */
	uint16_t unused_4[0x20];/* 0x80-0xbf Gap */
	uint16_t host_cmd; /* Host command and control */
#define HOST_INT BIT_7 /* host interrupt bit */
#define BIOS_ENABLE BIT_0
	uint16_t unused_5[0x5]; /* 0xc2-0xcb Gap */
	uint16_t gpio_data;
	uint16_t gpio_enable;
	uint16_t unused_6[0x11]; /* d0-f0 */
	uint16_t scsiControlPins; /* f2 */
};
#define MAILBOX_REGISTER_COUNT 8
/*
 * ISP product identification definitions in mailboxes after reset.
 */
#define PROD_ID_1 0x4953
#define PROD_ID_2 0x0000
#define PROD_ID_2a 0x5020
#define PROD_ID_3 0x2020
#define PROD_ID_4 0x1
/*
 * ISP host command and control register command definitions
 */
#define HC_RESET_RISC 0x1000 /* Reset RISC */
#define HC_PAUSE_RISC 0x2000 /* Pause RISC */
#define HC_RELEASE_RISC 0x3000 /* Release RISC from reset. */
#define HC_SET_HOST_INT 0x5000 /* Set host interrupt */
#define HC_CLR_HOST_INT 0x6000 /* Clear HOST interrupt */
#define HC_CLR_RISC_INT 0x7000 /* Clear RISC interrupt */
#define HC_DISABLE_BIOS 0x9000 /* Disable BIOS. */
/*
 * ISP mailbox Self-Test status codes
 */
#define MBS_FRM_ALIVE 0 /* Firmware Alive. */
#define MBS_CHKSUM_ERR 1 /* Checksum Error. */
#define MBS_SHADOW_LD_ERR 2 /* Shadow Load Error. */
#define MBS_BUSY 4 /* Busy. */
/*
 * ISP mailbox command complete status codes
 */
#define MBS_CMD_CMP 0x4000 /* Command Complete. */
#define MBS_INV_CMD 0x4001 /* Invalid Command. */
#define MBS_HOST_INF_ERR 0x4002 /* Host Interface Error. */
#define MBS_TEST_FAILED 0x4003 /* Test Failed. */
#define MBS_CMD_ERR 0x4005 /* Command Error. */
#define MBS_CMD_PARAM_ERR 0x4006 /* Command Parameter Error. */
/*
 * ISP mailbox asynchronous event status codes
 */
#define MBA_ASYNC_EVENT 0x8000 /* Asynchronous event. */
#define MBA_BUS_RESET 0x8001 /* SCSI Bus Reset. */
#define MBA_SYSTEM_ERR 0x8002 /* System Error. */
#define MBA_REQ_TRANSFER_ERR 0x8003 /* Request Transfer Error. */
#define MBA_RSP_TRANSFER_ERR 0x8004 /* Response Transfer Error. */
#define MBA_WAKEUP_THRES 0x8005 /* Request Queue Wake-up. */
#define MBA_TIMEOUT_RESET 0x8006 /* Execution Timeout Reset. */
#define MBA_DEVICE_RESET 0x8007 /* Bus Device Reset. */
#define MBA_BUS_MODE_CHANGE 0x800E /* SCSI bus mode transition. */
#define MBA_SCSI_COMPLETION 0x8020 /* Completion response. */
/*
 * ISP mailbox commands
 */
#define MBC_NOP 0 /* No Operation */
#define MBC_LOAD_RAM 1 /* Load RAM */
#define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware */
#define MBC_DUMP_RAM 3 /* Dump RAM contents */
#define MBC_WRITE_RAM_WORD 4 /* Write ram word */
#define MBC_READ_RAM_WORD 5 /* Read ram word */
#define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */
#define MBC_VERIFY_CHECKSUM 7 /* Verify checksum */
#define MBC_ABOUT_FIRMWARE 8 /* Get firmware revision */
#define MBC_LOAD_RAM_A64_ROM 9 /* Load RAM 64bit ROM version */
#define MBC_DUMP_RAM_A64_ROM 0x0a /* Dump RAM 64bit ROM version */
#define MBC_INIT_REQUEST_QUEUE 0x10 /* Initialize request queue */
#define MBC_INIT_RESPONSE_QUEUE 0x11 /* Initialize response queue */
#define MBC_EXECUTE_IOCB 0x12 /* Execute IOCB command */
#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command */
#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN) */
#define MBC_ABORT_TARGET 0x17 /* Abort target (ID) */
#define MBC_BUS_RESET 0x18 /* SCSI bus reset */
#define MBC_GET_RETRY_COUNT 0x22 /* Get retry count and delay */
#define MBC_GET_TARGET_PARAMETERS 0x28 /* Get target parameters */
#define MBC_SET_INITIATOR_ID 0x30 /* Set initiator SCSI ID */
#define MBC_SET_SELECTION_TIMEOUT 0x31 /* Set selection timeout */
#define MBC_SET_RETRY_COUNT 0x32 /* Set retry count and delay */
#define MBC_SET_TAG_AGE_LIMIT 0x33 /* Set tag age limit */
#define MBC_SET_CLOCK_RATE 0x34 /* Set clock rate */
#define MBC_SET_ACTIVE_NEGATION 0x35 /* Set active negation state */
#define MBC_SET_ASYNC_DATA_SETUP 0x36 /* Set async data setup time */
#define MBC_SET_PCI_CONTROL 0x37 /* Set BUS control parameters */
#define MBC_SET_TARGET_PARAMETERS 0x38 /* Set target parameters */
#define MBC_SET_DEVICE_QUEUE 0x39 /* Set device queue parameters */
#define MBC_SET_RESET_DELAY_PARAMETERS 0x3A /* Set reset delay parameters */
#define MBC_SET_SYSTEM_PARAMETER 0x45 /* Set system parameter word */
#define MBC_SET_FIRMWARE_FEATURES 0x4A /* Set firmware feature word */
#define MBC_INIT_REQUEST_QUEUE_A64 0x52 /* Initialize request queue A64 */
#define MBC_INIT_RESPONSE_QUEUE_A64 0x53 /* Initialize response q A64 */
#define MBC_ENABLE_TARGET_MODE 0x55 /* Enable target mode */
#define MBC_SET_DATA_OVERRUN_RECOVERY 0x5A /* Set data overrun recovery mode */
/*
 * ISP Get/Set Target Parameters mailbox command control flags.
 */
#define TP_PPR BIT_5 /* PPR */
#define TP_RENEGOTIATE BIT_8 /* Renegotiate on error. */
#define TP_STOP_QUEUE BIT_9 /* Stop que on check condition */
#define TP_AUTO_REQUEST_SENSE BIT_10 /* Automatic request sense. */
#define TP_TAGGED_QUEUE BIT_11 /* Tagged queuing. */
#define TP_SYNC BIT_12 /* Synchronous data transfers. */
#define TP_WIDE BIT_13 /* Wide data transfers. */
#define TP_PARITY BIT_14 /* Parity checking. */
#define TP_DISCONNECT BIT_15 /* Disconnect privilege. */
/*
 * NVRAM Command values.
 */
#define NV_START_BIT BIT_2
#define NV_WRITE_OP (BIT_26 | BIT_24)
#define NV_READ_OP (BIT_26 | BIT_25)
/* NOTE(review): NV_ERASE_OP and NV_MASK_OP share the same value — looks
 * deliberate (erase uses the full opcode mask) but verify against the
 * serial-EEPROM datasheet. */
#define NV_ERASE_OP (BIT_26 | BIT_25 | BIT_24)
#define NV_MASK_OP (BIT_26 | BIT_25 | BIT_24)
#define NV_DELAY_COUNT 10
/*
 * QLogic ISP1280/ISP12160 NVRAM structure definition.
 *
 * Trailing comments give the byte offset of each field within the
 * 256-byte NVRAM image.  NOTE(review): the offsets of cntr_flags_2l/2h
 * were previously commented as 7 and 8; counting from id0 they are
 * bytes 6 and 7 (id0..id3 = 0-3, version = 4, cntr_flags_1 = 5) —
 * corrected below, verify against the NVRAM layout spec.
 */
struct nvram {
	uint8_t id0; /* 0 */
	uint8_t id1; /* 1 */
	uint8_t id2; /* 2 */
	uint8_t id3; /* 3 */
	uint8_t version; /* 4 */
	struct {
		uint8_t bios_configuration_mode:2;
		uint8_t bios_disable:1;
		uint8_t selectable_scsi_boot_enable:1;
		uint8_t cd_rom_boot_enable:1;
		uint8_t disable_loading_risc_code:1;
		uint8_t enable_64bit_addressing:1;
		uint8_t unused_7:1;
	} cntr_flags_1; /* 5 */
	struct {
		uint8_t boot_lun_number:5;
		uint8_t scsi_bus_number:1;
		uint8_t unused_6:1;
		uint8_t unused_7:1;
	} cntr_flags_2l; /* 6 */
	struct {
		uint8_t boot_target_number:4;
		uint8_t unused_12:1;
		uint8_t unused_13:1;
		uint8_t unused_14:1;
		uint8_t unused_15:1;
	} cntr_flags_2h; /* 7 */
	uint16_t unused_8; /* 8, 9 */
	uint16_t unused_10; /* 10, 11 */
	uint16_t unused_12; /* 12, 13 */
	uint16_t unused_14; /* 14, 15 */
	struct {
		uint8_t reserved:2;
		uint8_t burst_enable:1;
		uint8_t reserved_1:1;
		uint8_t fifo_threshold:4;
	} isp_config; /* 16 */
	/* Termination
	 * 0 = Disable, 1 = high only, 3 = Auto term
	 */
	struct {
		uint8_t scsi_bus_1_control:2;
		uint8_t scsi_bus_0_control:2;
		uint8_t unused_0:1;
		uint8_t unused_1:1;
		uint8_t unused_2:1;
		uint8_t auto_term_support:1;
	} termination; /* 17 */
	uint16_t isp_parameter; /* 18, 19 */
	union {
		uint16_t w;
		struct {
			uint16_t enable_fast_posting:1;
			uint16_t report_lvd_bus_transition:1;
			uint16_t unused_2:1;
			uint16_t unused_3:1;
			uint16_t disable_iosbs_with_bus_reset_status:1;
			uint16_t disable_synchronous_backoff:1;
			uint16_t unused_6:1;
			uint16_t synchronous_backoff_reporting:1;
			uint16_t disable_reselection_fairness:1;
			uint16_t unused_9:1;
			uint16_t unused_10:1;
			uint16_t unused_11:1;
			uint16_t unused_12:1;
			uint16_t unused_13:1;
			uint16_t unused_14:1;
			uint16_t unused_15:1;
		} f;
	} firmware_feature; /* 20, 21 */
	uint16_t unused_22; /* 22, 23 */
	/* Per-bus settings; offsets below are relative to bus 0 */
	struct {
		struct {
			uint8_t initiator_id:4;
			uint8_t scsi_reset_disable:1;
			uint8_t scsi_bus_size:1;
			uint8_t scsi_bus_type:1;
			uint8_t unused_7:1;
		} config_1; /* 24 */
		uint8_t bus_reset_delay; /* 25 */
		uint8_t retry_count; /* 26 */
		uint8_t retry_delay; /* 27 */
		struct {
			uint8_t async_data_setup_time:4;
			uint8_t req_ack_active_negation:1;
			uint8_t data_line_active_negation:1;
			uint8_t unused_6:1;
			uint8_t unused_7:1;
		} config_2; /* 28 */
		uint8_t unused_29; /* 29 */
		uint16_t selection_timeout; /* 30, 31 */
		uint16_t max_queue_depth; /* 32, 33 */
		uint16_t unused_34; /* 34, 35 */
		uint16_t unused_36; /* 36, 37 */
		uint16_t unused_38; /* 38, 39 */
		/* Per-target settings within each bus */
		struct {
			struct {
				uint8_t renegotiate_on_error:1;
				uint8_t stop_queue_on_check:1;
				uint8_t auto_request_sense:1;
				uint8_t tag_queuing:1;
				uint8_t enable_sync:1;
				uint8_t enable_wide:1;
				uint8_t parity_checking:1;
				uint8_t disconnect_allowed:1;
			} parameter; /* 40 */
			uint8_t execution_throttle; /* 41 */
			uint8_t sync_period; /* 42 */
			/* Byte 43 layout differs between 1x80 and 1x160 parts */
			union { /* 43 */
				uint8_t flags_43;
				struct {
					uint8_t sync_offset:4;
					uint8_t device_enable:1;
					uint8_t lun_disable:1;
					uint8_t unused_6:1;
					uint8_t unused_7:1;
				} flags1x80;
				struct {
					uint8_t sync_offset:5;
					uint8_t device_enable:1;
					uint8_t unused_6:1;
					uint8_t unused_7:1;
				} flags1x160;
			} flags;
			union { /* PPR flags for the 1x160 controllers */
				uint8_t unused_44;
				struct {
					uint8_t ppr_options:4;
					uint8_t ppr_bus_width:2;
					uint8_t unused_8:1;
					uint8_t enable_ppr:1;
				} flags; /* 44 */
			} ppr_1x160;
			uint8_t unused_45; /* 45 */
		} target[MAX_TARGETS];
	} bus[MAX_BUSES];
	uint16_t unused_248; /* 248, 249 */
	uint16_t subsystem_id[2]; /* 250, 251, 252, 253 */
	union { /* 254 */
		uint8_t unused_254;
		uint8_t system_id_pointer;
	} sysid_1x160;
	uint8_t chksum; /* 255 */
};
/*
 * ISP queue - command entry structure definition.
 * 32-bit addressing variant; carries up to four data segments inline.
 */
#define MAX_CMDSZ 12 /* SCSI maximum CDB size. */
struct cmd_entry {
	uint8_t entry_type; /* Entry type. */
#define COMMAND_TYPE 1 /* Command entry */
	uint8_t entry_count; /* Entry count. */
	uint8_t sys_define; /* System defined. */
	uint8_t entry_status; /* Entry Status. */
	__le32 handle; /* System handle. */
	uint8_t lun; /* SCSI LUN */
	uint8_t target; /* SCSI ID */
	__le16 cdb_len; /* SCSI command length. */
	__le16 control_flags; /* Control flags. */
	__le16 reserved;
	__le16 timeout; /* Command timeout. */
	__le16 dseg_count; /* Data segment count. */
	uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
	__le32 dseg_0_address; /* Data segment 0 address. */
	__le32 dseg_0_length; /* Data segment 0 length. */
	__le32 dseg_1_address; /* Data segment 1 address. */
	__le32 dseg_1_length; /* Data segment 1 length. */
	__le32 dseg_2_address; /* Data segment 2 address. */
	__le32 dseg_2_length; /* Data segment 2 length. */
	__le32 dseg_3_address; /* Data segment 3 address. */
	__le32 dseg_3_length; /* Data segment 3 length. */
};
/*
 * ISP queue - continuation entry structure definition.
 * Holds seven more 32-bit data segments for a preceding command entry.
 */
struct cont_entry {
	uint8_t entry_type; /* Entry type. */
#define CONTINUE_TYPE 2 /* Continuation entry. */
	uint8_t entry_count; /* Entry count. */
	uint8_t sys_define; /* System defined. */
	uint8_t entry_status; /* Entry Status. */
	__le32 reserved; /* Reserved */
	__le32 dseg_0_address; /* Data segment 0 address. */
	__le32 dseg_0_length; /* Data segment 0 length. */
	__le32 dseg_1_address; /* Data segment 1 address. */
	__le32 dseg_1_length; /* Data segment 1 length. */
	__le32 dseg_2_address; /* Data segment 2 address. */
	__le32 dseg_2_length; /* Data segment 2 length. */
	__le32 dseg_3_address; /* Data segment 3 address. */
	__le32 dseg_3_length; /* Data segment 3 length. */
	__le32 dseg_4_address; /* Data segment 4 address. */
	__le32 dseg_4_length; /* Data segment 4 length. */
	__le32 dseg_5_address; /* Data segment 5 address. */
	__le32 dseg_5_length; /* Data segment 5 length. */
	__le32 dseg_6_address; /* Data segment 6 address. */
	__le32 dseg_6_length; /* Data segment 6 length. */
};
/*
 * ISP queue - status entry structure definition.
 * Returned by the ISP on the response queue when a command completes.
 */
struct response {
	uint8_t entry_type; /* Entry type. */
#define STATUS_TYPE 3 /* Status entry. */
	uint8_t entry_count; /* Entry count. */
	uint8_t sys_define; /* System defined. */
	uint8_t entry_status; /* Entry Status. */
#define RF_CONT BIT_0 /* Continuation. */
#define RF_FULL BIT_1 /* Full */
#define RF_BAD_HEADER BIT_2 /* Bad header. */
#define RF_BAD_PAYLOAD BIT_3 /* Bad payload. */
	__le32 handle; /* System handle. */
	__le16 scsi_status; /* SCSI status. */
	__le16 comp_status; /* Completion status. */
	__le16 state_flags; /* State flags. */
#define SF_TRANSFER_CMPL BIT_14 /* Transfer Complete. */
#define SF_GOT_SENSE BIT_13 /* Got Sense */
#define SF_GOT_STATUS BIT_12 /* Got Status */
#define SF_TRANSFERRED_DATA BIT_11 /* Transferred data */
#define SF_SENT_CDB BIT_10 /* Send CDB */
#define SF_GOT_TARGET BIT_9 /* Target successfully selected */
#define SF_GOT_BUS BIT_8 /* Bus successfully arbitrated */
	__le16 status_flags; /* Status flags. */
	__le16 time; /* Time. */
	__le16 req_sense_length;/* Request sense data length. */
	__le32 residual_length; /* Residual transfer length. */
	__le16 reserved[4];
	uint8_t req_sense_data[32]; /* Request sense data. */
};
/*
 * ISP queue - marker entry structure definition.
 * Synchronises firmware state for an ID/LUN (or everything) after reset.
 */
struct mrk_entry {
	uint8_t entry_type; /* Entry type. */
#define MARKER_TYPE 4 /* Marker entry. */
	uint8_t entry_count; /* Entry count. */
	uint8_t sys_define; /* System defined. */
	uint8_t entry_status; /* Entry Status. */
	__le32 reserved;
	uint8_t lun; /* SCSI LUN */
	uint8_t target; /* SCSI ID */
	uint8_t modifier; /* Modifier (7-0). */
#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */
#define MK_SYNC_ID 1 /* Synchronize ID */
#define MK_SYNC_ALL 2 /* Synchronize all ID/LUN */
	uint8_t reserved_1[53];
};
/*
 * ISP queue - extended command entry structure definition.
 *
 * Unused by the driver!
 */
struct ecmd_entry {
	uint8_t entry_type; /* Entry type. */
#define EXTENDED_CMD_TYPE 5 /* Extended command entry. */
	uint8_t entry_count; /* Entry count. */
	uint8_t sys_define; /* System defined. */
	uint8_t entry_status; /* Entry Status. */
	uint32_t handle; /* System handle. */
	uint8_t lun; /* SCSI LUN */
	uint8_t target; /* SCSI ID */
	__le16 cdb_len; /* SCSI command length. */
	__le16 control_flags; /* Control flags. */
	__le16 reserved;
	__le16 timeout; /* Command timeout. */
	__le16 dseg_count; /* Data segment count. */
	uint8_t scsi_cdb[88]; /* SCSI command words (extended CDB space). */
};
/*
 * ISP queue - 64-Bit addressing, command entry structure definition.
 * Segment addresses are split into two 32-bit little-endian words.
 */
typedef struct {
	uint8_t entry_type; /* Entry type. */
#define COMMAND_A64_TYPE 9 /* Command A64 entry */
	uint8_t entry_count; /* Entry count. */
	uint8_t sys_define; /* System defined. */
	uint8_t entry_status; /* Entry Status. */
	__le32 handle; /* System handle. */
	uint8_t lun; /* SCSI LUN */
	uint8_t target; /* SCSI ID */
	__le16 cdb_len; /* SCSI command length. */
	__le16 control_flags; /* Control flags. */
	__le16 reserved;
	__le16 timeout; /* Command timeout. */
	__le16 dseg_count; /* Data segment count. */
	uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
	__le32 reserved_1[2]; /* unused */
	__le32 dseg_0_address[2]; /* Data segment 0 address. */
	__le32 dseg_0_length; /* Data segment 0 length. */
	__le32 dseg_1_address[2]; /* Data segment 1 address. */
	__le32 dseg_1_length; /* Data segment 1 length. */
} cmd_a64_entry_t, request_t;
/*
 * ISP queue - 64-Bit addressing, continuation entry structure definition.
 * Carries five more 64-bit data segments for a preceding A64 command.
 */
struct cont_a64_entry {
	uint8_t entry_type; /* Entry type. */
#define CONTINUE_A64_TYPE 0xA /* Continuation A64 entry. */
	uint8_t entry_count; /* Entry count. */
	uint8_t sys_define; /* System defined. */
	uint8_t entry_status; /* Entry Status. */
	__le32 dseg_0_address[2]; /* Data segment 0 address. */
	__le32 dseg_0_length; /* Data segment 0 length. */
	__le32 dseg_1_address[2]; /* Data segment 1 address. */
	__le32 dseg_1_length; /* Data segment 1 length. */
	__le32 dseg_2_address[2]; /* Data segment 2 address. */
	__le32 dseg_2_length; /* Data segment 2 length. */
	__le32 dseg_3_address[2]; /* Data segment 3 address. */
	__le32 dseg_3_length; /* Data segment 3 length. */
	__le32 dseg_4_address[2]; /* Data segment 4 address. */
	__le32 dseg_4_length; /* Data segment 4 length. */
};
/*
 * ISP queue - enable LUN entry structure definition.
 *
 * Target-mode IOCB: enables a LUN and sizes its ATIO / immediate-notify
 * resource pools.
 */
struct elun_entry {
	uint8_t entry_type;		/* Entry type. */
#define ENABLE_LUN_TYPE 0xB		/* Enable LUN entry. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status not used. */
	__le32 reserved_2;
	__le16 lun;			/* Bit 15 is bus number. */
	__le16 reserved_4;
	__le32 option_flags;
	uint8_t status;
	uint8_t reserved_5;
	uint8_t command_count;		/* Number of ATIOs allocated. */
	uint8_t immed_notify_count;	/* Number of Immediate Notify */
					/* entries allocated. */
	uint8_t group_6_length;		/* SCSI CDB length for group 6 */
					/* commands (2-26). */
	uint8_t group_7_length;		/* SCSI CDB length for group 7 */
					/* commands (2-26). */
	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
	__le16 reserved_6[20];
};
/*
 * ISP queue - modify LUN entry structure definition.
 *
 * Target-mode IOCB adjusting the resources of an already enabled LUN.
 * Unused by the driver!
 */
struct modify_lun_entry {
	uint8_t entry_type;		/* Entry type. */
#define MODIFY_LUN_TYPE 0xC		/* Modify LUN entry. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status. */
	__le32 reserved_2;
	uint8_t lun;			/* SCSI LUN */
	uint8_t reserved_3;
	uint8_t operators;
	uint8_t reserved_4;
	__le32 option_flags;
	uint8_t status;
	uint8_t reserved_5;
	uint8_t command_count;		/* Number of ATIOs allocated. */
	uint8_t immed_notify_count;	/* Number of Immediate Notify */
					/* entries allocated. */
	__le16 reserved_6;
	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
	__le16 reserved_7[20];
};
/*
 * ISP queue - immediate notify entry structure definition.
 *
 * Posted by the ISP to report initiator events (e.g. SCSI messages it
 * did not handle itself) to the target-mode driver.
 */
struct notify_entry {
	uint8_t entry_type;		/* Entry type. */
#define IMMED_NOTIFY_TYPE 0xD		/* Immediate notify entry. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status. */
	__le32 reserved_2;
	uint8_t lun;
	uint8_t initiator_id;
	uint8_t reserved_3;
	uint8_t target_id;
	__le32 option_flags;
	uint8_t status;
	uint8_t reserved_4;
	uint8_t tag_value;		/* Received queue tag message value */
	uint8_t tag_type;		/* Received queue tag message type */
					/* entries allocated. */
	__le16 seq_id;
	uint8_t scsi_msg[8];		/* SCSI message not handled by ISP */
	__le16 reserved_5[8];
	uint8_t sense_data[18];
};
/*
 * ISP queue - notify acknowledge entry structure definition.
 *
 * Sent by the driver to acknowledge a previously received immediate
 * notify entry (matched via seq_id).
 */
struct nack_entry {
	uint8_t entry_type;		/* Entry type. */
#define NOTIFY_ACK_TYPE 0xE		/* Notify acknowledge entry. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status. */
	__le32 reserved_2;
	uint8_t lun;
	uint8_t initiator_id;
	uint8_t reserved_3;
	uint8_t target_id;
	__le32 option_flags;
	uint8_t status;
	uint8_t event;
	__le16 seq_id;
	__le16 reserved_4[22];
};
/*
 * ISP queue - Accept Target I/O (ATIO) entry structure definition.
 *
 * Posted by the ISP when an initiator sends a command to a LUN enabled
 * for target mode; carries the received CDB.
 */
struct atio_entry {
	uint8_t entry_type;		/* Entry type. */
#define ACCEPT_TGT_IO_TYPE 6		/* Accept target I/O entry. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status. */
	__le32 reserved_2;
	uint8_t lun;
	uint8_t initiator_id;
	uint8_t cdb_len;
	uint8_t target_id;
	__le32 option_flags;
	uint8_t status;
	uint8_t scsi_status;
	uint8_t tag_value;		/* Received queue tag message value */
	uint8_t tag_type;		/* Received queue tag message type */
	uint8_t cdb[26];
	uint8_t sense_data[18];
};
/*
 * ISP queue - Continue Target I/O (CTIO) entry structure definition.
 *
 * 32-bit addressing variant with four address/length scatter/gather
 * pairs.
 */
struct ctio_entry {
	uint8_t entry_type;		/* Entry type. */
#define CONTINUE_TGT_IO_TYPE 7		/* CTIO entry */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status. */
	__le32 reserved_2;
	uint8_t lun;			/* SCSI LUN */
	uint8_t initiator_id;
	uint8_t reserved_3;
	uint8_t target_id;
	__le32 option_flags;
	uint8_t status;
	uint8_t scsi_status;
	uint8_t tag_value;		/* Received queue tag message value */
	uint8_t tag_type;		/* Received queue tag message type */
	__le32 transfer_length;
	__le32 residual;
	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
	__le16 dseg_count;		/* Data segment count. */
	__le32 dseg_0_address;		/* Data segment 0 address. */
	__le32 dseg_0_length;		/* Data segment 0 length. */
	__le32 dseg_1_address;		/* Data segment 1 address. */
	__le32 dseg_1_length;		/* Data segment 1 length. */
	__le32 dseg_2_address;		/* Data segment 2 address. */
	__le32 dseg_2_length;		/* Data segment 2 length. */
	__le32 dseg_3_address;		/* Data segment 3 address. */
	__le32 dseg_3_length;		/* Data segment 3 length. */
};
/*
 * ISP queue - CTIO returned entry structure definition.
 *
 * Note: dseg_1_length is deliberately only 16 bits; together with the
 * 18-byte sense_data this packs the entry to exactly 64 bytes.
 */
struct ctio_ret_entry {
	uint8_t entry_type;		/* Entry type. */
#define CTIO_RET_TYPE 7			/* CTIO return entry */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status. */
	__le32 reserved_2;
	uint8_t lun;			/* SCSI LUN */
	uint8_t initiator_id;
	uint8_t reserved_3;
	uint8_t target_id;
	__le32 option_flags;
	uint8_t status;
	uint8_t scsi_status;
	uint8_t tag_value;		/* Received queue tag message value */
	uint8_t tag_type;		/* Received queue tag message type */
	__le32 transfer_length;
	__le32 residual;
	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
	__le16 dseg_count;		/* Data segment count. */
	__le32 dseg_0_address;		/* Data segment 0 address. */
	__le32 dseg_0_length;		/* Data segment 0 length. */
	__le32 dseg_1_address;		/* Data segment 1 address. */
	__le16 dseg_1_length;		/* Data segment 1 length. */
	uint8_t sense_data[18];
};
/*
 * ISP queue - CTIO A64 entry structure definition.
 *
 * 64-bit addressing CTIO; each data-segment address is two little-endian
 * 32-bit words.
 */
struct ctio_a64_entry {
	uint8_t entry_type;		/* Entry type. */
#define CTIO_A64_TYPE 0xF		/* CTIO A64 entry */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status. */
	__le32 reserved_2;
	uint8_t lun;			/* SCSI LUN */
	uint8_t initiator_id;
	uint8_t reserved_3;
	uint8_t target_id;
	__le32 option_flags;
	uint8_t status;
	uint8_t scsi_status;
	uint8_t tag_value;		/* Received queue tag message value */
	uint8_t tag_type;		/* Received queue tag message type */
	__le32 transfer_length;
	__le32 residual;
	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
	__le16 dseg_count;		/* Data segment count. */
	__le32 reserved_4[2];
	__le32 dseg_0_address[2];	/* Data segment 0 address. */
	__le32 dseg_0_length;		/* Data segment 0 length. */
	__le32 dseg_1_address[2];	/* Data segment 1 address. */
	__le32 dseg_1_length;		/* Data segment 1 length. */
};
/*
 * ISP queue - CTIO returned entry structure definition.
 *
 * 64-bit addressing variant of the returned CTIO, with sense data in
 * place of the scatter/gather segments.
 */
struct ctio_a64_ret_entry {
	uint8_t entry_type;		/* Entry type. */
#define CTIO_A64_RET_TYPE 0xF		/* CTIO A64 returned entry */
	uint8_t entry_count;		/* Entry count. */
	uint8_t reserved_1;
	uint8_t entry_status;		/* Entry Status. */
	__le32 reserved_2;
	uint8_t lun;			/* SCSI LUN */
	uint8_t initiator_id;
	uint8_t reserved_3;
	uint8_t target_id;
	__le32 option_flags;
	uint8_t status;
	uint8_t scsi_status;
	uint8_t tag_value;		/* Received queue tag message value */
	uint8_t tag_type;		/* Received queue tag message type */
	__le32 transfer_length;
	__le32 residual;
	__le16 timeout;			/* 0 = 30 seconds, 0xFFFF = disable */
	__le16 dseg_count;		/* Data segment count. */
	__le16 reserved_4[7];
	uint8_t sense_data[18];
};
/*
 * ISP request and response queue entry sizes
 */
#define RESPONSE_ENTRY_SIZE	(sizeof(struct response))
#define REQUEST_ENTRY_SIZE	(sizeof(request_t))
/*
 * ISP status entry - completion status definitions.
 *
 * Values up to CS_LVD_BUS_ERROR come from the ISP firmware; the last
 * three are synthesized by the driver itself.
 */
#define CS_COMPLETE 0x0		/* No errors */
#define CS_INCOMPLETE 0x1	/* Incomplete transfer of cmd. */
#define CS_DMA 0x2		/* A DMA direction error. */
#define CS_TRANSPORT 0x3	/* Transport error. */
#define CS_RESET 0x4		/* SCSI bus reset occurred */
#define CS_ABORTED 0x5		/* System aborted command. */
#define CS_TIMEOUT 0x6		/* Timeout error. */
#define CS_DATA_OVERRUN 0x7	/* Data overrun. */
#define CS_COMMAND_OVERRUN 0x8	/* Command Overrun. */
#define CS_STATUS_OVERRUN 0x9	/* Status Overrun. */
#define CS_BAD_MSG 0xA		/* Bad msg after status phase. */
#define CS_NO_MSG_OUT 0xB	/* No msg out after selection. */
#define CS_EXTENDED_ID 0xC	/* Extended ID failed. */
#define CS_IDE_MSG 0xD		/* Target rejected IDE msg. */
#define CS_ABORT_MSG 0xE	/* Target rejected abort msg. */
#define CS_REJECT_MSG 0xF	/* Target rejected reject msg. */
#define CS_NOP_MSG 0x10		/* Target rejected NOP msg. */
#define CS_PARITY_MSG 0x11	/* Target rejected parity msg. */
#define CS_DEV_RESET_MSG 0x12	/* Target rejected dev rst msg. */
#define CS_ID_MSG 0x13		/* Target rejected ID msg. */
#define CS_FREE 0x14		/* Unexpected bus free. */
#define CS_DATA_UNDERRUN 0x15	/* Data Underrun. */
/* 'TRANACTION' is a historical misspelling of TRANSACTION, kept as-is
 * for source compatibility. */
#define CS_TRANACTION_1 0x18	/* Transaction error 1 */
#define CS_TRANACTION_2 0x19	/* Transaction error 2 */
#define CS_TRANACTION_3 0x1a	/* Transaction error 3 */
#define CS_INV_ENTRY_TYPE 0x1b	/* Invalid entry type */
#define CS_DEV_QUEUE_FULL 0x1c	/* Device queue full */
#define CS_PHASED_SKIPPED 0x1d	/* SCSI phase skipped */
#define CS_ARS_FAILED 0x1e	/* ARS failed */
#define CS_LVD_BUS_ERROR 0x21	/* LVD bus error */
#define CS_BAD_PAYLOAD 0x80	/* Driver defined */
#define CS_UNKNOWN 0x81		/* Driver defined */
#define CS_RETRY 0x82		/* Driver defined */
/*
 * ISP target entries - Option flags bit definitions.
 */
#define OF_ENABLE_TAG BIT_1	/* Tagged queue action enable */
#define OF_DATA_IN BIT_6	/* Data in to initiator */
				/* (data from target to initiator) */
#define OF_DATA_OUT BIT_7	/* Data out from initiator */
				/* (data from initiator to target) */
#define OF_NO_DATA (BIT_7 | BIT_6)	/* Both direction bits set = no data */
#define OF_DISC_DISABLED BIT_15	/* Disconnects disabled */
#define OF_DISABLE_SDP BIT_24	/* Disable sending save data ptr */
#define OF_SEND_RDP BIT_26	/* Send restore data pointers msg */
#define OF_FORCE_DISC BIT_30	/* Disconnects mandatory */
#define OF_SSTS BIT_31		/* Send SCSI status */
/*
 * BUS parameters/settings structure - UNUSED
 *
 * One instance per SCSI bus; see scsi_qla_host.bus_settings[].
 */
struct bus_param {
	uint8_t id;			/* Host adapter SCSI id */
	uint8_t bus_reset_delay;	/* SCSI bus reset delay. */
	uint8_t failed_reset_count;	/* number of time reset failed */
	uint8_t unused;
	uint16_t device_enables;	/* Device enable bits. */
	uint16_t lun_disables;		/* LUN disable bits. */
	uint16_t qtag_enables;		/* Tag queue enables. */
	uint16_t hiwat;			/* High water mark per device. */
	uint8_t reset_marker:1;
	uint8_t disable_scsi_reset:1;
	uint8_t scsi_bus_dead:1;	/* SCSI Bus is Dead, when 5 back to back resets failed */
};
/* Driver setup options: global feature-disable flags plus per-feature
 * bit masks (presumably one bit per target ID — confirm against the
 * code that consumes sync_mask/wide_mask/ppr_mask). */
struct qla_driver_setup {
	uint32_t no_sync:1;	/* Disable synchronous transfers */
	uint32_t no_wide:1;	/* Disable wide transfers */
	uint32_t no_ppr:1;	/* Disable PPR negotiation */
	uint32_t no_nvram:1;	/* Ignore NVRAM settings */
	uint16_t sync_mask;
	uint16_t wide_mask;
	uint16_t ppr_mask;
};
/*
 * Linux Host Adapter structure
 *
 * Per-adapter state; instances are chained through the 'next' pointer.
 */
struct scsi_qla_host {
	/* Linux adapter configuration data */
	struct Scsi_Host *host;	/* pointer to host data */
	struct scsi_qla_host *next;
	struct device_reg __iomem *iobase;	/* Base Memory-mapped I/O address */
	unsigned char __iomem *mmpbase;	/* memory mapped address */
	unsigned long host_no;
	struct pci_dev *pdev;
	uint8_t devnum;
	uint8_t revision;
	uint8_t ports;
	unsigned long actthreads;
	unsigned long isr_count;	/* Interrupt count */
	unsigned long spurious_int;
	/* Outstandings ISP commands. */
	struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
	/* BUS configuration data */
	struct bus_param bus_settings[MAX_BUSES];
	/* Received ISP mailbox data. */
	volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
	/* Request ring: commands queued to the ISP. */
	dma_addr_t request_dma;		/* Physical Address */
	request_t *request_ring;	/* Base virtual address */
	request_t *request_ring_ptr;	/* Current address. */
	uint16_t req_ring_index;	/* Current index. */
	uint16_t req_q_cnt;		/* Number of available entries. */
	/* Response ring: completions returned by the ISP. */
	dma_addr_t response_dma;	/* Physical address. */
	struct response *response_ring;	/* Base virtual address */
	struct response *response_ring_ptr;	/* Current address. */
	uint16_t rsp_ring_index;	/* Current index. */
	struct list_head done_q;	/* Done queue */
	struct completion *mailbox_wait;	/* Waiter for a mailbox command */
	struct timer_list mailbox_timer;
	/* Adapter state flags (bit numbers in the trailing comments). */
	volatile struct {
		uint32_t online:1;		/* 0 */
		uint32_t reset_marker:1;	/* 1 */
		uint32_t disable_host_adapter:1;	/* 2 */
		uint32_t reset_active:1;	/* 3 */
		uint32_t abort_isp_active:1;	/* 4 */
		uint32_t disable_risc_code_load:1;	/* 5 */
	} flags;
	struct nvram nvram;
	int nvram_valid;
	/* Firmware Info */
	unsigned short fwstart;	/* start address for F/W */
	unsigned char fwver1;	/* F/W version first char */
	unsigned char fwver2;	/* F/W version second char */
	unsigned char fwver3;	/* F/W version third char */
};
#endif /* _QLA1280_H */
|
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USBF USB Function driver
*
* Copyright 2022 Schneider Electric
* Author: Herve Codina <[email protected]>
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/usb/role.h>
#define USBF_NUM_ENDPOINTS 16
#define USBF_EP0_MAX_PCKT_SIZE 64
/* EPC registers */
#define USBF_REG_USB_CONTROL 0x000
#define USBF_USB_PUE2 BIT(2)
#define USBF_USB_CONNECTB BIT(3)
#define USBF_USB_DEFAULT BIT(4)
#define USBF_USB_CONF BIT(5)
#define USBF_USB_SUSPEND BIT(6)
#define USBF_USB_RSUM_IN BIT(7)
#define USBF_USB_SOF_RCV BIT(8)
#define USBF_USB_FORCEFS BIT(9)
#define USBF_USB_INT_SEL BIT(10)
#define USBF_USB_SOF_CLK_MODE BIT(11)
#define USBF_REG_USB_STATUS 0x004
#define USBF_USB_RSUM_OUT BIT(1)
#define USBF_USB_SPND_OUT BIT(2)
#define USBF_USB_USB_RST BIT(3)
#define USBF_USB_DEFAULT_ST BIT(4)
#define USBF_USB_CONF_ST BIT(5)
#define USBF_USB_SPEED_MODE BIT(6)
#define USBF_USB_SOF_DELAY_STATUS BIT(31)
#define USBF_REG_USB_ADDRESS 0x008
#define USBF_USB_SOF_STATUS BIT(15)
#define USBF_USB_SET_USB_ADDR(_a) ((_a) << 16)
#define USBF_USB_GET_FRAME(_r) ((_r) & 0x7FF)
#define USBF_REG_SETUP_DATA0 0x018
#define USBF_REG_SETUP_DATA1 0x01C
#define USBF_REG_USB_INT_STA 0x020
#define USBF_USB_RSUM_INT BIT(1)
#define USBF_USB_SPND_INT BIT(2)
#define USBF_USB_USB_RST_INT BIT(3)
#define USBF_USB_SOF_INT BIT(4)
#define USBF_USB_SOF_ERROR_INT BIT(5)
#define USBF_USB_SPEED_MODE_INT BIT(6)
#define USBF_USB_EPN_INT(_n) (BIT(8) << (_n)) /* n=0..15 */
#define USBF_REG_USB_INT_ENA 0x024
#define USBF_USB_RSUM_EN BIT(1)
#define USBF_USB_SPND_EN BIT(2)
#define USBF_USB_USB_RST_EN BIT(3)
#define USBF_USB_SOF_EN BIT(4)
#define USBF_USB_SOF_ERROR_EN BIT(5)
#define USBF_USB_SPEED_MODE_EN BIT(6)
#define USBF_USB_EPN_EN(_n) (BIT(8) << (_n)) /* n=0..15 */
#define USBF_BASE_EP0 0x028
/* EP0 registers offsets from Base + USBF_BASE_EP0 (EP0 regs area) */
#define USBF_REG_EP0_CONTROL 0x00
#define USBF_EP0_ONAK BIT(0)
#define USBF_EP0_INAK BIT(1)
#define USBF_EP0_STL BIT(2)
#define USBF_EP0_PERR_NAK_CLR BIT(3)
#define USBF_EP0_INAK_EN BIT(4)
#define USBF_EP0_DW_MASK (0x3 << 5)
#define USBF_EP0_DW(_s) ((_s) << 5)
#define USBF_EP0_DEND BIT(7)
#define USBF_EP0_BCLR BIT(8)
#define USBF_EP0_PIDCLR BIT(9)
#define USBF_EP0_AUTO BIT(16)
#define USBF_EP0_OVERSEL BIT(17)
#define USBF_EP0_STGSEL BIT(18)
#define USBF_REG_EP0_STATUS 0x04
#define USBF_EP0_SETUP_INT BIT(0)
#define USBF_EP0_STG_START_INT BIT(1)
#define USBF_EP0_STG_END_INT BIT(2)
#define USBF_EP0_STALL_INT BIT(3)
#define USBF_EP0_IN_INT BIT(4)
#define USBF_EP0_OUT_INT BIT(5)
#define USBF_EP0_OUT_OR_INT BIT(6)
#define USBF_EP0_OUT_NULL_INT BIT(7)
#define USBF_EP0_IN_EMPTY BIT(8)
#define USBF_EP0_IN_FULL BIT(9)
#define USBF_EP0_IN_DATA BIT(10)
#define USBF_EP0_IN_NAK_INT BIT(11)
#define USBF_EP0_OUT_EMPTY BIT(12)
#define USBF_EP0_OUT_FULL BIT(13)
#define USBF_EP0_OUT_NULL BIT(14)
#define USBF_EP0_OUT_NAK_INT BIT(15)
#define USBF_EP0_PERR_NAK_INT BIT(16)
#define USBF_EP0_PERR_NAK BIT(17)
#define USBF_EP0_PID BIT(18)
#define USBF_REG_EP0_INT_ENA 0x08
#define USBF_EP0_SETUP_EN BIT(0)
#define USBF_EP0_STG_START_EN BIT(1)
#define USBF_EP0_STG_END_EN BIT(2)
#define USBF_EP0_STALL_EN BIT(3)
#define USBF_EP0_IN_EN BIT(4)
#define USBF_EP0_OUT_EN BIT(5)
#define USBF_EP0_OUT_OR_EN BIT(6)
#define USBF_EP0_OUT_NULL_EN BIT(7)
#define USBF_EP0_IN_NAK_EN BIT(11)
#define USBF_EP0_OUT_NAK_EN BIT(15)
#define USBF_EP0_PERR_NAK_EN BIT(16)
#define USBF_REG_EP0_LENGTH 0x0C
#define USBF_EP0_LDATA (0x7FF << 0)
#define USBF_REG_EP0_READ 0x10
#define USBF_REG_EP0_WRITE 0x14
#define USBF_BASE_EPN(_n) (0x040 + (_n) * 0x020)
/* EPn registers offsets from Base + USBF_BASE_EPN(n-1). n=1..15 */
#define USBF_REG_EPN_CONTROL 0x000
#define USBF_EPN_ONAK BIT(0)
#define USBF_EPN_OSTL BIT(2)
#define USBF_EPN_ISTL BIT(3)
#define USBF_EPN_OSTL_EN BIT(4)
#define USBF_EPN_DW_MASK (0x3 << 5)
#define USBF_EPN_DW(_s) ((_s) << 5)
#define USBF_EPN_DEND BIT(7)
#define USBF_EPN_CBCLR BIT(8)
#define USBF_EPN_BCLR BIT(9)
#define USBF_EPN_OPIDCLR BIT(10)
#define USBF_EPN_IPIDCLR BIT(11)
#define USBF_EPN_AUTO BIT(16)
#define USBF_EPN_OVERSEL BIT(17)
#define USBF_EPN_MODE_MASK (0x3 << 24)
#define USBF_EPN_MODE_BULK (0x0 << 24)
#define USBF_EPN_MODE_INTR (0x1 << 24)
#define USBF_EPN_MODE_ISO (0x2 << 24)
#define USBF_EPN_DIR0 BIT(26)
#define USBF_EPN_BUF_TYPE_DOUBLE BIT(30)
#define USBF_EPN_EN BIT(31)
#define USBF_REG_EPN_STATUS 0x004
#define USBF_EPN_IN_EMPTY BIT(0)
#define USBF_EPN_IN_FULL BIT(1)
#define USBF_EPN_IN_DATA BIT(2)
#define USBF_EPN_IN_INT BIT(3)
#define USBF_EPN_IN_STALL_INT BIT(4)
#define USBF_EPN_IN_NAK_ERR_INT BIT(5)
#define USBF_EPN_IN_END_INT BIT(7)
#define USBF_EPN_IPID BIT(10)
#define USBF_EPN_OUT_EMPTY BIT(16)
#define USBF_EPN_OUT_FULL BIT(17)
#define USBF_EPN_OUT_NULL_INT BIT(18)
#define USBF_EPN_OUT_INT BIT(19)
#define USBF_EPN_OUT_STALL_INT BIT(20)
#define USBF_EPN_OUT_NAK_ERR_INT BIT(21)
#define USBF_EPN_OUT_OR_INT BIT(22)
#define USBF_EPN_OUT_END_INT BIT(23)
#define USBF_EPN_ISO_CRC BIT(24)
#define USBF_EPN_ISO_OR BIT(26)
#define USBF_EPN_OUT_NOTKN BIT(27)
#define USBF_EPN_ISO_OPID BIT(28)
#define USBF_EPN_ISO_PIDERR BIT(29)
#define USBF_REG_EPN_INT_ENA 0x008
#define USBF_EPN_IN_EN BIT(3)
#define USBF_EPN_IN_STALL_EN BIT(4)
#define USBF_EPN_IN_NAK_ERR_EN BIT(5)
#define USBF_EPN_IN_END_EN BIT(7)
#define USBF_EPN_OUT_NULL_EN BIT(18)
#define USBF_EPN_OUT_EN BIT(19)
#define USBF_EPN_OUT_STALL_EN BIT(20)
#define USBF_EPN_OUT_NAK_ERR_EN BIT(21)
#define USBF_EPN_OUT_OR_EN BIT(22)
#define USBF_EPN_OUT_END_EN BIT(23)
#define USBF_REG_EPN_DMA_CTRL 0x00C
#define USBF_EPN_DMAMODE0 BIT(0)
#define USBF_EPN_DMA_EN BIT(4)
#define USBF_EPN_STOP_SET BIT(8)
#define USBF_EPN_BURST_SET BIT(9)
#define USBF_EPN_DEND_SET BIT(10)
#define USBF_EPN_STOP_MODE BIT(11)
#define USBF_REG_EPN_PCKT_ADRS 0x010
#define USBF_EPN_MPKT(_l) ((_l) << 0)
#define USBF_EPN_BASEAD(_a) ((_a) << 16)
#define USBF_REG_EPN_LEN_DCNT 0x014
#define USBF_EPN_GET_LDATA(_r) ((_r) & 0x7FF)
#define USBF_EPN_SET_DMACNT(_c) ((_c) << 16)
#define USBF_EPN_GET_DMACNT(_r) (((_r) >> 16) & 0x1ff)
#define USBF_REG_EPN_READ 0x018
#define USBF_REG_EPN_WRITE 0x01C
/* AHB-EPC Bridge registers */
#define USBF_REG_AHBSCTR 0x1000
#define USBF_REG_AHBMCTR 0x1004
#define USBF_SYS_WBURST_TYPE BIT(2)
#define USBF_SYS_ARBITER_CTR BIT(31)
#define USBF_REG_AHBBINT 0x1008
#define USBF_SYS_ERR_MASTER (0x0F << 0)
#define USBF_SYS_SBUS_ERRINT0 BIT(4)
#define USBF_SYS_SBUS_ERRINT1 BIT(5)
#define USBF_SYS_MBUS_ERRINT BIT(6)
#define USBF_SYS_VBUS_INT BIT(13)
#define USBF_SYS_DMA_ENDINT_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
#define USBF_REG_AHBBINTEN 0x100C
#define USBF_SYS_SBUS_ERRINT0EN BIT(4)
#define USBF_SYS_SBUS_ERRINT1EN BIT(5)
#define USBF_SYS_MBUS_ERRINTEN BIT(6)
#define USBF_SYS_VBUS_INTEN BIT(13)
#define USBF_SYS_DMA_ENDINTEN_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
#define USBF_REG_EPCTR 0x1010
#define USBF_SYS_EPC_RST BIT(0)
#define USBF_SYS_PLL_RST BIT(2)
#define USBF_SYS_PLL_LOCK BIT(4)
#define USBF_SYS_PLL_RESUME BIT(5)
#define USBF_SYS_VBUS_LEVEL BIT(8)
#define USBF_SYS_DIRPD BIT(12)
#define USBF_REG_USBSSVER 0x1020
#define USBF_REG_USBSSCONF 0x1024
#define USBF_SYS_DMA_AVAILABLE(_n) (BIT(0) << (_n)) /* _n=0..15 */
#define USBF_SYS_EP_AVAILABLE(_n) (BIT(16) << (_n)) /* _n=0..15 */
#define USBF_BASE_DMA_EPN(_n) (0x1110 + (_n) * 0x010)
/* EPn DMA registers offsets from Base USBF_BASE_DMA_EPN(n-1). n=1..15*/
#define USBF_REG_DMA_EPN_DCR1 0x00
#define USBF_SYS_EPN_REQEN BIT(0)
#define USBF_SYS_EPN_DIR0 BIT(1)
#define USBF_SYS_EPN_SET_DMACNT(_c) ((_c) << 16)
#define USBF_SYS_EPN_GET_DMACNT(_r) (((_r) >> 16) & 0x0FF)
#define USBF_REG_DMA_EPN_DCR2 0x04
#define USBF_SYS_EPN_MPKT(_s) ((_s) << 0)
#define USBF_SYS_EPN_LMPKT(_l) ((_l) << 16)
#define USBF_REG_DMA_EPN_TADR 0x08
/* USB request
 *
 * Driver-private wrapper around a gadget usb_request.
 */
struct usbf_req {
	struct usb_request req;		/* Gadget-visible request (embedded) */
	struct list_head queue;		/* List node for queuing this request */
	unsigned int is_zero_sent : 1;	/* Trailing zero-length packet already sent */
	unsigned int is_mapped : 1;	/* Buffer DMA-mapped — usage outside this chunk, confirm */
	enum {
		USBF_XFER_START,
		USBF_XFER_WAIT_DMA,
		USBF_XFER_SEND_NULL,
		USBF_XFER_WAIT_DMA_SHORT,
		USBF_XFER_WAIT_BRIDGE,
		USBF_XFER_WAIT_END,
	} xfer_step;			/* Transfer state-machine position */
	size_t dma_size;		/* Bytes handled by DMA — confirm against DMA path */
};
/* USB Endpoint
 *
 * Per-endpoint state; regs/dma_regs point into the controller's
 * per-endpoint register windows.
 */
struct usbf_ep {
	struct usb_ep ep;		/* Gadget-visible endpoint (embedded) */
	char name[32];
	struct list_head queue;		/* Pending usbf_req list */
	unsigned int is_processing : 1;
	unsigned int is_in : 1;		/* IN (device-to-host) direction */
	struct usbf_udc *udc;		/* Owning controller */
	void __iomem *regs;		/* EPn register window */
	void __iomem *dma_regs;		/* EPn DMA (AHB bridge) register window */
	unsigned int id : 8;		/* Endpoint number (0..15) */
	unsigned int disabled : 1;
	unsigned int is_wedged : 1;
	unsigned int delayed_status : 1;
	u32 status;			/* Latched status bits consumed by the PIO handlers */
	void (*bridge_on_dma_end)(struct usbf_ep *ep);	/* Hook run at DMA end — confirm at call site */
};
/* EP0 control-transfer state machine (setup, data and status stages). */
enum usbf_ep0state {
	EP0_IDLE,
	EP0_IN_DATA_PHASE,
	EP0_OUT_DATA_PHASE,
	EP0_OUT_STATUS_START_PHASE,
	EP0_OUT_STATUS_PHASE,
	EP0_OUT_STATUS_END_PHASE,
	EP0_IN_STATUS_START_PHASE,
	EP0_IN_STATUS_PHASE,
	EP0_IN_STATUS_END_PHASE,
};
/* USBF device controller instance. */
struct usbf_udc {
	struct usb_gadget gadget;	/* Gadget-visible controller (embedded) */
	struct usb_gadget_driver *driver;	/* Bound function driver */
	struct device *dev;
	void __iomem *regs;		/* Controller register base */
	spinlock_t lock;
	bool is_remote_wakeup;
	bool is_usb_suspended;
	struct usbf_ep ep[USBF_NUM_ENDPOINTS];
	/* for EP0 control messages */
	enum usbf_ep0state ep0state;
	struct usbf_req setup_reply;	/* Internal request for driver-generated EP0 replies */
	u8 ep0_buf[USBF_EP0_MAX_PCKT_SIZE];	/* Buffer backing setup_reply — confirm at use site */
};
/* Static per-endpoint description used to populate usbf_ep_info[]. */
struct usbf_ep_info {
	const char *name;
	struct usb_ep_caps caps;	/* Type/direction capabilities */
	u16 base_addr;			/* Buffer base inside controller RAM (in 32-bit words) */
	unsigned int is_double : 1;	/* Double-buffered endpoint */
	u16 maxpacket_limit;		/* Max packet size in bytes */
};
#define USBF_SINGLE_BUFFER 0
#define USBF_DOUBLE_BUFFER 1
/* Initializer shorthand for one usbf_ep_info[] slot. */
#define USBF_EP_INFO(_name, _caps, _base_addr, _is_double, _maxpacket_limit)  \
	{                                                                     \
		.name = _name,                                                \
		.caps = _caps,                                                \
		.base_addr = _base_addr,                                      \
		.is_double = _is_double,                                      \
		.maxpacket_limit = _maxpacket_limit,                          \
	}
/* This table is computed from the recommended values provided in the SOC
 * datasheet. The buffer type (single/double) and the endpoint type cannot
 * be changed. The mapping in internal RAM (base_addr and number of words)
 * for each endpoints depends on the max packet size and the buffer type.
 */
static const struct usbf_ep_info usbf_ep_info[USBF_NUM_ENDPOINTS] = {
	/* ep0: buf @0x0000 64 bytes, fixed 32 words */
	[0] = USBF_EP_INFO("ep0-ctrl",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
				       USB_EP_CAPS_DIR_ALL),
			   0x0000, USBF_SINGLE_BUFFER, USBF_EP0_MAX_PCKT_SIZE),
	/* ep1: buf @0x0020, 2 buffers 512 bytes -> (512 * 2 / 4) words */
	[1] = USBF_EP_INFO("ep1-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x0020, USBF_DOUBLE_BUFFER, 512),
	/* ep2: buf @0x0120, 2 buffers 512 bytes -> (512 * 2 / 4) words */
	[2] = USBF_EP_INFO("ep2-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x0120, USBF_DOUBLE_BUFFER, 512),
	/* ep3: buf @0x0220, 1 buffer 512 bytes -> (512 * 1 / 4) words */
	[3] = USBF_EP_INFO("ep3-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x0220, USBF_SINGLE_BUFFER, 512),
	/* ep4: buf @0x02A0, 1 buffer 512 bytes -> (512 * 1 / 4) words */
	[4] = USBF_EP_INFO("ep4-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x02A0, USBF_SINGLE_BUFFER, 512),
	/* ep5: buf @0x0320, 1 buffer 512 bytes -> (512 * 1 / 4) words */
	[5] = USBF_EP_INFO("ep5-bulk",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
				       USB_EP_CAPS_DIR_ALL),
			   0x0320, USBF_SINGLE_BUFFER, 512),
	/* ep6: buf @0x03A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
	[6] = USBF_EP_INFO("ep6-int",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
				       USB_EP_CAPS_DIR_ALL),
			   0x03A0, USBF_SINGLE_BUFFER, 1024),
	/* ep7: buf @0x04A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
	[7] = USBF_EP_INFO("ep7-int",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
				       USB_EP_CAPS_DIR_ALL),
			   0x04A0, USBF_SINGLE_BUFFER, 1024),
	/* ep8: buf @0x0520, 1 buffer 1024 bytes -> (1024 * 1 / 4) words
	 * NOTE(review): ep8's base is only 0x80 words past ep7's, less than
	 * one 1024-byte buffer — confirm this mapping against the datasheet.
	 */
	[8] = USBF_EP_INFO("ep8-int",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
				       USB_EP_CAPS_DIR_ALL),
			   0x0520, USBF_SINGLE_BUFFER, 1024),
	/* ep9: buf @0x0620, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
	[9] = USBF_EP_INFO("ep9-int",
			   USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
				       USB_EP_CAPS_DIR_ALL),
			   0x0620, USBF_SINGLE_BUFFER, 1024),
	/* ep10: buf @0x0720, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[10] = USBF_EP_INFO("ep10-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0720, USBF_DOUBLE_BUFFER, 1024),
	/* ep11: buf @0x0920, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[11] = USBF_EP_INFO("ep11-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0920, USBF_DOUBLE_BUFFER, 1024),
	/* ep12: buf @0x0B20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[12] = USBF_EP_INFO("ep12-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0B20, USBF_DOUBLE_BUFFER, 1024),
	/* ep13: buf @0x0D20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[13] = USBF_EP_INFO("ep13-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0D20, USBF_DOUBLE_BUFFER, 1024),
	/* ep14: buf @0x0F20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[14] = USBF_EP_INFO("ep14-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x0F20, USBF_DOUBLE_BUFFER, 1024),
	/* ep15: buf @0x1120, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
	[15] = USBF_EP_INFO("ep15-iso",
			    USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
					USB_EP_CAPS_DIR_ALL),
			    0x1120, USBF_DOUBLE_BUFFER, 1024),
};
/* Read a 32-bit controller register at @offset from the register base. */
static inline u32 usbf_reg_readl(struct usbf_udc *udc, uint offset)
{
	return readl(udc->regs + offset);
}
/* Write a 32-bit controller register at @offset from the register base. */
static inline void usbf_reg_writel(struct usbf_udc *udc, uint offset, u32 val)
{
	writel(val, udc->regs + offset);
}
/* Read-modify-write: set @set bits in the controller register at @offset. */
static inline void usbf_reg_bitset(struct usbf_udc *udc, uint offset, u32 set)
{
	usbf_reg_writel(udc, offset, usbf_reg_readl(udc, offset) | set);
}
/* Read-modify-write: clear @clr bits in the controller register at @offset. */
static inline void usbf_reg_bitclr(struct usbf_udc *udc, uint offset, u32 clr)
{
	usbf_reg_writel(udc, offset, usbf_reg_readl(udc, offset) & ~clr);
}
/* Read-modify-write: clear @clr then set @set bits in the controller
 * register at @offset (@set wins on overlapping bits). */
static inline void usbf_reg_clrset(struct usbf_udc *udc, uint offset,
				   u32 clr, u32 set)
{
	u32 val = usbf_reg_readl(udc, offset);

	usbf_reg_writel(udc, offset, (val & ~clr) | set);
}
/* Read a 32-bit register in this endpoint's register window. */
static inline u32 usbf_ep_reg_readl(struct usbf_ep *ep, uint offset)
{
	return readl(ep->regs + offset);
}
/* Read @count consecutive 32-bit words from the endpoint register at
 * @offset (a FIFO-style register) into @dst. */
static inline void usbf_ep_reg_read_rep(struct usbf_ep *ep, uint offset,
				   void *dst, uint count)
{
	readsl(ep->regs + offset, dst, count);
}
/* Write a 32-bit register in this endpoint's register window. */
static inline void usbf_ep_reg_writel(struct usbf_ep *ep, uint offset, u32 val)
{
	writel(val, ep->regs + offset);
}
/* Write @count consecutive 32-bit words from @src to the endpoint
 * register at @offset (a FIFO-style register). */
static inline void usbf_ep_reg_write_rep(struct usbf_ep *ep, uint offset,
					 const void *src, uint count)
{
	writesl(ep->regs + offset, src, count);
}
/* Read-modify-write: set @set bits in an endpoint register. */
static inline void usbf_ep_reg_bitset(struct usbf_ep *ep, uint offset, u32 set)
{
	usbf_ep_reg_writel(ep, offset, usbf_ep_reg_readl(ep, offset) | set);
}
/* Read-modify-write: clear @clr bits in an endpoint register. */
static inline void usbf_ep_reg_bitclr(struct usbf_ep *ep, uint offset, u32 clr)
{
	usbf_ep_reg_writel(ep, offset, usbf_ep_reg_readl(ep, offset) & ~clr);
}
/* Read-modify-write: clear @clr then set @set bits in an endpoint
 * register (@set wins on overlapping bits). */
static inline void usbf_ep_reg_clrset(struct usbf_ep *ep, uint offset,
				      u32 clr, u32 set)
{
	u32 val = usbf_ep_reg_readl(ep, offset);

	usbf_ep_reg_writel(ep, offset, (val & ~clr) | set);
}
/* Read a 32-bit register in this endpoint's DMA (AHB bridge) window. */
static inline u32 usbf_ep_dma_reg_readl(struct usbf_ep *ep, uint offset)
{
	return readl(ep->dma_regs + offset);
}
/* Write a 32-bit register in this endpoint's DMA (AHB bridge) window. */
static inline void usbf_ep_dma_reg_writel(struct usbf_ep *ep, uint offset,
					  u32 val)
{
	writel(val, ep->dma_regs + offset);
}
/* Read-modify-write: set @set bits in an endpoint DMA register. */
static inline void usbf_ep_dma_reg_bitset(struct usbf_ep *ep, uint offset,
					  u32 set)
{
	usbf_ep_dma_reg_writel(ep, offset,
			       usbf_ep_dma_reg_readl(ep, offset) | set);
}
/* Read-modify-write: clear @clr bits in an endpoint DMA register. */
static inline void usbf_ep_dma_reg_bitclr(struct usbf_ep *ep, uint offset,
					  u32 clr)
{
	usbf_ep_dma_reg_writel(ep, offset,
			       usbf_ep_dma_reg_readl(ep, offset) & ~clr);
}
/*
 * Queue a zero-length IN packet on EP0 by asserting DEND with no data
 * in the FIFO. When @is_data1 is set, USBF_EP0_PIDCLR is also asserted
 * to clear the current data PID state first.
 */
static void usbf_ep0_send_null(struct usbf_ep *ep0, bool is_data1)
{
	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL,
			   is_data1 ? (USBF_EP0_DEND | USBF_EP0_PIDCLR)
				    : USBF_EP0_DEND);
}
/*
 * Push the next chunk of an IN request into the EP0 FIFO (PIO).
 *
 * At most one max-packet is written per call; a trailing zero-length
 * packet is emitted for zero-length requests, or when the transfer ends
 * exactly on a max-packet boundary and req->req.zero is set.
 *
 * Returns -EINPROGRESS while data (or a ZLP) is still in flight, 0 once
 * nothing more has to be written.
 */
static int usbf_ep0_pio_in(struct usbf_ep *ep0, struct usbf_req *req)
{
	unsigned int left;
	unsigned int nb;
	const void *buf;
	u32 ctrl;
	u32 last;
	left = req->req.length - req->req.actual;
	if (left == 0) {
		if (!req->is_zero_sent) {
			if (req->req.length == 0) {
				/* Zero-length request: the whole transfer is one NULL packet */
				dev_dbg(ep0->udc->dev, "ep0 send null\n");
				usbf_ep0_send_null(ep0, false);
				req->is_zero_sent = 1;
				return -EINPROGRESS;
			}
			if ((req->req.actual % ep0->ep.maxpacket) == 0) {
				if (req->req.zero) {
					/* Ended on a packet boundary and the
					 * gadget asked for a trailing ZLP */
					dev_dbg(ep0->udc->dev, "ep0 send null\n");
					usbf_ep0_send_null(ep0, false);
					req->is_zero_sent = 1;
					return -EINPROGRESS;
				}
			}
		}
		return 0;
	}
	if (left > ep0->ep.maxpacket)
		left = ep0->ep.maxpacket;
	buf = req->req.buf;
	buf += req->req.actual;
	/* Whole 32-bit words first... */
	nb = left / sizeof(u32);
	if (nb) {
		usbf_ep_reg_write_rep(ep0, USBF_REG_EP0_WRITE, buf, nb);
		buf += (nb * sizeof(u32));
		req->req.actual += (nb * sizeof(u32));
		left -= (nb * sizeof(u32));
	}
	ctrl = usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL);
	ctrl &= ~USBF_EP0_DW_MASK;
	if (left) {
		/* ...then the 1-3 residue bytes packed into one word; only
		 * the low 'left' bytes of 'last' are meaningful, the DW field
		 * tells the controller how many bytes are valid. */
		memcpy(&last, buf, left);
		usbf_ep_reg_writel(ep0, USBF_REG_EP0_WRITE, last);
		ctrl |= USBF_EP0_DW(left);
		req->req.actual += left;
	}
	/* DEND marks the end of the packet */
	usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, ctrl | USBF_EP0_DEND);
	dev_dbg(ep0->udc->dev, "ep0 send %u/%u\n",
		req->req.actual, req->req.length);
	return -EINPROGRESS;
}
/*
 * Drain EP0 OUT data received from the host into @req (PIO).
 *
 * ep0->status holds the latched EP0 interrupt status bits. Returns 0
 * once the request is complete — with req->req.status set to 0 on
 * success, -EOVERFLOW when more data arrived than the buffer can hold,
 * or -EREMOTEIO for a short transfer when short_not_ok is set — and
 * -EINPROGRESS while more OUT packets are expected.
 */
static int usbf_ep0_pio_out(struct usbf_ep *ep0, struct usbf_req *req)
{
	int req_status = 0;
	unsigned int count;
	unsigned int recv;
	unsigned int left;
	unsigned int nb;
	void *buf;
	u32 last;
	if (ep0->status & USBF_EP0_OUT_INT) {
		/* A packet is in the FIFO; its byte count comes from the
		 * EP0 length register. */
		recv = usbf_ep_reg_readl(ep0, USBF_REG_EP0_LENGTH) & USBF_EP0_LDATA;
		count = recv;
		buf = req->req.buf;
		buf += req->req.actual;
		left = req->req.length - req->req.actual;
		dev_dbg(ep0->udc->dev, "ep0 recv %u, left %u\n", count, left);
		if (left > ep0->ep.maxpacket)
			left = ep0->ep.maxpacket;
		if (count > left) {
			/* More data than room left: truncate and flag it */
			req_status = -EOVERFLOW;
			count = left;
		}
		if (count) {
			/* Whole 32-bit words first... */
			nb = count / sizeof(u32);
			if (nb) {
				usbf_ep_reg_read_rep(ep0, USBF_REG_EP0_READ,
					buf, nb);
				buf += (nb * sizeof(u32));
				req->req.actual += (nb * sizeof(u32));
				count -= (nb * sizeof(u32));
			}
			if (count) {
				/* ...then the 1-3 residue bytes from one
				 * final FIFO read. */
				last = usbf_ep_reg_readl(ep0, USBF_REG_EP0_READ);
				memcpy(buf, &last, count);
				req->req.actual += count;
			}
		}
		dev_dbg(ep0->udc->dev, "ep0 recv %u/%u\n",
			req->req.actual, req->req.length);
		if (req_status) {
			dev_dbg(ep0->udc->dev, "ep0 req.status=%d\n", req_status);
			req->req.status = req_status;
			return 0;
		}
		if (recv < ep0->ep.maxpacket) {
			dev_dbg(ep0->udc->dev, "ep0 short packet\n");
			/* This is a short packet -> It is the end */
			req->req.status = 0;
			return 0;
		}
		/* The Data stage of a control transfer from an endpoint to the
		 * host is complete when the endpoint does one of the following:
		 * - Has transferred exactly the expected amount of data
		 * - Transfers a packet with a payload size less than
		 *   wMaxPacketSize or transfers a zero-length packet
		 */
		if (req->req.actual == req->req.length) {
			req->req.status = 0;
			return 0;
		}
	}
	if (ep0->status & USBF_EP0_OUT_NULL_INT) {
		/* NULL packet received */
		dev_dbg(ep0->udc->dev, "ep0 null packet\n");
		if (req->req.actual != req->req.length) {
			req->req.status = req->req.short_not_ok ?
					  -EREMOTEIO : 0;
		} else {
			req->req.status = 0;
		}
		return 0;
	}
	return -EINPROGRESS;
}
/*
 * Flush the EP0 FIFO: request a buffer clear, then busy-wait (up to
 * 10ms) until the IN side reports empty with no pending data.
 */
static void usbf_ep0_fifo_flush(struct usbf_ep *ep0)
{
	u32 status;
	int err;

	usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_BCLR);

	err = readl_poll_timeout_atomic(ep0->regs + USBF_REG_EP0_STATUS, status,
		(status & (USBF_EP0_IN_DATA | USBF_EP0_IN_EMPTY)) == USBF_EP0_IN_EMPTY,
		0, 10000);
	if (err)
		dev_err(ep0->udc->dev, "ep0 flush fifo timed out\n");
}
/* Trigger transmission of a zero-length (null) packet on an IN endpoint by
 * setting the Data-END bit with no data queued.
 */
static void usbf_epn_send_null(struct usbf_ep *epn)
{
	usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_DEND);
}
/* Push the last residual bytes (@size expected < 4) into the endpoint FIFO
 * as a single 32-bit write, then latch the byte count (DW field) and mark
 * the end of the data (DEND).
 */
static void usbf_epn_send_residue(struct usbf_ep *epn, const void *buf,
	unsigned int size)
{
	u32 tmp;

	memcpy(&tmp, buf, size);
	usbf_ep_reg_writel(epn, USBF_REG_EPN_WRITE, tmp);
	usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
		USBF_EPN_DW_MASK,
		USBF_EPN_DW(size) | USBF_EPN_DEND);
}
/* PIO-write the next packet of an IN request to the endpoint FIFO.
 *
 * Sends at most one maxpacket worth of data per call, plus a trailing null
 * packet when required (zero-length request, or req.zero on a maxpacket-
 * aligned transfer). Returns 0 when the request is fully transferred,
 * -EINPROGRESS otherwise.
 */
static int usbf_epn_pio_in(struct usbf_ep *epn, struct usbf_req *req)
{
	unsigned int left;
	unsigned int nb;
	const void *buf;

	left = req->req.length - req->req.actual;
	if (left == 0) {
		if (!req->is_zero_sent) {
			if (req->req.length == 0) {
				/* Zero-length request: send one null packet */
				dev_dbg(epn->udc->dev, "ep%u send_null\n", epn->id);
				usbf_epn_send_null(epn);
				req->is_zero_sent = 1;
				return -EINPROGRESS;
			}
			if ((req->req.actual % epn->ep.maxpacket) == 0) {
				/* Transfer ended on a maxpacket boundary:
				 * terminate with a null packet if requested
				 */
				if (req->req.zero) {
					dev_dbg(epn->udc->dev, "ep%u send_null\n",
						epn->id);
					usbf_epn_send_null(epn);
					req->is_zero_sent = 1;
					return -EINPROGRESS;
				}
			}
		}
		return 0;
	}

	if (left > epn->ep.maxpacket)
		left = epn->ep.maxpacket;
	buf = req->req.buf;
	buf += req->req.actual;

	/* Write 32-bit units first, then the 1-3 residual bytes */
	nb = left / sizeof(u32);
	if (nb) {
		usbf_ep_reg_write_rep(epn, USBF_REG_EPN_WRITE, buf, nb);
		buf += (nb * sizeof(u32));
		req->req.actual += (nb * sizeof(u32));
		left -= (nb * sizeof(u32));
	}
	if (left) {
		usbf_epn_send_residue(epn, buf, left);
		req->req.actual += left;
	} else {
		/* Full 32-bit aligned packet: just mark end-of-data */
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
			USBF_EPN_DW_MASK,
			USBF_EPN_DEND);
	}

	dev_dbg(epn->udc->dev, "ep%u send %u/%u\n", epn->id, req->req.actual,
		req->req.length);
	return -EINPROGRESS;
}
/* Enable the IN_END interrupt; used as bridge_on_dma_end callback so the
 * USBF-level end-of-DMA is handled only after the bridge-level one.
 */
static void usbf_epn_enable_in_end_int(struct usbf_ep *epn)
{
	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_END_EN);
}
/* DMA state machine for an IN request (device -> host).
 *
 * Steps through req->xfer_step: START programs the bridge DMA registers and
 * kicks the transfer; WAIT_DMA completes the DMA, sends any residue or short
 * packet; SEND_NULL emits a trailing null packet; WAIT_END waits for the
 * final IN interrupt. Falls back to PIO for non 32-bit-aligned buffers.
 * Returns 0 when done, -EINPROGRESS while the transfer continues, or a
 * negative error code.
 */
static int usbf_epn_dma_in(struct usbf_ep *epn, struct usbf_req *req)
{
	unsigned int left;
	u32 npkt;
	u32 lastpkt;
	int ret;

	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
			epn->id);
		return usbf_epn_pio_in(epn, req);
	}

	left = req->req.length - req->req.actual;
	switch (req->xfer_step) {
	default:
	case USBF_XFER_START:
		if (left == 0) {
			dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
			usbf_epn_send_null(epn);
			req->xfer_step = USBF_XFER_WAIT_END;
			break;
		}
		if (left < 4) {
			/* Too small for DMA: send the residue by PIO */
			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
				left);
			usbf_epn_send_residue(epn,
				req->req.buf + req->req.actual, left);
			req->req.actual += left;
			req->xfer_step = USBF_XFER_WAIT_END;
			break;
		}
		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 1);
		if (ret < 0) {
			dev_err(epn->udc->dev, "usb_gadget_map_request failed (%d)\n",
				ret);
			return ret;
		}
		req->is_mapped = 1;
		npkt = DIV_ROUND_UP(left, epn->ep.maxpacket);
		lastpkt = (left % epn->ep.maxpacket);
		if (lastpkt == 0)
			lastpkt = epn->ep.maxpacket;
		lastpkt &= ~0x3; /* DMA is done on 32bit units */
		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2,
			USBF_SYS_EPN_MPKT(epn->ep.maxpacket) | USBF_SYS_EPN_LMPKT(lastpkt));
		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR,
			req->req.dma);
		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_SET_DMACNT(npkt));
		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_REQEN);
		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT, USBF_EPN_SET_DMACNT(npkt));
		usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
		/* The end of DMA transfer at the USBF level needs to be handled
		 * after the detection of the end of DMA transfer at the bridge
		 * level.
		 * To force this sequence, EPN_IN_END_EN will be set by the
		 * detection of the end of transfer at bridge level (ie. bridge
		 * interrupt).
		 */
		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_IN_EN | USBF_EPN_IN_END_EN);
		epn->bridge_on_dma_end = usbf_epn_enable_in_end_int;
		/* Clear any pending IN_END interrupt */
		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_END_INT);
		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
			USBF_EPN_BURST_SET | USBF_EPN_DMAMODE0);
		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
			USBF_EPN_DMA_EN);
		req->dma_size = (npkt - 1) * epn->ep.maxpacket + lastpkt;
		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu\n", epn->id,
			req->dma_size);
		req->xfer_step = USBF_XFER_WAIT_DMA;
		break;
	case USBF_XFER_WAIT_DMA:
		if (!(epn->status & USBF_EPN_IN_END_INT)) {
			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
			break;
		}
		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 1);
		req->is_mapped = 0;
		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
		/* Swap interrupt sources: IN_END off, IN back on */
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_IN_END_EN,
			USBF_EPN_IN_EN);
		req->req.actual += req->dma_size;
		left = req->req.length - req->req.actual;
		if (left) {
			/* DMA moved only 32-bit units; send the 1-3 leftover
			 * bytes by PIO
			 */
			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_INT);
			dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
				left);
			usbf_epn_send_residue(epn,
				req->req.buf + req->req.actual, left);
			req->req.actual += left;
			req->xfer_step = USBF_XFER_WAIT_END;
			break;
		}
		if (req->req.actual % epn->ep.maxpacket) {
			/* last packet was a short packet. Tell the hardware to
			 * send it right now.
			 */
			dev_dbg(epn->udc->dev, "ep%u send short\n", epn->id);
			usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
				~(u32)USBF_EPN_IN_INT);
			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
				USBF_EPN_DEND);
			req->xfer_step = USBF_XFER_WAIT_END;
			break;
		}
		/* Last packet size was a maxpacket size
		 * Send null packet if needed
		 */
		if (req->req.zero) {
			req->xfer_step = USBF_XFER_SEND_NULL;
			break;
		}
		/* No more action to do. Wait for the end of the USB transfer */
		req->xfer_step = USBF_XFER_WAIT_END;
		break;
	case USBF_XFER_SEND_NULL:
		dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
		usbf_epn_send_null(epn);
		req->xfer_step = USBF_XFER_WAIT_END;
		break;
	case USBF_XFER_WAIT_END:
		if (!(epn->status & USBF_EPN_IN_INT)) {
			dev_dbg(epn->udc->dev, "ep%u end not done\n", epn->id);
			break;
		}
		dev_dbg(epn->udc->dev, "ep%u send done %u/%u\n", epn->id,
			req->req.actual, req->req.length);
		req->xfer_step = USBF_XFER_START;
		return 0;
	}

	return -EINPROGRESS;
}
/* Read one 32-bit word from the endpoint FIFO and copy only the @size
 * (1-3) residual bytes into @buf.
 */
static void usbf_epn_recv_residue(struct usbf_ep *epn, void *buf,
	unsigned int size)
{
	u32 last;

	last = usbf_ep_reg_readl(epn, USBF_REG_EPN_READ);
	memcpy(buf, &last, size);
}
/* PIO-drain one received OUT packet (or a null packet) into the request.
 *
 * Mirrors usbf_ep0_pio_out() for regular endpoints. Returns 0 when the
 * request completes (status set), -EINPROGRESS while more data is expected.
 */
static int usbf_epn_pio_out(struct usbf_ep *epn, struct usbf_req *req)
{
	int req_status = 0;
	unsigned int count;
	unsigned int recv;
	unsigned int left;
	unsigned int nb;
	void *buf;

	if (epn->status & USBF_EPN_OUT_INT) {
		/* Number of bytes available in the endpoint FIFO */
		recv = USBF_EPN_GET_LDATA(
			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
		count = recv;
		buf = req->req.buf;
		buf += req->req.actual;
		left = req->req.length - req->req.actual;
		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
			recv, left, epn->ep.maxpacket);
		if (left > epn->ep.maxpacket)
			left = epn->ep.maxpacket;
		if (count > left) {
			/* More data than room left: truncate and flag it */
			req_status = -EOVERFLOW;
			count = left;
		}
		if (count) {
			/* Read 32-bit units first ... */
			nb = count / sizeof(u32);
			if (nb) {
				usbf_ep_reg_read_rep(epn, USBF_REG_EPN_READ,
					buf, nb);
				buf += (nb * sizeof(u32));
				req->req.actual += (nb * sizeof(u32));
				count -= (nb * sizeof(u32));
			}
			/* ... then the 1-3 residual bytes */
			if (count) {
				usbf_epn_recv_residue(epn, buf, count);
				req->req.actual += count;
			}
		}
		dev_dbg(epn->udc->dev, "ep%u recv %u/%u\n", epn->id,
			req->req.actual, req->req.length);
		if (req_status) {
			dev_dbg(epn->udc->dev, "ep%u req.status=%d\n", epn->id,
				req_status);
			req->req.status = req_status;
			return 0;
		}
		if (recv < epn->ep.maxpacket) {
			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
			/* This is a short packet -> It is the end */
			req->req.status = 0;
			return 0;
		}
		/* Request full -> complete */
		if (req->req.actual == req->req.length) {
			req->req.status = 0;
			return 0;
		}
	}

	if (epn->status & USBF_EPN_OUT_NULL_INT) {
		/* NULL packet received */
		dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
		if (req->req.actual != req->req.length) {
			req->req.status = req->req.short_not_ok ?
				-EREMOTEIO : 0;
		} else {
			req->req.status = 0;
		}
		return 0;
	}

	return -EINPROGRESS;
}
/* Enable the OUT_END interrupt; used as bridge_on_dma_end callback so the
 * USBF-level end-of-DMA is handled only after the bridge-level one.
 */
static void usbf_epn_enable_out_end_int(struct usbf_ep *epn)
{
	usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_OUT_END_EN);
}
static void usbf_epn_process_queue(struct usbf_ep *epn);
/* Program and start an OUT DMA transfer of @npkt packets to @addr.
 *
 * @is_short selects the single-packet "short" mode used to land a short
 * packet's 32-bit-aligned part; in that mode the OUT_END interrupt is
 * re-armed from the bridge interrupt via bridge_on_dma_end.
 */
static void usbf_epn_dma_out_send_dma(struct usbf_ep *epn, dma_addr_t addr, u32 npkt, bool is_short)
{
	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2, USBF_SYS_EPN_MPKT(epn->ep.maxpacket));
	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR, addr);

	if (is_short) {
		usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_SET_DMACNT(1) | USBF_SYS_EPN_DIR0);
		usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_REQEN);
		usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
			USBF_EPN_SET_DMACNT(0));
		/* The end of DMA transfer at the USBF level needs to be handled
		 * after the detection of the end of DMA transfer at the bridge
		 * level.
		 * To force this sequence, enabling the OUT_END interrupt will
		 * be done by the detection of the end of transfer at bridge
		 * level (ie. bridge interrupt).
		 */
		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN | USBF_EPN_OUT_END_EN);
		epn->bridge_on_dma_end = usbf_epn_enable_out_end_int;
		/* Clear any pending OUT_END interrupt */
		usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
			~(u32)USBF_EPN_OUT_END_INT);
		usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
			USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0);
		usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
			USBF_EPN_DMA_EN);
		return;
	}

	usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
		USBF_SYS_EPN_SET_DMACNT(npkt) | USBF_SYS_EPN_DIR0);
	usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
		USBF_SYS_EPN_REQEN);
	usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
		USBF_EPN_SET_DMACNT(npkt));
	/* Here, the bridge may or may not generate an interrupt to signal the
	 * end of DMA transfer.
	 * Keep only OUT_END interrupt and let handle the bridge later during
	 * the OUT_END processing.
	 */
	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN,
		USBF_EPN_OUT_END_EN);
	/* Disable bridge interrupt. It will be re-enabled later */
	usbf_reg_bitclr(epn->udc, USBF_REG_AHBBINTEN,
		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
	/* Clear any pending DMA_END interrupt at bridge level */
	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT,
		USBF_SYS_DMA_ENDINT_EPN(epn->id));
	/* Clear any pending OUT_END interrupt */
	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
		~(u32)USBF_EPN_OUT_END_INT);
	usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
		USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0 | USBF_EPN_BURST_SET);
	usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
		USBF_EPN_DMA_EN);
}
/* Finish an OUT DMA transfer started by usbf_epn_dma_out_send_dma().
 *
 * Restores the interrupt mask and, for multi-packet transfers, returns the
 * number of bytes that were NOT transferred (DMACNT * maxpacket) so the
 * caller can adjust req->req.actual. Returns 0 for short-mode transfers.
 */
static size_t usbf_epn_dma_out_complete_dma(struct usbf_ep *epn, bool is_short)
{
	u32 dmacnt;
	u32 tmp;
	int ret;

	/* Restore interrupt mask */
	usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
		USBF_EPN_OUT_END_EN,
		USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);

	if (is_short) {
		/* Nothing more to do when the DMA was for a short packet */
		return 0;
	}

	/* Enable the bridge interrupt */
	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));

	tmp = usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT);
	dmacnt = USBF_EPN_GET_DMACNT(tmp);
	if (dmacnt) {
		/* Some packet were not received (halted by a short or a null
		 * packet.
		 * The bridge never raises an interrupt in this case.
		 * Wait for the end of transfer at bridge level
		 */
		ret = readl_poll_timeout_atomic(
			epn->dma_regs + USBF_REG_DMA_EPN_DCR1,
			tmp, (USBF_SYS_EPN_GET_DMACNT(tmp) == dmacnt),
			0, 10000);
		if (ret) {
			dev_err(epn->udc->dev, "ep%u wait bridge timed out\n",
				epn->id);
		}
		usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1,
			USBF_SYS_EPN_REQEN);
		/* The dmacnt value tells how many packet were not transferred
		 * from the maximum number of packet we set for the DMA transfer.
		 * Compute the left DMA size based on this value.
		 */
		return dmacnt * epn->ep.maxpacket;
	}

	return 0;
}
/* DMA state machine for an OUT request (host -> device).
 *
 * START launches a full-packet DMA (or a short-mode DMA / PIO residue read
 * for short packets); WAIT_DMA_SHORT and WAIT_DMA complete the respective
 * transfers, read residual bytes and chain further DMA as needed;
 * WAIT_BRIDGE waits for the bridge-level end-of-transfer. Falls back to PIO
 * for non 32-bit-aligned buffers. Returns 0 when done, -EINPROGRESS while
 * the transfer continues, or a negative error code.
 */
static int usbf_epn_dma_out(struct usbf_ep *epn, struct usbf_req *req)
{
	unsigned int dma_left;
	unsigned int count;
	unsigned int recv;
	unsigned int left;
	u32 npkt;
	int ret;

	if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
		dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
			epn->id);
		return usbf_epn_pio_out(epn, req);
	}

	switch (req->xfer_step) {
	default:
	case USBF_XFER_START:
		if (epn->status & USBF_EPN_OUT_NULL_INT) {
			dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
			if (req->req.actual != req->req.length) {
				req->req.status = req->req.short_not_ok ?
					-EREMOTEIO : 0;
			} else {
				req->req.status = 0;
			}
			return 0;
		}
		if (!(epn->status & USBF_EPN_OUT_INT)) {
			dev_dbg(epn->udc->dev, "ep%u OUT_INT not set -> spurious\n",
				epn->id);
			break;
		}
		recv = USBF_EPN_GET_LDATA(
			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
		if (!recv) {
			dev_dbg(epn->udc->dev, "ep%u recv = 0 -> spurious\n",
				epn->id);
			break;
		}
		left = req->req.length - req->req.actual;
		dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
			recv, left, epn->ep.maxpacket);
		if (recv > left) {
			dev_err(epn->udc->dev, "ep%u overflow (%u/%u)\n",
				epn->id, recv, left);
			req->req.status = -EOVERFLOW;
			return -EOVERFLOW;
		}
		if (recv < epn->ep.maxpacket) {
			/* Short packet received */
			dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
			if (recv <= 3) {
				/* Too small for DMA: read it by PIO */
				usbf_epn_recv_residue(epn,
					req->req.buf + req->req.actual, recv);
				req->req.actual += recv;
				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
					epn->id, req->req.actual, req->req.length);
				req->xfer_step = USBF_XFER_START;
				return 0;
			}
			ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
			if (ret < 0) {
				dev_err(epn->udc->dev, "map request failed (%d)\n",
					ret);
				return ret;
			}
			req->is_mapped = 1;
			/* DMA the 32-bit-aligned part of the short packet */
			usbf_epn_dma_out_send_dma(epn,
				req->req.dma + req->req.actual,
				1, true);
			req->dma_size = recv & ~0x3;
			dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n", epn->id,
				req->dma_size);
			req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
			break;
		}
		ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
		if (ret < 0) {
			dev_err(epn->udc->dev, "map request failed (%d)\n",
				ret);
			return ret;
		}
		req->is_mapped = 1;
		/* Use the maximum DMA size according to the request buffer.
		 * We will adjust the received size later at the end of the DMA
		 * transfer with the left size computed from
		 * usbf_epn_dma_out_complete_dma().
		 */
		npkt = left / epn->ep.maxpacket;
		usbf_epn_dma_out_send_dma(epn,
			req->req.dma + req->req.actual,
			npkt, false);
		req->dma_size = npkt * epn->ep.maxpacket;
		dev_dbg(epn->udc->dev, "ep%u dma xfer %zu (%u)\n", epn->id,
			req->dma_size, npkt);
		req->xfer_step = USBF_XFER_WAIT_DMA;
		break;
	case USBF_XFER_WAIT_DMA_SHORT:
		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
			dev_dbg(epn->udc->dev, "ep%u dma short not done\n", epn->id);
			break;
		}
		dev_dbg(epn->udc->dev, "ep%u dma short done\n", epn->id);
		usbf_epn_dma_out_complete_dma(epn, true);
		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
		req->is_mapped = 0;
		req->req.actual += req->dma_size;
		/* Read the 1-3 residual bytes left in the FIFO by PIO */
		recv = USBF_EPN_GET_LDATA(
			usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
		count = recv & 0x3;
		if (count) {
			dev_dbg(epn->udc->dev, "ep%u recv residue %u\n", epn->id,
				count);
			usbf_epn_recv_residue(epn,
				req->req.buf + req->req.actual, count);
			req->req.actual += count;
		}
		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
			req->req.actual, req->req.length);
		req->xfer_step = USBF_XFER_START;
		return 0;
	case USBF_XFER_WAIT_DMA:
		if (!(epn->status & USBF_EPN_OUT_END_INT)) {
			dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
			break;
		}
		dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
		dma_left = usbf_epn_dma_out_complete_dma(epn, false);
		if (dma_left) {
			/* Adjust the final DMA size with the amount that was
			 * actually transferred (the DMA was halted early by a
			 * short or null packet)
			 */
			count = req->dma_size - dma_left;
			dev_dbg(epn->udc->dev, "ep%u dma xfer done %u\n", epn->id,
				count);
			req->req.actual += count;
			if (epn->status & USBF_EPN_OUT_NULL_INT) {
				/* DMA was stopped by a null packet reception */
				dev_dbg(epn->udc->dev, "ep%u dma stopped by null pckt\n",
					epn->id);
				usb_gadget_unmap_request(&epn->udc->gadget,
					&req->req, 0);
				req->is_mapped = 0;
				usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
					~(u32)USBF_EPN_OUT_NULL_INT);
				if (req->req.actual != req->req.length) {
					req->req.status = req->req.short_not_ok ?
						-EREMOTEIO : 0;
				} else {
					req->req.status = 0;
				}
				dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
					epn->id, req->req.actual, req->req.length);
				req->xfer_step = USBF_XFER_START;
				return 0;
			}
			recv = USBF_EPN_GET_LDATA(
				usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
			left = req->req.length - req->req.actual;
			if (recv > left) {
				dev_err(epn->udc->dev,
					"ep%u overflow (%u/%u)\n", epn->id,
					recv, left);
				req->req.status = -EOVERFLOW;
				usb_gadget_unmap_request(&epn->udc->gadget,
					&req->req, 0);
				req->is_mapped = 0;
				req->xfer_step = USBF_XFER_START;
				return -EOVERFLOW;
			}
			if (recv > 3) {
				/* Chain a short-mode DMA for the halting
				 * short packet
				 */
				usbf_epn_dma_out_send_dma(epn,
					req->req.dma + req->req.actual,
					1, true);
				req->dma_size = recv & ~0x3;
				dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n",
					epn->id, req->dma_size);
				req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
				break;
			}
			usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
			req->is_mapped = 0;
			/* Read the 1-3 residual bytes by PIO */
			count = recv & 0x3;
			if (count) {
				dev_dbg(epn->udc->dev, "ep%u recv residue %u\n",
					epn->id, count);
				usbf_epn_recv_residue(epn,
					req->req.buf + req->req.actual, count);
				req->req.actual += count;
			}
			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
				req->req.actual, req->req.length);
			req->xfer_step = USBF_XFER_START;
			return 0;
		}
		/* Process queue at bridge interrupt only */
		usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_OUT_END_EN | USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
		epn->status = 0;
		epn->bridge_on_dma_end = usbf_epn_process_queue;
		req->xfer_step = USBF_XFER_WAIT_BRIDGE;
		break;
	case USBF_XFER_WAIT_BRIDGE:
		dev_dbg(epn->udc->dev, "ep%u bridge transfers done\n", epn->id);
		/* Restore interrupt mask */
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_OUT_END_EN,
			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
		usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
		req->is_mapped = 0;
		req->req.actual += req->dma_size;
		req->xfer_step = USBF_XFER_START;
		left = req->req.length - req->req.actual;
		if (!left) {
			/* No more data can be added to the buffer */
			dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
				req->req.actual, req->req.length);
			return 0;
		}
		dev_dbg(epn->udc->dev, "ep%u recv done %u/%u, wait more data\n",
			epn->id, req->req.actual, req->req.length);
		break;
	}

	return -EINPROGRESS;
}
/* Stop an in-flight endpoint DMA: drop REQEN, wait for the current packet
 * to drain, then disable the DMA engine.
 */
static void usbf_epn_dma_stop(struct usbf_ep *epn)
{
	usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1, USBF_SYS_EPN_REQEN);
	/* In the datasheet:
	 *   If EP[m]_REQEN = 0b is set during DMA transfer, AHB-EPC stops DMA
	 *   after 1 packet transfer completed.
	 * Therefore, wait sufficient time for ensuring DMA transfer
	 * completion. The WAIT time depends on the system, especially AHB
	 * bus activity
	 * So arbitrary 10ms would be sufficient.
	 */
	mdelay(10);
	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_DMA_CTRL, USBF_EPN_DMA_EN);
}
/* Abort the DMA transfer bound to @req: stop the engine, unmap the buffer,
 * restore the normal interrupt masks and clear any stale DMA interrupts,
 * then reset the request's transfer state machine.
 */
static void usbf_epn_dma_abort(struct usbf_ep *epn, struct usbf_req *req)
{
	dev_dbg(epn->udc->dev, "ep%u %s dma abort\n", epn->id,
		epn->is_in ? "in" : "out");

	epn->bridge_on_dma_end = NULL;

	usbf_epn_dma_stop(epn);
	usb_gadget_unmap_request(&epn->udc->gadget, &req->req,
		epn->is_in ? 1 : 0);
	req->is_mapped = 0;

	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);

	/* Restore the direction-specific interrupt mask */
	if (epn->is_in) {
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_IN_END_EN,
			USBF_EPN_IN_EN);
	} else {
		usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
			USBF_EPN_OUT_END_EN,
			USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
	}

	/* As dma is stopped, be sure that no DMA interrupt are pending */
	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
		USBF_EPN_IN_END_INT | USBF_EPN_OUT_END_INT);
	usbf_reg_writel(epn->udc, USBF_REG_AHBBINT, USBF_SYS_DMA_ENDINT_EPN(epn->id));

	/* Enable DMA interrupt at the bridge level */
	usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
		USBF_SYS_DMA_ENDINTEN_EPN(epn->id));

	/* Reset transfer step */
	req->xfer_step = USBF_XFER_START;
}
/* Clear the endpoint FIFO (BCLR). For IN endpoints, additionally wait
 * (up to 10ms) until the IN FIFO reports empty with no data pending.
 */
static void usbf_epn_fifo_flush(struct usbf_ep *epn)
{
	u32 ctrl;
	u32 sts;
	int ret;

	dev_dbg(epn->udc->dev, "ep%u %s fifo flush\n", epn->id,
		epn->is_in ? "in" : "out");

	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl | USBF_EPN_BCLR);

	/* DIR0 set means OUT endpoint: nothing to wait for */
	if (ctrl & USBF_EPN_DIR0)
		return;

	ret = readl_poll_timeout_atomic(epn->regs + USBF_REG_EPN_STATUS, sts,
		(sts & (USBF_EPN_IN_DATA | USBF_EPN_IN_EMPTY)) == USBF_EPN_IN_EMPTY,
		0, 10000);
	if (ret)
		dev_err(epn->udc->dev, "ep%u flush fifo timed out\n", epn->id);
}
static void usbf_ep_req_done(struct usbf_ep *ep, struct usbf_req *req,
int status)
{
list_del_init(&req->queue);
if (status) {
req->req.status = status;
} else {
if (req->req.status == -EINPROGRESS)
req->req.status = status;
}
dev_dbg(ep->udc->dev, "ep%u %s req done length %u/%u, status=%d\n", ep->id,
ep->is_in ? "in" : "out",
req->req.actual, req->req.length, req->req.status);
if (req->is_mapped)
usbf_epn_dma_abort(ep, req);
spin_unlock(&ep->udc->lock);
usb_gadget_giveback_request(&ep->ep, &req->req);
spin_lock(&ep->udc->lock);
}
/* Complete every queued request on @ep with @status, then flush the FIFO. */
static void usbf_ep_nuke(struct usbf_ep *ep, int status)
{
	struct usbf_req *pending;

	dev_dbg(ep->udc->dev, "ep%u %s nuke status %d\n", ep->id,
		ep->is_in ? "in" : "out",
		status);

	/* Drain the queue, giving each request back with @status */
	for (;;) {
		pending = list_first_entry_or_null(&ep->queue,
						   struct usbf_req, queue);
		if (!pending)
			break;
		usbf_ep_req_done(ep, pending, status);
	}

	/* Drop any data still held in the hardware FIFO */
	if (ep->id)
		usbf_epn_fifo_flush(ep);
	else
		usbf_ep0_fifo_flush(ep);
}
/* Report whether @ep is currently stalled, based on its control register. */
static bool usbf_ep_is_stalled(struct usbf_ep *ep)
{
	u32 mask;

	if (ep->id == 0)
		return !!(usbf_ep_reg_readl(ep, USBF_REG_EP0_CONTROL) &
			  USBF_EP0_STL);

	/* Regular endpoints have distinct IN/OUT stall bits */
	mask = ep->is_in ? USBF_EPN_ISTL : USBF_EPN_OSTL;
	return !!(usbf_ep_reg_readl(ep, USBF_REG_EPN_CONTROL) & mask);
}
/* Kick the transfer of the first queued request on @epn, if any.
 *
 * IN endpoints start the transfer immediately (DMA or PIO); OUT endpoints
 * only open/close the NAK gate and interrupts depending on whether a request
 * is available. Returns 0 on success or a negative error code.
 */
static int usbf_epn_start_queue(struct usbf_ep *epn)
{
	struct usbf_req *req;
	int ret;

	/* A stalled endpoint is restarted later, on unstall */
	if (usbf_ep_is_stalled(epn))
		return 0;

	req = list_first_entry_or_null(&epn->queue, struct usbf_req, queue);

	if (epn->is_in) {
		if (req && !epn->is_processing) {
			ret = epn->dma_regs ?
				usbf_epn_dma_in(epn, req) :
				usbf_epn_pio_in(epn, req);
			if (ret != -EINPROGRESS) {
				dev_err(epn->udc->dev,
					"queued next request not in progress\n");
				/* The request cannot be completed (ie
				 * ret == 0) on the first call.
				 * stall and nuke the endpoint
				 */
				return ret ? ret : -EIO;
			}
		}
	} else {
		if (req) {
			/* Clear ONAK to accept OUT tokens */
			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL,
				USBF_EPN_ONAK);
			/* Enable interrupts */
			usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA,
				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
		} else {
			/* Disable incoming data and interrupt.
			 * They will be enabled on the next usb_ep_queue call
			 */
			usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
				USBF_EPN_ONAK);
			usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
				USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
		}
	}
	return 0;
}
/* Run the transfer function on queued requests of @ep until one is left
 * in progress or the queue empties.
 *
 * Selects the PIO/DMA in/out handler matching the endpoint, completes
 * finished requests (flushing the FIFO on error), and manages the OUT NAK
 * gate around FIFO reads. Returns 0 when the queue is empty, -EINPROGRESS
 * when a request is still pending, or -ENOENT if called with no request.
 */
static int usbf_ep_process_queue(struct usbf_ep *ep)
{
	int (*usbf_ep_xfer)(struct usbf_ep *ep, struct usbf_req *req);
	struct usbf_req *req;
	int is_processing;
	int ret;

	/* Pick the transfer handler for this endpoint's direction/mode */
	if (ep->is_in) {
		usbf_ep_xfer = usbf_ep0_pio_in;
		if (ep->id) {
			usbf_ep_xfer = ep->dma_regs ?
				usbf_epn_dma_in : usbf_epn_pio_in;
		}
	} else {
		usbf_ep_xfer = usbf_ep0_pio_out;
		if (ep->id) {
			usbf_ep_xfer = ep->dma_regs ?
				usbf_epn_dma_out : usbf_epn_pio_out;
		}
	}

	req = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
	if (!req) {
		dev_err(ep->udc->dev,
			"no request available for ep%u %s process\n", ep->id,
			ep->is_in ? "in" : "out");
		return -ENOENT;
	}

	do {
		/* We're going to read the FIFO for this current request.
		 * NAK any other incoming data to avoid a race condition if no
		 * more request are available.
		 */
		if (!ep->is_in && ep->id != 0) {
			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
				USBF_EPN_ONAK);
		}

		ret = usbf_ep_xfer(ep, req);
		if (ret == -EINPROGRESS) {
			if (!ep->is_in && ep->id != 0) {
				/* The current request needs more data.
				 * Allow incoming data
				 */
				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
					USBF_EPN_ONAK);
			}
			return ret;
		}

		/* Guard against re-entry from the giveback callback */
		is_processing = ep->is_processing;
		ep->is_processing = 1;
		usbf_ep_req_done(ep, req, ret);
		ep->is_processing = is_processing;

		if (ret) {
			/* An error was detected during the request transfer.
			 * Any pending DMA transfers were aborted by the
			 * usbf_ep_req_done() call.
			 * It's time to flush the fifo
			 */
			if (ep->id == 0)
				usbf_ep0_fifo_flush(ep);
			else
				usbf_epn_fifo_flush(ep);
		}

		req = list_first_entry_or_null(&ep->queue, struct usbf_req,
			queue);

		if (ep->is_in)
			continue;

		if (ep->id != 0) {
			if (req) {
				/* Another request is available.
				 * Allow incoming data
				 */
				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
					USBF_EPN_ONAK);
			} else {
				/* No request queued. Disable interrupts.
				 * They will be enabled on usb_ep_queue
				 */
				usbf_ep_reg_bitclr(ep, USBF_REG_EPN_INT_ENA,
					USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
			}
		}
		/* Do not recall usbf_ep_xfer() */
		return req ? -EINPROGRESS : 0;

	} while (req);

	return 0;
}
/* Stall or unstall @ep.
 *
 * Unstalling a regular endpoint also aborts any in-flight DMA, flushes the
 * FIFO, clears the data toggle (PID) and restarts the request queue.
 */
static void usbf_ep_stall(struct usbf_ep *ep, bool stall)
{
	struct usbf_req *first;

	dev_dbg(ep->udc->dev, "ep%u %s %s\n", ep->id,
		ep->is_in ? "in" : "out",
		stall ? "stall" : "unstall");

	if (ep->id == 0) {
		/* ep0 has a single stall bit for both directions */
		if (stall)
			usbf_ep_reg_bitset(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
		else
			usbf_ep_reg_bitclr(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
		return;
	}

	if (stall) {
		if (ep->is_in)
			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
				USBF_EPN_ISTL);
		else
			usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
				USBF_EPN_OSTL | USBF_EPN_OSTL_EN);
	} else {
		first = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
		if (first && first->is_mapped) {
			/* This can appear if the host halts an endpoint using
			 * SET_FEATURE and then un-halts the endpoint
			 */
			usbf_epn_dma_abort(ep, first);
		}
		usbf_epn_fifo_flush(ep);

		/* Clear the stall bit and reset the data toggle */
		if (ep->is_in) {
			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
				USBF_EPN_ISTL,
				USBF_EPN_IPIDCLR);
		} else {
			usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
				USBF_EPN_OSTL,
				USBF_EPN_OSTL_EN | USBF_EPN_OPIDCLR);
		}
		usbf_epn_start_queue(ep);
	}
}
static void usbf_ep0_enable(struct usbf_ep *ep0)
{
usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_INAK_EN | USBF_EP0_BCLR);
usbf_ep_reg_writel(ep0, USBF_REG_EP0_INT_ENA,
USBF_EP0_SETUP_EN | USBF_EP0_STG_START_EN | USBF_EP0_STG_END_EN |
USBF_EP0_OUT_EN | USBF_EP0_OUT_NULL_EN | USBF_EP0_IN_EN);
ep0->udc->ep0state = EP0_IDLE;
ep0->disabled = 0;
/* enable interrupts for the ep0 */
usbf_reg_bitset(ep0->udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(0));
}
/* Program the hardware side of a regular endpoint: FIFO base address and
 * max packet size, interrupt mask, direction and initial stall/NAK state.
 * Always returns 0.
 */
static int usbf_epn_enable(struct usbf_ep *epn)
{
	u32 base_addr;
	u32 ctrl;

	base_addr = usbf_ep_info[epn->id].base_addr;
	usbf_ep_reg_writel(epn, USBF_REG_EPN_PCKT_ADRS,
		USBF_EPN_BASEAD(base_addr) | USBF_EPN_MPKT(epn->ep.maxpacket));

	/* OUT transfer interrupt are enabled during usb_ep_queue */
	if (epn->is_in) {
		/* Will be changed in DMA processing */
		usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_EN);
	}

	/* Clear, set endpoint direction, set IN/OUT STL, and enable
	 * Send NAK for Data out as request are not queued yet
	 */
	ctrl = USBF_EPN_EN | USBF_EPN_BCLR;
	if (epn->is_in)
		ctrl |= USBF_EPN_OSTL | USBF_EPN_OSTL_EN;
	else
		ctrl |= USBF_EPN_DIR0 | USBF_EPN_ISTL | USBF_EPN_OSTL_EN | USBF_EPN_ONAK;
	usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl);

	return 0;
}
/* usb_ep_ops.enable: configure @_ep from the endpoint @desc.
 *
 * Rejects ep0 and invalid descriptors, then applies direction and maxpacket,
 * enables the endpoint hardware and unmasks its (and, when used, its DMA
 * bridge) interrupts. Returns 0 on success or a negative error code.
 */
static int usbf_ep_enable(struct usb_ep *_ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	struct usbf_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	if (ep->id == 0)
		return -EINVAL;

	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
		usb_endpoint_dir_in(desc) ? "in" : "out",
		usb_endpoint_maxp(desc));

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep->is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = usb_endpoint_maxp(desc);

	ret = usbf_epn_enable(ep);
	if (ret)
		goto end;

	ep->disabled = 0;

	/* enable interrupts for this endpoint */
	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));

	/* enable DMA interrupt at bridge level if DMA is used */
	if (ep->dma_regs) {
		ep->bridge_on_dma_end = NULL;
		usbf_reg_bitset(udc, USBF_REG_AHBBINTEN,
			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
	}

	ret = 0;
end:
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/* Disable the hardware side of a regular endpoint and fail all queued
 * requests with -ESHUTDOWN. Always returns 0.
 */
static int usbf_epn_disable(struct usbf_ep *epn)
{
	/* Disable interrupts */
	usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, 0);

	/* Disable endpoint */
	usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_EN);

	/* remove anything that was pending */
	usbf_ep_nuke(epn, -ESHUTDOWN);

	return 0;
}
/* usb_ep_ops.disable: tear down @_ep (never ep0).
 *
 * Masks the endpoint's controller and DMA-bridge interrupts, then disables
 * the endpoint hardware, completing pending requests with -ESHUTDOWN.
 */
static int usbf_ep_disable(struct usb_ep *_ep)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	struct usbf_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	if (ep->id == 0)
		return -EINVAL;

	dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
		ep->is_in ? "in" : "out", ep->ep.maxpacket);

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep->disabled = 1;
	/* Disable DMA interrupt */
	if (ep->dma_regs) {
		usbf_reg_bitclr(udc, USBF_REG_AHBBINTEN,
			USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
		ep->bridge_on_dma_end = NULL;
	}
	/* disable interrupts for this endpoint */
	usbf_reg_bitclr(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
	/* and the endpoint itself */
	ret = usbf_epn_disable(ep);
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	return ret;
}
static int usbf_ep0_queue(struct usbf_ep *ep0, struct usbf_req *req,
gfp_t gfp_flags)
{
int ret;
req->req.actual = 0;
req->req.status = -EINPROGRESS;
req->is_zero_sent = 0;
list_add_tail(&req->queue, &ep0->queue);
if (ep0->udc->ep0state == EP0_IN_STATUS_START_PHASE)
return 0;
if (!ep0->is_in)
return 0;
if (ep0->udc->ep0state == EP0_IN_STATUS_PHASE) {
if (req->req.length) {
dev_err(ep0->udc->dev,
"request lng %u for ep0 in status phase\n",
req->req.length);
return -EINVAL;
}
ep0->delayed_status = 0;
}
if (!ep0->is_processing) {
ret = usbf_ep0_pio_in(ep0, req);
if (ret != -EINPROGRESS) {
dev_err(ep0->udc->dev,
"queued request not in progress\n");
/* The request cannot be completed (ie
* ret == 0) on the first call
*/
return ret ? ret : -EIO;
}
}
return 0;
}
static int usbf_epn_queue(struct usbf_ep *ep, struct usbf_req *req,
gfp_t gfp_flags)
{
int was_empty;
int ret;
if (ep->disabled) {
dev_err(ep->udc->dev, "ep%u request queue while disable\n",
ep->id);
return -ESHUTDOWN;
}
req->req.actual = 0;
req->req.status = -EINPROGRESS;
req->is_zero_sent = 0;
req->xfer_step = USBF_XFER_START;
was_empty = list_empty(&ep->queue);
list_add_tail(&req->queue, &ep->queue);
if (was_empty) {
ret = usbf_epn_start_queue(ep);
if (ret)
return ret;
}
return 0;
}
/* usb_ep_ops.queue: validate the request and dispatch to the ep0 or
 * regular-endpoint queue handler under the udc lock.
 */
static int usbf_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
	gfp_t gfp_flags)
{
	struct usbf_req *req = container_of(_req, struct usbf_req, req);
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	struct usbf_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	if (!_req || !_req->buf)
		return -EINVAL;

	if (!udc || !udc->driver)
		return -EINVAL;

	dev_dbg(ep->udc->dev, "ep%u %s req queue length %u, zero %u, short_not_ok %u\n",
		ep->id, ep->is_in ? "in" : "out",
		req->req.length, req->req.zero, req->req.short_not_ok);

	spin_lock_irqsave(&ep->udc->lock, flags);
	if (ep->id == 0)
		ret = usbf_ep0_queue(ep, req, gfp_flags);
	else
		ret = usbf_epn_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/* usb_ep_ops.dequeue: cancel @_req on @_ep with -ECONNRESET.
 *
 * If the dequeued request was at the head of the queue it may already be in
 * the hardware, so the FIFO is flushed. Dequeuing on ep0 additionally stalls
 * and nukes the endpoint to resynchronize the control transfer; on a regular
 * endpoint the next queued request is started. Always returns 0.
 */
static int usbf_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usbf_req *req = container_of(_req, struct usbf_req, req);
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	unsigned long flags;
	int is_processing;
	int first;
	int ret;

	spin_lock_irqsave(&ep->udc->lock, flags);

	dev_dbg(ep->udc->dev, "ep%u %s req dequeue length %u/%u\n",
		ep->id, ep->is_in ? "in" : "out",
		req->req.actual, req->req.length);

	first = list_is_first(&req->queue, &ep->queue);

	/* Complete the request but avoid any operation that could be done
	 * if a new request is queued during the request completion
	 */
	is_processing = ep->is_processing;
	ep->is_processing = 1;
	usbf_ep_req_done(ep, req, -ECONNRESET);
	ep->is_processing = is_processing;

	if (first) {
		/* The first item in the list was dequeued.
		 * This item could already be submitted to the hardware.
		 * So, flush the fifo
		 */
		if (ep->id)
			usbf_epn_fifo_flush(ep);
		else
			usbf_ep0_fifo_flush(ep);
	}

	if (ep->id == 0) {
		/* We dequeue a request on ep0. On this endpoint, we can have
		 * 1 request related to the data stage and/or 1 request
		 * related to the status stage.
		 * We dequeue one of them and so the USB control transaction
		 * is no more coherent. The simple way to be consistent after
		 * dequeuing is to stall and nuke the endpoint and wait the
		 * next SETUP packet.
		 */
		usbf_ep_stall(ep, true);
		usbf_ep_nuke(ep, -ECONNRESET);
		ep->udc->ep0state = EP0_IDLE;
		goto end;
	}

	if (!first)
		goto end;

	ret = usbf_epn_start_queue(ep);
	if (ret) {
		usbf_ep_stall(ep, true);
		usbf_ep_nuke(ep, -EIO);
	}

end:
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return 0;
}
/* usb_ep_ops.alloc_request handler: allocate a driver-private request
 * wrapper and hand back its embedded struct usb_request.
 * Returns NULL on invalid endpoint or allocation failure.
 */
static struct usb_request *usbf_ep_alloc_request(struct usb_ep *_ep,
						 gfp_t gfp_flags)
{
	struct usbf_req *new_req;

	if (!_ep)
		return NULL;

	new_req = kzalloc(sizeof(*new_req), gfp_flags);
	if (!new_req)
		return NULL;

	/* Start unlinked so a later list_del_init() is always safe */
	INIT_LIST_HEAD(&new_req->queue);

	return &new_req->req;
}
/* usb_ep_ops.free_request handler: release a request previously returned
 * by usbf_ep_alloc_request(). Unlinks it under the UDC lock first in case
 * it is still on an endpoint queue.
 */
static void usbf_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usbf_ep *endpoint;
	struct usbf_req *request;
	unsigned long flags;

	if (!_ep || !_req)
		return;

	endpoint = container_of(_ep, struct usbf_ep, ep);
	request = container_of(_req, struct usbf_req, req);

	spin_lock_irqsave(&endpoint->udc->lock, flags);
	list_del_init(&request->queue);
	spin_unlock_irqrestore(&endpoint->udc->lock, flags);

	kfree(request);
}
/* usb_ep_ops.set_halt handler: stall (halt != 0) or un-stall an endpoint.
 * ep0 halting is driven by the control state machine, not by callers, and
 * halting is refused while transfers are still queued.
 */
static int usbf_ep_set_halt(struct usb_ep *_ep, int halt)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	unsigned long flags;
	int ret = 0;

	if (ep->id == 0)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	if (!list_empty(&ep->queue)) {
		/* Requests pending: the caller must retry later */
		ret = -EAGAIN;
	} else {
		usbf_ep_stall(ep, halt);
		/* Clearing the halt also clears any wedge condition */
		if (!halt)
			ep->is_wedged = 0;
	}

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/* usb_ep_ops.set_wedge handler: stall the endpoint and mark it wedged so
 * that a host CLEAR_FEATURE(HALT) will be ignored until the gadget driver
 * clears the halt itself. Not permitted on ep0 or with requests queued.
 */
static int usbf_ep_set_wedge(struct usb_ep *_ep)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
	unsigned long flags;
	int ret = 0;

	if (ep->id == 0)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else {
		usbf_ep_stall(ep, 1);
		ep->is_wedged = 1;
	}

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
/* Endpoint operations exposed to the gadget core via ep->ep.ops */
static struct usb_ep_ops usbf_ep_ops = {
	.alloc_request = usbf_ep_alloc_request,
	.free_request = usbf_ep_free_request,
	.enable = usbf_ep_enable,
	.disable = usbf_ep_disable,
	.queue = usbf_ep_queue,
	.dequeue = usbf_ep_dequeue,
	.set_halt = usbf_ep_set_halt,
	.set_wedge = usbf_ep_set_wedge,
};
/* Default no-op completion callback for internally queued ep0 requests
 * (used by usbf_ep0_fill_req() when no explicit callback is supplied).
 */
static void usbf_ep0_req_complete(struct usb_ep *_ep, struct usb_request *_req)
{
}
static void usbf_ep0_fill_req(struct usbf_ep *ep0, struct usbf_req *req,
void *buf, unsigned int length,
void (*complete)(struct usb_ep *_ep,
struct usb_request *_req))
{
if (buf && length)
memcpy(ep0->udc->ep0_buf, buf, length);
req->req.buf = ep0->udc->ep0_buf;
req->req.length = length;
req->req.dma = 0;
req->req.zero = true;
req->req.complete = complete ? complete : usbf_ep0_req_complete;
req->req.status = -EINPROGRESS;
req->req.context = NULL;
req->req.actual = 0;
}
/* Look up an endpoint from a wIndex-style endpoint address.
 * Endpoint number 0 always maps to ep[0] regardless of direction; other
 * endpoints match on their configured bEndpointAddress.
 * Returns NULL when no configured endpoint matches.
 */
static struct usbf_ep *usbf_get_ep_by_addr(struct usbf_udc *udc, u8 address)
{
	unsigned int idx;

	if (!(address & USB_ENDPOINT_NUMBER_MASK))
		return &udc->ep[0];

	for (idx = 1; idx < ARRAY_SIZE(udc->ep); idx++) {
		struct usbf_ep *candidate = &udc->ep[idx];

		/* Skip endpoints that are not currently configured */
		if (candidate->ep.desc &&
		    candidate->ep.desc->bEndpointAddress == address)
			return candidate;
	}

	return NULL;
}
/* Forward a control request to the gadget driver's setup() callback.
 * The UDC lock is dropped around the callback, which may queue requests.
 * USB_GADGET_DELAYED_STATUS is absorbed (the status stage is deferred and
 * 0 is returned); other return codes are passed through.
 */
static int usbf_req_delegate(struct usbf_udc *udc,
			     const struct usb_ctrlrequest *ctrlrequest)
{
	int ret;

	spin_unlock(&udc->lock);
	ret = udc->driver->setup(&udc->gadget, ctrlrequest);
	spin_lock(&udc->lock);

	if (ret < 0) {
		dev_dbg(udc->dev, "udc driver setup failed %d\n", ret);
	} else if (ret == USB_GADGET_DELAYED_STATUS) {
		dev_dbg(udc->dev, "delayed status set\n");
		udc->ep[0].delayed_status = 1;
		ret = 0;
	}

	return ret;
}
static int usbf_req_get_status(struct usbf_udc *udc,
const struct usb_ctrlrequest *ctrlrequest)
{
struct usbf_ep *ep;
u16 status_data;
u16 wLength;
u16 wValue;
u16 wIndex;
wValue = le16_to_cpu(ctrlrequest->wValue);
wLength = le16_to_cpu(ctrlrequest->wLength);
wIndex = le16_to_cpu(ctrlrequest->wIndex);
switch (ctrlrequest->bRequestType) {
case USB_DIR_IN | USB_RECIP_DEVICE | USB_TYPE_STANDARD:
if ((wValue != 0) || (wIndex != 0) || (wLength != 2))
goto delegate;
status_data = 0;
if (udc->gadget.is_selfpowered)
status_data |= BIT(USB_DEVICE_SELF_POWERED);
if (udc->is_remote_wakeup)
status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
break;
case USB_DIR_IN | USB_RECIP_ENDPOINT | USB_TYPE_STANDARD:
if ((wValue != 0) || (wLength != 2))
goto delegate;
ep = usbf_get_ep_by_addr(udc, wIndex);
if (!ep)
return -EINVAL;
status_data = 0;
if (usbf_ep_is_stalled(ep))
status_data |= cpu_to_le16(1);
break;
case USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_STANDARD:
if ((wValue != 0) || (wLength != 2))
goto delegate;
status_data = 0;
break;
default:
goto delegate;
}
usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, &status_data,
sizeof(status_data), NULL);
usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
return 0;
delegate:
return usbf_req_delegate(udc, ctrlrequest);
}
/* Handle the standard CLEAR_FEATURE (is_set == false) and SET_FEATURE
 * (is_set == true) control requests for device and endpoint recipients.
 * Anything not matching the USB 2.0 Ch9 layout is delegated to the gadget
 * driver's setup() callback.
 * Returns 0 on success, -EINVAL on invalid target, or the delegate's code.
 */
static int usbf_req_clear_set_feature(struct usbf_udc *udc,
				      const struct usb_ctrlrequest *ctrlrequest,
				      bool is_set)
{
	struct usbf_ep *ep;
	u16 wLength;
	u16 wValue;
	u16 wIndex;

	wValue = le16_to_cpu(ctrlrequest->wValue);
	wLength = le16_to_cpu(ctrlrequest->wLength);
	wIndex = le16_to_cpu(ctrlrequest->wIndex);

	switch (ctrlrequest->bRequestType) {
	case USB_DIR_OUT | USB_RECIP_DEVICE:
		if ((wIndex != 0) || (wLength != 0))
			goto delegate;
		/* wValue was already converted to CPU order above, so compare
		 * against the plain feature selector. The previous
		 * cpu_to_le16() on the constant double-converted the value
		 * and would mis-compare on big-endian machines.
		 */
		if (wValue != USB_DEVICE_REMOTE_WAKEUP)
			goto delegate;
		udc->is_remote_wakeup = is_set;
		break;

	case USB_DIR_OUT | USB_RECIP_ENDPOINT:
		if (wLength != 0)
			goto delegate;
		ep = usbf_get_ep_by_addr(udc, wIndex);
		if (!ep)
			return -EINVAL;
		if ((ep->id == 0) && is_set) {
			/* Endpoint 0 cannot be halted (stalled)
			 * Returning an error code leads to a STALL on this ep0
			 * but keep the automate in a consistent state.
			 */
			return -EINVAL;
		}
		if (ep->is_wedged && !is_set) {
			/* Ignore CLEAR_FEATURE(HALT ENDPOINT) when the
			 * endpoint is wedged
			 */
			break;
		}
		usbf_ep_stall(ep, is_set);
		break;

	default:
		goto delegate;
	}

	return 0;

delegate:
	return usbf_req_delegate(udc, ctrlrequest);
}
/* Completion callback for the SET_ADDRESS status-stage request.
 * On a successful status stage, report the new device state to the gadget
 * core.
 */
static void usbf_ep0_req_set_address_complete(struct usb_ep *_ep,
					      struct usb_request *_req)
{
	struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);

	if (!_req->status)
		usb_gadget_set_state(&ep->udc->gadget, USB_STATE_ADDRESS);

	/* In case of request failure, there is no need to revert the address
	 * value set to the hardware as the hardware will take care of the
	 * value only if the status stage is completed normally.
	 */
}
/* Handle the standard SET_ADDRESS control request.
 * Writes the new address to the controller (which latches it only after a
 * successful status stage) and queues the zero-length status reply.
 * Returns 0 on success, -EINVAL on malformed request, or the delegate's
 * return code for non-standard variants.
 */
static int usbf_req_set_address(struct usbf_udc *udc,
				const struct usb_ctrlrequest *ctrlrequest)
{
	u16 wValue = le16_to_cpu(ctrlrequest->wValue);
	u16 wLength = le16_to_cpu(ctrlrequest->wLength);
	u16 wIndex = le16_to_cpu(ctrlrequest->wIndex);
	u32 dev_addr;

	if (ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
		goto delegate;

	/* USB addresses are 7 bits; wIndex/wLength must be zero */
	if (wIndex || wLength || (wValue > 127))
		return -EINVAL;

	dev_addr = wValue;

	/* The hardware will take care of this USB address after the status
	 * stage of the SET_ADDRESS request is completed normally.
	 * It is safe to write it now
	 */
	usbf_reg_writel(udc, USBF_REG_USB_ADDRESS,
			USBF_USB_SET_USB_ADDR(dev_addr));

	/* Queue the zero-length status-stage reply */
	usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, NULL, 0,
			  usbf_ep0_req_set_address_complete);
	usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);

	return 0;

delegate:
	return usbf_req_delegate(udc, ctrlrequest);
}
/* Handle the standard SET_CONFIGURATION control request.
 * The request is always delegated to the gadget driver first so it can
 * (de)activate its functions; only if the driver accepts it does this
 * function mirror the configured/unconfigured state into the controller.
 * Called with udc->lock held; the lock is dropped around core callbacks.
 */
static int usbf_req_set_configuration(struct usbf_udc *udc,
				      const struct usb_ctrlrequest *ctrlrequest)
{
	u16 wLength;
	u16 wValue;
	u16 wIndex;
	int ret;

	/* Let the gadget driver process the request before touching hardware */
	ret = usbf_req_delegate(udc, ctrlrequest);
	if (ret)
		return ret;

	wValue = le16_to_cpu(ctrlrequest->wValue);
	wLength = le16_to_cpu(ctrlrequest->wLength);
	wIndex = le16_to_cpu(ctrlrequest->wIndex);

	if ((ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) ||
	    (wIndex != 0) || (wLength != 0)) {
		/* No error detected by driver->setup() but it is not an USB2.0
		 * Ch9 SET_CONFIGURATION.
		 * Nothing more to do
		 */
		return 0;
	}

	if (wValue & 0x00FF) {
		/* Non-zero configuration selected: mark device configured */
		usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
	} else {
		usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
		/* Go back to Address State */
		spin_unlock(&udc->lock);
		usb_gadget_set_state(&udc->gadget, USB_STATE_ADDRESS);
		spin_lock(&udc->lock);
	}

	return 0;
}
/* Handle a SETUP packet received on ep0.
 * Reads the control request latched by the controller, programs the ep0
 * state machine for the expected data/status stages, services the standard
 * requests handled in the driver, and delegates everything else to the
 * gadget driver. Called with udc->lock held.
 */
static int usbf_handle_ep0_setup(struct usbf_ep *ep0)
{
	union {
		struct usb_ctrlrequest ctrlreq;
		u32 raw[2];
	} crq;
	struct usbf_udc *udc = ep0->udc;
	int ret;

	/* Read setup data (ie the USB control request) */
	crq.raw[0] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA0);
	crq.raw[1] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA1);

	dev_dbg(ep0->udc->dev,
		"ep0 req%02x.%02x, wValue 0x%04x, wIndex 0x%04x, wLength 0x%04x\n",
		crq.ctrlreq.bRequestType, crq.ctrlreq.bRequest,
		crq.ctrlreq.wValue, crq.ctrlreq.wIndex, crq.ctrlreq.wLength);

	/* Set current EP0 state according to the received request */
	if (crq.ctrlreq.wLength) {
		if (crq.ctrlreq.bRequestType & USB_DIR_IN) {
			udc->ep0state = EP0_IN_DATA_PHASE;
			/* Enable IN NAK interrupt so data can be fed */
			usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
					   USBF_EP0_INAK,
					   USBF_EP0_INAK_EN);
			ep0->is_in = 1;
		} else {
			udc->ep0state = EP0_OUT_DATA_PHASE;
			/* Stop NAKing OUT so the host can send the data */
			usbf_ep_reg_bitclr(ep0, USBF_REG_EP0_CONTROL,
					   USBF_EP0_ONAK);
			ep0->is_in = 0;
		}
	} else {
		/* No data stage: go straight to the IN status stage */
		udc->ep0state = EP0_IN_STATUS_START_PHASE;
		ep0->is_in = 1;
	}

	/* We starts a new control transfer -> Clear the delayed status flag */
	ep0->delayed_status = 0;

	if ((crq.ctrlreq.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
		/* This is not a USB standard request -> delegate */
		goto delegate;
	}

	switch (crq.ctrlreq.bRequest) {
	case USB_REQ_GET_STATUS:
		ret = usbf_req_get_status(udc, &crq.ctrlreq);
		break;
	case USB_REQ_CLEAR_FEATURE:
		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, false);
		break;
	case USB_REQ_SET_FEATURE:
		ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, true);
		break;
	case USB_REQ_SET_ADDRESS:
		ret = usbf_req_set_address(udc, &crq.ctrlreq);
		break;
	case USB_REQ_SET_CONFIGURATION:
		ret = usbf_req_set_configuration(udc, &crq.ctrlreq);
		break;
	default:
		goto delegate;
	}

	return ret;

delegate:
	return usbf_req_delegate(udc, &crq.ctrlreq);
}
/* Advance a data or status phase on ep0 by processing its request queue.
 * ep0state_name is only used for diagnostics; on full completion the ep0
 * state machine moves to next_ep0state.
 * Returns 0 when the phase is done or still in progress, an error otherwise.
 */
static int usbf_handle_ep0_data_status(struct usbf_ep *ep0,
				       const char *ep0state_name,
				       enum usbf_ep0state next_ep0state)
{
	struct usbf_udc *udc = ep0->udc;
	int ret = usbf_ep_process_queue(ep0);

	if (ret == 0) {
		/* All requests in the queue were processed: advance state */
		udc->ep0state = next_ep0state;
	} else if (ret == -EINPROGRESS) {
		/* More data needs to be processed; stay in this state */
		ret = 0;
	} else if (ret == -ENOENT) {
		dev_err(udc->dev,
			"no request available for ep0 %s phase\n",
			ep0state_name);
	} else {
		dev_err(udc->dev,
			"process queue failed for ep0 %s phase (%d)\n",
			ep0state_name, ret);
	}

	return ret;
}
/* Begin the OUT status stage of a control transfer on ep0.
 * Un-NAKs OUT, resets the data toggle and makes sure a (zero-length)
 * request is queued to absorb the status packet. Always returns 0.
 */
static int usbf_handle_ep0_out_status_start(struct usbf_ep *ep0)
{
	struct usbf_udc *udc = ep0->udc;
	struct usbf_req *pending;

	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
			   USBF_EP0_ONAK,
			   USBF_EP0_PIDCLR);
	ep0->is_in = 0;

	pending = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
	if (!pending) {
		/* Nothing queued by the gadget driver: use the canned
		 * zero-length reply for the status stage.
		 */
		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL, 0, NULL);
		usbf_ep0_queue(ep0, &udc->setup_reply, GFP_ATOMIC);
	} else if (pending->req.length) {
		/* A status-stage request should carry no data */
		dev_err(udc->dev,
			"queued request length %u for ep0 out status phase\n",
			pending->req.length);
	}

	udc->ep0state = EP0_OUT_STATUS_PHASE;
	return 0;
}
/* Begin the IN status stage of a control transfer on ep0.
 * Enables IN transfers, resets the data toggle, queues a zero-length
 * status request if the gadget driver did not provide one (unless the
 * status stage was explicitly delayed), and starts sending it.
 * Always returns 0.
 */
static int usbf_handle_ep0_in_status_start(struct usbf_ep *ep0)
{
	struct usbf_udc *udc = ep0->udc;
	struct usbf_req *req;
	int ret;

	usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
			   USBF_EP0_INAK,
			   USBF_EP0_INAK_EN | USBF_EP0_PIDCLR);
	ep0->is_in = 1;

	/* Queue request for status if needed */
	req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
	if (!req) {
		if (ep0->delayed_status) {
			/* The gadget driver will queue the status request
			 * later; just move to the status phase and wait.
			 */
			dev_dbg(ep0->udc->dev,
				"EP0_IN_STATUS_START_PHASE ep0->delayed_status set\n");
			udc->ep0state = EP0_IN_STATUS_PHASE;
			return 0;
		}

		usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL,
				  0, NULL);
		usbf_ep0_queue(ep0, &udc->setup_reply,
			       GFP_ATOMIC);

		/* Re-fetch the head: it is now the request just queued */
		req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
	} else {
		if (req->req.length) {
			/* A status-stage request should carry no data */
			dev_err(udc->dev,
				"queued request length %u for ep0 in status phase\n",
				req->req.length);
		}
	}

	/* Push the (zero-length) status packet to the FIFO right away */
	ret = usbf_ep0_pio_in(ep0, req);
	if (ret != -EINPROGRESS) {
		usbf_ep_req_done(ep0, req, ret);
		udc->ep0state = EP0_IN_STATUS_END_PHASE;
		return 0;
	}

	udc->ep0state = EP0_IN_STATUS_PHASE;
	return 0;
}
/* ep0 interrupt service routine.
 * Reads and acknowledges the latched ep0 status bits, then iterates the
 * ep0 state machine: each state consumes the interrupt bit(s) it waits
 * for and may advance the state, so the loop runs until neither the state
 * nor the pending bits change. Any handler error stalls and nukes ep0 and
 * resets the state machine to EP0_IDLE (the stall auto-clears on the next
 * SETUP packet). Called with udc->lock held.
 */
static void usbf_ep0_interrupt(struct usbf_ep *ep0)
{
	struct usbf_udc *udc = ep0->udc;
	u32 sts, prev_sts;
	int prev_ep0state;
	int ret;

	/* Read then acknowledge (write-zero-to-clear) the status bits */
	ep0->status = usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS);
	usbf_ep_reg_writel(ep0, USBF_REG_EP0_STATUS, ~ep0->status);

	dev_dbg(ep0->udc->dev, "ep0 status=0x%08x, enable=%08x\n, ctrl=0x%08x\n",
		ep0->status,
		usbf_ep_reg_readl(ep0, USBF_REG_EP0_INT_ENA),
		usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL));

	/* Keep only the interrupt sources the state machine cares about */
	sts = ep0->status & (USBF_EP0_SETUP_INT | USBF_EP0_IN_INT | USBF_EP0_OUT_INT |
			     USBF_EP0_OUT_NULL_INT | USBF_EP0_STG_START_INT |
			     USBF_EP0_STG_END_INT);

	ret = 0;
	do {
		dev_dbg(ep0->udc->dev, "udc->ep0state=%d\n", udc->ep0state);

		/* Snapshot state/bits to detect a fixed point below */
		prev_sts = sts;
		prev_ep0state = udc->ep0state;
		switch (udc->ep0state) {
		case EP0_IDLE:
			if (!(sts & USBF_EP0_SETUP_INT))
				break;
			sts &= ~USBF_EP0_SETUP_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle setup\n");
			ret = usbf_handle_ep0_setup(ep0);
			break;
		case EP0_IN_DATA_PHASE:
			if (!(sts & USBF_EP0_IN_INT))
				break;
			sts &= ~USBF_EP0_IN_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle in data phase\n");
			ret = usbf_handle_ep0_data_status(ep0,
				"in data", EP0_OUT_STATUS_START_PHASE);
			break;
		case EP0_OUT_STATUS_START_PHASE:
			if (!(sts & USBF_EP0_STG_START_INT))
				break;
			sts &= ~USBF_EP0_STG_START_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle out status start phase\n");
			ret = usbf_handle_ep0_out_status_start(ep0);
			break;
		case EP0_OUT_STATUS_PHASE:
			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
				break;
			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
			dev_dbg(ep0->udc->dev, "ep0 handle out status phase\n");
			ret = usbf_handle_ep0_data_status(ep0,
				"out status",
				EP0_OUT_STATUS_END_PHASE);
			break;
		case EP0_OUT_STATUS_END_PHASE:
			/* A SETUP_INT here means the next transfer already
			 * started; the state machine returns to IDLE and the
			 * bit is consumed there on the next loop iteration.
			 */
			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
				break;
			sts &= ~USBF_EP0_STG_END_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle out status end phase\n");
			udc->ep0state = EP0_IDLE;
			break;
		case EP0_OUT_DATA_PHASE:
			if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
				break;
			sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
			dev_dbg(ep0->udc->dev, "ep0 handle out data phase\n");
			ret = usbf_handle_ep0_data_status(ep0,
				"out data", EP0_IN_STATUS_START_PHASE);
			break;
		case EP0_IN_STATUS_START_PHASE:
			if (!(sts & USBF_EP0_STG_START_INT))
				break;
			sts &= ~USBF_EP0_STG_START_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle in status start phase\n");
			ret = usbf_handle_ep0_in_status_start(ep0);
			break;
		case EP0_IN_STATUS_PHASE:
			if (!(sts & USBF_EP0_IN_INT))
				break;
			sts &= ~USBF_EP0_IN_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle in status phase\n");
			ret = usbf_handle_ep0_data_status(ep0,
				"in status", EP0_IN_STATUS_END_PHASE);
			break;
		case EP0_IN_STATUS_END_PHASE:
			if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
				break;
			sts &= ~USBF_EP0_STG_END_INT;
			dev_dbg(ep0->udc->dev, "ep0 handle in status end\n");
			udc->ep0state = EP0_IDLE;
			break;
		default:
			udc->ep0state = EP0_IDLE;
			break;
		}

		if (ret) {
			dev_dbg(ep0->udc->dev, "ep0 failed (%d)\n", ret);
			/* Failure -> stall.
			 * This stall state will be automatically cleared when
			 * the IP receives the next SETUP packet
			 */
			usbf_ep_stall(ep0, true);
			/* Remove anything that was pending */
			usbf_ep_nuke(ep0, -EPROTO);
			udc->ep0state = EP0_IDLE;
			break;
		}
	} while ((prev_ep0state != udc->ep0state) || (prev_sts != sts));

	dev_dbg(ep0->udc->dev, "ep0 done udc->ep0state=%d, status=0x%08x. next=0x%08x\n",
		udc->ep0state, sts,
		usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS));
}
/* Drive the request queue of a non-zero endpoint from interrupt context.
 * On any unrecoverable error (including an empty queue) the endpoint is
 * stalled and all its queued requests are failed.
 */
static void usbf_epn_process_queue(struct usbf_ep *epn)
{
	int ret = usbf_ep_process_queue(epn);

	/* Still transferring or fully drained: nothing more to do */
	if (ret == -EINPROGRESS || ret == 0)
		return;

	if (ret == -ENOENT)
		dev_warn(epn->udc->dev, "ep%u %s, no request available\n",
			 epn->id, epn->is_in ? "in" : "out");
	else
		dev_err(epn->udc->dev, "ep%u %s, process queue failed (%d)\n",
			epn->id, epn->is_in ? "in" : "out", ret);

	dev_dbg(epn->udc->dev, "ep%u %s failed (%d)\n", epn->id,
		epn->is_in ? "in" : "out", ret);
	/* Unrecoverable: stall the endpoint and fail everything queued */
	usbf_ep_stall(epn, true);
	usbf_ep_nuke(epn, ret);
}
/* Interrupt service routine for a non-zero endpoint.
 * Reads the endpoint status, acknowledges only the enabled bits, and runs
 * the request queue for whichever (IN or OUT) interrupt sources fired.
 * Called with udc->lock held.
 */
static void usbf_epn_interrupt(struct usbf_ep *epn)
{
	u32 sts;
	u32 ena;

	/* Read status, then acknowledge only the currently enabled bits */
	epn->status = usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS);
	ena = usbf_ep_reg_readl(epn, USBF_REG_EPN_INT_ENA);
	usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(epn->status & ena));

	dev_dbg(epn->udc->dev, "ep%u %s status=0x%08x, enable=%08x\n, ctrl=0x%08x\n",
		epn->id, epn->is_in ? "in" : "out", epn->status, ena,
		usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL));

	if (epn->disabled) {
		/* Status already acked above; just report and bail out */
		dev_warn(epn->udc->dev, "ep%u %s, interrupt while disabled\n",
			 epn->id, epn->is_in ? "in" : "out");
		return;
	}

	sts = epn->status & ena;

	if (sts & (USBF_EPN_IN_END_INT | USBF_EPN_IN_INT)) {
		sts &= ~(USBF_EPN_IN_END_INT | USBF_EPN_IN_INT);
		dev_dbg(epn->udc->dev, "ep%u %s process queue (in interrupts)\n",
			epn->id, epn->is_in ? "in" : "out");
		usbf_epn_process_queue(epn);
	}

	if (sts & (USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT)) {
		sts &= ~(USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
		dev_dbg(epn->udc->dev, "ep%u %s process queue (out interrupts)\n",
			epn->id, epn->is_in ? "in" : "out");
		usbf_epn_process_queue(epn);
	}

	dev_dbg(epn->udc->dev, "ep%u %s done status=0x%08x. next=0x%08x\n",
		epn->id, epn->is_in ? "in" : "out",
		sts, usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS));
}
/* Reset an endpoint's software state: forget any latched interrupt status
 * and fail every queued request with -ESHUTDOWN.
 */
static void usbf_ep_reset(struct usbf_ep *ep)
{
	ep->status = 0;
	/* Remove anything that was pending */
	usbf_ep_nuke(ep, -ESHUTDOWN);
}
/* Handle a USB bus reset.
 * Resets all enabled endpoints, resamples the negotiated bus speed,
 * disables remote wakeup, re-enables ep0, and notifies the gadget core.
 * Called with udc->lock held; the lock is dropped around the core
 * usb_gadget_udc_reset() callback.
 */
static void usbf_reset(struct usbf_udc *udc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
		if (udc->ep[i].disabled)
			continue;
		usbf_ep_reset(&udc->ep[i]);
	}

	/* Resample the speed negotiated during reset signalling */
	if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
		udc->gadget.speed = USB_SPEED_HIGH;
	else
		udc->gadget.speed = USB_SPEED_FULL;

	/* Remote wakeup feature must be disabled on USB bus reset */
	udc->is_remote_wakeup = false;

	/* Enable endpoint zero */
	usbf_ep0_enable(&udc->ep[0]);

	if (udc->driver) {
		/* Signal the reset */
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, udc->driver);
		spin_lock(&udc->lock);
	}
}
/* Report a detected USB bus suspend to the gadget driver (idempotent).
 * Called with udc->lock held; the lock is dropped around the callback.
 */
static void usbf_driver_suspend(struct usbf_udc *udc)
{
	if (udc->is_usb_suspended) {
		dev_dbg(udc->dev, "already suspended\n");
		return;
	}

	dev_dbg(udc->dev, "do usb suspend\n");
	udc->is_usb_suspended = true;

	if (!udc->driver || !udc->driver->suspend)
		return;

	spin_unlock(&udc->lock);
	udc->driver->suspend(&udc->gadget);
	spin_lock(&udc->lock);

	/* The datasheet tells to set the USB_CONTROL register SUSPEND
	 * bit when the USB bus suspend is detected.
	 * This bit stops the clocks (clocks for EPC, SIE, USBPHY) but
	 * these clocks seems not used only by the USB device. Some
	 * UARTs can be lost ...
	 * So, do not set the USB_CONTROL register SUSPEND bit.
	 */
}
/* Report a USB bus resume to the gadget driver (no-op if not suspended).
 * Called with udc->lock held; the lock is dropped around the callback.
 */
static void usbf_driver_resume(struct usbf_udc *udc)
{
	if (!udc->is_usb_suspended)
		return;

	dev_dbg(udc->dev, "do usb resume\n");
	udc->is_usb_suspended = false;

	if (!udc->driver || !udc->driver->resume)
		return;

	spin_unlock(&udc->lock);
	udc->driver->resume(&udc->gadget);
	spin_lock(&udc->lock);
}
/* Main (EPC) interrupt handler: bus events and per-endpoint interrupts.
 * Resume is reported before reset/endpoint handling so the gadget driver
 * sees a coherent suspend/resume sequence when traffic restarts.
 */
static irqreturn_t usbf_epc_irq(int irq, void *_udc)
{
	struct usbf_udc *udc = (struct usbf_udc *)_udc;
	unsigned long flags;
	struct usbf_ep *ep;
	u32 int_sts;
	u32 int_en;
	int i;

	spin_lock_irqsave(&udc->lock, flags);

	/* Read and acknowledge only the enabled interrupt sources */
	int_en = usbf_reg_readl(udc, USBF_REG_USB_INT_ENA);
	int_sts = usbf_reg_readl(udc, USBF_REG_USB_INT_STA) & int_en;
	usbf_reg_writel(udc, USBF_REG_USB_INT_STA, ~int_sts);

	dev_dbg(udc->dev, "int_sts=0x%08x\n", int_sts);

	if (int_sts & USBF_USB_RSUM_INT) {
		dev_dbg(udc->dev, "handle resume\n");
		usbf_driver_resume(udc);
	}

	if (int_sts & USBF_USB_USB_RST_INT) {
		dev_dbg(udc->dev, "handle bus reset\n");
		/* A reset implies the bus is active again */
		usbf_driver_resume(udc);
		usbf_reset(udc);
	}

	if (int_sts & USBF_USB_SPEED_MODE_INT) {
		if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
			udc->gadget.speed = USB_SPEED_HIGH;
		else
			udc->gadget.speed = USB_SPEED_FULL;
		dev_dbg(udc->dev, "handle speed change (%s)\n",
			udc->gadget.speed == USB_SPEED_HIGH ? "High" : "Full");
	}

	if (int_sts & USBF_USB_EPN_INT(0)) {
		usbf_driver_resume(udc);
		usbf_ep0_interrupt(&udc->ep[0]);
	}

	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
		ep = &udc->ep[i];
		if (int_sts & USBF_USB_EPN_INT(i)) {
			usbf_driver_resume(udc);
			usbf_epn_interrupt(ep);
		}
	}

	/* Handle suspend last so endpoint activity above was not reported
	 * while the driver believed the bus was suspended.
	 */
	if (int_sts & USBF_USB_SPND_INT) {
		dev_dbg(udc->dev, "handle suspend\n");
		usbf_driver_suspend(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return IRQ_HANDLED;
}
/* AHB-bridge interrupt handler: VBUS level changes and per-endpoint DMA
 * completion notifications. DMA completion invokes a one-shot callback
 * stashed in the endpoint (bridge_on_dma_end), cleared before invocation.
 */
static irqreturn_t usbf_ahb_epc_irq(int irq, void *_udc)
{
	struct usbf_udc *udc = (struct usbf_udc *)_udc;
	unsigned long flags;
	struct usbf_ep *epn;
	u32 sysbint;
	void (*ep_action)(struct usbf_ep *epn);
	int i;

	spin_lock_irqsave(&udc->lock, flags);

	/* Read and ack interrupts */
	sysbint = usbf_reg_readl(udc, USBF_REG_AHBBINT);
	usbf_reg_writel(udc, USBF_REG_AHBBINT, sysbint);

	if ((sysbint & USBF_SYS_VBUS_INT) == USBF_SYS_VBUS_INT) {
		/* The interrupt only signals a change: read the current
		 * VBUS level to know the direction.
		 */
		if (usbf_reg_readl(udc, USBF_REG_EPCTR) & USBF_SYS_VBUS_LEVEL) {
			dev_dbg(udc->dev, "handle vbus (1)\n");
			/* Core callbacks may sleep/re-enter: call unlocked */
			spin_unlock(&udc->lock);
			usb_udc_vbus_handler(&udc->gadget, true);
			usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
			spin_lock(&udc->lock);
		} else {
			dev_dbg(udc->dev, "handle vbus (0)\n");
			udc->is_usb_suspended = false;
			spin_unlock(&udc->lock);
			usb_udc_vbus_handler(&udc->gadget, false);
			usb_gadget_set_state(&udc->gadget,
					     USB_STATE_NOTATTACHED);
			spin_lock(&udc->lock);
		}
	}

	for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
		if (sysbint & USBF_SYS_DMA_ENDINT_EPN(i)) {
			epn = &udc->ep[i];
			dev_dbg(epn->udc->dev,
				"ep%u handle DMA complete. action=%ps\n",
				epn->id, epn->bridge_on_dma_end);
			/* One-shot: clear the hook before running it */
			ep_action = epn->bridge_on_dma_end;
			if (ep_action) {
				epn->bridge_on_dma_end = NULL;
				ep_action(epn);
			}
		}
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return IRQ_HANDLED;
}
/* usb_gadget_ops.udc_start handler: bind the gadget driver and arm the
 * VBUS interrupt so connect events reach it. Always returns 0.
 */
static int usbf_udc_start(struct usb_gadget *gadget,
			  struct usb_gadget_driver *driver)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;

	dev_info(udc->dev, "start (driver '%s')\n", driver->driver.name);

	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = driver;
	/* Enable VBUS interrupt */
	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, USBF_SYS_VBUS_INTEN);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_gadget_ops.udc_stop handler: mask the VBUS interrupt and unbind the
 * gadget driver. Always returns 0.
 */
static int usbf_udc_stop(struct usb_gadget *gadget)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	/* Disable VBUS interrupt */
	usbf_reg_writel(udc, USBF_REG_AHBBINTEN, 0);
	udc->driver = NULL;
	spin_unlock_irqrestore(&udc->lock, flags);

	dev_info(udc->dev, "stopped\n");

	return 0;
}
static int usbf_get_frame(struct usb_gadget *gadget)
{
struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
return USBF_USB_GET_FRAME(usbf_reg_readl(udc, USBF_REG_USB_ADDRESS));
}
/* Connect the device to the USB bus: enable the D+ pull-up and unmask the
 * bus-level interrupts. Called with udc->lock held.
 */
static void usbf_attach(struct usbf_udc *udc)
{
	/* Enable USB signal to Function PHY
	 * D+ signal Pull-up
	 * Disable endpoint 0, it will be automatically enable when a USB reset
	 * is received.
	 * Disable the other endpoints
	 */
	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
			USBF_USB_CONNECTB | USBF_USB_DEFAULT | USBF_USB_CONF,
			USBF_USB_PUE2);

	/* Enable reset and mode change interrupts */
	usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA,
			USBF_USB_USB_RST_EN | USBF_USB_SPEED_MODE_EN | USBF_USB_RSUM_EN | USBF_USB_SPND_EN);
}
/* Disconnect the device from the USB bus: mask all bus interrupts, reset
 * every enabled endpoint and drop the D+ pull-up. Called with udc->lock
 * held.
 */
static void usbf_detach(struct usbf_udc *udc)
{
	int i;

	/* Disable interrupts */
	usbf_reg_writel(udc, USBF_REG_USB_INT_ENA, 0);

	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
		if (udc->ep[i].disabled)
			continue;
		usbf_ep_reset(&udc->ep[i]);
	}

	/* Disable USB signal to Function PHY
	 * Do not Pull-up D+ signal
	 * Disable endpoint 0
	 * Disable the other endpoints
	 */
	usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
			USBF_USB_PUE2 | USBF_USB_DEFAULT | USBF_USB_CONF,
			USBF_USB_CONNECTB);
}
/* usb_gadget_ops.pullup handler: software-controlled connect/disconnect
 * of the D+ pull-up. Always returns 0.
 */
static int usbf_pullup(struct usb_gadget *gadget, int is_on)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;

	dev_dbg(udc->dev, "pullup %d\n", is_on);

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on)
		usbf_attach(udc);
	else
		usbf_detach(udc);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_gadget_ops.set_selfpowered handler: record the flag on the gadget;
 * it is reported back to the host in GET_STATUS(device).
 */
static int usbf_udc_set_selfpowered(struct usb_gadget *gadget,
				    int is_selfpowered)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	gadget->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_gadget_ops.wakeup handler: drive resume signalling on the bus.
 * Refused with -EINVAL unless the host enabled the remote-wakeup feature.
 */
static int usbf_udc_wakeup(struct usb_gadget *gadget)
{
	struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&udc->lock, flags);

	if (udc->is_remote_wakeup) {
		dev_dbg(udc->dev, "do wakeup\n");
		/* Pulse the resume-signalling control bit */
		usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
		usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
	} else {
		dev_dbg(udc->dev, "remote wakeup not allowed\n");
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return ret;
}
/* Gadget-level operations exposed to the UDC core */
static struct usb_gadget_ops usbf_gadget_ops = {
	.udc_start = usbf_udc_start,
	.udc_stop = usbf_udc_stop,
	.get_frame = usbf_get_frame,
	.pullup = usbf_pullup,
	.set_selfpowered = usbf_udc_set_selfpowered,
	.wakeup = usbf_udc_wakeup,
};
/* Sanity-check a non-zero endpoint against the hardware configuration.
 * The transfer type and single/double buffering are fixed in the IP's
 * EPN_CONTROL register; this verifies that the driver's static endpoint
 * table (usbf_ep_info) matches what the silicon actually provides.
 * Returns 0 on match, -EINVAL on any mismatch.
 */
static int usbf_epn_check(struct usbf_ep *epn)
{
	const char *type_txt;
	const char *buf_txt;
	int ret = 0;
	u32 ctrl;

	ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);

	/* Hardware transfer type must match the advertised ep capabilities */
	switch (ctrl & USBF_EPN_MODE_MASK) {
	case USBF_EPN_MODE_BULK:
		type_txt = "bulk";
		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
		    !epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
			dev_err(epn->udc->dev,
				"ep%u caps mismatch, bulk expected\n", epn->id);
			ret = -EINVAL;
		}
		break;
	case USBF_EPN_MODE_INTR:
		type_txt = "intr";
		if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
		    epn->ep.caps.type_bulk || !epn->ep.caps.type_int) {
			dev_err(epn->udc->dev,
				"ep%u caps mismatch, int expected\n", epn->id);
			ret = -EINVAL;
		}
		break;
	case USBF_EPN_MODE_ISO:
		type_txt = "iso";
		if (epn->ep.caps.type_control || !epn->ep.caps.type_iso ||
		    epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
			dev_err(epn->udc->dev,
				"ep%u caps mismatch, iso expected\n", epn->id);
			ret = -EINVAL;
		}
		break;
	default:
		type_txt = "unknown";
		dev_err(epn->udc->dev, "ep%u unknown type\n", epn->id);
		ret = -EINVAL;
		break;
	}

	/* Buffering scheme must also match the static endpoint table */
	if (ctrl & USBF_EPN_BUF_TYPE_DOUBLE) {
		buf_txt = "double";
		if (!usbf_ep_info[epn->id].is_double) {
			dev_err(epn->udc->dev,
				"ep%u buffer mismatch, double expected\n",
				epn->id);
			ret = -EINVAL;
		}
	} else {
		buf_txt = "single";
		if (usbf_ep_info[epn->id].is_double) {
			dev_err(epn->udc->dev,
				"ep%u buffer mismatch, single expected\n",
				epn->id);
			ret = -EINVAL;
		}
	}

	dev_dbg(epn->udc->dev, "ep%u (%s) %s, %s buffer %u, checked %s\n",
		epn->id, epn->ep.name, type_txt, buf_txt,
		epn->ep.maxpacket_limit, ret ? "failed" : "ok");

	return ret;
}
/* Platform-driver probe: map registers, bring the controller out of
 * reset, populate the endpoint array from the hardware configuration
 * registers, request both interrupt lines and register the UDC with the
 * gadget core. All resources are devm-managed.
 */
static int usbf_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct usbf_udc *udc;
	struct usbf_ep *ep;
	unsigned int i;
	int irq;
	int ret;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;
	platform_set_drvdata(pdev, udc);

	udc->dev = dev;
	spin_lock_init(&udc->lock);

	udc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->regs))
		return PTR_ERR(udc->regs);

	devm_pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	dev_info(dev, "USBF version: %08x\n",
		 usbf_reg_readl(udc, USBF_REG_USBSSVER));

	/* Resetting the PLL is handled via the clock driver as it has common
	 * registers with USB Host
	 */
	usbf_reg_bitclr(udc, USBF_REG_EPCTR, USBF_SYS_EPC_RST);

	/* modify in register gadget process */
	udc->gadget.speed = USB_SPEED_FULL;
	udc->gadget.max_speed = USB_SPEED_HIGH;
	udc->gadget.ops = &usbf_gadget_ops;

	udc->gadget.name = dev->driver->name;
	udc->gadget.dev.parent = dev;
	udc->gadget.ep0 = &udc->ep[0].ep;

	/* The hardware DMA controller needs dma addresses aligned on 32bit.
	 * A fallback to pio is done if DMA addresses are not aligned.
	 */
	udc->gadget.quirk_avoids_skb_reserve = 1;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	/* we have a canned request structure to allow sending packets as reply
	 * to get_status requests
	 */
	INIT_LIST_HEAD(&udc->setup_reply.queue);

	for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
		ep = &udc->ep[i];

		/* Skip endpoints not instantiated in this SoC configuration */
		if (!(usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
		      USBF_SYS_EP_AVAILABLE(i))) {
			continue;
		}

		INIT_LIST_HEAD(&ep->queue);

		ep->id = i;
		ep->disabled = 1;
		ep->udc = udc;
		ep->ep.ops = &usbf_ep_ops;
		ep->ep.name = usbf_ep_info[i].name;
		ep->ep.caps = usbf_ep_info[i].caps;
		usb_ep_set_maxpacket_limit(&ep->ep,
					   usbf_ep_info[i].maxpacket_limit);

		if (ep->id == 0) {
			ep->regs = ep->udc->regs + USBF_BASE_EP0;
		} else {
			ep->regs = ep->udc->regs + USBF_BASE_EPN(ep->id - 1);
			/* Verify the static table matches the silicon */
			ret = usbf_epn_check(ep);
			if (ret)
				return ret;
			if (usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
			    USBF_SYS_DMA_AVAILABLE(i)) {
				ep->dma_regs = ep->udc->regs +
					       USBF_BASE_DMA_EPN(ep->id - 1);
			}
			/* ep0 is gadget.ep0; only the others go on ep_list */
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
		}
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(dev, irq, usbf_epc_irq, 0, "usbf-epc", udc);
	if (ret) {
		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 1);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(dev, irq, usbf_ahb_epc_irq, 0, "usbf-ahb-epc", udc);
	if (ret) {
		dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
		return ret;
	}

	usbf_reg_bitset(udc, USBF_REG_AHBMCTR, USBF_SYS_WBURST_TYPE);

	usbf_reg_bitset(udc, USBF_REG_USB_CONTROL,
			USBF_USB_INT_SEL | USBF_USB_SOF_RCV | USBF_USB_SOF_CLK_MODE);

	ret = usb_add_gadget_udc(dev, &udc->gadget);
	if (ret)
		return ret;

	return 0;
}
/* Platform-driver remove: unregister from the gadget core and release the
 * runtime-PM reference taken in probe. devm handles the rest.
 */
static void usbf_remove(struct platform_device *pdev)
{
	struct usbf_udc *udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);

	pm_runtime_put(&pdev->dev);
}
/* Device-tree match table */
static const struct of_device_id usbf_match[] = {
	{ .compatible = "renesas,rzn1-usbf" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, usbf_match);

/* Platform driver glue */
static struct platform_driver udc_driver = {
	.driver = {
		.name = "usbf_renesas",
		.of_match_table = usbf_match,
	},
	.probe = usbf_probe,
	.remove = usbf_remove,
};

module_platform_driver(udc_driver);

MODULE_AUTHOR("Herve Codina <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 USB Function driver");
MODULE_LICENSE("GPL");
/*
* Copyright © 2016-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Portions of this file (derived from panel-simple.c) are:
*
* Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Raspberry Pi 7" touchscreen panel driver.
*
* The 7" touchscreen consists of a DPI LCD panel, a Toshiba
* TC358762XBG DSI-DPI bridge, and an I2C-connected Atmel ATTINY88-MUR
* controlling power management, the LCD PWM, and initial register
* setup of the Toshiba.
*
* This driver controls the TC358762 and ATTINY88, presenting a DSI
* device with a drm_panel.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#define RPI_DSI_DRIVER_NAME "rpi-ts-dsi"
/* I2C registers of the Atmel microcontroller. */
enum REG_ADDR {
REG_ID = 0x80,
REG_PORTA, /* BIT(2) for horizontal flip, BIT(3) for vertical flip */
REG_PORTB,
REG_PORTC,
REG_PORTD,
REG_POWERON,
REG_PWM,
REG_DDRA,
REG_DDRB,
REG_DDRC,
REG_DDRD,
REG_TEST,
REG_WR_ADDRL,
REG_WR_ADDRH,
REG_READH,
REG_READL,
REG_WRITEH,
REG_WRITEL,
REG_ID2,
};
/* DSI D-PHY Layer Registers */
#define D0W_DPHYCONTTX 0x0004
#define CLW_DPHYCONTRX 0x0020
#define D0W_DPHYCONTRX 0x0024
#define D1W_DPHYCONTRX 0x0028
#define COM_DPHYCONTRX 0x0038
#define CLW_CNTRL 0x0040
#define D0W_CNTRL 0x0044
#define D1W_CNTRL 0x0048
#define DFTMODE_CNTRL 0x0054
/* DSI PPI Layer Registers */
#define PPI_STARTPPI 0x0104
#define PPI_BUSYPPI 0x0108
#define PPI_LINEINITCNT 0x0110
#define PPI_LPTXTIMECNT 0x0114
#define PPI_CLS_ATMR 0x0140
#define PPI_D0S_ATMR 0x0144
#define PPI_D1S_ATMR 0x0148
#define PPI_D0S_CLRSIPOCOUNT 0x0164
#define PPI_D1S_CLRSIPOCOUNT 0x0168
#define CLS_PRE 0x0180
#define D0S_PRE 0x0184
#define D1S_PRE 0x0188
#define CLS_PREP 0x01A0
#define D0S_PREP 0x01A4
#define D1S_PREP 0x01A8
#define CLS_ZERO 0x01C0
#define D0S_ZERO 0x01C4
#define D1S_ZERO 0x01C8
#define PPI_CLRFLG 0x01E0
#define PPI_CLRSIPO 0x01E4
#define HSTIMEOUT 0x01F0
#define HSTIMEOUTENABLE 0x01F4
/* DSI Protocol Layer Registers */
#define DSI_STARTDSI 0x0204
#define DSI_BUSYDSI 0x0208
#define DSI_LANEENABLE 0x0210
# define DSI_LANEENABLE_CLOCK BIT(0)
# define DSI_LANEENABLE_D0 BIT(1)
# define DSI_LANEENABLE_D1 BIT(2)
#define DSI_LANESTATUS0 0x0214
#define DSI_LANESTATUS1 0x0218
#define DSI_INTSTATUS 0x0220
#define DSI_INTMASK 0x0224
#define DSI_INTCLR 0x0228
#define DSI_LPTXTO 0x0230
#define DSI_MODE 0x0260
#define DSI_PAYLOAD0 0x0268
#define DSI_PAYLOAD1 0x026C
#define DSI_SHORTPKTDAT 0x0270
#define DSI_SHORTPKTREQ 0x0274
#define DSI_BTASTA 0x0278
#define DSI_BTACLR 0x027C
/* DSI General Registers */
#define DSIERRCNT 0x0300
#define DSISIGMOD 0x0304
/* DSI Application Layer Registers */
#define APLCTRL 0x0400
#define APLSTAT 0x0404
#define APLERR 0x0408
#define PWRMOD 0x040C
#define RDPKTLN 0x0410
#define PXLFMT 0x0414
#define MEMWRCMD 0x0418
/* LCDC/DPI Host Registers */
#define LCDCTRL 0x0420
#define HSR 0x0424
#define HDISPR 0x0428
#define VSR 0x042C
#define VDISPR 0x0430
#define VFUEN 0x0434
/* DBI-B Host Registers */
#define DBIBCTRL 0x0440
/* SPI Master Registers */
#define SPICMR 0x0450
#define SPITCR 0x0454
/* System Controller Registers */
#define SYSSTAT 0x0460
#define SYSCTRL 0x0464
#define SYSPLL1 0x0468
#define SYSPLL2 0x046C
#define SYSPLL3 0x0470
#define SYSPMCTRL 0x047C
/* GPIO Registers */
#define GPIOC 0x0480
#define GPIOO 0x0484
#define GPIOI 0x0488
/* I2C Registers */
#define I2CCLKCTRL 0x0490
/* Chip/Rev Registers */
#define IDREG 0x04A0
/* Debug Registers */
#define WCMDQUEUE 0x0500
#define RCMDQUEUE 0x0504
struct rpi_touchscreen {
struct drm_panel base;
struct mipi_dsi_device *dsi;
struct i2c_client *i2c;
};
static const struct drm_display_mode rpi_touchscreen_modes[] = {
{
/* Modeline comes from the Raspberry Pi firmware, with HFP=1
* plugged in and clock re-computed from that.
*/
.clock = 25979400 / 1000,
.hdisplay = 800,
.hsync_start = 800 + 1,
.hsync_end = 800 + 1 + 2,
.htotal = 800 + 1 + 2 + 46,
.vdisplay = 480,
.vsync_start = 480 + 7,
.vsync_end = 480 + 7 + 2,
.vtotal = 480 + 7 + 2 + 21,
},
};
static struct rpi_touchscreen *panel_to_ts(struct drm_panel *panel)
{
return container_of(panel, struct rpi_touchscreen, base);
}
static int rpi_touchscreen_i2c_read(struct rpi_touchscreen *ts, u8 reg)
{
return i2c_smbus_read_byte_data(ts->i2c, reg);
}
static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
u8 reg, u8 val)
{
int ret;
ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
if (ret)
dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
}
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
{
u8 msg[] = {
reg,
reg >> 8,
val,
val >> 8,
val >> 16,
val >> 24,
};
mipi_dsi_generic_write(ts->dsi, msg, sizeof(msg));
return 0;
}
static int rpi_touchscreen_disable(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
rpi_touchscreen_i2c_write(ts, REG_PWM, 0);
rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
udelay(1);
return 0;
}
static int rpi_touchscreen_noop(struct drm_panel *panel)
{
return 0;
}
static int rpi_touchscreen_prepare(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
int i;
rpi_touchscreen_i2c_write(ts, REG_POWERON, 1);
/* Wait for nPWRDWN to go low to indicate poweron is done. */
for (i = 0; i < 100; i++) {
if (rpi_touchscreen_i2c_read(ts, REG_PORTB) & 1)
break;
}
rpi_touchscreen_write(ts, DSI_LANEENABLE,
DSI_LANEENABLE_CLOCK |
DSI_LANEENABLE_D0);
rpi_touchscreen_write(ts, PPI_D0S_CLRSIPOCOUNT, 0x05);
rpi_touchscreen_write(ts, PPI_D1S_CLRSIPOCOUNT, 0x05);
rpi_touchscreen_write(ts, PPI_D0S_ATMR, 0x00);
rpi_touchscreen_write(ts, PPI_D1S_ATMR, 0x00);
rpi_touchscreen_write(ts, PPI_LPTXTIMECNT, 0x03);
rpi_touchscreen_write(ts, SPICMR, 0x00);
rpi_touchscreen_write(ts, LCDCTRL, 0x00100150);
rpi_touchscreen_write(ts, SYSCTRL, 0x040f);
msleep(100);
rpi_touchscreen_write(ts, PPI_STARTPPI, 0x01);
rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
msleep(100);
return 0;
}
static int rpi_touchscreen_enable(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
/* Turn on the backlight. */
rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
/* Default to the same orientation as the closed source
* firmware used for the panel. Runtime rotation
* configuration will be supported using VC4's plane
* orientation bits.
*/
rpi_touchscreen_i2c_write(ts, REG_PORTA, BIT(2));
return 0;
}
static int rpi_touchscreen_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
unsigned int i, num = 0;
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
for (i = 0; i < ARRAY_SIZE(rpi_touchscreen_modes); i++) {
const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
m->hdisplay, m->vdisplay,
drm_mode_vrefresh(m));
continue;
}
mode->type |= DRM_MODE_TYPE_DRIVER;
if (i == 0)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
num++;
}
connector->display_info.bpc = 8;
connector->display_info.width_mm = 154;
connector->display_info.height_mm = 86;
drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
return num;
}
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.disable = rpi_touchscreen_disable,
.unprepare = rpi_touchscreen_noop,
.prepare = rpi_touchscreen_prepare,
.enable = rpi_touchscreen_enable,
.get_modes = rpi_touchscreen_get_modes,
};
static int rpi_touchscreen_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct rpi_touchscreen *ts;
struct device_node *endpoint, *dsi_host_node;
struct mipi_dsi_host *host;
int ver;
struct mipi_dsi_device_info info = {
.type = RPI_DSI_DRIVER_NAME,
.channel = 0,
.node = NULL,
};
ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
if (!ts)
return -ENOMEM;
i2c_set_clientdata(i2c, ts);
ts->i2c = i2c;
ver = rpi_touchscreen_i2c_read(ts, REG_ID);
if (ver < 0) {
dev_err(dev, "Atmel I2C read failed: %d\n", ver);
return -ENODEV;
}
switch (ver) {
case 0xde: /* ver 1 */
case 0xc3: /* ver 2 */
break;
default:
dev_err(dev, "Unknown Atmel firmware revision: 0x%02x\n", ver);
return -ENODEV;
}
/* Turn off at boot, so we can cleanly sequence powering on. */
rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
/* Look up the DSI host. It needs to probe before we do. */
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!endpoint)
return -ENODEV;
dsi_host_node = of_graph_get_remote_port_parent(endpoint);
if (!dsi_host_node)
goto error;
host = of_find_mipi_dsi_host_by_node(dsi_host_node);
of_node_put(dsi_host_node);
if (!host) {
of_node_put(endpoint);
return -EPROBE_DEFER;
}
info.node = of_graph_get_remote_port(endpoint);
if (!info.node)
goto error;
of_node_put(endpoint);
ts->dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(ts->dsi)) {
dev_err(dev, "DSI device registration failed: %ld\n",
PTR_ERR(ts->dsi));
return PTR_ERR(ts->dsi);
}
drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
DRM_MODE_CONNECTOR_DSI);
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
*/
drm_panel_add(&ts->base);
return 0;
error:
of_node_put(endpoint);
return -ENODEV;
}
static void rpi_touchscreen_remove(struct i2c_client *i2c)
{
struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);
mipi_dsi_detach(ts->dsi);
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
}
static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
{
int ret;
dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM);
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 1;
ret = mipi_dsi_attach(dsi);
if (ret)
dev_err(&dsi->dev, "failed to attach dsi to host: %d\n", ret);
return ret;
}
static struct mipi_dsi_driver rpi_touchscreen_dsi_driver = {
.driver.name = RPI_DSI_DRIVER_NAME,
.probe = rpi_touchscreen_dsi_probe,
};
static const struct of_device_id rpi_touchscreen_of_ids[] = {
{ .compatible = "raspberrypi,7inch-touchscreen-panel" },
{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, rpi_touchscreen_of_ids);
static struct i2c_driver rpi_touchscreen_driver = {
.driver = {
.name = "rpi_touchscreen",
.of_match_table = rpi_touchscreen_of_ids,
},
.probe = rpi_touchscreen_probe,
.remove = rpi_touchscreen_remove,
};
static int __init rpi_touchscreen_init(void)
{
mipi_dsi_driver_register(&rpi_touchscreen_dsi_driver);
return i2c_add_driver(&rpi_touchscreen_driver);
}
module_init(rpi_touchscreen_init);
static void __exit rpi_touchscreen_exit(void)
{
i2c_del_driver(&rpi_touchscreen_driver);
mipi_dsi_driver_unregister(&rpi_touchscreen_dsi_driver);
}
module_exit(rpi_touchscreen_exit);
MODULE_AUTHOR("Eric Anholt <[email protected]>");
MODULE_DESCRIPTION("Raspberry Pi 7-inch touchscreen driver");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#define ATTR __always_inline
#include "test_jhash.h"
SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
void *ptr;
int nh_off, i = 0;
nh_off = 14;
/* pragma unroll doesn't work on large loops */
#define C do { \
ptr = data + i; \
if (ptr + nh_off > data_end) \
break; \
ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
} while (0);
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
C30;C30;C30; /* 90 calls */
return 0;
}
char _license[] SEC("license") = "GPL";
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HDAC_HDMI_H__
#define __HDAC_HDMI_H__
int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
struct snd_soc_jack *jack);
int hdac_hdmi_jack_port_init(struct snd_soc_component *component,
struct snd_soc_dapm_context *dapm);
#endif /* __HDAC_HDMI_H__ */
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/drivers/mmc/core/sd_ops.h
*
* Copyright 2006-2007 Pierre Ossman
*/
#ifndef _MMC_SD_OPS_H
#define _MMC_SD_OPS_H
#include <linux/types.h>
struct mmc_card;
struct mmc_host;
struct mmc_request;
int mmc_app_set_bus_width(struct mmc_card *card, int width);
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
int mmc_send_if_cond_pcie(struct mmc_host *host, u32 ocr);
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
int mmc_app_send_scr(struct mmc_card *card);
int mmc_app_sd_status(struct mmc_card *card, void *ssr);
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
int mmc_send_ext_addr(struct mmc_host *host, u32 addr);
void mmc_uhs2_prepare_cmd(struct mmc_host *host, struct mmc_request *mrq);
#endif
|
/*
* include/asm-xtensa/cacheasm.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Tensilica Inc.
*/
#include <asm/cache.h>
#include <asm/asmmacro.h>
#include <linux/stringify.h>
/*
* Define cache functions as macros here so that they can be used
* by the kernel and boot loader. We should consider moving them to a
* library that can be linked by both.
*
* Locking
*
* ___unlock_dcache_all
* ___unlock_icache_all
*
* Flush and invaldating
*
* ___flush_invalidate_dcache_{all|range|page}
* ___flush_dcache_{all|range|page}
* ___invalidate_dcache_{all|range|page}
* ___invalidate_icache_{all|range|page}
*
*/
.macro __loop_cache_unroll ar at insn size line_width max_immed
.if (1 << (\line_width)) > (\max_immed)
.set _reps, 1
.elseif (2 << (\line_width)) > (\max_immed)
.set _reps, 2
.else
.set _reps, 4
.endif
__loopi \ar, \at, \size, (_reps << (\line_width))
.set _index, 0
.rep _reps
\insn \ar, _index << (\line_width)
.set _index, _index + 1
.endr
__endla \ar, \at, _reps << (\line_width)
.endm
.macro __loop_cache_all ar at insn size line_width max_immed
movi \ar, 0
__loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
.endm
.macro __loop_cache_range ar as at insn line_width
extui \at, \ar, 0, \line_width
add \as, \as, \at
__loops \ar, \as, \at, \line_width
\insn \ar, 0
__endla \ar, \at, (1 << (\line_width))
.endm
.macro __loop_cache_page ar at insn line_width max_immed
__loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
.endm
.macro ___unlock_dcache_all ar at
#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
.macro ___unlock_icache_all ar at
#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
XCHAL_ICACHE_LINEWIDTH 240
#endif
.endm
.macro ___flush_invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
.macro ___flush_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
.macro ___invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
.macro ___invalidate_icache_all ar at
#if XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
XCHAL_ICACHE_LINEWIDTH 1020
#endif
.endm
.macro ___flush_invalidate_dcache_range ar as at
#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___flush_dcache_range ar as at
#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___invalidate_dcache_range ar as at
#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
#endif
.endm
.macro ___invalidate_icache_range ar as at
#if XCHAL_ICACHE_SIZE
__loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
#endif
.endm
.macro ___flush_invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
.macro ___flush_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
.macro ___invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
.macro ___invalidate_icache_page ar as
#if XCHAL_ICACHE_SIZE
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
#endif
.endm
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
*
* Copyright (C) 2012 Sylwester Nawrocki <[email protected]>
* Copyright (C) 2012 Tomasz Figa <[email protected]>
*/
#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "camif-core.h"
static char *camif_clocks[CLK_MAX_NUM] = {
/* HCLK CAMIF clock */
[CLK_GATE] = "camif",
/* CAMIF / external camera sensor master clock */
[CLK_CAM] = "camera",
};
static const struct camif_fmt camif_formats[] = {
{
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 16,
.ybpp = 1,
.color = IMG_FMT_YCBCR422P,
.colplanes = 3,
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.ybpp = 1,
.color = IMG_FMT_YCBCR420,
.colplanes = 3,
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
.depth = 12,
.ybpp = 1,
.color = IMG_FMT_YCRCB420,
.colplanes = 3,
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.ybpp = 2,
.color = IMG_FMT_RGB565,
.colplanes = 1,
.flags = FMT_FL_S3C24XX_PREVIEW |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.ybpp = 4,
.color = IMG_FMT_XRGB8888,
.colplanes = 1,
.flags = FMT_FL_S3C24XX_PREVIEW |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_BGR666,
.depth = 32,
.ybpp = 4,
.color = IMG_FMT_RGB666,
.colplanes = 1,
.flags = FMT_FL_S3C64XX,
}
};
/**
* s3c_camif_find_format() - lookup camif color format by fourcc or an index
* @vp: video path (DMA) description (codec/preview)
* @pixelformat: fourcc to match, ignored if null
* @index: index to the camif_formats array, ignored if negative
*/
const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
const u32 *pixelformat,
int index)
{
const struct camif_fmt *fmt, *def_fmt = NULL;
unsigned int i;
int id = 0;
if (index >= (int)ARRAY_SIZE(camif_formats))
return NULL;
for (i = 0; i < ARRAY_SIZE(camif_formats); ++i) {
fmt = &camif_formats[i];
if (vp && !(vp->fmt_flags & fmt->flags))
continue;
if (pixelformat && fmt->fourcc == *pixelformat)
return fmt;
if (index == id)
def_fmt = fmt;
id++;
}
return def_fmt;
}
static int camif_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
{
unsigned int sh = 6;
if (src >= 64 * tar)
return -EINVAL;
while (sh--) {
unsigned int tmp = 1 << sh;
if (src >= tar * tmp) {
*shift = sh;
*ratio = tmp;
return 0;
}
}
*shift = 0;
*ratio = 1;
return 0;
}
int s3c_camif_get_scaler_config(struct camif_vp *vp,
struct camif_scaler *scaler)
{
struct v4l2_rect *camif_crop = &vp->camif->camif_crop;
int source_x = camif_crop->width;
int source_y = camif_crop->height;
int target_x = vp->out_frame.rect.width;
int target_y = vp->out_frame.rect.height;
int ret;
if (vp->rotation == 90 || vp->rotation == 270)
swap(target_x, target_y);
ret = camif_get_scaler_factor(source_x, target_x, &scaler->pre_h_ratio,
&scaler->h_shift);
if (ret < 0)
return ret;
ret = camif_get_scaler_factor(source_y, target_y, &scaler->pre_v_ratio,
&scaler->v_shift);
if (ret < 0)
return ret;
scaler->pre_dst_width = source_x / scaler->pre_h_ratio;
scaler->pre_dst_height = source_y / scaler->pre_v_ratio;
scaler->main_h_ratio = (source_x << 8) / (target_x << scaler->h_shift);
scaler->main_v_ratio = (source_y << 8) / (target_y << scaler->v_shift);
scaler->scaleup_h = (target_x >= source_x);
scaler->scaleup_v = (target_y >= source_y);
scaler->copy = 0;
pr_debug("H: ratio: %u, shift: %u. V: ratio: %u, shift: %u.\n",
scaler->pre_h_ratio, scaler->h_shift,
scaler->pre_v_ratio, scaler->v_shift);
pr_debug("Source: %dx%d, Target: %dx%d, scaleup_h/v: %d/%d\n",
source_x, source_y, target_x, target_y,
scaler->scaleup_h, scaler->scaleup_v);
return 0;
}
static int camif_register_sensor(struct camif_dev *camif)
{
struct s3c_camif_sensor_info *sensor = &camif->pdata.sensor;
struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
struct i2c_adapter *adapter;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct v4l2_subdev *sd;
int ret;
camif->sensor.sd = NULL;
if (sensor->i2c_board_info.addr == 0)
return -EINVAL;
adapter = i2c_get_adapter(sensor->i2c_bus_num);
if (adapter == NULL) {
v4l2_warn(v4l2_dev, "failed to get I2C adapter %d\n",
sensor->i2c_bus_num);
return -EPROBE_DEFER;
}
sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter,
&sensor->i2c_board_info, NULL);
if (sd == NULL) {
i2c_put_adapter(adapter);
v4l2_warn(v4l2_dev, "failed to acquire subdev %s\n",
sensor->i2c_board_info.type);
return -EPROBE_DEFER;
}
camif->sensor.sd = sd;
v4l2_info(v4l2_dev, "registered sensor subdevice %s\n", sd->name);
/* Get initial pixel format and set it at the camif sink pad */
format.pad = 0;
ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
if (ret < 0)
return 0;
format.pad = CAMIF_SD_PAD_SINK;
v4l2_subdev_call(&camif->subdev, pad, set_fmt, NULL, &format);
v4l2_info(sd, "Initial format from sensor: %dx%d, %#x\n",
format.format.width, format.format.height,
format.format.code);
return 0;
}
static void camif_unregister_sensor(struct camif_dev *camif)
{
struct v4l2_subdev *sd = camif->sensor.sd;
struct i2c_client *client = sd ? v4l2_get_subdevdata(sd) : NULL;
struct i2c_adapter *adapter;
if (client == NULL)
return;
adapter = client->adapter;
v4l2_device_unregister_subdev(sd);
camif->sensor.sd = NULL;
i2c_unregister_device(client);
i2c_put_adapter(adapter);
}
static int camif_create_media_links(struct camif_dev *camif)
{
int i, ret;
ret = media_create_pad_link(&camif->sensor.sd->entity, 0,
&camif->subdev.entity, CAMIF_SD_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
for (i = 1; i < CAMIF_SD_PADS_NUM && !ret; i++) {
ret = media_create_pad_link(&camif->subdev.entity, i,
&camif->vp[i - 1].vdev.entity, 0,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
}
return ret;
}
static int camif_register_video_nodes(struct camif_dev *camif)
{
int ret = s3c_camif_register_video_node(camif, VP_CODEC);
if (ret < 0)
return ret;
return s3c_camif_register_video_node(camif, VP_PREVIEW);
}
static void camif_unregister_video_nodes(struct camif_dev *camif)
{
s3c_camif_unregister_video_node(camif, VP_CODEC);
s3c_camif_unregister_video_node(camif, VP_PREVIEW);
}
static void camif_unregister_media_entities(struct camif_dev *camif)
{
camif_unregister_video_nodes(camif);
camif_unregister_sensor(camif);
}
/*
* Media device
*/
static int camif_media_dev_init(struct camif_dev *camif)
{
struct media_device *md = &camif->media_dev;
struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
unsigned int ip_rev = camif->variant->ip_revision;
int ret;
memset(md, 0, sizeof(*md));
snprintf(md->model, sizeof(md->model), "Samsung S3C%s CAMIF",
ip_rev == S3C6410_CAMIF_IP_REV ? "6410" : "244X");
strscpy(md->bus_info, "platform", sizeof(md->bus_info));
md->hw_revision = ip_rev;
md->dev = camif->dev;
strscpy(v4l2_dev->name, "s3c-camif", sizeof(v4l2_dev->name));
v4l2_dev->mdev = md;
media_device_init(md);
ret = v4l2_device_register(camif->dev, v4l2_dev);
if (ret < 0)
return ret;
return ret;
}
static void camif_clk_put(struct camif_dev *camif)
{
int i;
for (i = 0; i < CLK_MAX_NUM; i++) {
if (IS_ERR(camif->clock[i]))
continue;
clk_unprepare(camif->clock[i]);
clk_put(camif->clock[i]);
camif->clock[i] = ERR_PTR(-EINVAL);
}
}
static int camif_clk_get(struct camif_dev *camif)
{
int ret, i;
for (i = 1; i < CLK_MAX_NUM; i++)
camif->clock[i] = ERR_PTR(-EINVAL);
for (i = 0; i < CLK_MAX_NUM; i++) {
camif->clock[i] = clk_get(camif->dev, camif_clocks[i]);
if (IS_ERR(camif->clock[i])) {
ret = PTR_ERR(camif->clock[i]);
goto err;
}
ret = clk_prepare(camif->clock[i]);
if (ret < 0) {
clk_put(camif->clock[i]);
camif->clock[i] = NULL;
goto err;
}
}
return 0;
err:
camif_clk_put(camif);
dev_err(camif->dev, "failed to get clock: %s\n",
camif_clocks[i]);
return ret;
}
/*
* The CAMIF device has two relatively independent data processing paths
* that can source data from memory or the common camera input frontend.
* Register interrupts for each data processing path (camif_vp).
*/
static int camif_request_irqs(struct platform_device *pdev,
struct camif_dev *camif)
{
int irq, ret, i;
for (i = 0; i < CAMIF_VP_NUM; i++) {
struct camif_vp *vp = &camif->vp[i];
init_waitqueue_head(&vp->irq_queue);
irq = platform_get_irq(pdev, i);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler,
0, dev_name(&pdev->dev), vp);
if (ret < 0) {
dev_err(&pdev->dev, "failed to install IRQ: %d\n", ret);
break;
}
}
return ret;
}
static int s3c_camif_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct s3c_camif_plat_data *pdata = dev->platform_data;
struct s3c_camif_drvdata *drvdata;
struct camif_dev *camif;
int ret = 0;
camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
if (!camif)
return -ENOMEM;
spin_lock_init(&camif->slock);
mutex_init(&camif->lock);
camif->dev = dev;
if (!pdata || !pdata->gpio_get || !pdata->gpio_put) {
dev_err(dev, "wrong platform data\n");
return -EINVAL;
}
camif->pdata = *pdata;
drvdata = (void *)platform_get_device_id(pdev)->driver_data;
camif->variant = drvdata->variant;
camif->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(camif->io_base))
return PTR_ERR(camif->io_base);
ret = camif_request_irqs(pdev, camif);
if (ret < 0)
return ret;
ret = pdata->gpio_get();
if (ret < 0)
return ret;
ret = s3c_camif_create_subdev(camif);
if (ret < 0)
goto err_sd;
ret = camif_clk_get(camif);
if (ret < 0)
goto err_clk;
platform_set_drvdata(pdev, camif);
clk_set_rate(camif->clock[CLK_CAM],
camif->pdata.sensor.clock_frequency);
dev_info(dev, "sensor clock frequency: %lu\n",
clk_get_rate(camif->clock[CLK_CAM]));
/*
* Set initial pixel format, resolution and crop rectangle.
* Must be done before a sensor subdev is registered as some
* settings are overrode with values from sensor subdev.
*/
s3c_camif_set_defaults(camif);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto err_disable;
ret = camif_media_dev_init(camif);
if (ret < 0)
goto err_pm;
ret = camif_register_sensor(camif);
if (ret < 0)
goto err_sens;
ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev);
if (ret < 0)
goto err_sens;
ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
if (ret < 0)
goto err_sens;
ret = camif_register_video_nodes(camif);
if (ret < 0)
goto err_sens;
ret = camif_create_media_links(camif);
if (ret < 0)
goto err_sens;
ret = media_device_register(&camif->media_dev);
if (ret < 0)
goto err_sens;
pm_runtime_put(dev);
return 0;
err_sens:
v4l2_device_unregister(&camif->v4l2_dev);
media_device_unregister(&camif->media_dev);
media_device_cleanup(&camif->media_dev);
camif_unregister_media_entities(camif);
err_pm:
pm_runtime_put(dev);
err_disable:
pm_runtime_disable(dev);
camif_clk_put(camif);
err_clk:
s3c_camif_unregister_subdev(camif);
err_sd:
pdata->gpio_put();
return ret;
}
static void s3c_camif_remove(struct platform_device *pdev)
{
struct camif_dev *camif = platform_get_drvdata(pdev);
struct s3c_camif_plat_data *pdata = &camif->pdata;
media_device_unregister(&camif->media_dev);
media_device_cleanup(&camif->media_dev);
camif_unregister_media_entities(camif);
v4l2_device_unregister(&camif->v4l2_dev);
pm_runtime_disable(&pdev->dev);
camif_clk_put(camif);
s3c_camif_unregister_subdev(camif);
pdata->gpio_put();
}
static int s3c_camif_runtime_resume(struct device *dev)
{
struct camif_dev *camif = dev_get_drvdata(dev);
clk_enable(camif->clock[CLK_GATE]);
/* null op on s3c244x */
clk_enable(camif->clock[CLK_CAM]);
return 0;
}
static int s3c_camif_runtime_suspend(struct device *dev)
{
struct camif_dev *camif = dev_get_drvdata(dev);
/* null op on s3c244x */
clk_disable(camif->clock[CLK_CAM]);
clk_disable(camif->clock[CLK_GATE]);
return 0;
}
static const struct s3c_camif_variant s3c244x_camif_variant = {
.vp_pix_limits = {
[VP_CODEC] = {
.max_out_width = 4096,
.max_sc_out_width = 2048,
.out_width_align = 16,
.min_out_width = 16,
.max_height = 4096,
},
[VP_PREVIEW] = {
.max_out_width = 640,
.max_sc_out_width = 640,
.out_width_align = 16,
.min_out_width = 16,
.max_height = 480,
}
},
.pix_limits = {
.win_hor_offset_align = 8,
},
.ip_revision = S3C244X_CAMIF_IP_REV,
};
static struct s3c_camif_drvdata s3c244x_camif_drvdata = {
.variant = &s3c244x_camif_variant,
.bus_clk_freq = 24000000UL,
};
static const struct s3c_camif_variant s3c6410_camif_variant = {
.vp_pix_limits = {
[VP_CODEC] = {
.max_out_width = 4096,
.max_sc_out_width = 2048,
.out_width_align = 16,
.min_out_width = 16,
.max_height = 4096,
},
[VP_PREVIEW] = {
.max_out_width = 4096,
.max_sc_out_width = 720,
.out_width_align = 16,
.min_out_width = 16,
.max_height = 4096,
}
},
.pix_limits = {
.win_hor_offset_align = 8,
},
.ip_revision = S3C6410_CAMIF_IP_REV,
.has_img_effect = 1,
.vp_offset = 0x20,
};
static struct s3c_camif_drvdata s3c6410_camif_drvdata = {
.variant = &s3c6410_camif_variant,
.bus_clk_freq = 133000000UL,
};
static const struct platform_device_id s3c_camif_driver_ids[] = {
{
.name = "s3c2440-camif",
.driver_data = (unsigned long)&s3c244x_camif_drvdata,
}, {
.name = "s3c6410-camif",
.driver_data = (unsigned long)&s3c6410_camif_drvdata,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, s3c_camif_driver_ids);
static const struct dev_pm_ops s3c_camif_pm_ops = {
.runtime_suspend = s3c_camif_runtime_suspend,
.runtime_resume = s3c_camif_runtime_resume,
};
static struct platform_driver s3c_camif_driver = {
.probe = s3c_camif_probe,
.remove = s3c_camif_remove,
.id_table = s3c_camif_driver_ids,
.driver = {
.name = S3C_CAMIF_DRIVER_NAME,
.pm = &s3c_camif_pm_ops,
}
};
module_platform_driver(s3c_camif_driver);
MODULE_AUTHOR("Sylwester Nawrocki <[email protected]>");
MODULE_AUTHOR("Tomasz Figa <[email protected]>");
MODULE_DESCRIPTION("S3C24XX/S3C64XX SoC camera interface driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* ff-hwdep.c - a part of driver for RME Fireface series
*
* Copyright (c) 2015-2017 Takashi Sakamoto
*/
/*
* This codes give three functionality.
*
* 1.get firewire node information
* 2.get notification about starting/stopping stream
* 3.lock/unlock stream
*/
#include "ff.h"
static bool has_msg(struct snd_ff *ff)
{
if (ff->spec->protocol->has_msg)
return ff->spec->protocol->has_msg(ff);
else
return 0;
}
static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
loff_t *offset)
{
struct snd_ff *ff = hwdep->private_data;
DEFINE_WAIT(wait);
spin_lock_irq(&ff->lock);
while (!ff->dev_lock_changed && !has_msg(ff)) {
prepare_to_wait(&ff->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irq(&ff->lock);
schedule();
finish_wait(&ff->hwdep_wait, &wait);
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&ff->lock);
}
if (ff->dev_lock_changed && count >= sizeof(struct snd_firewire_event_lock_status)) {
struct snd_firewire_event_lock_status ev = {
.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
.status = (ff->dev_lock_count > 0),
};
ff->dev_lock_changed = false;
spin_unlock_irq(&ff->lock);
if (copy_to_user(buf, &ev, sizeof(ev)))
return -EFAULT;
count = sizeof(ev);
} else if (has_msg(ff)) {
// NOTE: Acquired spin lock should be released before accessing to user space in the
// callback since the access can cause page fault.
count = ff->spec->protocol->copy_msg_to_user(ff, buf, count);
spin_unlock_irq(&ff->lock);
} else {
spin_unlock_irq(&ff->lock);
count = 0;
}
return count;
}
static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_ff *ff = hwdep->private_data;
__poll_t events;
poll_wait(file, &ff->hwdep_wait, wait);
spin_lock_irq(&ff->lock);
if (ff->dev_lock_changed || has_msg(ff))
events = EPOLLIN | EPOLLRDNORM;
else
events = 0;
spin_unlock_irq(&ff->lock);
return events;
}
/* Fill and copy out the generic FireWire node info for this unit. */
static int hwdep_get_info(struct snd_ff *ff, void __user *arg)
{
	struct fw_device *dev = fw_parent_device(ff->unit);
	struct snd_firewire_get_info info = {
		.type = SNDRV_FIREWIRE_TYPE_FIREFACE,
		.card = dev->card->index,
	};

	/* GUID is the two big-endian quadlets at config ROM offsets 3/4. */
	*(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
	*(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
	strscpy(info.device_name, dev_name(&dev->device),
		sizeof(info.device_name));

	return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0;
}
/* Take the advisory stream lock for userspace; -EBUSY if already held. */
static int hwdep_lock(struct snd_ff *ff)
{
	int err = -EBUSY;

	spin_lock_irq(&ff->lock);
	if (ff->dev_lock_count == 0) {
		/* -1 marks "locked by hwdep userspace". */
		ff->dev_lock_count = -1;
		err = 0;
	}
	spin_unlock_irq(&ff->lock);

	return err;
}
/* Release the advisory stream lock; -EBADFD if this side does not hold it. */
static int hwdep_unlock(struct snd_ff *ff)
{
	int err = -EBADFD;

	spin_lock_irq(&ff->lock);
	if (ff->dev_lock_count == -1) {
		ff->dev_lock_count = 0;
		err = 0;
	}
	spin_unlock_irq(&ff->lock);

	return err;
}
/* On close, drop the advisory lock if this file still holds it. */
static int hwdep_release(struct snd_hwdep *hwdep, struct file *file)
{
	struct snd_ff *ff = hwdep->private_data;

	spin_lock_irq(&ff->lock);
	if (ff->dev_lock_count == -1)
		ff->dev_lock_count = 0;
	spin_unlock_irq(&ff->lock);

	return 0;
}
/* Dispatch the generic FireWire hwdep ioctls (info / lock / unlock). */
static int hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
	struct snd_ff *ff = hwdep->private_data;
	int err;

	switch (cmd) {
	case SNDRV_FIREWIRE_IOCTL_GET_INFO:
		err = hwdep_get_info(ff, (void __user *)arg);
		break;
	case SNDRV_FIREWIRE_IOCTL_LOCK:
		err = hwdep_lock(ff);
		break;
	case SNDRV_FIREWIRE_IOCTL_UNLOCK:
		err = hwdep_unlock(ff);
		break;
	default:
		/* Let the core report an unknown command. */
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
#ifdef CONFIG_COMPAT
/* 32-bit compat path: widen the user pointer, then reuse hwdep_ioctl(). */
static int hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	return hwdep_ioctl(hwdep, file, cmd,
			   (unsigned long)compat_ptr(arg));
}
#else
#define hwdep_compat_ioctl NULL
#endif
/*
 * Create the ALSA hwdep device for this unit and wire up its file
 * operations. Returns 0 on success or the negative error code from
 * snd_hwdep_new().
 */
int snd_ff_create_hwdep_devices(struct snd_ff *ff)
{
	static const struct snd_hwdep_ops hwdep_ops = {
		.read		= hwdep_read,
		.release	= hwdep_release,
		.poll		= hwdep_poll,
		.ioctl		= hwdep_ioctl,
		.ioctl_compat	= hwdep_compat_ioctl,
	};
	struct snd_hwdep *hwdep;
	int err;

	err = snd_hwdep_new(ff->card, ff->card->driver, 0, &hwdep);
	if (err < 0)
		return err;

	/* Bounded copy instead of strcpy(), matching hwdep_get_info(). */
	strscpy(hwdep->name, ff->card->driver, sizeof(hwdep->name));
	hwdep->iface = SNDRV_HWDEP_IFACE_FW_FIREFACE;
	hwdep->ops = hwdep_ops;
	hwdep->private_data = ff;
	hwdep->exclusive = true;

	return 0;
}
|
// SPDX-License-Identifier: GPL-2.0
//
// Spreadtrum divider clock driver
//
// Copyright (C) 2017 Spreadtrum, Inc.
// Author: Chunyan Zhang <[email protected]>
#include <linux/clk-provider.h>
#include "div.h"
/* clk_ops.round_rate: closest rate reachable with this divider width. */
static long sprd_div_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	struct sprd_div *cd = hw_to_sprd_div(hw);

	/* No divider table is used; only the field width constrains it. */
	return divider_round_rate(&cd->common.hw, rate, parent_rate,
				  NULL, cd->div.width, 0);
}
/*
 * Shared helper: read the divider field described by 'div' out of the
 * clock's register and derive the output rate from 'parent_rate'.
 *
 * Fix: the third regmap_read() argument had been corrupted to the
 * mojibake character for "&reg" (an HTML-entity mangling); restore the
 * address-of expression so the code compiles.
 */
unsigned long sprd_div_helper_recalc_rate(struct sprd_clk_common *common,
					  const struct sprd_div_internal *div,
					  unsigned long parent_rate)
{
	unsigned long val;
	unsigned int reg;

	/* Extract the divider bit-field from the hardware register. */
	regmap_read(common->regmap, common->reg + div->offset, &reg);
	val = reg >> div->shift;
	val &= (1 << div->width) - 1;

	return divider_recalc_rate(&common->hw, parent_rate, val, NULL, 0,
				   div->width);
}
EXPORT_SYMBOL_GPL(sprd_div_helper_recalc_rate);
/* clk_ops.recalc_rate: delegate to the shared helper with this layout. */
static unsigned long sprd_div_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct sprd_div *cd = hw_to_sprd_div(hw);

	return sprd_div_helper_recalc_rate(&cd->common, &cd->div, parent_rate);
}
/*
 * Shared helper: compute the divider value for 'rate' and insert it into
 * the clock's register with a read-modify-write. Always returns 0.
 *
 * Fix: the third regmap_read() argument had been corrupted to the
 * mojibake character for "&reg" (an HTML-entity mangling); restore the
 * address-of expression so the code compiles.
 */
int sprd_div_helper_set_rate(const struct sprd_clk_common *common,
			     const struct sprd_div_internal *div,
			     unsigned long rate,
			     unsigned long parent_rate)
{
	unsigned long val;
	unsigned int reg;

	val = divider_get_val(rate, parent_rate, NULL,
			      div->width, 0);

	/* Clear the divider field, then merge in the new value. */
	regmap_read(common->regmap, common->reg + div->offset, &reg);
	reg &= ~GENMASK(div->width + div->shift - 1, div->shift);

	regmap_write(common->regmap, common->reg + div->offset,
		     reg | (val << div->shift));

	return 0;
}
EXPORT_SYMBOL_GPL(sprd_div_helper_set_rate);
/* clk_ops.set_rate: program this clock's divider field via the helper. */
static int sprd_div_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct sprd_div *cd = hw_to_sprd_div(hw);

	return sprd_div_helper_set_rate(&cd->common, &cd->div,
					rate, parent_rate);
}
/* clk_ops for a plain Spreadtrum divider clock. */
const struct clk_ops sprd_div_ops = {
	.recalc_rate = sprd_div_recalc_rate,
	.round_rate = sprd_div_round_rate,
	.set_rate = sprd_div_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_div_ops);
|
// SPDX-License-Identifier: GPL-2.0
/*
* Based on arch/arm64/kernel/ftrace.c
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <asm/inst.h>
#include <asm/module.h>
/*
 * Patch the instruction at 'pc' to 'new'. When 'validate' is set, first
 * check that the current instruction equals 'old'. Returns 0 on success,
 * -EFAULT/-EINVAL on a failed validation, -EPERM on a failed patch.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
	u32 replaced;

	if (validate) {
		if (larch_insn_read((void *)pc, &replaced))
			return -EFAULT;
		if (replaced != old)
			return -EINVAL;
	}

	return larch_insn_patch_text((void *)pc, new) ? -EPERM : 0;
}
#ifdef CONFIG_MODULES
/* True when 'addr' lies within the signed [-128M, +128M) range of 'bl'. */
static bool reachable_by_bl(unsigned long addr, unsigned long pc)
{
	long delta = (long)addr - (long)pc;

	return delta >= -SZ_128M && delta < SZ_128M;
}
/* Return the module PLT slot for a known ftrace entry point, else NULL. */
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
	    addr == FTRACE_REGS_ADDR)
		return &plt[FTRACE_REGS_PLT_IDX];

	return NULL;
}
/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'bl' instruction, modules may be placed too far
 * away to branch directly and we must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
	/* The 'bl' sits one instruction past rec->ip (after "move t0, ra"). */
	unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
	struct plt_entry *plt;

	/*
	 * If a custom trampoline is unreachable, rely on the ftrace_regs_caller
	 * trampoline which knows how to indirectly reach that trampoline through
	 * ops->direct_call.
	 */
	if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
		*addr = FTRACE_REGS_ADDR;

	/*
	 * When the target is within range of the 'bl' instruction, use 'addr'
	 * as-is and branch to that directly.
	 */
	if (reachable_by_bl(*addr, pc))
		return true;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod, *addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	/* Branch to the PLT veneer instead of the out-of-range target. */
	*addr = (unsigned long)plt;
	return true;
}
#else /* !CONFIG_MODULES */
/* Without modules every target is within 'bl' range of the kernel image. */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
	return true;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Re-point an existing call site from 'old_addr' to 'addr', validating that
 * the instruction currently present is the expected old 'bl'.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

	/* Both the new and the old target may need PLT indirection. */
	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
		return -EINVAL;

	new = larch_insn_gen_bl(pc, addr);
	old = larch_insn_gen_bl(pc, old_addr);

	return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
/* Point the global ftrace_call site at 'func'. */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc = (unsigned long)&ftrace_call;
	u32 new = larch_insn_gen_bl(pc, (unsigned long)func);

	/* The previous instruction is unknown here, so skip validation. */
	return ftrace_modify_code(pc, 0, new, false);
}
/*
* The compiler has inserted 2 NOPs before the regular function prologue.
* T series registers are available and safe because of LoongArch's psABI.
*
* At runtime, we can replace nop with bl to enable ftrace call and replace bl
* with nop to disable ftrace call. The bl requires us to save the original RA
* value, so it saves RA at t0 here.
*
* Details are:
*
* | Compiled | Disabled | Enabled |
* +------------+------------------------+------------------------+
* | nop | move t0, ra | move t0, ra |
* | nop | nop | bl ftrace_caller |
* | func_body | func_body | func_body |
*
* The RA value will be recovered by ftrace_regs_entry, and restored into RA
* before returning to the regular function prologue. When a function is not
* being traced, the "move t0, ra" is not harmful.
*/
/* Turn the first compiler-emitted nop at rec->ip into "move t0, ra". */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip;
	u32 nop = larch_insn_gen_nop();
	u32 move = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

	return ftrace_modify_code(pc, nop, move, true);
}
/* Enable tracing at a call site: replace the second nop with 'bl addr'. */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
	u32 nop, call;

	/* Resolve out-of-range targets through the module PLT. */
	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	nop = larch_insn_gen_nop();
	call = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, nop, call, true);
}
/* Disable tracing at a call site: replace 'bl addr' with a nop. */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
	u32 nop, call;

	/* Resolve out-of-range targets through the module PLT. */
	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	nop = larch_insn_gen_nop();
	call = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, call, nop, true);
}
/* Text patching here may sleep, so tell the core not to stop_machine(). */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command | FTRACE_MAY_SLEEP);
}
/* No arch-specific setup is needed for dynamic ftrace on LoongArch. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
unsigned long old;
unsigned long return_hooker = (unsigned long)&return_to_handler;
if (unlikely(atomic_read(¤t->tracing_graph_pause)))
return;
old = *parent;
if (!function_graph_enter(old, self_addr, 0, parent))
*parent = return_hooker;
}
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
unsigned long *parent = (unsigned long *)®s->regs[1];
prepare_ftrace_return(ip, (unsigned long *)parent);
}
#else
/* Swap between nop and "b ftrace_graph_caller" at the graph call site. */
static int ftrace_modify_graph_caller(bool enable)
{
	extern void ftrace_graph_call(void);
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	unsigned long func = (unsigned long)&ftrace_graph_caller;
	u32 nop = larch_insn_gen_nop();
	u32 branch = larch_insn_gen_b(pc, func);

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);

	return ftrace_modify_code(pc, branch, nop, true);
}
/* Patch in the graph caller branch: enable return-graph tracing. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}
/* Patch the graph caller branch back to a nop: disable graph tracing. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called with preemption disabled */
/*
 * ftrace_ops callback that fires registered kprobes at ftrace call sites.
 * Runs with preemption disabled; must not be probed itself (NOKPROBE below).
 */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	int bit;
	struct pt_regs *regs;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (unlikely(kprobe_ftrace_disabled))
		return;

	/* Guard against recursive entry through the tracing machinery. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	regs = ftrace_get_regs(fregs);
	if (!regs)
		goto out;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		/* Another probe is already active on this CPU: count a miss. */
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = instruction_pointer(regs);

		/* Present the probe address as the PC to the handlers. */
		instruction_pointer_set(regs, ip);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->csr_era)
			 * as if there is a nop
			 */
			instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
			instruction_pointer_set(regs, orig_ip);
		}

		/*
		 * If pre_handler returns !0, it changes regs->csr_era. We have to
		 * skip emulating post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
/* Kprobes on ftrace sites need no out-of-line single-step slot. */
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Samsung S5H1411 VSB/QAM demodulator driver
Copyright (C) 2008 Steven Toth <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <media/dvb_frontend.h>
#include "s5h1411.h"
/* Per-demod driver state, allocated in s5h1411_attach(). */
struct s5h1411_state {

	struct i2c_adapter *i2c;

	/* configuration settings */
	const struct s5h1411_config *config;

	struct dvb_frontend frontend;

	enum fe_modulation current_modulation;	/* last programmed modulation */
	unsigned int first_tune:1;		/* force modulation setup on first tune */

	u32 current_frequency;			/* last tuned frequency */
	int if_freq;				/* currently programmed IF, in kHz */

	u8 inversion;				/* spectral inversion setting */
};

static int debug;	/* module debug switch gating dprintk() output */

#define dprintk(arg...) do {	\
	if (debug)		\
		printk(arg);	\
	} while (0)
/* Register values to initialise the demod, defaults to VSB */
static struct init_tab {
	u8 addr;	/* I2C address (TOP or QAM register bank) */
	u8 reg;		/* register index */
	u16 data;	/* 16-bit value to write */
} init_tab[] = {
	{ S5H1411_I2C_TOP_ADDR, 0x00, 0x0071, },
	{ S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, },
	{ S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, },
	{ S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, },
	{ S5H1411_I2C_TOP_ADDR, 0x1f, 0x342c, },
	{ S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, },
	{ S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, },
	{ S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, },
	{ S5H1411_I2C_TOP_ADDR, 0x27, 0x0f04, },
	{ S5H1411_I2C_TOP_ADDR, 0x28, 0x070f, },
	{ S5H1411_I2C_TOP_ADDR, 0x29, 0x2820, },
	{ S5H1411_I2C_TOP_ADDR, 0x2a, 0x102e, },
	{ S5H1411_I2C_TOP_ADDR, 0x2b, 0x0220, },
	{ S5H1411_I2C_TOP_ADDR, 0x2e, 0x0d0e, },
	{ S5H1411_I2C_TOP_ADDR, 0x2f, 0x1013, },
	{ S5H1411_I2C_TOP_ADDR, 0x31, 0x171b, },
	{ S5H1411_I2C_TOP_ADDR, 0x32, 0x0e0f, },
	{ S5H1411_I2C_TOP_ADDR, 0x33, 0x0f10, },
	{ S5H1411_I2C_TOP_ADDR, 0x34, 0x170e, },
	{ S5H1411_I2C_TOP_ADDR, 0x35, 0x4b10, },
	{ S5H1411_I2C_TOP_ADDR, 0x36, 0x0f17, },
	{ S5H1411_I2C_TOP_ADDR, 0x3c, 0x1577, },
	{ S5H1411_I2C_TOP_ADDR, 0x3d, 0x081a, },
	{ S5H1411_I2C_TOP_ADDR, 0x3e, 0x77ee, },
	{ S5H1411_I2C_TOP_ADDR, 0x40, 0x1e09, },
	{ S5H1411_I2C_TOP_ADDR, 0x41, 0x0f0c, },
	{ S5H1411_I2C_TOP_ADDR, 0x42, 0x1f10, },
	{ S5H1411_I2C_TOP_ADDR, 0x4d, 0x0509, },
	{ S5H1411_I2C_TOP_ADDR, 0x4e, 0x0a00, },
	{ S5H1411_I2C_TOP_ADDR, 0x50, 0x0000, },
	{ S5H1411_I2C_TOP_ADDR, 0x5b, 0x0000, },
	{ S5H1411_I2C_TOP_ADDR, 0x5c, 0x0008, },
	{ S5H1411_I2C_TOP_ADDR, 0x57, 0x1101, },
	{ S5H1411_I2C_TOP_ADDR, 0x65, 0x007c, },
	{ S5H1411_I2C_TOP_ADDR, 0x68, 0x0512, },
	{ S5H1411_I2C_TOP_ADDR, 0x69, 0x0258, },
	{ S5H1411_I2C_TOP_ADDR, 0x70, 0x0004, },
	{ S5H1411_I2C_TOP_ADDR, 0x71, 0x0007, },
	{ S5H1411_I2C_TOP_ADDR, 0x76, 0x00a9, },
	{ S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, },
	{ S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, },
	{ S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, },
	{ S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, },
	{ S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, },
	{ S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, },
	{ S5H1411_I2C_TOP_ADDR, 0xb8, 0x003f, },
	{ S5H1411_I2C_TOP_ADDR, 0xb9, 0x2700, },
	{ S5H1411_I2C_TOP_ADDR, 0xba, 0xfac8, },
	{ S5H1411_I2C_TOP_ADDR, 0xbe, 0x1003, },
	{ S5H1411_I2C_TOP_ADDR, 0xbf, 0x103f, },
	{ S5H1411_I2C_TOP_ADDR, 0xce, 0x2000, },
	{ S5H1411_I2C_TOP_ADDR, 0xcf, 0x0800, },
	{ S5H1411_I2C_TOP_ADDR, 0xd0, 0x0800, },
	{ S5H1411_I2C_TOP_ADDR, 0xd1, 0x0400, },
	{ S5H1411_I2C_TOP_ADDR, 0xd2, 0x0800, },
	{ S5H1411_I2C_TOP_ADDR, 0xd3, 0x2000, },
	{ S5H1411_I2C_TOP_ADDR, 0xd4, 0x3000, },
	{ S5H1411_I2C_TOP_ADDR, 0xdb, 0x4a9b, },
	{ S5H1411_I2C_TOP_ADDR, 0xdc, 0x1000, },
	{ S5H1411_I2C_TOP_ADDR, 0xde, 0x0001, },
	{ S5H1411_I2C_TOP_ADDR, 0xdf, 0x0000, },
	{ S5H1411_I2C_TOP_ADDR, 0xe3, 0x0301, },
	/* QAM-bank registers follow. */
	{ S5H1411_I2C_QAM_ADDR, 0xf3, 0x0000, },
	{ S5H1411_I2C_QAM_ADDR, 0xf3, 0x0001, },
	{ S5H1411_I2C_QAM_ADDR, 0x08, 0x0600, },
	{ S5H1411_I2C_QAM_ADDR, 0x18, 0x4201, },
	{ S5H1411_I2C_QAM_ADDR, 0x1e, 0x6476, },
	{ S5H1411_I2C_QAM_ADDR, 0x21, 0x0830, },
	{ S5H1411_I2C_QAM_ADDR, 0x0c, 0x5679, },
	{ S5H1411_I2C_QAM_ADDR, 0x0d, 0x579b, },
	{ S5H1411_I2C_QAM_ADDR, 0x24, 0x0102, },
	{ S5H1411_I2C_QAM_ADDR, 0x31, 0x7488, },
	{ S5H1411_I2C_QAM_ADDR, 0x32, 0x0a08, },
	{ S5H1411_I2C_QAM_ADDR, 0x3d, 0x8689, },
	{ S5H1411_I2C_QAM_ADDR, 0x49, 0x0048, },
	{ S5H1411_I2C_QAM_ADDR, 0x57, 0x2012, },
	{ S5H1411_I2C_QAM_ADDR, 0x5d, 0x7676, },
	{ S5H1411_I2C_QAM_ADDR, 0x04, 0x0400, },
	{ S5H1411_I2C_QAM_ADDR, 0x58, 0x00c0, },
	{ S5H1411_I2C_QAM_ADDR, 0x5b, 0x0100, },
};
/* VSB SNR lookup table */
static struct vsb_snr_tab {
	u16 val;	/* raw reading (descending order) */
	u16 data;	/* SNR in dB x10 */
} vsb_snr_tab[] = {
	{  0x39f, 300, },
	{  0x39b, 295, },
	{  0x397, 290, },
	{  0x394, 285, },
	{  0x38f, 280, },
	{  0x38b, 275, },
	{  0x387, 270, },
	{  0x382, 265, },
	{  0x37d, 260, },
	{  0x377, 255, },
	{  0x370, 250, },
	{  0x36a, 245, },
	{  0x364, 240, },
	{  0x35b, 235, },
	{  0x353, 230, },
	{  0x349, 225, },
	{  0x340, 220, },
	{  0x337, 215, },
	{  0x327, 210, },
	{  0x31b, 205, },
	{  0x310, 200, },
	{  0x302, 195, },
	{  0x2f3, 190, },
	{  0x2e4, 185, },
	{  0x2d7, 180, },
	{  0x2cd, 175, },
	{  0x2bb, 170, },
	{  0x2a9, 165, },
	{  0x29e, 160, },
	{  0x284, 155, },
	{  0x27a, 150, },
	{  0x260, 145, },
	{  0x23a, 140, },
	{  0x224, 135, },
	{  0x213, 130, },
	{  0x204, 125, },
	{  0x1fe, 120, },
	{      0,   0, },
};
/* QAM64 SNR lookup table */
static struct qam64_snr_tab {
	u16 val;	/* raw reading (ascending order) */
	u16 data;	/* SNR in dB x10 */
} qam64_snr_tab[] = {
	{ 0x0001,   0, },
	{ 0x0af0, 300, },
	{ 0x0d80, 290, },
	{ 0x10a0, 280, },
	{ 0x14b5, 270, },
	{ 0x1590, 268, },
	{ 0x1680, 266, },
	{ 0x17b0, 264, },
	{ 0x18c0, 262, },
	{ 0x19b0, 260, },
	{ 0x1ad0, 258, },
	{ 0x1d00, 256, },
	{ 0x1da0, 254, },
	{ 0x1ef0, 252, },
	{ 0x2050, 250, },
	{ 0x20f0, 249, },
	{ 0x21d0, 248, },
	{ 0x22b0, 247, },
	{ 0x23a0, 246, },
	{ 0x2470, 245, },
	{ 0x24f0, 244, },
	{ 0x25a0, 243, },
	{ 0x26c0, 242, },
	{ 0x27b0, 241, },
	{ 0x28d0, 240, },
	{ 0x29b0, 239, },
	{ 0x2ad0, 238, },
	{ 0x2ba0, 237, },
	{ 0x2c80, 236, },
	{ 0x2d20, 235, },
	{ 0x2e00, 234, },
	{ 0x2f10, 233, },
	{ 0x3050, 232, },
	{ 0x3190, 231, },
	{ 0x3300, 230, },
	{ 0x3340, 229, },
	{ 0x3200, 228, },
	{ 0x3550, 227, },
	{ 0x3610, 226, },
	{ 0x3600, 225, },
	{ 0x3700, 224, },
	{ 0x3800, 223, },
	{ 0x3920, 222, },
	{ 0x3a20, 221, },
	{ 0x3b30, 220, },
	{ 0x3d00, 219, },
	{ 0x3e00, 218, },
	{ 0x4000, 217, },
	{ 0x4100, 216, },
	{ 0x4300, 215, },
	{ 0x4400, 214, },
	{ 0x4600, 213, },
	{ 0x4700, 212, },
	{ 0x4800, 211, },
	{ 0x4a00, 210, },
	{ 0x4b00, 209, },
	{ 0x4d00, 208, },
	{ 0x4f00, 207, },
	{ 0x5050, 206, },
	{ 0x5200, 205, },
	{ 0x53c0, 204, },
	{ 0x5450, 203, },
	{ 0x5650, 202, },
	{ 0x5820, 201, },
	{ 0x6000, 200, },
	{ 0xffff,   0, },
};
/* QAM256 SNR lookup table */
static struct qam256_snr_tab {
	u16 val;	/* raw reading (ascending order) */
	u16 data;	/* SNR in dB x10 */
} qam256_snr_tab[] = {
	{ 0x0001,   0, },
	{ 0x0970, 400, },
	{ 0x0a90, 390, },
	{ 0x0b90, 380, },
	{ 0x0d90, 370, },
	{ 0x0ff0, 360, },
	{ 0x1240, 350, },
	{ 0x1345, 348, },
	{ 0x13c0, 346, },
	{ 0x14c0, 344, },
	{ 0x1500, 342, },
	{ 0x1610, 340, },
	{ 0x1700, 338, },
	{ 0x1800, 336, },
	{ 0x18b0, 334, },
	{ 0x1900, 332, },
	{ 0x1ab0, 330, },
	{ 0x1bc0, 328, },
	{ 0x1cb0, 326, },
	{ 0x1db0, 324, },
	{ 0x1eb0, 322, },
	{ 0x2030, 320, },
	{ 0x2200, 318, },
	{ 0x2280, 316, },
	{ 0x2410, 314, },
	{ 0x25b0, 312, },
	{ 0x27a0, 310, },
	{ 0x2840, 308, },
	{ 0x29d0, 306, },
	{ 0x2b10, 304, },
	{ 0x2d30, 302, },
	{ 0x2f20, 300, },
	{ 0x30c0, 298, },
	{ 0x3260, 297, },
	{ 0x32c0, 296, },
	{ 0x3300, 295, },
	{ 0x33b0, 294, },
	{ 0x34b0, 293, },
	{ 0x35a0, 292, },
	{ 0x3650, 291, },
	{ 0x3800, 290, },
	{ 0x3900, 289, },
	{ 0x3a50, 288, },
	{ 0x3b30, 287, },
	{ 0x3cb0, 286, },
	{ 0x3e20, 285, },
	{ 0x3fa0, 284, },
	{ 0x40a0, 283, },
	{ 0x41c0, 282, },
	{ 0x42f0, 281, },
	{ 0x44a0, 280, },
	{ 0x4600, 279, },
	{ 0x47b0, 278, },
	{ 0x4900, 277, },
	{ 0x4a00, 276, },
	{ 0x4ba0, 275, },
	{ 0x4d00, 274, },
	{ 0x4f00, 273, },
	{ 0x5000, 272, },
	/* NOTE(review): 272 appears twice and 271 is skipped below —
	 * kept as-is; verify against the S5H1411 datasheet. */
	{ 0x51f0, 272, },
	{ 0x53a0, 270, },
	{ 0x5520, 269, },
	{ 0x5700, 268, },
	{ 0x5800, 267, },
	{ 0x5a00, 266, },
	{ 0x5c00, 265, },
	{ 0x5d00, 264, },
	{ 0x5f00, 263, },
	{ 0x6000, 262, },
	{ 0x6200, 261, },
	{ 0x6400, 260, },
	{ 0xffff,   0, },
};
/* 8 bit registers, 16 bit values */
static int s5h1411_writereg(struct s5h1411_state *state,
			    u8 addr, u8 reg, u16 data)
{
	u8 buf[] = { reg, data >> 8, data & 0xff };
	struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 };
	int ret = i2c_transfer(state->i2c, &msg, 1);

	if (ret != 1) {
		printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, ret == %i)\n",
		       __func__, addr, reg, data, ret);
		return -1;
	}

	return 0;
}
/* Read a 16-bit register: write the index, then read two data bytes. */
static u16 s5h1411_readreg(struct s5h1411_state *state, u8 addr, u8 reg)
{
	u8 b0[] = { reg };
	u8 b1[] = { 0, 0 };
	struct i2c_msg msg[] = {
		{ .addr = addr, .flags = 0, .buf = b0, .len = 1 },
		{ .addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 2 } };
	int ret = i2c_transfer(state->i2c, msg, 2);

	if (ret != 2)
		printk(KERN_ERR "%s: readreg error (ret == %i)\n",
		       __func__, ret);

	/* Values come back big-endian on the wire. */
	return (b1[0] << 8) | b1[1];
}
/* Pulse the soft-reset register (0xf7) so the demod resynchronizes. */
static int s5h1411_softreset(struct dvb_frontend *fe)
{
	struct s5h1411_state *state = fe->demodulator_priv;

	dprintk("%s()\n", __func__);

	s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf7, 0);
	s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf7, 1);
	return 0;
}
/*
 * Program the IF frequency registers (0x38/0x39 in the TOP bank, 0x2c in
 * the QAM bank) for one of the supported IFs; unknown values fall through
 * to the 5380 kHz setting. Caches the chosen IF in state->if_freq.
 */
static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
{
	struct s5h1411_state *state = fe->demodulator_priv;

	dprintk("%s(%d KHz)\n", __func__, KHz);

	switch (KHz) {
	case 3250:
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d5);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342);
		s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9);
		break;
	case 3500:
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1225);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x1e96);
		s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x1225);
		break;
	case 4000:
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x14bc);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0xb53e);
		s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x14bd);
		break;
	default:
		dprintk("%s(%d KHz) Invalid, defaulting to 5380\n",
			__func__, KHz);
		fallthrough;
	case 5380:
	case 44000:
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x3655);
		s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x1be4);
		break;
	}

	/* Note: caches the requested value even when it was remapped. */
	state->if_freq = KHz;

	return 0;
}
/* Select MPEG transport clock continuity/polarity (bits 13:12 of 0xbe). */
static int s5h1411_set_mpeg_timing(struct dvb_frontend *fe, int mode)
{
	struct s5h1411_state *state = fe->demodulator_priv;
	u16 val;

	dprintk("%s(%d)\n", __func__, mode);

	val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbe) & 0xcfff;
	switch (mode) {
	case S5H1411_MPEGTIMING_CONTINUOUS_INVERTING_CLOCK:
		/* Field value 0b00 — nothing to set. */
		break;
	case S5H1411_MPEGTIMING_CONTINUOUS_NONINVERTING_CLOCK:
		dprintk("%s(%d) Mode1 or Defaulting\n", __func__, mode);
		val |= 0x1000;
		break;
	case S5H1411_MPEGTIMING_NONCONTINUOUS_INVERTING_CLOCK:
		val |= 0x2000;
		break;
	case S5H1411_MPEGTIMING_NONCONTINUOUS_NONINVERTING_CLOCK:
		val |= 0x3000;
		break;
	default:
		return -EINVAL;
	}

	/* Configure MPEG Signal Timing charactistics */
	return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbe, val);
}
/* Set or clear the spectral-inversion bit (0x1000 in register 0x24). */
static int s5h1411_set_spectralinversion(struct dvb_frontend *fe, int inversion)
{
	struct s5h1411_state *state = fe->demodulator_priv;
	u16 val;

	dprintk("%s(%d)\n", __func__, inversion);

	val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x24) & ~0x1000;
	if (inversion == 1)
		val |= 0x1000; /* Inverted */

	/* Remember the setting for s5h1411_attach()/debugging. */
	state->inversion = inversion;
	return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val);
}
/* Choose serial (bit 0x100 set) or parallel TS output in register 0xbd. */
static int s5h1411_set_serialmode(struct dvb_frontend *fe, int serial)
{
	struct s5h1411_state *state = fe->demodulator_priv;
	u16 val;

	dprintk("%s(%d)\n", __func__, serial);

	val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbd) & ~0x100;
	if (serial == 1)
		val |= 0x100;

	return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, val);
}
/*
 * Switch the demod between VSB and QAM operation, reprogramming the IF and
 * mode registers, then soft-reset. Skipped when the requested modulation is
 * already active (except on the very first tune after init).
 */
static int s5h1411_enable_modulation(struct dvb_frontend *fe,
				     enum fe_modulation m)
{
	struct s5h1411_state *state = fe->demodulator_priv;

	dprintk("%s(0x%08x)\n", __func__, m);

	if ((state->first_tune == 0) && (m == state->current_modulation)) {
		dprintk("%s() Already at desired modulation.  Skipping...\n",
			__func__);
		return 0;
	}

	switch (m) {
	case VSB_8:
		dprintk("%s() VSB_8\n", __func__);
		s5h1411_set_if_freq(fe, state->config->vsb_if);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x71);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf6, 0x00);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xcd, 0xf1);
		break;
	case QAM_64:
	case QAM_256:
	case QAM_AUTO:
		dprintk("%s() QAM_AUTO (64/256)\n", __func__);
		s5h1411_set_if_freq(fe, state->config->qam_if);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x0171);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf6, 0x0001);
		s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x16, 0x1101);
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xcd, 0x00f0);
		break;
	default:
		dprintk("%s() Invalid modulation\n", __func__);
		return -EINVAL;
	}

	state->current_modulation = m;
	state->first_tune = 0;
	s5h1411_softreset(fe);

	return 0;
}
/* Register 0xf5 opens (1) or closes (0) the pass-through tuner I2C gate. */
static int s5h1411_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct s5h1411_state *state = fe->demodulator_priv;

	dprintk("%s(%d)\n", __func__, enable);

	return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5,
				enable ? 1 : 0);
}
/* Drive GPIO bit 0x02 in register 0xe0 according to 'enable'. */
static int s5h1411_set_gpio(struct dvb_frontend *fe, int enable)
{
	struct s5h1411_state *state = fe->demodulator_priv;
	u16 val;

	dprintk("%s(%d)\n", __func__, enable);

	val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xe0) & ~0x02;
	if (enable)
		val |= 0x02;

	return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val);
}
/* Power control: enable=1 puts the demod into low power, 0 wakes it. */
static int s5h1411_set_powerstate(struct dvb_frontend *fe, int enable)
{
	struct s5h1411_state *state = fe->demodulator_priv;

	dprintk("%s(%d)\n", __func__, enable);

	if (enable) {
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf4, 1);
	} else {
		s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf4, 0);
		/* Coming out of low power: resynchronize the demod. */
		s5h1411_softreset(fe);
	}

	return 0;
}
/* dvb_frontend_ops.sleep: drop the demod into low-power mode. */
static int s5h1411_sleep(struct dvb_frontend *fe)
{
	return s5h1411_set_powerstate(fe, 1);
}
/* Reset all demod registers to their power-on defaults (register 0xf3). */
static int s5h1411_register_reset(struct dvb_frontend *fe)
{
	struct s5h1411_state *state = fe->demodulator_priv;

	dprintk("%s()\n", __func__);

	return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf3, 0);
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
static int s5h1411_set_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct s5h1411_state *state = fe->demodulator_priv;

	dprintk("%s(frequency=%d)\n", __func__, p->frequency);

	s5h1411_softreset(fe);

	state->current_frequency = p->frequency;

	s5h1411_enable_modulation(fe, p->modulation);

	/* Let the tuner do the RF work, gating its I2C bus around the call. */
	if (fe->ops.tuner_ops.set_params) {
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);

		fe->ops.tuner_ops.set_params(fe);

		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	/* Issue a reset to the demod so it knows to resync against the
	   newly tuned frequency */
	s5h1411_softreset(fe);

	return 0;
}
/* Reset the demod hardware and reset all of the configuration registers
   to a default state. */
static int s5h1411_init(struct dvb_frontend *fe)
{
	struct s5h1411_state *state = fe->demodulator_priv;
	int i;

	dprintk("%s()\n", __func__);

	/* Wake the chip, then reload the full register defaults. */
	s5h1411_set_powerstate(fe, 0);
	s5h1411_register_reset(fe);

	for (i = 0; i < ARRAY_SIZE(init_tab); i++)
		s5h1411_writereg(state, init_tab[i].addr,
			init_tab[i].reg,
			init_tab[i].data);

	/* The datasheet says that after initialisation, VSB is default */
	state->current_modulation = VSB_8;

	/* Although the datasheet says it's in VSB, empirical evidence
	   shows problems getting lock on the first tuning request.  Make
	   sure we call enable_modulation the first time around */
	state->first_tune = 1;

	if (state->config->output_mode == S5H1411_SERIAL_OUTPUT)
		/* Serial */
		s5h1411_set_serialmode(fe, 1);
	else
		/* Parallel */
		s5h1411_set_serialmode(fe, 0);

	s5h1411_set_spectralinversion(fe, state->config->inversion);
	s5h1411_set_if_freq(fe, state->config->vsb_if);
	s5h1411_set_gpio(fe, state->config->gpio);
	s5h1411_set_mpeg_timing(fe, state->config->mpeg_timing);
	s5h1411_softreset(fe);

	/* Note: Leaving the I2C gate closed. */
	s5h1411_i2c_gate_ctrl(fe, 0);

	return 0;
}
/*
 * Report lock status flags: FEC/EQ/AFC lock bits are modulation-specific,
 * and the carrier/signal bits may additionally come from the tuner when
 * the config selects S5H1411_TUNERLOCKING.
 */
static int s5h1411_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
	struct s5h1411_state *state = fe->demodulator_priv;
	u16 reg;
	u32 tuner_status = 0;

	*status = 0;

	/* Register F2 bit 15 = Master Lock, removed */

	switch (state->current_modulation) {
	case QAM_64:
	case QAM_256:
		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0);
		if (reg & 0x10) /* QAM FEC Lock */
			*status |= FE_HAS_SYNC | FE_HAS_LOCK;
		if (reg & 0x100) /* QAM EQ Lock */
			*status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;

		break;
	case VSB_8:
		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2);
		if (reg & 0x1000) /* FEC Lock */
			*status |= FE_HAS_SYNC | FE_HAS_LOCK;
		if (reg & 0x2000) /* EQ Lock */
			*status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;

		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x53);
		if (reg & 0x1) /* AFC Lock */
			*status |= FE_HAS_SIGNAL;

		break;
	default:
		return -EINVAL;
	}

	switch (state->config->status_mode) {
	case S5H1411_DEMODLOCKING:
		if (*status & FE_HAS_VITERBI)
			*status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
		break;
	case S5H1411_TUNERLOCKING:
		/* Get the tuner status */
		if (fe->ops.tuner_ops.get_status) {
			if (fe->ops.i2c_gate_ctrl)
				fe->ops.i2c_gate_ctrl(fe, 1);

			fe->ops.tuner_ops.get_status(fe, &tuner_status);

			if (fe->ops.i2c_gate_ctrl)
				fe->ops.i2c_gate_ctrl(fe, 0);
		}
		if (tuner_status)
			*status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
		break;
	}

	dprintk("%s() status 0x%08x\n", __func__, *status);

	return 0;
}
/*
 * Map a raw QAM256 quality reading onto SNR (dB x10) via qam256_snr_tab.
 * Returns 0 and stores the value in *snr, or -EINVAL when off the table.
 */
static int s5h1411_qam256_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
{
	int i;

	dprintk("%s()\n", __func__);

	/* Table is sorted ascending: pick the first threshold above v. */
	for (i = 0; i < ARRAY_SIZE(qam256_snr_tab); i++) {
		if (v < qam256_snr_tab[i].val) {
			*snr = qam256_snr_tab[i].data;
			return 0;
		}
	}

	return -EINVAL;
}
/*
 * Map a raw QAM64 quality reading onto SNR (dB x10) via qam64_snr_tab.
 * Returns 0 and stores the value in *snr, or -EINVAL when off the table.
 */
static int s5h1411_qam64_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
{
	int i;

	dprintk("%s()\n", __func__);

	/* Table is sorted ascending: pick the first threshold above v. */
	for (i = 0; i < ARRAY_SIZE(qam64_snr_tab); i++) {
		if (v < qam64_snr_tab[i].val) {
			*snr = qam64_snr_tab[i].data;
			return 0;
		}
	}

	return -EINVAL;
}
/*
 * Map a raw VSB quality reading onto SNR (dB x10) using the descending
 * vsb_snr_tab. Returns 0 on success, -EINVAL when no entry matches.
 */
static int s5h1411_vsb_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
{
	int i, ret = -EINVAL;
	dprintk("%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(vsb_snr_tab); i++) {
		if (v > vsb_snr_tab[i].val) {
			*snr = vsb_snr_tab[i].data;
			ret = 0;
			break;
		}
	}
	/* NOTE(review): on lookup failure *snr is printed uninitialized. */
	dprintk("%s() snr=%d\n", __func__, *snr);
	return ret;
}
/* Read the modulation-specific quality register and translate it to SNR. */
static int s5h1411_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct s5h1411_state *state = fe->demodulator_priv;
	u16 reg;

	dprintk("%s()\n", __func__);

	switch (state->current_modulation) {
	case QAM_64:
		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf1);
		return s5h1411_qam64_lookup_snr(fe, snr, reg);
	case QAM_256:
		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf1);
		return s5h1411_qam256_lookup_snr(fe, snr, reg);
	case VSB_8:
		/* Only the low 10 bits of 0xf2 carry the VSB quality. */
		reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2) & 0x3ff;
		return s5h1411_vsb_lookup_snr(fe, snr, reg);
	default:
		return -EINVAL;
	}
}
/*
 * Derive a 0..0xffff signal-strength figure from the SNR, saturating at
 * 35 dB. The 8.24 fixed-point mapping is borrowed from lgdt330x.c purely
 * for the sake of code re-use across demod drivers.
 */
static int s5h1411_read_signal_strength(struct dvb_frontend *fe,
					u16 *signal_strength)
{
	u16 snr;
	u32 tmp;
	int ret = s5h1411_read_snr(fe, &snr);

	*signal_strength = 0;
	if (ret)
		return ret;

	/* Convert from SNR in dB * 10 to 8.24 fixed-point */
	tmp = (snr * ((1 << 24) / 10));

	/* Scale the range 0 - 35*2^24 into 0 - 65535, clamping at the top. */
	if (tmp >= 8960 * 0x10000)
		*signal_strength = 0xffff;
	else
		*signal_strength = tmp / 8960;

	return ret;
}
/* Report the uncorrectable-block counter from register 0xc9. */
static int s5h1411_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	struct s5h1411_state *state = fe->demodulator_priv;

	*ucblocks = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xc9);

	return 0;
}
/* DVB .read_ber callback: no dedicated BER counter is read here — the
 * uncorrected-block count doubles as the BER figure.
 */
static int s5h1411_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	return s5h1411_read_ucblocks(fe, ber);
}
/* DVB .get_frontend callback: report the cached tuning state (frequency
 * and modulation last programmed via set_frontend).
 */
static int s5h1411_get_frontend(struct dvb_frontend *fe,
				struct dtv_frontend_properties *p)
{
	struct s5h1411_state *st = fe->demodulator_priv;

	p->frequency = st->current_frequency;
	p->modulation = st->current_modulation;

	return 0;
}
/* DVB .get_tune_settings callback: ask the core for a minimum delay of
 * one second in its tuning/retune logic.
 */
static int s5h1411_get_tune_settings(struct dvb_frontend *fe,
				     struct dvb_frontend_tune_settings *tune)
{
	tune->min_delay_ms = 1000;

	return 0;
}
/* DVB .release callback: free the demod state allocated in
 * s5h1411_attach().
 */
static void s5h1411_release(struct dvb_frontend *fe)
{
	kfree(fe->demodulator_priv);
}
static const struct dvb_frontend_ops s5h1411_ops;
/*
 * s5h1411_attach() - allocate state and probe for an S5H1411 demodulator
 * @config: board-specific configuration (owned by the caller)
 * @i2c:    I2C adapter the demod is attached to
 *
 * Returns a ready-to-use dvb_frontend on success, or NULL on allocation
 * failure, chip-ID mismatch or init error.  The state allocated here is
 * freed by s5h1411_release() (or on the error path below).
 */
struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
				    struct i2c_adapter *i2c)
{
	struct s5h1411_state *state = NULL;
	u16 reg;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct s5h1411_state), GFP_KERNEL);
	if (state == NULL)
		goto error;

	/* setup the state; power up defaulting to 8-VSB */
	state->config = config;
	state->i2c = i2c;
	state->current_modulation = VSB_8;
	state->inversion = state->config->inversion;

	/* check if the demod exists: register 0x05 must read back the
	 * identity value 0x0066 (assumed chip signature — confirm against
	 * the vendor datasheet) */
	reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x05);
	if (reg != 0x0066)
		goto error;

	/* create dvb_frontend: copy the ops template, then link the
	 * private state so the callbacks can find it */
	memcpy(&state->frontend.ops, &s5h1411_ops,
	       sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;

	if (s5h1411_init(&state->frontend) != 0) {
		printk(KERN_ERR "%s: Failed to initialize correctly\n",
		       __func__);
		goto error;
	}

	/* Note: Leaving the I2C gate open here. */
	s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 1);

	/* Put the device into low-power mode until first use */
	s5h1411_set_powerstate(&state->frontend, 1);

	return &state->frontend;

error:
	kfree(state);
	return NULL;
}
EXPORT_SYMBOL_GPL(s5h1411_attach);
/*
 * Callback table handed to the DVB core for every attached S5H1411.
 * Supports ATSC 8-VSB and ITU-T J.83 Annex B QAM-64/256.
 */
static const struct dvb_frontend_ops s5h1411_ops = {
	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
	.info = {
		.name = "Samsung S5H1411 QAM/8VSB Frontend",
		.frequency_min_hz = 54 * MHz,
		.frequency_max_hz = 858 * MHz,
		.frequency_stepsize_hz = 62500,
		.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
	},
	.init = s5h1411_init,
	.sleep = s5h1411_sleep,
	.i2c_gate_ctrl = s5h1411_i2c_gate_ctrl,
	.set_frontend = s5h1411_set_frontend,
	.get_frontend = s5h1411_get_frontend,
	.get_tune_settings = s5h1411_get_tune_settings,
	.read_status = s5h1411_read_status,
	.read_ber = s5h1411_read_ber,
	.read_signal_strength = s5h1411_read_signal_strength,
	.read_snr = s5h1411_read_snr,
	.read_ucblocks = s5h1411_read_ucblocks,
	.release = s5h1411_release,
};
/* "debug" module parameter — presumably gates the dprintk() calls used
 * throughout this driver (the variable is declared earlier in the file;
 * confirm). */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable verbose debug messages");
MODULE_DESCRIPTION("Samsung S5H1411 QAM-B/ATSC Demodulator driver");
MODULE_AUTHOR("Steven Toth");
MODULE_LICENSE("GPL");
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* MPC8xx Communication Processor Module.
* Copyright (c) 1997 Dan Malek ([email protected])
*
* This file contains structures and information for the communication
* processor channels. Some CPM control and status is available
* through the MPC8xx internal memory map. See immap.h for details.
* This file only contains what I need for the moment, not the total
* CPM capabilities. I (or someone else) will add definitions as they
* are needed. -- Dan
*
* On the MBX board, EPPC-Bug loads CPM microcode into the first 512
* bytes of the DP RAM and relocates the I2C parameter area to the
* IDMA1 space. The remaining DP RAM is available for buffer descriptors
* or other use.
*/
#ifndef __CPM1__
#define __CPM1__
#include <linux/init.h>
#include <asm/8xx_immap.h>
#include <asm/ptrace.h>
#include <asm/cpm.h>
/* CPM Command register.
*/
#define CPM_CR_RST ((ushort)0x8000)
#define CPM_CR_OPCODE ((ushort)0x0f00)
#define CPM_CR_CHAN ((ushort)0x00f0)
#define CPM_CR_FLG ((ushort)0x0001)
/* Channel numbers.
*/
#define CPM_CR_CH_SCC1 ((ushort)0x0000)
#define CPM_CR_CH_I2C ((ushort)0x0001) /* I2C and IDMA1 */
#define CPM_CR_CH_SCC2 ((ushort)0x0004)
#define CPM_CR_CH_SPI ((ushort)0x0005) /* SPI / IDMA2 / Timers */
#define CPM_CR_CH_TIMER CPM_CR_CH_SPI
#define CPM_CR_CH_SCC3 ((ushort)0x0008)
#define CPM_CR_CH_SMC1 ((ushort)0x0009) /* SMC1 / DSP1 */
#define CPM_CR_CH_SCC4 ((ushort)0x000c)
#define CPM_CR_CH_SMC2 ((ushort)0x000d) /* SMC2 / DSP2 */
#define mk_cr_cmd(CH, CMD) ((CMD << 8) | (CH << 4))
/* Export the base address of the communication processor registers
* and dual port ram.
*/
extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
extern void cpm_setbrg(uint brg, uint rate);
extern void __init cpm_load_patch(cpm8xx_t *cp);
extern void cpm_reset(void);
/* Parameter RAM offsets.
*/
#define PROFF_SCC1 ((uint)0x0000)
#define PROFF_IIC ((uint)0x0080)
#define PROFF_SCC2 ((uint)0x0100)
#define PROFF_SPI ((uint)0x0180)
#define PROFF_SCC3 ((uint)0x0200)
#define PROFF_SMC1 ((uint)0x0280)
#define PROFF_DSP1 ((uint)0x02c0)
#define PROFF_SCC4 ((uint)0x0300)
#define PROFF_SMC2 ((uint)0x0380)
/* Define enough so I can at least use the serial port as a UART.
* The MBX uses SMC1 as the host serial port.
*/
/*
 * SMC UART parameter RAM layout.  Fields marked "Internal" are
 * presumably maintained by the CPM microcode while the channel runs
 * and are not meant to be written by the host (confirm against the
 * MPC8xx reference manual).
 */
typedef struct smc_uart {
	ushort	smc_rbase;	/* Rx Buffer descriptor base address */
	ushort	smc_tbase;	/* Tx Buffer descriptor base address */
	u_char	smc_rfcr;	/* Rx function code */
	u_char	smc_tfcr;	/* Tx function code */
	ushort	smc_mrblr;	/* Max receive buffer length */
	uint	smc_rstate;	/* Internal */
	uint	smc_idp;	/* Internal */
	ushort	smc_rbptr;	/* Internal */
	ushort	smc_ibc;	/* Internal */
	uint	smc_rxtmp;	/* Internal */
	uint	smc_tstate;	/* Internal */
	uint	smc_tdp;	/* Internal */
	ushort	smc_tbptr;	/* Internal */
	ushort	smc_tbc;	/* Internal */
	uint	smc_txtmp;	/* Internal */
	ushort	smc_maxidl;	/* Maximum idle characters */
	ushort	smc_tmpidl;	/* Temporary idle counter */
	ushort	smc_brklen;	/* Last received break length */
	ushort	smc_brkec;	/* rcv'd break condition counter */
	ushort	smc_brkcr;	/* xmt break count register */
	ushort	smc_rmask;	/* Temporary bit mask */
	char	res1[8];	/* Reserved */
	ushort	smc_rpbase;	/* Relocation pointer */
} smc_uart_t;
/* Function code bits.
*/
#define SMC_EB ((u_char)0x10) /* Set big endian byte order */
/* SMC uart mode register.
*/
#define SMCMR_REN ((ushort)0x0001)
#define SMCMR_TEN ((ushort)0x0002)
#define SMCMR_DM ((ushort)0x000c)
#define SMCMR_SM_GCI ((ushort)0x0000)
#define SMCMR_SM_UART ((ushort)0x0020)
#define SMCMR_SM_TRANS ((ushort)0x0030)
#define SMCMR_SM_MASK ((ushort)0x0030)
#define SMCMR_PM_EVEN ((ushort)0x0100) /* Even parity, else odd */
#define SMCMR_REVD SMCMR_PM_EVEN
#define SMCMR_PEN ((ushort)0x0200) /* Parity enable */
#define SMCMR_BS SMCMR_PEN
#define SMCMR_SL ((ushort)0x0400) /* Two stops, else one */
#define SMCR_CLEN_MASK ((ushort)0x7800) /* Character length */
#define smcr_mk_clen(C) (((C) << 11) & SMCR_CLEN_MASK)
/* SMC2 as Centronics parallel printer. It is half duplex, in that
* it can only receive or transmit. The parameter ram values for
* each direction are either unique or properly overlap, so we can
* include them in one structure.
*/
/*
 * SMC2-as-Centronics parameter RAM.  Field names parallel the SMC UART
 * layout above; the per-field notes below are inferred from that
 * analogy — confirm against the MPC8xx reference manual.
 */
typedef struct smc_centronics {
	ushort	scent_rbase;		/* Rx buffer descriptor base address */
	ushort	scent_tbase;		/* Tx buffer descriptor base address */
	u_char	scent_cfcr;		/* function code */
	u_char	scent_smask;		/* status mask (see SMC_CENT_* below) */
	ushort	scent_mrblr;		/* max receive buffer length */
	uint	scent_rstate;		/* internal */
	uint	scent_r_ptr;		/* internal */
	ushort	scent_rbptr;		/* internal */
	ushort	scent_r_cnt;		/* internal */
	uint	scent_rtemp;		/* internal */
	uint	scent_tstate;		/* internal */
	uint	scent_t_ptr;		/* internal */
	ushort	scent_tbptr;		/* internal */
	ushort	scent_t_cnt;		/* internal */
	uint	scent_ttemp;		/* internal */
	ushort	scent_max_sl;
	ushort	scent_sl_cnt;
	ushort	scent_character1;	/* control characters 1..8 */
	ushort	scent_character2;
	ushort	scent_character3;
	ushort	scent_character4;
	ushort	scent_character5;
	ushort	scent_character6;
	ushort	scent_character7;
	ushort	scent_character8;
	ushort	scent_rccm;		/* receive control character mask */
	ushort	scent_rccr;		/* receive control character register */
} smc_cent_t;
/* Centronics Status Mask Register.
*/
#define SMC_CENT_F ((u_char)0x08)
#define SMC_CENT_PE ((u_char)0x04)
#define SMC_CENT_S ((u_char)0x02)
/* SMC Event and Mask register.
*/
#define SMCM_BRKE ((unsigned char)0x40) /* When in UART Mode */
#define SMCM_BRK ((unsigned char)0x10) /* When in UART Mode */
#define SMCM_TXE ((unsigned char)0x10) /* When in Transparent Mode */
#define SMCM_BSY ((unsigned char)0x04)
#define SMCM_TX ((unsigned char)0x02)
#define SMCM_RX ((unsigned char)0x01)
/* Baud rate generators.
*/
#define CPM_BRG_RST ((uint)0x00020000)
#define CPM_BRG_EN ((uint)0x00010000)
#define CPM_BRG_EXTC_INT ((uint)0x00000000)
#define CPM_BRG_EXTC_CLK2 ((uint)0x00004000)
#define CPM_BRG_EXTC_CLK6 ((uint)0x00008000)
#define CPM_BRG_ATB ((uint)0x00002000)
#define CPM_BRG_CD_MASK ((uint)0x00001ffe)
#define CPM_BRG_DIV16 ((uint)0x00000001)
/* SI Clock Route Register
*/
#define SICR_RCLK_SCC1_BRG1 ((uint)0x00000000)
#define SICR_TCLK_SCC1_BRG1 ((uint)0x00000000)
#define SICR_RCLK_SCC2_BRG2 ((uint)0x00000800)
#define SICR_TCLK_SCC2_BRG2 ((uint)0x00000100)
#define SICR_RCLK_SCC3_BRG3 ((uint)0x00100000)
#define SICR_TCLK_SCC3_BRG3 ((uint)0x00020000)
#define SICR_RCLK_SCC4_BRG4 ((uint)0x18000000)
#define SICR_TCLK_SCC4_BRG4 ((uint)0x03000000)
/* SCCs.
*/
#define SCC_GSMRH_IRP ((uint)0x00040000)
#define SCC_GSMRH_GDE ((uint)0x00010000)
#define SCC_GSMRH_TCRC_CCITT ((uint)0x00008000)
#define SCC_GSMRH_TCRC_BISYNC ((uint)0x00004000)
#define SCC_GSMRH_TCRC_HDLC ((uint)0x00000000)
#define SCC_GSMRH_REVD ((uint)0x00002000)
#define SCC_GSMRH_TRX ((uint)0x00001000)
#define SCC_GSMRH_TTX ((uint)0x00000800)
#define SCC_GSMRH_CDP ((uint)0x00000400)
#define SCC_GSMRH_CTSP ((uint)0x00000200)
#define SCC_GSMRH_CDS ((uint)0x00000100)
#define SCC_GSMRH_CTSS ((uint)0x00000080)
#define SCC_GSMRH_TFL ((uint)0x00000040)
#define SCC_GSMRH_RFW ((uint)0x00000020)
#define SCC_GSMRH_TXSY ((uint)0x00000010)
#define SCC_GSMRH_SYNL16 ((uint)0x0000000c)
#define SCC_GSMRH_SYNL8 ((uint)0x00000008)
#define SCC_GSMRH_SYNL4 ((uint)0x00000004)
#define SCC_GSMRH_RTSM ((uint)0x00000002)
#define SCC_GSMRH_RSYN ((uint)0x00000001)
#define SCC_GSMRL_SIR ((uint)0x80000000) /* SCC2 only */
#define SCC_GSMRL_EDGE_NONE ((uint)0x60000000)
#define SCC_GSMRL_EDGE_NEG ((uint)0x40000000)
#define SCC_GSMRL_EDGE_POS ((uint)0x20000000)
#define SCC_GSMRL_EDGE_BOTH ((uint)0x00000000)
#define SCC_GSMRL_TCI ((uint)0x10000000)
#define SCC_GSMRL_TSNC_3 ((uint)0x0c000000)
#define SCC_GSMRL_TSNC_4 ((uint)0x08000000)
#define SCC_GSMRL_TSNC_14 ((uint)0x04000000)
#define SCC_GSMRL_TSNC_INF ((uint)0x00000000)
#define SCC_GSMRL_RINV ((uint)0x02000000)
#define SCC_GSMRL_TINV ((uint)0x01000000)
#define SCC_GSMRL_TPL_128 ((uint)0x00c00000)
#define SCC_GSMRL_TPL_64 ((uint)0x00a00000)
#define SCC_GSMRL_TPL_48 ((uint)0x00800000)
#define SCC_GSMRL_TPL_32 ((uint)0x00600000)
#define SCC_GSMRL_TPL_16 ((uint)0x00400000)
#define SCC_GSMRL_TPL_8 ((uint)0x00200000)
#define SCC_GSMRL_TPL_NONE ((uint)0x00000000)
#define SCC_GSMRL_TPP_ALL1 ((uint)0x00180000)
#define SCC_GSMRL_TPP_01 ((uint)0x00100000)
#define SCC_GSMRL_TPP_10 ((uint)0x00080000)
#define SCC_GSMRL_TPP_ZEROS ((uint)0x00000000)
#define SCC_GSMRL_TEND ((uint)0x00040000)
#define SCC_GSMRL_TDCR_32 ((uint)0x00030000)
#define SCC_GSMRL_TDCR_16 ((uint)0x00020000)
#define SCC_GSMRL_TDCR_8 ((uint)0x00010000)
#define SCC_GSMRL_TDCR_1 ((uint)0x00000000)
#define SCC_GSMRL_RDCR_32 ((uint)0x0000c000)
#define SCC_GSMRL_RDCR_16 ((uint)0x00008000)
#define SCC_GSMRL_RDCR_8 ((uint)0x00004000)
#define SCC_GSMRL_RDCR_1 ((uint)0x00000000)
#define SCC_GSMRL_RENC_DFMAN ((uint)0x00003000)
#define SCC_GSMRL_RENC_MANCH ((uint)0x00002000)
#define SCC_GSMRL_RENC_FM0 ((uint)0x00001000)
#define SCC_GSMRL_RENC_NRZI ((uint)0x00000800)
#define SCC_GSMRL_RENC_NRZ ((uint)0x00000000)
#define SCC_GSMRL_TENC_DFMAN ((uint)0x00000600)
#define SCC_GSMRL_TENC_MANCH ((uint)0x00000400)
#define SCC_GSMRL_TENC_FM0 ((uint)0x00000200)
#define SCC_GSMRL_TENC_NRZI ((uint)0x00000100)
#define SCC_GSMRL_TENC_NRZ ((uint)0x00000000)
#define SCC_GSMRL_DIAG_LE ((uint)0x000000c0) /* Loop and echo */
#define SCC_GSMRL_DIAG_ECHO ((uint)0x00000080)
#define SCC_GSMRL_DIAG_LOOP ((uint)0x00000040)
#define SCC_GSMRL_DIAG_NORM ((uint)0x00000000)
#define SCC_GSMRL_ENR ((uint)0x00000020)
#define SCC_GSMRL_ENT ((uint)0x00000010)
#define SCC_GSMRL_MODE_ENET ((uint)0x0000000c)
#define SCC_GSMRL_MODE_QMC ((uint)0x0000000a)
#define SCC_GSMRL_MODE_DDCMP ((uint)0x00000009)
#define SCC_GSMRL_MODE_BISYNC ((uint)0x00000008)
#define SCC_GSMRL_MODE_V14 ((uint)0x00000007)
#define SCC_GSMRL_MODE_AHDLC ((uint)0x00000006)
#define SCC_GSMRL_MODE_PROFIBUS ((uint)0x00000005)
#define SCC_GSMRL_MODE_UART ((uint)0x00000004)
#define SCC_GSMRL_MODE_SS7 ((uint)0x00000003)
#define SCC_GSMRL_MODE_ATALK ((uint)0x00000002)
#define SCC_GSMRL_MODE_HDLC ((uint)0x00000000)
#define SCC_TODR_TOD ((ushort)0x8000)
/* SCC Event and Mask register.
*/
#define SCCM_TXE ((unsigned char)0x10)
#define SCCM_BSY ((unsigned char)0x04)
#define SCCM_TX ((unsigned char)0x02)
#define SCCM_RX ((unsigned char)0x01)
/*
 * Generic SCC parameter RAM common to every SCC protocol; embedded as
 * the first member of the protocol-specific layouts below
 * (scc_enet_t, scc_uart_t, scc_trans_t).
 */
typedef struct scc_param {
	ushort	scc_rbase;	/* Rx Buffer descriptor base address */
	ushort	scc_tbase;	/* Tx Buffer descriptor base address */
	u_char	scc_rfcr;	/* Rx function code */
	u_char	scc_tfcr;	/* Tx function code */
	ushort	scc_mrblr;	/* Max receive buffer length */
	uint	scc_rstate;	/* Internal */
	uint	scc_idp;	/* Internal */
	ushort	scc_rbptr;	/* Internal */
	ushort	scc_ibc;	/* Internal */
	uint	scc_rxtmp;	/* Internal */
	uint	scc_tstate;	/* Internal */
	uint	scc_tdp;	/* Internal */
	ushort	scc_tbptr;	/* Internal */
	ushort	scc_tbc;	/* Internal */
	uint	scc_txtmp;	/* Internal */
	uint	scc_rcrc;	/* Internal */
	uint	scc_tcrc;	/* Internal */
} sccp_t;
/* Function code bits.
*/
#define SCC_EB ((u_char)0x10) /* Set big endian byte order */
/* CPM Ethernet through SCCx.
*/
/*
 * Ethernet-mode SCC parameter RAM: the generic SCC block followed by
 * the Ethernet-specific counters, filters and DMA save areas.
 */
typedef struct scc_enet {
	sccp_t	sen_genscc;	/* generic SCC parameter RAM */
	uint	sen_cpres;	/* Preset CRC */
	uint	sen_cmask;	/* Constant mask for CRC */
	uint	sen_crcec;	/* CRC Error counter */
	uint	sen_alec;	/* alignment error counter */
	uint	sen_disfc;	/* discard frame counter */
	ushort	sen_pads;	/* Tx short frame pad character */
	ushort	sen_retlim;	/* Retry limit threshold */
	ushort	sen_retcnt;	/* Retry limit counter */
	ushort	sen_maxflr;	/* maximum frame length register */
	ushort	sen_minflr;	/* minimum frame length register */
	ushort	sen_maxd1;	/* maximum DMA1 length */
	ushort	sen_maxd2;	/* maximum DMA2 length */
	ushort	sen_maxd;	/* Rx max DMA */
	ushort	sen_dmacnt;	/* Rx DMA counter */
	ushort	sen_maxb;	/* Max BD byte count */
	ushort	sen_gaddr1;	/* Group address filter */
	ushort	sen_gaddr2;
	ushort	sen_gaddr3;
	ushort	sen_gaddr4;
	uint	sen_tbuf0data0;	/* Save area 0 - current frame */
	uint	sen_tbuf0data1;	/* Save area 1 - current frame */
	uint	sen_tbuf0rba;	/* Internal */
	uint	sen_tbuf0crc;	/* Internal */
	ushort	sen_tbuf0bcnt;	/* Internal */
	ushort	sen_paddrh;	/* physical address (MSB) */
	ushort	sen_paddrm;
	ushort	sen_paddrl;	/* physical address (LSB) */
	ushort	sen_pper;	/* persistence */
	ushort	sen_rfbdptr;	/* Rx first BD pointer */
	ushort	sen_tfbdptr;	/* Tx first BD pointer */
	ushort	sen_tlbdptr;	/* Tx last BD pointer */
	uint	sen_tbuf1data0;	/* Save area 0 - current frame */
	uint	sen_tbuf1data1;	/* Save area 1 - current frame */
	uint	sen_tbuf1rba;	/* Internal */
	uint	sen_tbuf1crc;	/* Internal */
	ushort	sen_tbuf1bcnt;	/* Internal */
	ushort	sen_txlen;	/* Tx Frame length counter */
	ushort	sen_iaddr1;	/* Individual address filter */
	ushort	sen_iaddr2;
	ushort	sen_iaddr3;
	ushort	sen_iaddr4;
	ushort	sen_boffcnt;	/* Backoff counter */

	/* NOTE: Some versions of the manual have the following items
	 * incorrectly documented.  Below is the proper order.
	 */
	ushort	sen_taddrh;	/* temp address (MSB) */
	ushort	sen_taddrm;
	ushort	sen_taddrl;	/* temp address (LSB) */
} scc_enet_t;
/* SCC Event register as used by Ethernet.
*/
#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */
#define SCCE_ENET_TXE ((ushort)0x0010) /* Transmit Error */
#define SCCE_ENET_RXF ((ushort)0x0008) /* Full frame received */
#define SCCE_ENET_BSY ((ushort)0x0004) /* All incoming buffers full */
#define SCCE_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */
#define SCCE_ENET_RXB ((ushort)0x0001) /* A buffer was received */
/* SCC Mode Register (PMSR) as used by Ethernet.
*/
#define SCC_PSMR_HBC ((ushort)0x8000) /* Enable heartbeat */
#define SCC_PSMR_FC ((ushort)0x4000) /* Force collision */
#define SCC_PSMR_RSH ((ushort)0x2000) /* Receive short frames */
#define SCC_PSMR_IAM ((ushort)0x1000) /* Check individual hash */
#define SCC_PSMR_ENCRC ((ushort)0x0800) /* Ethernet CRC mode */
#define SCC_PSMR_PRO ((ushort)0x0200) /* Promiscuous mode */
#define SCC_PSMR_BRO ((ushort)0x0100) /* Catch broadcast pkts */
#define SCC_PSMR_SBT ((ushort)0x0080) /* Special backoff timer */
#define SCC_PSMR_LPB ((ushort)0x0040) /* Set Loopback mode */
#define SCC_PSMR_SIP ((ushort)0x0020) /* Sample Input Pins */
#define SCC_PSMR_LCW ((ushort)0x0010) /* Late collision window */
#define SCC_PSMR_NIB22 ((ushort)0x000a) /* Start frame search */
#define SCC_PSMR_FDE ((ushort)0x0001) /* Full duplex enable */
/* SCC as UART
*/
/*
 * UART-mode SCC parameter RAM: the generic SCC block followed by the
 * UART-specific counters and control-character match registers.
 */
typedef struct scc_uart {
	sccp_t	scc_genscc;	/* generic SCC parameter RAM */
	char	res1[8];	/* Reserved */
	ushort	scc_maxidl;	/* Maximum idle chars */
	ushort	scc_idlc;	/* temp idle counter */
	ushort	scc_brkcr;	/* Break count register */
	ushort	scc_parec;	/* receive parity error counter */
	ushort	scc_frmec;	/* receive framing error counter */
	ushort	scc_nosec;	/* receive noise counter */
	ushort	scc_brkec;	/* receive break condition counter */
	ushort	scc_brkln;	/* last received break length */
	ushort	scc_uaddr1;	/* UART address character 1 */
	ushort	scc_uaddr2;	/* UART address character 2 */
	ushort	scc_rtemp;	/* Temp storage */
	ushort	scc_toseq;	/* Transmit out of sequence char */
	ushort	scc_char1;	/* control character 1 */
	ushort	scc_char2;	/* control character 2 */
	ushort	scc_char3;	/* control character 3 */
	ushort	scc_char4;	/* control character 4 */
	ushort	scc_char5;	/* control character 5 */
	ushort	scc_char6;	/* control character 6 */
	ushort	scc_char7;	/* control character 7 */
	ushort	scc_char8;	/* control character 8 */
	ushort	scc_rccm;	/* receive control character mask */
	ushort	scc_rccr;	/* receive control character register */
	ushort	scc_rlbc;	/* receive last break character */
} scc_uart_t;
/* SCC Event and Mask registers when it is used as a UART.
*/
#define UART_SCCM_GLR ((ushort)0x1000)
#define UART_SCCM_GLT ((ushort)0x0800)
#define UART_SCCM_AB ((ushort)0x0200)
#define UART_SCCM_IDL ((ushort)0x0100)
#define UART_SCCM_GRA ((ushort)0x0080)
#define UART_SCCM_BRKE ((ushort)0x0040)
#define UART_SCCM_BRKS ((ushort)0x0020)
#define UART_SCCM_CCR ((ushort)0x0008)
#define UART_SCCM_BSY ((ushort)0x0004)
#define UART_SCCM_TX ((ushort)0x0002)
#define UART_SCCM_RX ((ushort)0x0001)
/* The SCC PMSR when used as a UART.
*/
#define SCU_PSMR_FLC ((ushort)0x8000)
#define SCU_PSMR_SL ((ushort)0x4000)
#define SCU_PSMR_CL ((ushort)0x3000)
#define SCU_PSMR_UM ((ushort)0x0c00)
#define SCU_PSMR_FRZ ((ushort)0x0200)
#define SCU_PSMR_RZS ((ushort)0x0100)
#define SCU_PSMR_SYN ((ushort)0x0080)
#define SCU_PSMR_DRT ((ushort)0x0040)
#define SCU_PSMR_PEN ((ushort)0x0010)
#define SCU_PSMR_RPM ((ushort)0x000c)
#define SCU_PSMR_REVP ((ushort)0x0008)
#define SCU_PSMR_TPM ((ushort)0x0003)
#define SCU_PSMR_TEVP ((ushort)0x0002)
/* CPM Transparent mode SCC.
*/
/*
 * Transparent-mode SCC parameter RAM: just the generic SCC block plus
 * the CRC preset/mask pair.
 */
typedef struct scc_trans {
	sccp_t	st_genscc;	/* generic SCC parameter RAM */
	uint	st_cpres;	/* Preset CRC */
	uint	st_cmask;	/* Constant mask for CRC */
} scc_trans_t;
/* IIC parameter RAM.
*/
/*
 * I2C controller parameter RAM.  Fields marked "Internal" are
 * presumably CPM-maintained bookkeeping (confirm against the MPC8xx
 * reference manual).
 */
typedef struct iic {
	ushort	iic_rbase;	/* Rx Buffer descriptor base address */
	ushort	iic_tbase;	/* Tx Buffer descriptor base address */
	u_char	iic_rfcr;	/* Rx function code */
	u_char	iic_tfcr;	/* Tx function code */
	ushort	iic_mrblr;	/* Max receive buffer length */
	uint	iic_rstate;	/* Internal */
	uint	iic_rdp;	/* Internal */
	ushort	iic_rbptr;	/* Internal */
	ushort	iic_rbc;	/* Internal */
	uint	iic_rxtmp;	/* Internal */
	uint	iic_tstate;	/* Internal */
	uint	iic_tdp;	/* Internal */
	ushort	iic_tbptr;	/* Internal */
	ushort	iic_tbc;	/* Internal */
	uint	iic_txtmp;	/* Internal */
	char	res1[4];	/* Reserved */
	ushort	iic_rpbase;	/* Relocation pointer */
	char	res2[2];	/* Reserved */
} iic_t;
/*
 * RISC Controller Configuration Register definitions
*/
#define RCCR_TIME 0x8000 /* RISC Timer Enable */
#define RCCR_TIMEP(t) (((t) & 0x3F)<<8) /* RISC Timer Period */
#define RCCR_TIME_MASK 0x00FF /* not RISC Timer related bits */
/* RISC Timer Parameter RAM offset */
#define PROFF_RTMR ((uint)0x01B0)
/* RISC timer parameter RAM (lives at PROFF_RTMR, defined above). */
typedef struct risc_timer_pram {
	unsigned short	tm_base;	/* RISC Timer Table Base Address */
	unsigned short	tm_ptr;		/* RISC Timer Table Pointer (internal) */
	unsigned short	r_tmr;		/* RISC Timer Mode Register */
	unsigned short	r_tmv;		/* RISC Timer Valid Register */
	unsigned long	tm_cmd;		/* RISC Timer Command Register (TM_CMD_*) */
	unsigned long	tm_cnt;		/* RISC Timer Internal Count */
} rt_pram_t;
/* Bits in RISC Timer Command Register */
#define TM_CMD_VALID 0x80000000 /* Valid - Enables the timer */
#define TM_CMD_RESTART 0x40000000 /* Restart - for automatic restart */
#define TM_CMD_PWM 0x20000000 /* Run in Pulse Width Modulation Mode */
#define TM_CMD_NUM(n) (((n)&0xF)<<16) /* Timer Number */
#define TM_CMD_PERIOD(p) ((p)&0xFFFF) /* Timer Period */
/* CPM interrupts. There are nearly 32 interrupts generated by CPM
* channels or devices. All of these are presented to the PPC core
* as a single interrupt. The CPM interrupt handler dispatches its
* own handlers, in a similar fashion to the PPC core handler. We
* use the table as defined in the manuals (i.e. no special high
* priority and SCC1 == SCCa, etc...).
*/
#define CPMVEC_NR 32
#define CPMVEC_PIO_PC15 ((ushort)0x1f)
#define CPMVEC_SCC1 ((ushort)0x1e)
#define CPMVEC_SCC2 ((ushort)0x1d)
#define CPMVEC_SCC3 ((ushort)0x1c)
#define CPMVEC_SCC4 ((ushort)0x1b)
#define CPMVEC_PIO_PC14 ((ushort)0x1a)
#define CPMVEC_TIMER1 ((ushort)0x19)
#define CPMVEC_PIO_PC13 ((ushort)0x18)
#define CPMVEC_PIO_PC12 ((ushort)0x17)
#define CPMVEC_SDMA_CB_ERR ((ushort)0x16)
#define CPMVEC_IDMA1 ((ushort)0x15)
#define CPMVEC_IDMA2 ((ushort)0x14)
#define CPMVEC_TIMER2 ((ushort)0x12)
#define CPMVEC_RISCTIMER ((ushort)0x11)
#define CPMVEC_I2C ((ushort)0x10)
#define CPMVEC_PIO_PC11 ((ushort)0x0f)
#define CPMVEC_PIO_PC10 ((ushort)0x0e)
#define CPMVEC_TIMER3 ((ushort)0x0c)
#define CPMVEC_PIO_PC9 ((ushort)0x0b)
#define CPMVEC_PIO_PC8 ((ushort)0x0a)
#define CPMVEC_PIO_PC7 ((ushort)0x09)
#define CPMVEC_TIMER4 ((ushort)0x07)
#define CPMVEC_PIO_PC6 ((ushort)0x06)
#define CPMVEC_SPI ((ushort)0x05)
#define CPMVEC_SMC1 ((ushort)0x04)
#define CPMVEC_SMC2 ((ushort)0x03)
#define CPMVEC_PIO_PC5 ((ushort)0x02)
#define CPMVEC_PIO_PC4 ((ushort)0x01)
#define CPMVEC_ERROR ((ushort)0x00)
/* CPM interrupt configuration vector.
*/
#define CICR_SCD_SCC4 ((uint)0x00c00000) /* SCC4 @ SCCd */
#define CICR_SCC_SCC3 ((uint)0x00200000) /* SCC3 @ SCCc */
#define CICR_SCB_SCC2 ((uint)0x00040000) /* SCC2 @ SCCb */
#define CICR_SCA_SCC1 ((uint)0x00000000) /* SCC1 @ SCCa */
#define CICR_IRL_MASK ((uint)0x0000e000) /* Core interrupt */
#define CICR_HP_MASK ((uint)0x00001f00) /* Hi-pri int. */
#define CICR_IEN ((uint)0x00000080) /* Int. enable */
#define CICR_SPS ((uint)0x00000001) /* SCC Spread */
#define CPM_PIN_INPUT 0
#define CPM_PIN_OUTPUT 1
#define CPM_PIN_PRIMARY 0
#define CPM_PIN_SECONDARY 2
#define CPM_PIN_GPIO 4
#define CPM_PIN_OPENDRAIN 8
#define CPM_PIN_FALLEDGE 16
#define CPM_PIN_ANYEDGE 0
/* Parallel I/O port selector for cpm1_set_pin(). */
enum cpm_port {
	CPM_PORTA,
	CPM_PORTB,
	CPM_PORTC,
	CPM_PORTD,
	CPM_PORTE,
};

/* Configure one pin of a CPM parallel port; @flags is a CPM_PIN_* mask. */
void cpm1_set_pin(enum cpm_port port, int pin, int flags);

/* Which direction(s) of a channel a clock routing applies to. */
enum cpm_clk_dir {
	CPM_CLK_RX,
	CPM_CLK_TX,
	CPM_CLK_RTX
};

/* Channels whose Rx/Tx clocks can be routed via cpm1_clk_setup(). */
enum cpm_clk_target {
	CPM_CLK_SCC1,
	CPM_CLK_SCC2,
	CPM_CLK_SCC3,
	CPM_CLK_SCC4,
	CPM_CLK_SMC1,
	CPM_CLK_SMC2,
};

/* Selectable clock sources: four baud-rate generators, eight clock pins. */
enum cpm_clk {
	CPM_BRG1,	/* Baud Rate Generator 1 */
	CPM_BRG2,	/* Baud Rate Generator 2 */
	CPM_BRG3,	/* Baud Rate Generator 3 */
	CPM_BRG4,	/* Baud Rate Generator 4 */
	CPM_CLK1,	/* Clock 1 */
	CPM_CLK2,	/* Clock 2 */
	CPM_CLK3,	/* Clock 3 */
	CPM_CLK4,	/* Clock 4 */
	CPM_CLK5,	/* Clock 5 */
	CPM_CLK6,	/* Clock 6 */
	CPM_CLK7,	/* Clock 7 */
	CPM_CLK8,	/* Clock 8 */
};

/* Route @clock to @target; @mode is presumably a cpm_clk_dir value —
 * confirm at the definition site. */
int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode);

/* Register a 16- or 32-bit GPIO chip for the CPM parallel ports. */
int cpm1_gpiochip_add16(struct device *dev);
int cpm1_gpiochip_add32(struct device *dev);
#endif /* __CPM1__ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* NXP PCF50633 Power Management Unit (PMU) driver
*
* (C) 2006-2008 by Openmoko, Inc.
* Author: Harald Welte <[email protected]>
* Balaji Rao <[email protected]>
* All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/mfd/pcf50633/core.h>
/*
 * Bulk register read: fetch @nr_regs consecutive registers (hardware
 * supports blocks of up to 32) starting at @reg into @data.
 * Returns the number of registers read, or a negative regmap error.
 */
int pcf50633_read_block(struct pcf50633 *pcf, u8 reg,
		int nr_regs, u8 *data)
{
	int rc = regmap_raw_read(pcf->regmap, reg, data, nr_regs);

	if (rc)
		return rc;

	return nr_regs;
}
EXPORT_SYMBOL_GPL(pcf50633_read_block);
/*
 * Bulk register write: store @nr_regs bytes from @data into consecutive
 * registers (up to 32) starting at @reg.  Thin regmap wrapper; returns
 * regmap_raw_write()'s result directly.
 */
int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
		int nr_regs, u8 *data)
{
	return regmap_raw_write(pcf->regmap, reg, data, nr_regs);
}
EXPORT_SYMBOL_GPL(pcf50633_write_block);
/*
 * Read a single 8-bit register.
 *
 * NOTE(review): the u8 return type truncates the -1 error marker to
 * 0xff, which is indistinguishable from a register that genuinely
 * reads 0xff — callers cannot reliably detect failures.
 */
u8 pcf50633_reg_read(struct pcf50633 *pcf, u8 reg)
{
	unsigned int val;

	if (regmap_read(pcf->regmap, reg, &val) < 0)
		return -1;

	return val;
}
EXPORT_SYMBOL_GPL(pcf50633_reg_read);
/* Write a single 8-bit register; returns 0 or a negative regmap error. */
int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val)
{
	return regmap_write(pcf->regmap, reg, val);
}
EXPORT_SYMBOL_GPL(pcf50633_reg_write);
/*
 * Read-modify-write @reg: within @mask, replace the current bits with
 * the corresponding bits of @val (delegates to regmap_update_bits()).
 */
int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val)
{
	return regmap_update_bits(pcf->regmap, reg, mask, val);
}
EXPORT_SYMBOL_GPL(pcf50633_reg_set_bit_mask);
/* Clear the bits of @val in @reg (update-bits with an all-zero value). */
int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 val)
{
	return regmap_update_bits(pcf->regmap, reg, val, 0);
}
EXPORT_SYMBOL_GPL(pcf50633_reg_clear_bits);
/* sysfs attributes */

/*
 * dump_regs_show() - sysfs read: hex dump of all 256 registers,
 * 16 per output line, with the "no read" registers rendered as 0x00.
 */
static ssize_t dump_regs_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct pcf50633 *pcf = dev_get_drvdata(dev);
	u8 dump[16];
	int n, n1, idx = 0;
	char *buf1 = buf;
	static u8 address_no_read[] = { /* must be ascending */
		PCF50633_REG_INT1,
		PCF50633_REG_INT2,
		PCF50633_REG_INT3,
		PCF50633_REG_INT4,
		PCF50633_REG_INT5,
		0 /* terminator */
	};

	for (n = 0; n < 256; n += sizeof(dump)) {
		for (n1 = 0; n1 < sizeof(dump); n1++)
			/*
			 * Compare the actual register address (n + n1),
			 * not the 16-byte block base: the INT1..INT5
			 * registers are not 16-byte aligned, so a
			 * "n == ..." comparison never matched and the
			 * skip list was dead code — those registers got
			 * read anyway.  They are skipped here,
			 * presumably because reading the interrupt
			 * status registers would clear pending
			 * interrupts — confirm against the datasheet.
			 */
			if (n + n1 == address_no_read[idx]) {
				idx++;
				dump[n1] = 0x00;
			} else
				dump[n1] = pcf50633_reg_read(pcf, n + n1);

		buf1 += sprintf(buf1, "%*ph\n", (int)sizeof(dump), dump);
	}

	return buf1 - buf;
}
static DEVICE_ATTR_ADMIN_RO(dump_regs);
/*
 * resume_reason_show() - sysfs read: the five resume_reason entries as
 * one contiguous lowercase-hex string followed by a newline.
 */
static ssize_t resume_reason_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pcf50633 *pcf = dev_get_drvdata(dev);
	ssize_t len = 0;
	int i;

	for (i = 0; i < 5; i++)
		len += sprintf(buf + len, "%02x", pcf->resume_reason[i]);
	len += sprintf(buf + len, "\n");

	return len;
}
static DEVICE_ATTR_ADMIN_RO(resume_reason);
/* Attributes exported for this device: register dump and resume reason. */
static struct attribute *pcf_sysfs_entries[] = {
	&dev_attr_dump_regs.attr,
	&dev_attr_resume_reason.attr,
	NULL,
};

static struct attribute_group pcf_attr_group = {
	.name = NULL, /* put in device directory */
	.attrs = pcf_sysfs_entries,
};
/*
 * Allocate and register one child platform device under the PMU.
 * On success *pdev holds the new device; on any failure it ends up
 * NULL and the error is only logged (callers do not check).
 */
static void
pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
			     struct platform_device **pdev)
{
	struct platform_device *child;
	int rc;

	*pdev = NULL;

	child = platform_device_alloc(name, -1);
	if (!child) {
		dev_err(pcf->dev, "Failed to allocate %s\n", name);
		return;
	}

	child->dev.parent = pcf->dev;

	rc = platform_device_add(child);
	if (rc) {
		dev_err(pcf->dev, "Failed to register %s: %d\n", name, rc);
		platform_device_put(child);
		return;
	}

	*pdev = child;
}
/* The PCF50633 exposes 8-bit registers behind 8-bit addresses. */
static const struct regmap_config pcf50633_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};
/*
 * pcf50633_probe() - I2C probe: map the registers, identify the chip,
 * set up IRQ handling and spawn all child platform devices (input,
 * rtc, mbc, adc, backlight and one device per regulator).
 * Returns 0 on success or a negative errno.
 */
static int pcf50633_probe(struct i2c_client *client)
{
	struct pcf50633 *pcf;
	struct platform_device *pdev;
	struct pcf50633_platform_data *pdata = dev_get_platdata(&client->dev);
	int i, j, ret;
	int version, variant;

	/* The IRQ line is mandatory: the whole driver is interrupt-driven. */
	if (!client->irq) {
		dev_err(&client->dev, "Missing IRQ\n");
		return -ENOENT;
	}

	pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
	if (!pcf)
		return -ENOMEM;

	i2c_set_clientdata(client, pcf);
	pcf->dev = &client->dev;
	/* NOTE(review): pdata may be NULL (dev_get_platdata gives no
	 * guarantee) yet it is dereferenced unconditionally below at
	 * pdata->reg_init_data / pdata->probe_done — confirm all board
	 * files supply platform data. */
	pcf->pdata = pdata;

	mutex_init(&pcf->lock);

	pcf->regmap = devm_regmap_init_i2c(client, &pcf50633_regmap_config);
	if (IS_ERR(pcf->regmap)) {
		ret = PTR_ERR(pcf->regmap);
		dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret);
		return ret;
	}

	/* Registers 0 and 1 hold the chip version and variant. */
	version = pcf50633_reg_read(pcf, 0);
	variant = pcf50633_reg_read(pcf, 1);
	/* NOTE(review): pcf50633_reg_read() returns u8, so both values
	 * land in 0..255 and this "< 0" check can never fire; a read
	 * failure actually shows up as 0xff. */
	if (version < 0 || variant < 0) {
		dev_err(pcf->dev, "Unable to probe pcf50633\n");
		ret = -ENODEV;
		return ret;
	}

	dev_info(pcf->dev, "Probed device version %d variant %d\n",
		 version, variant);

	pcf50633_irq_init(pcf, client->irq);

	/* Create sub devices */
	pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev);
	pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev);
	pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev);
	pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev);
	pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev);

	/* One platform device per regulator, carrying its init data. */
	for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
		pdev = platform_device_alloc("pcf50633-regulator", i);
		if (!pdev) {
			ret = -ENOMEM;
			goto err2;
		}

		pdev->dev.parent = pcf->dev;
		ret = platform_device_add_data(pdev, &pdata->reg_init_data[i],
					       sizeof(pdata->reg_init_data[i]));
		if (ret)
			goto err;
		ret = platform_device_add(pdev);
		if (ret)
			goto err;

		pcf->regulator_pdev[i] = pdev;
	}

	/* sysfs failures are non-fatal; just warn. */
	ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group);
	if (ret)
		dev_warn(pcf->dev, "error creating sysfs entries\n");

	if (pdata->probe_done)
		pdata->probe_done(pcf);

	return 0;

err:
	platform_device_put(pdev);
err2:
	/* NOTE(review): regulator devices [0, i) were already added;
	 * platform_device_put() drops only the reference —
	 * platform_device_unregister() looks more appropriate here.
	 * Confirm before changing. */
	for (j = 0; j < i; j++)
		platform_device_put(pcf->regulator_pdev[j]);

	return ret;
}
/*
 * pcf50633_remove() - I2C remove: undo probe in reverse order — drop
 * the sysfs group, tear down IRQ handling, then unregister every child
 * platform device.
 */
static void pcf50633_remove(struct i2c_client *client)
{
	struct pcf50633 *pcf = i2c_get_clientdata(client);
	int i;

	sysfs_remove_group(&client->dev.kobj, &pcf_attr_group);
	pcf50633_irq_free(pcf);

	platform_device_unregister(pcf->input_pdev);
	platform_device_unregister(pcf->rtc_pdev);
	platform_device_unregister(pcf->mbc_pdev);
	platform_device_unregister(pcf->adc_pdev);
	platform_device_unregister(pcf->bl_pdev);

	for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
		platform_device_unregister(pcf->regulator_pdev[i]);
}
/*
 * Supported I2C device names.  NOTE(review): the second field of
 * i2c_device_id is .driver_data, not an address — 0x73 looks like an
 * I2C address stored there, and nothing in this file reads it.
 */
static const struct i2c_device_id pcf50633_id_table[] = {
	{"pcf50633", 0x73},
	{/* end of list */}
};
MODULE_DEVICE_TABLE(i2c, pcf50633_id_table);
/* I2C driver glue; pcf50633_pm is defined elsewhere in this driver. */
static struct i2c_driver pcf50633_driver = {
	.driver = {
		.name	= "pcf50633",
		.pm	= pm_sleep_ptr(&pcf50633_pm),
	},
	.id_table = pcf50633_id_table,
	.probe = pcf50633_probe,
	.remove = pcf50633_remove,
};
/* Registered via subsys_initcall() rather than module_i2c_driver(),
 * presumably so the PMU is up before ordinary drivers that depend on
 * it (regulators etc.) initialise — confirm intent.
 */
static int __init pcf50633_init(void)
{
	return i2c_add_driver(&pcf50633_driver);
}

static void __exit pcf50633_exit(void)
{
	i2c_del_driver(&pcf50633_driver);
}

MODULE_DESCRIPTION("I2C chip driver for NXP PCF50633 PMU");
MODULE_AUTHOR("Harald Welte <[email protected]>");
MODULE_LICENSE("GPL");

subsys_initcall(pcf50633_init);
module_exit(pcf50633_exit);
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H
#include <linux/ptrace.h>
#include <linux/objtool.h>
#include <asm/processor.h>
#ifdef CONFIG_X86_64
/*
* Macro to inline switching to an interrupt stack and invoking function
* calls from there. The following rules apply:
*
* - Ordering:
*
* 1. Write the stack pointer into the top most place of the irq
* stack. This ensures that the various unwinders can link back to the
* original stack.
*
* 2. Switch the stack pointer to the top of the irq stack.
*
* 3. Invoke whatever needs to be done (@asm_call argument)
*
* 4. Pop the original stack pointer from the top of the irq stack
* which brings it back to the original stack where it left off.
*
* - Function invocation:
*
* To allow flexible usage of the macro, the actual function code including
* the store of the arguments in the call ABI registers is handed in via
* the @asm_call argument.
*
* - Local variables:
*
* @tos:
* The @tos variable holds a pointer to the top of the irq stack and
* _must_ be allocated in a non-callee saved register as this is a
* restriction coming from objtool.
*
* Note, that (tos) is both in input and output constraints to ensure
* that the compiler does not assume that R11 is left untouched in
* case this macro is used in some place where the per cpu interrupt
* stack pointer is used again afterwards
*
* - Function arguments:
* The function argument(s), if any, have to be defined in register
* variables at the place where this is invoked. Storing the
* argument(s) in the proper register(s) is part of the @asm_call
*
* - Constraints:
*
* The constraints have to be done very carefully because the compiler
* does not know about the assembly call.
*
* output:
* As documented already above the @tos variable is required to be in
* the output constraints to make the compiler aware that R11 cannot be
* reused after the asm() statement.
*
* For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT is
* required as well as this prevents certain creative GCC variants from
* misplacing the ASM code.
*
* input:
* - func:
* Immediate, which tells the compiler that the function is referenced.
*
* - tos:
* Register. The actual register is defined by the variable declaration.
*
* - function arguments:
* The constraints are handed in via the 'argconstr' argument list. They
* describe the register arguments which are used in @asm_call.
*
* clobbers:
* Function calls can clobber anything except the callee-saved
* registers. Tell the compiler.
*/
#define call_on_stack(stack, func, asm_call, argconstr...) \
{ \
register void *tos asm("r11"); \
\
tos = ((void *)(stack)); \
\
asm_inline volatile( \
"movq %%rsp, (%[tos]) \n" \
"movq %[tos], %%rsp \n" \
\
asm_call \
\
"popq %%rsp \n" \
\
: "+r" (tos), ASM_CALL_CONSTRAINT \
: [__func] "i" (func), [tos] "r" (tos) argconstr \
: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", \
"memory" \
); \
}
#define ASM_CALL_ARG0 \
"call %c[__func] \n" \
ASM_REACHABLE
#define ASM_CALL_ARG1 \
"movq %[arg1], %%rdi \n" \
ASM_CALL_ARG0
#define ASM_CALL_ARG2 \
"movq %[arg2], %%rsi \n" \
ASM_CALL_ARG1
#define ASM_CALL_ARG3 \
"movq %[arg3], %%rdx \n" \
ASM_CALL_ARG2
#define call_on_irqstack(func, asm_call, argconstr...) \
call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr), \
func, asm_call, argconstr)
/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto) \
static_assert(__builtin_types_compatible_p(typeof(&func), proto))
#define assert_arg_type(arg, proto) \
static_assert(__builtin_types_compatible_p(typeof(arg), proto))
/*
 * Macro to invoke system vector and device interrupt C handlers.
 * Runs @func directly when no stack switch is needed, otherwise on the
 * per-CPU hardirq stack via call_on_irqstack().
 */
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
{									\
	/*								\
	 * User mode entry and interrupt on the irq stack do not	\
	 * switch stacks. If from user mode the task stack is empty.	\
	 */								\
	if (user_mode(regs) || __this_cpu_read(pcpu_hot.hardirq_stack_inuse)) { \
		irq_enter_rcu();					\
		func(c_args);						\
		irq_exit_rcu();						\
	} else {							\
		/*							\
		 * Mark the irq stack inuse _before_ and unmark _after_	\
		 * switching stacks. Interrupts are disabled in both	\
		 * places. Invoke the stack switch macro with the call	\
		 * sequence which matches the above direct invocation.	\
		 */							\
		__this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);	\
		call_on_irqstack(func, asm_call, constr);		\
		__this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);	\
	}								\
}

/*
 * Function call sequence for __call_on_irqstack() for system vectors.
 *
 * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
 * mechanism because these functions are global and cannot be optimized out
 * when compiling a particular source file which uses one of these macros.
 *
 * The argument (regs) does not need to be pushed or stashed in a callee
 * saved register to be safe vs. the irq_enter_rcu() call because the
 * clobbers already prevent the compiler from storing it in a callee
 * clobbered register. As the compiler has to preserve @regs for the final
 * call to idtentry_exit() anyway, it's likely that it does not cause extra
 * effort for this asm magic.
 */
#define ASM_CALL_SYSVEC							\
	"call irq_enter_rcu \n"					\
	ASM_CALL_ARG1							\
	"call irq_exit_rcu \n"

/* @regs is the single argument handed to the system vector handler */
#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)

/* Invoke a system vector handler, switching to the irq stack if needed. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	assert_function_type(func, void (*)(struct pt_regs *));	\
	assert_arg_type(regs, struct pt_regs *);			\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
			      SYSVEC_CONSTRAINTS, regs);		\
}
/*
 * As in ASM_CALL_SYSVEC above the clobbers force the compiler to store
 * @regs and @vector in callee saved registers.
 */
#define ASM_CALL_IRQ							\
	"call irq_enter_rcu \n"					\
	ASM_CALL_ARG2							\
	"call irq_exit_rcu \n"

/* @regs in arg1 (%rdi); @vector zero-extended into arg2 (%rsi) */
#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)

/* Invoke a device interrupt handler, switching to the irq stack if needed. */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
	assert_arg_type(regs, struct pt_regs *);			\
	assert_arg_type(vector, u32);					\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
			      IRQ_CONSTRAINTS, regs, vector);		\
}

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be in
 * use here.
 */
#define do_softirq_own_stack()						\
{									\
	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);		\
	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);		\
}

#endif

#else /* CONFIG_X86_64 */

/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	irq_enter_rcu();						\
	func(regs);							\
	irq_exit_rcu();							\
}

/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	irq_enter_rcu();						\
	func(regs, vector);						\
	irq_exit_rcu();							\
}

#endif /* !CONFIG_X86_64 */
#endif
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* KVM L1 hypervisor optimizations on Hyper-V.
*/
#ifndef __ARCH_X86_KVM_KVM_ONHYPERV_H__
#define __ARCH_X86_KVM_KVM_ONHYPERV_H__
#if IS_ENABLED(CONFIG_HYPERV)
int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);
int hv_flush_remote_tlbs(struct kvm *kvm);
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
static inline hpa_t hv_get_partition_assist_page(struct kvm_vcpu *vcpu)
{
	/*
	 * Hyper-V running in L0 requires a partition assist page from KVM
	 * running in L1 before direct TLB flush for L2 guests can be
	 * enabled.  KVM does not currently use the page itself, but TLFS
	 * compliance demands that it exists.  A single page is shared by
	 * all vCPUs and allocated lazily on first use.
	 */
	struct hv_partition_assist_pg **pg_slot = &vcpu->kvm->arch.hv_pa_pg;

	if (!*pg_slot)
		*pg_slot = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);

	return *pg_slot ? __pa(*pg_slot) : INVALID_PAGE;
}
#else /* !CONFIG_HYPERV */
/* Without Hyper-V support the remote TLB flush hook reports "unsupported". */
static inline int hv_flush_remote_tlbs(struct kvm *kvm)
{
	return -EOPNOTSUPP;
}

/* No root TDP tracking is needed when not running on Hyper-V. */
static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
}
#endif /* !CONFIG_HYPERV */
#endif
|
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2016 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
/**
* struct vmw_user_simple_resource - User-space simple resource struct
*
* @base: The TTM base object implementing user-space visibility.
* @simple: The embedded struct vmw_simple_resource.
*/
struct vmw_user_simple_resource {
struct ttm_base_object base;
struct vmw_simple_resource simple;
/*
* Nothing to be placed after @simple, since size of @simple is
* unknown.
*/
};
/**
* vmw_simple_resource_init - Initialize a simple resource object.
*
* @dev_priv: Pointer to a struct device private.
* @simple: The struct vmw_simple_resource to initialize.
* @data: Data passed to the information initialization function.
* @res_free: Function pointer to destroy the simple resource.
*
* Returns:
* 0 if succeeded.
* Negative error value if error, in which case the resource will have been
* freed.
*/
static int vmw_simple_resource_init(struct vmw_private *dev_priv,
				    struct vmw_simple_resource *simple,
				    void *data,
				    void (*res_free)(struct vmw_resource *res))
{
	struct vmw_resource *res = &simple->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&simple->func->res_func);

	if (ret) {
		/* vmw_resource_init() failed before the refcount took
		 * ownership of the object: free it directly. */
		res_free(res);
		return ret;
	}

	ret = simple->func->init(res, data);
	if (ret) {
		/* Initialization of the resource succeeded, so from here on
		 * the refcount owns the object: drop our reference instead
		 * of freeing it directly. */
		vmw_resource_unreference(&res);
		return ret;
	}

	simple->res.hw_destroy = simple->func->hw_destroy;

	return 0;
}
/**
* vmw_simple_resource_free - Free a simple resource object.
*
* @res: The struct vmw_resource member of the simple resource object.
*
* Frees memory for the object.
*/
static void vmw_simple_resource_free(struct vmw_resource *res)
{
	struct vmw_user_simple_resource *usimple =
		container_of(res, struct vmw_user_simple_resource,
			     simple.res);

	/* Frees the whole user-space wrapper through its TTM base object. */
	ttm_base_object_kfree(usimple, base);
}
/**
* vmw_simple_resource_base_release - TTM object release callback
*
* @p_base: The struct ttm_base_object member of the simple resource object.
*
* Called when the last reference to the embedded struct ttm_base_object is
* gone. Typically results in an object free, unless there are other
* references to the embedded struct vmw_resource.
*/
static void vmw_simple_resource_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_simple_resource *usimple =
		container_of(base, struct vmw_user_simple_resource, base);
	struct vmw_resource *res = &usimple->simple.res;

	*p_base = NULL;	/* the caller's pointer is consumed */

	/* Drop the reference held on behalf of the base object; per the
	 * kernel-doc above this typically frees @usimple unless other
	 * resource references remain. */
	vmw_resource_unreference(&res);
}
/**
* vmw_simple_resource_create_ioctl - Helper to set up an ioctl function to
* create a struct vmw_simple_resource.
*
* @dev: Pointer to a struct drm device.
* @data: Ioctl argument.
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @func: Pointer to a struct vmw_simple_resource_func identifying the
* simple resource type.
*
* Returns:
* 0 if success,
* Negative error value on error.
*/
int
vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv,
				 const struct vmw_simple_resource_func *func)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_simple_resource *usimple;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	size_t alloc_size;
	int ret;

	/* The type-specific payload (func->size bytes) is placed directly
	 * after @simple, which is why @simple must stay the last member. */
	alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
	  func->size;

	usimple = kzalloc(alloc_size, GFP_KERNEL);
	if (!usimple) {
		ret = -ENOMEM;
		goto out_ret;
	}

	usimple->simple.func = func;
	res = &usimple->simple.res;
	usimple->base.shareable = false;
	usimple->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_simple_resource_init(dev_priv, &usimple->simple,
				       data, vmw_simple_resource_free);
	if (ret)
		goto out_ret;

	/* Extra reference taken on behalf of the TTM base object; released
	 * later through vmw_simple_resource_base_release(). */
	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &usimple->base, false,
				   func->ttm_res_type,
				   &vmw_simple_resource_base_release);

	if (ret) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	/* Report the new handle back through the ioctl argument. */
	func->set_arg_handle(data, usimple->base.handle);
out_err:
	/* Drop the local creation reference (also on the success path). */
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}
/**
* vmw_simple_resource_lookup - Look up a simple resource from its user-space
* handle.
*
* @tfile: struct ttm_object_file identifying the caller.
* @handle: The user-space handle.
* @func: The struct vmw_simple_resource_func identifying the simple resource
* type.
*
* Returns: Refcounted pointer to the embedded struct vmw_resource if
* successful. Error pointer otherwise.
*/
struct vmw_resource *
vmw_simple_resource_lookup(struct ttm_object_file *tfile,
			   uint32_t handle,
			   const struct vmw_simple_resource_func *func)
{
	struct ttm_base_object *base_obj;
	struct vmw_resource *res;

	/* Resolve the user-space handle to its TTM base object. */
	base_obj = ttm_base_object_lookup(tfile, handle);
	if (base_obj == NULL) {
		VMW_DEBUG_USER("Invalid %s handle 0x%08lx.\n",
			       func->res_func.type_name,
			       (unsigned long) handle);
		return ERR_PTR(-ESRCH);
	}

	/* The handle must refer to the expected simple-resource type. */
	if (ttm_base_object_type(base_obj) != func->ttm_res_type) {
		ttm_base_object_unref(&base_obj);
		VMW_DEBUG_USER("Invalid type of %s handle 0x%08lx.\n",
			       func->res_func.type_name,
			       (unsigned long) handle);
		return ERR_PTR(-EINVAL);
	}

	/* Take a resource reference before dropping the base-object one. */
	res = vmw_resource_reference(
		&container_of(base_obj, struct vmw_user_simple_resource,
			      base)->simple.res);
	ttm_base_object_unref(&base_obj);

	return res;
}
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <bpf/btf.h>
/* Probe every program type enumerated in the kernel's BTF and expect the
 * running kernel to support each one (except UNSPEC and the MAX sentinel). */
void test_libbpf_probe_prog_types(void)
{
	const struct btf_type *enum_t;
	const struct btf_enum *member;
	struct btf *btf;
	int idx, cnt, enum_id;

	btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	if (!ASSERT_OK_PTR(btf, "btf_parse"))
		return;

	/* find enum bpf_prog_type and enumerate each value */
	enum_id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM);
	if (!ASSERT_GT(enum_id, 0, "bpf_prog_type_id"))
		goto cleanup;
	enum_t = btf__type_by_id(btf, enum_id);
	if (!ASSERT_OK_PTR(enum_t, "bpf_prog_type_enum"))
		goto cleanup;

	cnt = btf_vlen(enum_t);
	member = btf_enum(enum_t);
	for (idx = 0; idx < cnt; idx++, member++) {
		const char *prog_type_name = btf__str_by_offset(btf, member->name_off);
		enum bpf_prog_type prog_type = (enum bpf_prog_type)member->val;
		int supported;

		/* UNSPEC and the count sentinel are not real program types */
		if (prog_type == BPF_PROG_TYPE_UNSPEC)
			continue;
		if (strcmp(prog_type_name, "__MAX_BPF_PROG_TYPE") == 0)
			continue;

		if (!test__start_subtest(prog_type_name))
			continue;

		supported = libbpf_probe_bpf_prog_type(prog_type, NULL);
		ASSERT_EQ(supported, 1, prog_type_name);
	}

cleanup:
	btf__free(btf);
}
/* Probe every map type enumerated in the kernel's BTF and expect the
 * running kernel to support each one (except UNSPEC and the MAX sentinel). */
void test_libbpf_probe_map_types(void)
{
	const struct btf_type *enum_t;
	const struct btf_enum *member;
	struct btf *btf;
	int idx, cnt, enum_id;

	btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	if (!ASSERT_OK_PTR(btf, "btf_parse"))
		return;

	/* find enum bpf_map_type and enumerate each value */
	enum_id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM);
	if (!ASSERT_GT(enum_id, 0, "bpf_map_type_id"))
		goto cleanup;
	enum_t = btf__type_by_id(btf, enum_id);
	if (!ASSERT_OK_PTR(enum_t, "bpf_map_type_enum"))
		goto cleanup;

	cnt = btf_vlen(enum_t);
	member = btf_enum(enum_t);
	for (idx = 0; idx < cnt; idx++, member++) {
		const char *map_type_name = btf__str_by_offset(btf, member->name_off);
		enum bpf_map_type map_type = (enum bpf_map_type)member->val;
		int supported;

		/* UNSPEC and the count sentinel are not real map types */
		if (map_type == BPF_MAP_TYPE_UNSPEC)
			continue;
		if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0)
			continue;

		if (!test__start_subtest(map_type_name))
			continue;

		supported = libbpf_probe_bpf_map_type(map_type, NULL);
		ASSERT_EQ(supported, 1, map_type_name);
	}

cleanup:
	btf__free(btf);
}
/* Spot-check libbpf_probe_bpf_helper() against a table of known
 * (prog type, helper) combinations with expected availability. */
void test_libbpf_probe_helpers(void)
{
#define CASE(prog, helper, supp) {					\
	.prog_type_name = "BPF_PROG_TYPE_" # prog,			\
	.helper_name = "bpf_" # helper,					\
	.prog_type = BPF_PROG_TYPE_ ## prog,				\
	.helper_id = BPF_FUNC_ ## helper,				\
	.supported = supp,						\
}
	const struct case_def {
		const char *prog_type_name;
		const char *helper_name;
		enum bpf_prog_type prog_type;
		enum bpf_func_id helper_id;
		bool supported;
	} cases[] = {
		CASE(KPROBE, unspec, false),
		CASE(KPROBE, map_lookup_elem, true),
		CASE(KPROBE, loop, true),
		CASE(KPROBE, ktime_get_coarse_ns, false),
		CASE(SOCKET_FILTER, ktime_get_coarse_ns, true),
		CASE(KPROBE, sys_bpf, false),
		CASE(SYSCALL, sys_bpf, true),
	};
	char subtest_name[128];
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(cases); idx++) {
		const struct case_def *c = &cases[idx];
		int probed;

		/* subtest name is "<prog type>+<helper>" */
		snprintf(subtest_name, sizeof(subtest_name), "%s+%s",
			 c->prog_type_name, c->helper_name);
		if (!test__start_subtest(subtest_name))
			continue;

		probed = libbpf_probe_bpf_helper(c->prog_type, c->helper_id, NULL);
		ASSERT_EQ(probed, c->supported, subtest_name);
	}
}
|
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (C) 2013-15, Intel Corporation
*/
#ifndef __LINUX_SND_SOC_ACPI_H
#define __LINUX_SND_SOC_ACPI_H
#include <linux/stddef.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
#include <linux/soundwire/sdw.h>
/*
 * Arguments for snd_soc_acpi_find_package_from_hid(): describes the ACPI
 * package to evaluate and receives its content.
 */
struct snd_soc_acpi_package_context {
	char *name; /* package name */
	int length; /* number of elements */
	struct acpi_buffer *format;	/* NOTE(review): format description buffer — confirm semantics */
	struct acpi_buffer *state;	/* NOTE(review): result/state buffer — confirm semantics */
	bool data_valid;	/* presumably set once the package was read successfully */
};
/* the codec name used in DAIs is i2c-<HID>:00, with HID being 8 chars */
#define SND_ACPI_I2C_ID_LEN (4 + ACPI_ID_LEN + 3 + 1)
#if IS_ENABLED(CONFIG_ACPI)
/* acpi match */
struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);

bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
				    struct snd_soc_acpi_package_context *ctx);

/* check all codecs */
struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg);
#else
/* acpi match */
/* Without ACPI there is nothing to match against: report "no machine". */
static inline struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
{
	return NULL;
}

/* Without ACPI no package can ever be found. */
static inline bool
snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
				   struct snd_soc_acpi_package_context *ctx)
{
	return false;
}

/* check all codecs */
static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
{
	return NULL;
}
#endif
/**
* snd_soc_acpi_mach_params: interface for machine driver configuration
*
* @acpi_ipc_irq_index: used for BYT-CR detection
* @platform: string used for HDAudio codec support
* @codec_mask: used for HDAudio support
* @dmic_num: number of SoC- or chipset-attached PDM digital microphones
* @link_mask: SoundWire links enabled on the board
* @links: array of SoundWire link _ADR descriptors, null terminated
* @i2s_link_mask: I2S/TDM links enabled on the board
* @num_dai_drivers: number of elements in @dai_drivers
* @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
* @subsystem_vendor: optional PCI SSID vendor value
* @subsystem_device: optional PCI SSID device value
* @subsystem_rev: optional PCI SSID revision value
* @subsystem_id_set: true if a value has been written to
* subsystem_vendor and subsystem_device.
* @bt_link_mask: BT offload link enabled on the board
*/
/* Field semantics are documented in the kernel-doc block above. */
struct snd_soc_acpi_mach_params {
	u32 acpi_ipc_irq_index;
	const char *platform;
	u32 codec_mask;
	u32 dmic_num;
	u32 link_mask;
	const struct snd_soc_acpi_link_adr *links;
	u32 i2s_link_mask;
	u32 num_dai_drivers;
	struct snd_soc_dai_driver *dai_drivers;
	unsigned short subsystem_vendor;
	unsigned short subsystem_device;
	unsigned short subsystem_rev;
	bool subsystem_id_set;
	u32 bt_link_mask;
};
/**
* snd_soc_acpi_endpoint - endpoint descriptor
* @num: endpoint number (mandatory, unique per device)
* @aggregated: 0 (independent) or 1 (logically grouped)
* @group_position: zero-based order (only when @aggregated is 1)
 * @group_id: platform-unique group identifier (only when @aggregated is 1)
*/
struct snd_soc_acpi_endpoint {
	u8 num;			/* endpoint number, unique per device */
	u8 aggregated;		/* 0 = independent, 1 = logically grouped */
	u8 group_position;	/* zero-based order within the group */
	u8 group_id;		/* platform-unique group identifier */
};
/**
* snd_soc_acpi_adr_device - descriptor for _ADR-enumerated device
* @adr: 64 bit ACPI _ADR value
* @num_endpoints: number of endpoints for this device
* @endpoints: array of endpoints
* @name_prefix: string used for codec controls
*/
struct snd_soc_acpi_adr_device {
	const u64 adr;		/* 64 bit ACPI _ADR value */
	const u8 num_endpoints;	/* number of entries in @endpoints */
	const struct snd_soc_acpi_endpoint *endpoints;
	const char *name_prefix;	/* string used for codec controls */
};
/**
* snd_soc_acpi_link_adr - ACPI-based list of _ADR enumerated devices
* @mask: one bit set indicates the link this list applies to
* @num_adr: ARRAY_SIZE of devices
* @adr_d: array of devices
*
* The number of devices per link can be more than 1, e.g. in SoundWire
* multi-drop configurations.
*/
struct snd_soc_acpi_link_adr {
	const u32 mask;		/* single bit: the link this list applies to */
	const u32 num_adr;	/* number of entries in @adr_d */
	const struct snd_soc_acpi_adr_device *adr_d;
};
/*
* when set the topology uses the -ssp<N> suffix, where N is determined based on
* BIOS or DMI information
*/
#define SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER BIT(0)
/*
* when more than one SSP is reported in the link mask, use the most significant.
* This choice was found to be valid on platforms with ES8336 codecs.
*/
#define SND_SOC_ACPI_TPLG_INTEL_SSP_MSB BIT(1)
/*
* when set the topology uses the -dmic<N>ch suffix, where N is determined based on
* BIOS or DMI information
*/
#define SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER BIT(2)
/*
* when set the speaker amplifier name suffix (i.e. "-max98360a") will be
* appended to topology file name
*/
#define SND_SOC_ACPI_TPLG_INTEL_AMP_NAME BIT(3)
/*
* when set the headphone codec name suffix (i.e. "-rt5682") will be appended to
* topology file name
*/
#define SND_SOC_ACPI_TPLG_INTEL_CODEC_NAME BIT(4)
/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
* A platform supported by legacy and Sound Open Firmware (SOF) would expose
* all firmware/topology related fields.
*
* @id: ACPI ID (usually the codec's) used to find a matching machine driver.
* @uid: ACPI Unique ID, can be used to disambiguate matches.
* @comp_ids: list of compatible audio codecs using the same machine driver,
* firmware and topology
* @link_mask: describes required board layout, e.g. for SoundWire.
* @links: array of link _ADR descriptors, null terminated.
* @drv_name: machine driver name
* @fw_filename: firmware file name. Used when SOF is not enabled.
* @tplg_filename: topology file name. Used when SOF is not enabled.
* @board: board name
* @machine_quirk: pointer to quirk, usually based on DMI information when
* ACPI ID alone is not sufficient, wrong or misleading
 * @quirk_data: data used to uniquely identify a machine, usually a list of
 *  audio codecs whose presence is checked with ACPI
* @machine_check: pointer to quirk function. The functionality is similar to
* the use of @machine_quirk, except that the return value is a boolean: the intent
* is to skip a machine if the additional hardware/firmware verification invalidates
* the initial selection in the snd_soc_acpi_mach table.
* @pdata: intended for platform data or machine specific-ops. This structure
 *  is not constant since this field may be updated at run-time
 * @mach_params: platform configuration handed to the machine driver
 * @sof_tplg_filename: Sound Open Firmware topology file name, if enabled
 * @tplg_quirk_mask: quirks to select different topology files dynamically
*/
/* Descriptor for SST ASoC machine driver */
/* Field semantics are documented in the kernel-doc block above. */
struct snd_soc_acpi_mach {
	u8 id[ACPI_ID_LEN];
	const char *uid;
	const struct snd_soc_acpi_codecs *comp_ids;
	const u32 link_mask;
	const struct snd_soc_acpi_link_adr *links;
	const char *drv_name;
	const char *fw_filename;
	const char *tplg_filename;
	const char *board;
	struct snd_soc_acpi_mach * (*machine_quirk)(void *arg);
	const void *quirk_data;
	bool (*machine_check)(void *arg);
	void *pdata;
	struct snd_soc_acpi_mach_params mach_params;
	const char *sof_tplg_filename;
	const u32 tplg_quirk_mask;
};
#define SND_SOC_ACPI_MAX_CODECS 3
/**
* struct snd_soc_acpi_codecs: Structure to hold secondary codec information
* apart from the matched one, this data will be passed to the quirk function
* to match with the ACPI detected devices
*
* @num_codecs: number of secondary codecs used in the platform
* @codecs: holds the codec IDs
*
*/
struct snd_soc_acpi_codecs {
	int num_codecs;		/* number of valid entries in @codecs */
	u8 codecs[SND_SOC_ACPI_MAX_CODECS][ACPI_ID_LEN];	/* codec ACPI IDs */
};
static inline bool snd_soc_acpi_sof_parent(struct device *dev)
{
return dev->parent && dev->parent->driver && dev->parent->driver->name &&
!strncmp(dev->parent->driver->name, "sof-audio-acpi", strlen("sof-audio-acpi"));
}
bool snd_soc_acpi_sdw_link_slaves_found(struct device *dev,
const struct snd_soc_acpi_link_adr *link,
struct sdw_peripherals *peripherals);
#endif
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* rt711-sdca.h -- RT711 SDCA ALSA SoC audio driver header
*
* Copyright(c) 2021 Realtek Semiconductor Corp.
*/
#ifndef __RT711_SDCA_H__
#define __RT711_SDCA_H__
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include <sound/soc.h>
#include <linux/workqueue.h>
/* Driver-private state for the RT711 SDCA codec. */
struct rt711_sdca_priv {
	struct regmap *regmap, *mbq_regmap;	/* regular and MBQ register maps */
	struct snd_soc_component *component;
	struct sdw_slave *slave;		/* SoundWire peripheral handle */
	struct sdw_bus_params params;
	bool hw_init;		/* presumably "hardware fully initialized" — TODO confirm */
	bool first_hw_init;
	struct snd_soc_jack *hs_jack;		/* headset jack reported to ASoC */
	struct delayed_work jack_detect_work;
	struct delayed_work jack_btn_check_work;
	struct mutex calibrate_mutex; /* for headset calibration */
	struct mutex disable_irq_lock; /* SDCA irq lock protection */
	bool disable_irq;
	int jack_type, jd_src;	/* jd_src: enum rt711_sdca_jd_src */
	unsigned int scp_sdca_stat1, scp_sdca_stat2;
	int hw_ver;		/* enum rt711_sdca_ver */
	bool fu0f_dapm_mute, fu0f_mixer_l_mute, fu0f_mixer_r_mute;
	bool fu1e_dapm_mute, fu1e_mixer_l_mute, fu1e_mixer_r_mute;
	unsigned int ge_mode_override;
};
/* NID */
#define RT711_AUDIO_FUNCTION_GROUP 0x01
#define RT711_DAC_OUT2 0x03
#define RT711_ADC_IN1 0x09
#define RT711_ADC_IN2 0x08
#define RT711_DMIC1 0x12
#define RT711_DMIC2 0x13
#define RT711_MIC2 0x19
#define RT711_LINE1 0x1a
#define RT711_LINE2 0x1b
#define RT711_BEEP 0x1d
#define RT711_VENDOR_REG 0x20
#define RT711_HP_OUT 0x21
#define RT711_MIXER_IN1 0x22
#define RT711_MIXER_IN2 0x23
#define RT711_INLINE_CMD 0x55
#define RT711_VENDOR_CALI 0x58
#define RT711_VENDOR_IMS_DRE 0x5b
#define RT711_VENDOR_VAD 0x5e
#define RT711_VENDOR_ANALOG_CTL 0x5f
#define RT711_VENDOR_HDA_CTL 0x61
/* Index (NID:20h) */
#define RT711_JD_PRODUCT_NUM 0x00
#define RT711_DMIC_CTL1 0x06
#define RT711_JD_CTL1 0x08
#define RT711_JD_CTL2 0x09
#define RT711_CC_DET1 0x11
#define RT711_PARA_VERB_CTL 0x1a
#define RT711_COMBO_JACK_AUTO_CTL1 0x45
#define RT711_COMBO_JACK_AUTO_CTL2 0x46
#define RT711_COMBO_JACK_AUTO_CTL3 0x47
#define RT711_INLINE_CMD_CTL 0x48
#define RT711_DIGITAL_MISC_CTRL4 0x4a
#define RT711_JD_CTRL6 0x6a
#define RT711_VREFOUT_CTL 0x6b
#define RT711_GPIO_TEST_MODE_CTL2 0x6d
#define RT711_FSM_CTL 0x6f
#define RT711_IRQ_FLAG_TABLE1 0x80
#define RT711_IRQ_FLAG_TABLE2 0x81
#define RT711_IRQ_FLAG_TABLE3 0x82
#define RT711_HP_FSM_CTL 0x83
#define RT711_TX_RX_MUX_CTL 0x91
#define RT711_FILTER_SRC_SEL 0xb0
#define RT711_ADC27_VOL_SET 0xb7
/* Index (NID:58h) */
#define RT711_DAC_DC_CALI_CTL1 0x00
#define RT711_DAC_DC_CALI_CTL2 0x01
/* Index (NID:5bh) */
#define RT711_IMS_DIGITAL_CTL1 0x00
#define RT711_HP_IMS_RESULT_L 0x20
#define RT711_HP_IMS_RESULT_R 0x21
/* Index (NID:5eh) */
#define RT711_VAD_SRAM_CTL1 0x10
/* Index (NID:5fh) */
#define RT711_MISC_POWER_CTL0 0x01
#define RT711_MISC_POWER_CTL4 0x05
/* Index (NID:61h) */
#define RT711_HDA_LEGACY_MUX_CTL1 0x00
#define RT711_HDA_LEGACY_UNSOLICITED_CTL 0x03
#define RT711_HDA_LEGACY_CONFIG_CTL 0x06
#define RT711_HDA_LEGACY_RESET_CTL 0x08
#define RT711_HDA_LEGACY_GPIO_CTL 0x0a
#define RT711_ADC08_09_PDE_CTL 0x24
#define RT711_GE_MODE_RELATED_CTL 0x35
#define RT711_PUSH_BTN_INT_CTL0 0x36
#define RT711_PUSH_BTN_INT_CTL1 0x37
#define RT711_PUSH_BTN_INT_CTL2 0x38
#define RT711_PUSH_BTN_INT_CTL6 0x3c
#define RT711_PUSH_BTN_INT_CTL7 0x3d
#define RT711_PUSH_BTN_INT_CTL9 0x3f
/* DAC DC offset calibration control-1 (0x00)(NID:20h) */
#define RT711_DAC_DC_CALI_TRIGGER (0x1 << 15)
#define RT711_DAC_DC_CALI_CLK_EN (0x1 << 14)
#define RT711_DAC_DC_FORCE_CALI_RST (0x1 << 3)
/* jack detect control 1 (0x08)(NID:20h) */
#define RT711_JD2_DIGITAL_MODE_SEL (0x1 << 1)
/* jack detect control 2 (0x09)(NID:20h) */
#define RT711_JD2_2PORT_200K_DECODE_HP (0x1 << 13)
#define RT711_JD2_2PORT_100K_DECODE_MASK (0x1 << 12)
#define RT711_JD2_2PORT_100K_DECODE_HP (0x0 << 12)
#define RT711_HP_JD_SEL_JD1 (0x0 << 1)
#define RT711_HP_JD_SEL_JD2 (0x1 << 1)
/* CC DET1 (0x11)(NID:20h) */
#define RT711_HP_JD_FINAL_RESULT_CTL_JD12 (0x1 << 10)
#define RT711_HP_JD_FINAL_RESULT_CTL_CCDET (0x0 << 10)
#define RT711_POW_CC1_AGPI (0x1 << 5)
#define RT711_POW_CC1_AGPI_ON (0x1 << 5)
#define RT711_POW_CC1_AGPI_OFF (0x0 << 5)
/* Parameter & Verb control (0x1a)(NID:20h) */
#define RT711_HIDDEN_REG_SW_RESET (0x1 << 14)
/* combo jack auto switch control 2 (0x46)(NID:20h) */
#define RT711_COMBOJACK_AUTO_DET_STATUS (0x1 << 11)
#define RT711_COMBOJACK_AUTO_DET_TRS (0x1 << 10)
#define RT711_COMBOJACK_AUTO_DET_CTIA (0x1 << 9)
#define RT711_COMBOJACK_AUTO_DET_OMTP (0x1 << 8)
/* FSM control (0x6f)(NID:20h) */
#define RT711_CALI_CTL (0x0 << 0)
#define RT711_COMBOJACK_CTL (0x1 << 0)
#define RT711_IMS_CTL (0x2 << 0)
#define RT711_DEPOP_CTL (0x3 << 0)
#define RT711_FSM_IMP_EN (0x1 << 6)
/* Impedance Sense Digital Control 1 (0x00)(NID:5bh) */
#define RT711_TRIGGER_IMS (0x1 << 15)
#define RT711_IMS_EN (0x1 << 6)
#define RT711_EAPD_HIGH 0x2
#define RT711_EAPD_LOW 0x0
#define RT711_MUTE_SFT 7
/* set input/output mapping to payload[14][15] separately */
#define RT711_DIR_IN_SFT 6
#define RT711_DIR_OUT_SFT 7
/* RC Calibration register */
#define RT711_RC_CAL_STATUS 0x320c
/* Buffer address for HID */
#define RT711_BUF_ADDR_HID1 0x44030000
#define RT711_BUF_ADDR_HID2 0x44030020
/* RT711 SDCA Control - function number */
#define FUNC_NUM_JACK_CODEC 0x01
#define FUNC_NUM_MIC_ARRAY 0x02
#define FUNC_NUM_HID 0x03
/* RT711 SDCA entity */
#define RT711_SDCA_ENT_HID01 0x01
#define RT711_SDCA_ENT_GE49 0x49
#define RT711_SDCA_ENT_USER_FU05 0x05
#define RT711_SDCA_ENT_USER_FU0F 0x0f
#define RT711_SDCA_ENT_USER_FU1E 0x1e
#define RT711_SDCA_ENT_PLATFORM_FU15 0x15
#define RT711_SDCA_ENT_PLATFORM_FU44 0x44
#define RT711_SDCA_ENT_PDE28 0x28
#define RT711_SDCA_ENT_PDE29 0x29
#define RT711_SDCA_ENT_PDE2A 0x2a
#define RT711_SDCA_ENT_CS01 0x01
#define RT711_SDCA_ENT_CS11 0x11
#define RT711_SDCA_ENT_CS1F 0x1f
#define RT711_SDCA_ENT_OT1 0x06
#define RT711_SDCA_ENT_LINE1 0x09
#define RT711_SDCA_ENT_LINE2 0x31
#define RT711_SDCA_ENT_PDELINE2 0x36
#define RT711_SDCA_ENT_USER_FU9 0x41
/* RT711 SDCA control */
#define RT711_SDCA_CTL_SAMPLE_FREQ_INDEX 0x10
#define RT711_SDCA_CTL_FU_CH_GAIN 0x0b
#define RT711_SDCA_CTL_FU_MUTE 0x01
#define RT711_SDCA_CTL_FU_VOLUME 0x02
#define RT711_SDCA_CTL_HIDTX_CURRENT_OWNER 0x10
#define RT711_SDCA_CTL_HIDTX_SET_OWNER_TO_DEVICE 0x11
#define RT711_SDCA_CTL_HIDTX_MESSAGE_OFFSET 0x12
#define RT711_SDCA_CTL_HIDTX_MESSAGE_LENGTH 0x13
#define RT711_SDCA_CTL_SELECTED_MODE 0x01
#define RT711_SDCA_CTL_DETECTED_MODE 0x02
#define RT711_SDCA_CTL_REQ_POWER_STATE 0x01
#define RT711_SDCA_CTL_VENDOR_DEF 0x30
/* RT711 SDCA channel */
#define CH_L 0x01
#define CH_R 0x02
/* sample frequency index */
#define RT711_SDCA_RATE_44100HZ 0x08
#define RT711_SDCA_RATE_48000HZ 0x09
#define RT711_SDCA_RATE_96000HZ 0x0b
#define RT711_SDCA_RATE_192000HZ 0x0d
/* DAI indices */
enum {
	RT711_AIF1,
	RT711_AIF2,
	RT711_AIFS,	/* presumably the AIF count (last entry) */
};

/* Jack-detection source selection */
enum rt711_sdca_jd_src {
	RT711_JD_NULL,	/* no jack detection */
	RT711_JD1,
	RT711_JD2,
	RT711_JD2_100K
};

/* Silicon version */
enum rt711_sdca_ver {
	RT711_VER_VD0,
	RT711_VER_VD1
};

int rt711_sdca_io_init(struct device *dev, struct sdw_slave *slave);
int rt711_sdca_init(struct device *dev, struct regmap *regmap,
		    struct regmap *mbq_regmap, struct sdw_slave *slave);
int rt711_sdca_jack_detect(struct rt711_sdca_priv *rt711, bool *hp, bool *mic);
#endif /* __RT711_SDCA_H__ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AMD SoC Power Management Controller Driver Quirks
*
* Copyright (c) 2023, Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Author: Mario Limonciello <[email protected]>
*/
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include "pmc.h"
/*
 * Per-machine quirk data attached to a DMI match.
 *
 * @s2idle_bug_mmio: when non-zero, MMIO address whose bit 0 is cleared on
 *                   resume to skip a buggy SMI handler (see
 *                   amd_pmc_skip_nvme_smi_handler())
 * @spurious_8042:   machine produces spurious 8042 wakeups; when set, 8042
 *                   wakeup is disabled
 */
struct quirk_entry {
	u32 s2idle_bug_mmio;
	bool spurious_8042;
};
/* Quirk instances referenced from the DMI table below. */
static struct quirk_entry quirk_s2idle_bug = {
	.s2idle_bug_mmio = 0xfed80380, /* presumably a PM register in FCH MMIO space — TODO confirm */
};

static struct quirk_entry quirk_spurious_8042 = {
	.spurious_8042 = true,
};
/*
 * DMI table of machines with known platform firmware bugs.  The matched
 * entry's driver_data points at the quirk_entry to apply.
 */
static const struct dmi_system_id fwbug_list[] = {
	{
		.ident = "L14 Gen2 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
		}
	},
	{
		.ident = "T14s Gen2 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
		}
	},
	{
		.ident = "X13 Gen2 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
		}
	},
	{
		.ident = "T14 Gen2 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
		}
	},
	{
		.ident = "T14 Gen1 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
		}
	},
	{
		.ident = "T14 Gen1 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
		}
	},
	{
		.ident = "T14s Gen1 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
		}
	},
	{
		.ident = "T14s Gen1 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
		}
	},
	{
		.ident = "P14s Gen1 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
		}
	},
	{
		.ident = "P14s Gen2 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
		}
	},
	{
		.ident = "P14s Gen2 AMD",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
		}
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */
	{
		.ident = "V14 G4 AMN",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82YT"),
		}
	},
	{
		.ident = "V14 G4 AMN",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "83GE"),
		}
	},
	{
		.ident = "V15 G4 AMN",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82YU"),
		}
	},
	{
		.ident = "V15 G4 AMN",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"),
		}
	},
	{
		.ident = "IdeaPad 1 14AMN7",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
		}
	},
	{
		.ident = "IdeaPad 1 15AMN7",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
		}
	},
	{
		.ident = "IdeaPad 1 15AMN7",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
		}
	},
	{
		.ident = "IdeaPad Slim 3 14AMN8",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
		}
	},
	{
		.ident = "IdeaPad Slim 3 15AMN8",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
		}
	},
	/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
	{
		.ident = "HP Laptop 15s-eq2xxx",
		.driver_data = &quirk_s2idle_bug,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
		}
	},
	/* https://community.frame.work/t/tracking-framework-amd-ryzen-7040-series-lid-wakeup-behavior-feedback/39128 */
	{
		.ident = "Framework Laptop 13 (Phoenix)",
		.driver_data = &quirk_spurious_8042,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
			DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
		}
	},
	{
		.ident = "Framework Laptop 13 (Phoenix)",
		.driver_data = &quirk_spurious_8042,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
			DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
		}
	},
	{}
};
/*
 * Some laptops run an SMI handler during the D3->D0 transition that occurs
 * specifically when exiting suspend to idle.  With the IOMMU translation
 * layer enabled for NVME devices (the default behavior), this can cause
 * large delays during resume.
 *
 * To avoid this firmware problem, skip the SMI handler on these machines
 * before the D0 transition occurs.
 */
/*
 * Clear bit 0 of the quirk's MMIO register so the platform's buggy
 * SMI handler is skipped on the next D3->D0 transition.  Best effort:
 * silently bails out if the region or mapping cannot be obtained.
 */
static void amd_pmc_skip_nvme_smi_handler(u32 s2idle_bug_mmio)
{
	void __iomem *base;

	if (!request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80"))
		return;

	base = ioremap(s2idle_bug_mmio, 1);
	if (base) {
		u8 reg = ioread8(base);

		iowrite8(reg & ~BIT(0), base);
		iounmap(base);
	}

	release_mem_region(s2idle_bug_mmio, 1);
}
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev)
{
if (dev->quirks && dev->quirks->s2idle_bug_mmio)
amd_pmc_skip_nvme_smi_handler(dev->quirks->s2idle_bug_mmio);
}
/*
 * Match the running machine against fwbug_list and record the quirks to
 * apply.  CZN parts always get 8042 wakeup disabled, independent of DMI.
 */
void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
{
	const struct dmi_system_id *match;

	if (dev->cpu_id == AMD_CPU_ID_CZN)
		dev->disable_8042_wakeup = true;

	match = dmi_first_match(fwbug_list);
	if (!match)
		return;

	dev->quirks = match->driver_data;
	if (dev->quirks->spurious_8042)
		dev->disable_8042_wakeup = true;
	if (dev->quirks->s2idle_bug_mmio)
		pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
			match->ident);
}
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CLK_EXYNOS5_SUBCMU_H
#define __CLK_EXYNOS5_SUBCMU_H

/*
 * One register to save/restore across a power-domain cycle.
 *
 * @offset: register offset within the CMU
 * @value:  value to program (under @mask) while the domain is off
 * @mask:   bits of the register that are managed
 * @save:   storage for the value captured at suspend time
 */
struct exynos5_subcmu_reg_dump {
	u32 offset;
	u32 value;
	u32 mask;
	u32 save;
};

/*
 * Description of one sub-CMU: its clocks and the registers that must
 * survive its power domain being gated.
 *
 * @div_clks/@nr_div_clks:   divider clocks belonging to this sub-CMU
 * @gate_clks/@nr_gate_clks: gate clocks belonging to this sub-CMU
 * @suspend_regs/@nr_suspend_regs: registers saved/restored around suspend
 * @pd_name: name of the associated power domain
 */
struct exynos5_subcmu_info {
	const struct samsung_div_clock *div_clks;
	unsigned int nr_div_clks;
	const struct samsung_gate_clock *gate_clks;
	unsigned int nr_gate_clks;
	struct exynos5_subcmu_reg_dump *suspend_regs;
	unsigned int nr_suspend_regs;
	const char *pd_name;
};

/* Register @nr_cmus sub-CMUs from @cmu with the clock provider @ctx. */
void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
			  const struct exynos5_subcmu_info **cmu);

#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MINMAX_H
#define _LINUX_MINMAX_H
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
/*
* min()/max()/clamp() macros must accomplish three things:
*
* - Avoid multiple evaluations of the arguments (so side-effects like
* "x++" happen only once) when non-constant.
* - Retain result as a constant expressions when called with only
* constant expressions (to avoid tripping VLA warnings in stack
* allocation usage).
* - Perform signed v unsigned type-checking (to generate compile
* errors instead of nasty runtime surprises).
* - Unsigned char/short are always promoted to signed int and can be
* compared against signed or unsigned arguments.
* - Unsigned arguments can be compared against non-negative signed constants.
* - Comparison of a signed argument against an unsigned constant fails
* even if the constant is below __INT_MAX__ and could be cast to int.
*/
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
/*
* __sign_use for integer expressions:
* bit #0 set if ok for unsigned comparisons
* bit #1 set if ok for signed comparisons
*
* In particular, statically non-negative signed integer
* expressions are ok for both.
*
* NOTE! Unsigned types smaller than 'int' are implicitly
* converted to 'int' in expressions, and are accepted for
* signed conversions for now. This is debatable.
*
* Note that 'x' is the original expression, and 'ux' is
* the unique variable that contains the value.
*
* We use 'ux' for pure type checking, and 'x' for when
* we need to look at the value (but without evaluating
* it for side effects! Careful to only ever evaluate it
* with sizeof() or __builtin_constant_p() etc).
*
* Pointers end up being checked by the normal C type
* rules at the actual comparison, and these expressions
* only need to be careful to not cause warnings for
* pointer use.
*/
#define __signed_type_use(x,ux) (2+__is_nonneg(x,ux))
#define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4))
#define __sign_use(x,ux) (is_signed_type(typeof(ux))? \
__signed_type_use(x,ux):__unsigned_type_use(x,ux))
/*
* To avoid warnings about casting pointers to integers
* of different sizes, we need that special sign type.
*
* On 64-bit we can just always use 'long', since any
* integer or pointer type can just be cast to that.
*
* This does not work for 128-bit signed integers since
* the cast would truncate them, but we do not use s128
* types in the kernel (we do use 'u128', but they will
* be handled by the !is_signed_type() case).
*
* NOTE! The cast is there only to avoid any warnings
* from when values that aren't signed integer types.
*/
#ifdef CONFIG_64BIT
#define __signed_type(ux) long
#else
#define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L))
#endif
#define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0)
#define __types_ok(x,y,ux,uy) \
(__sign_use(x,ux) & __sign_use(y,uy))
#define __types_ok3(x,y,z,ux,uy,uz) \
(__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz))
#define __cmp_op_min <
#define __cmp_op_max >
#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
#define __cmp_once_unique(op, type, x, y, ux, uy) \
({ type ux = (x); type uy = (y); __cmp(op, ux, uy); })
#define __cmp_once(op, type, x, y) \
__cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
#define __careful_cmp_once(op, x, y, ux, uy) ({ \
__auto_type ux = (x); __auto_type uy = (y); \
BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy), \
#op"("#x", "#y") signedness error"); \
__cmp(op, ux, uy); })
#define __careful_cmp(op, x, y) \
__careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
#define __clamp(val, lo, hi) \
((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \
__auto_type uval = (val); \
__auto_type ulo = (lo); \
__auto_type uhi = (hi); \
static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
(lo) <= (hi), true), \
"clamp() low limit " #lo " greater than high limit " #hi); \
BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi), \
"clamp("#val", "#lo", "#hi") signedness error"); \
__clamp(uval, ulo, uhi); })
#define __careful_clamp(val, lo, hi) \
__clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
/**
 * min - return minimum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 */
#define min(x, y) __careful_cmp(min, x, y)
/**
 * max - return maximum of two values of the same or compatible types
 * @x: first value
 * @y: second value
 */
#define max(x, y) __careful_cmp(max, x, y)
/**
 * umin - return minimum of two non-negative values
 *   Signed types are zero extended to match a larger unsigned type.
 * @x: first value
 * @y: second value
 */
#define umin(x, y) \
	__careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
/**
 * umax - return maximum of two non-negative values
 * @x: first value
 * @y: second value
 */
#define umax(x, y) \
	__careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
/* Three-way variant of __careful_cmp_once. */
#define __careful_op3(op, x, y, z, ux, uy, uz) ({ \
	__auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\
	BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz), \
		#op"3("#x", "#y", "#z") signedness error"); \
	__cmp(op, ux, __cmp(op, uy, uz)); })
/**
 * min3 - return minimum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 */
#define min3(x, y, z) \
	__careful_op3(min, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
/**
 * max3 - return maximum of three values
 * @x: first value
 * @y: second value
 * @z: third value
 */
#define max3(x, y, z) \
	__careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#define min_not_zero(x, y) ({ \
	typeof(x) __x = (x); \
	typeof(y) __y = (y); \
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
/**
 * clamp - return a value clamped to a given range with strict typechecking
 * @val: current value
 * @lo: lowest allowable value
 * @hi: highest allowable value
 *
 * This macro does strict typechecking of @lo/@hi to make sure they are of the
 * same type as @val. See the unnecessary pointer comparisons.
 */
#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
/*
 * ..and if you can't take the strict
 * types, you can specify one yourself.
 *
 * Or not use min/max/clamp at all, of course.
 */
/**
 * min_t - return minimum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 */
#define min_t(type, x, y) __cmp_once(min, type, x, y)
/**
 * max_t - return maximum of two values, using the specified type
 * @type: data type to use
 * @x: first value
 * @y: second value
 */
#define max_t(type, x, y) __cmp_once(max, type, x, y)
/*
 * Do not check the array parameter using __must_be_array().
 * In the following legit use-case where the "array" passed is a simple pointer,
 * __must_be_array() will return a failure.
 * --- 8< ---
 * int *buff
 * ...
 * min = min_array(buff, nb_items);
 * --- 8< ---
 *
 * The first typeof(&(array)[0]) is needed in order to support arrays of both
 * 'int *buff' and 'int buff[N]' types.
 *
 * The array can be an array of const items.
 * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order
 * to discard the const qualifier for the __element variable.
 */
#define __minmax_array(op, array, len) ({ \
	typeof(&(array)[0]) __array = (array); \
	typeof(len) __len = (len); \
	__unqual_scalar_typeof(__array[0]) __element = __array[--__len];\
	while (__len--) \
		__element = op(__element, __array[__len]); \
	__element; })
/**
 * min_array - return minimum of values present in an array
 * @array: array
 * @len: array length
 *
 * Note that @len must not be zero (empty array).
 */
#define min_array(array, len) __minmax_array(min, array, len)
/**
 * max_array - return maximum of values present in an array
 * @array: array
 * @len: array length
 *
 * Note that @len must not be zero (empty array).
 */
#define max_array(array, len) __minmax_array(max, array, len)
/**
 * clamp_t - return a value clamped to a given range using a given type
 * @type: the type of variable to use
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of type
 * @type to make all the comparisons.
 */
#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
/**
 * clamp_val - return a value clamped to a given range using val's type
 * @val: current value
 * @lo: minimum allowable value
 * @hi: maximum allowable value
 *
 * This macro does no typechecking and uses temporary variables of whatever
 * type the input argument @val is. This is useful when @val is an unsigned
 * type and @lo and @hi are literals that will otherwise be assigned a signed
 * integer type.
 */
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
/* Unsigned wrap makes one compare cover start <= val < start + len. */
static inline bool in_range64(u64 val, u64 start, u64 len)
{
	return (val - start) < len;
}
static inline bool in_range32(u32 val, u32 start, u32 len)
{
	return (val - start) < len;
}
/**
 * in_range - Determine if a value lies within a range.
 * @val: Value to test.
 * @start: First value in range.
 * @len: Number of values in range.
 *
 * This is more efficient than "if (start <= val && val < (start + len))".
 * It also gives a different answer if @start + @len overflows the size of
 * the type by a sufficient amount to encompass @val. Decide for yourself
 * which behaviour you want, or prove that start + len never overflow.
 * Do not blindly replace one form with the other.
 */
#define in_range(val, start, len) \
	((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ? \
		in_range32(val, start, len) : in_range64(val, start, len))
/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
/*
 * Use these carefully: no type checking, and uses the arguments
 * multiple times. Use for obvious constants only.
 */
#define MIN(a,b) __cmp(min,a,b)
#define MAX(a,b) __cmp(max,a,b)
#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b))
#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b))
#endif	/* _LINUX_MINMAX_H */
|
// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
* Apple iPhone 5s (GSM), N51, iPhone6,1 (A1453/A1533)
* Copyright (c) 2022, Konrad Dybcio <[email protected]>
*/
/dts-v1/;

#include "s5l8960x-5s.dtsi"

/* Board-specific identity on top of the shared iPhone 5s dtsi. */
/ {
	compatible = "apple,n51", "apple,s5l8960x", "apple,arm-platform";
	model = "Apple iPhone 5s (GSM)";
};
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors: Joonyoung Shim <[email protected]>
*/
#include <linux/refcount.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <drm/drm_file.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
/* valid register range set from user: 0x0104 ~ 0x0880 */
#define G2D_VALID_START 0x0104
#define G2D_VALID_END 0x0880
/* general registers */
#define G2D_SOFT_RESET 0x0000
#define G2D_INTEN 0x0004
#define G2D_INTC_PEND 0x000C
#define G2D_DMA_SFR_BASE_ADDR 0x0080
#define G2D_DMA_COMMAND 0x0084
#define G2D_DMA_STATUS 0x008C
#define G2D_DMA_HOLD_CMD 0x0090
/* command registers */
#define G2D_BITBLT_START 0x0100
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
#define G2D_SRC_STRIDE 0x0308
#define G2D_SRC_COLOR_MODE 0x030C
#define G2D_SRC_LEFT_TOP 0x0310
#define G2D_SRC_RIGHT_BOTTOM 0x0314
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
#define G2D_DST_STRIDE 0x0408
#define G2D_DST_COLOR_MODE 0x040C
#define G2D_DST_LEFT_TOP 0x0410
#define G2D_DST_RIGHT_BOTTOM 0x0414
#define G2D_DST_PLANE2_BASE_ADDR 0x0418
#define G2D_PAT_BASE_ADDR 0x0500
#define G2D_MSK_BASE_ADDR 0x0520
/* G2D_SOFT_RESET */
#define G2D_SFRCLEAR (1 << 1)
#define G2D_R (1 << 0)
/* G2D_INTEN */
#define G2D_INTEN_ACF (1 << 3)
#define G2D_INTEN_UCF (1 << 2)
#define G2D_INTEN_GCF (1 << 1)
#define G2D_INTEN_SCF (1 << 0)
/* G2D_INTC_PEND */
#define G2D_INTP_ACMD_FIN (1 << 3)
#define G2D_INTP_UCMD_FIN (1 << 2)
#define G2D_INTP_GCMD_FIN (1 << 1)
#define G2D_INTP_SCMD_FIN (1 << 0)
/* G2D_DMA_COMMAND */
#define G2D_DMA_HALT (1 << 2)
#define G2D_DMA_CONTINUE (1 << 1)
#define G2D_DMA_START (1 << 0)
/* G2D_DMA_STATUS */
#define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
#define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
#define G2D_DMA_DONE (1 << 0)
#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
/* G2D_DMA_HOLD_CMD */
#define G2D_USER_HOLD (1 << 2)
#define G2D_LIST_HOLD (1 << 1)
#define G2D_BITBLT_HOLD (1 << 0)
/* G2D_BITBLT_START */
#define G2D_START_CASESEL (1 << 2)
#define G2D_START_NHOLT (1 << 1)
#define G2D_START_BITBLT (1 << 0)
/* buffer color format */
#define G2D_FMT_XRGB8888 0
#define G2D_FMT_ARGB8888 1
#define G2D_FMT_RGB565 2
#define G2D_FMT_XRGB1555 3
#define G2D_FMT_ARGB1555 4
#define G2D_FMT_XRGB4444 5
#define G2D_FMT_ARGB4444 6
#define G2D_FMT_PACKED_RGB888 7
#define G2D_FMT_A8 11
#define G2D_FMT_L8 12
/* buffer valid length (pixels, see g2d_check_buf_desc_is_valid()) */
#define G2D_LEN_MIN 1
#define G2D_LEN_MAX 8000
/* command list pool layout: G2D_CMDLIST_NUM lists of G2D_CMDLIST_SIZE each */
#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
#define G2D_CMDLIST_NUM 64
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
/* maximum buffer pool size of userptr is 64MB as default */
#define MAX_POOL (64 * 1024 * 1024)
/* How a buffer referenced from a cmdlist was supplied by userspace. */
enum {
	BUF_TYPE_GEM = 1,
	BUF_TYPE_USERPTR,
};

/* Which register group a cmdlist write targets (indexes g2d_buf_info arrays). */
enum g2d_reg_type {
	REG_TYPE_NONE = -1,
	REG_TYPE_SRC,
	REG_TYPE_SRC_PLANE2,
	REG_TYPE_DST,
	REG_TYPE_DST_PLANE2,
	REG_TYPE_PAT,
	REG_TYPE_MSK,
	MAX_REG_TYPE_NR
};

enum g2d_flag_bits {
	/*
	 * If set, suspends the runqueue worker after the currently
	 * processed node is finished.
	 */
	G2D_BIT_SUSPEND_RUNQUEUE,
	/*
	 * If set, indicates that the engine is currently busy.
	 */
	G2D_BIT_ENGINE_BUSY,
};
/* cmdlist data structure */
struct g2d_cmdlist {
	u32 head;				/* presumably consumed by the DMA engine — TODO confirm */
	unsigned long data[G2D_CMDLIST_DATA_NUM];	/* (reg offset, value) pairs */
	u32 last;	/* last data offset */
};
/*
 * A structure of buffer description
 *
 * @format: color format
 * @stride: buffer stride/pitch in bytes
 * @left_x: the x coordinates of left top corner
 * @top_y: the y coordinates of left top corner
 * @right_x: the x coordinates of right bottom corner
 * @bottom_y: the y coordinates of right bottom corner
 *
 */
struct g2d_buf_desc {
	unsigned int format;
	unsigned int stride;
	unsigned int left_x;
	unsigned int top_y;
	unsigned int right_x;
	unsigned int bottom_y;
};

/*
 * A structure of buffer information
 *
 * @map_nr: manages the number of mapped buffers
 * @reg_types: stores register type in the order of requested command
 * @handles: stores buffer handle in its reg_type position
 * @types: stores buffer type in its reg_type position
 * @descs: stores buffer description in its reg_type position
 *
 */
struct g2d_buf_info {
	unsigned int map_nr;
	enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
	void *obj[MAX_REG_TYPE_NR];
	unsigned int types[MAX_REG_TYPE_NR];
	struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};
/* Completion event queued back to userspace via the DRM event machinery. */
struct drm_exynos_pending_g2d_event {
	struct drm_pending_event base;
	struct drm_exynos_g2d_event event;
};

/*
 * Tracking object for a pinned+DMA-mapped userspace buffer.
 *
 * @list:        link in the per-file userptr_list
 * @dma_addr:    DMA address of the first sg entry
 * @userptr:     original userspace address (lookup key, with @size)
 * @pages:       pinned pages backing the range
 * @sgt:         scatter table mapped for DMA
 * @refcount:    shared-use count; mapping freed when it drops to zero
 * @in_pool:     counted against the global userptr pool (MAX_POOL)
 * @out_of_list: removed from userptr_list while DMA may still use it
 */
struct g2d_cmdlist_userptr {
	struct list_head list;
	dma_addr_t dma_addr;
	unsigned long userptr;
	unsigned long size;
	struct page **pages;
	unsigned int npages;
	struct sg_table *sgt;
	refcount_t refcount;
	bool in_pool;
	bool out_of_list;
};

/* One command list: CPU pointer, its DMA address, and mapped-buffer state. */
struct g2d_cmdlist_node {
	struct list_head list;
	struct g2d_cmdlist *cmdlist;
	dma_addr_t dma_addr;
	struct g2d_buf_info buf_info;
	struct drm_exynos_pending_g2d_event *event;
};

/* A batch of cmdlists submitted together, completed as one unit. */
struct g2d_runqueue_node {
	struct list_head list;
	struct list_head run_cmdlist;
	struct list_head event_list;
	struct drm_file *filp;
	pid_t pid;
	struct completion complete;
	int async;	/* non-zero: submitter does not wait for completion */
};

/* Per-device driver state. */
struct g2d_data {
	struct device *dev;
	void *dma_priv;
	struct clk *gate_clk;
	void __iomem *regs;
	int irq;
	struct workqueue_struct *g2d_workq;
	struct work_struct runqueue_work;
	struct drm_device *drm_dev;
	unsigned long flags;	/* g2d_flag_bits */

	/* cmdlist */
	struct g2d_cmdlist_node *cmdlist_node;	/* base of the kcalloc'd node array */
	struct list_head free_cmdlist;
	struct mutex cmdlist_mutex;
	dma_addr_t cmdlist_pool;
	void *cmdlist_pool_virt;
	unsigned long cmdlist_dma_attrs;

	/* runqueue*/
	struct g2d_runqueue_node *runqueue_node;
	struct list_head runqueue;
	struct mutex runqueue_mutex;
	struct kmem_cache *runqueue_slab;

	unsigned long current_pool;	/* bytes of userptr pool in use */
	unsigned long max_pool;		/* pool limit (MAX_POOL) */
};
/* Soft-reset the engine (clearing SFRs) and mark it idle in driver state. */
static inline void g2d_hw_reset(struct g2d_data *g2d)
{
	writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET);
	clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
}
/*
 * Allocate the DMA pool backing all command lists and carve it into
 * G2D_CMDLIST_NUM nodes, each queued on the free list.
 *
 * Returns 0 on success or a negative errno on allocation failure.
 */
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
	struct device *dev = g2d->dev;
	struct g2d_cmdlist_node *node;
	int nr;
	int ret;
	struct g2d_buf_info *buf_info;

	/* write-combined: cmdlists are written by the CPU, read by DMA */
	g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;

	g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
						G2D_CMDLIST_POOL_SIZE,
						&g2d->cmdlist_pool, GFP_KERNEL,
						g2d->cmdlist_dma_attrs);
	if (!g2d->cmdlist_pool_virt) {
		dev_err(dev, "failed to allocate dma memory\n");
		return -ENOMEM;
	}

	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
	if (!node) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Keep the array base so g2d_fini_cmdlist() can free it; without
	 * this assignment the kcalloc'd array was leaked on teardown.
	 */
	g2d->cmdlist_node = node;

	for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
		unsigned int i;

		/* each node owns one G2D_CMDLIST_SIZE slice of the pool */
		node[nr].cmdlist =
			g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
		node[nr].dma_addr =
			g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;

		buf_info = &node[nr].buf_info;
		for (i = 0; i < MAX_REG_TYPE_NR; i++)
			buf_info->reg_types[i] = REG_TYPE_NONE;

		list_add_tail(&node[nr].list, &g2d->free_cmdlist);
	}

	return 0;

err:
	dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
			g2d->cmdlist_pool_virt,
			g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
	return ret;
}
/* Release the cmdlist node array and, if present, the DMA pool behind it. */
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
	kfree(g2d->cmdlist_node);

	if (!g2d->cmdlist_pool_virt || !g2d->cmdlist_pool)
		return;

	dma_free_attrs(to_dma_dev(g2d->drm_dev),
			G2D_CMDLIST_POOL_SIZE,
			g2d->cmdlist_pool_virt,
			g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
}
/* Take a node off the free list, or return NULL if none are available. */
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
{
	struct g2d_cmdlist_node *node = NULL;

	mutex_lock(&g2d->cmdlist_mutex);
	if (!list_empty(&g2d->free_cmdlist)) {
		node = list_first_entry(&g2d->free_cmdlist,
					struct g2d_cmdlist_node, list);
		list_del_init(&node->list);
	} else {
		dev_err(g2d->dev, "there is no free cmdlist\n");
	}
	mutex_unlock(&g2d->cmdlist_mutex);

	return node;
}
/* Return a node to the free list under the cmdlist lock. */
static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
{
	mutex_lock(&g2d->cmdlist_mutex);
	list_move_tail(&node->list, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);
}
/*
 * Append @node to the file's in-use list, chaining the previous cmdlist's
 * link slot to the new one's DMA address, and queue its event if any.
 */
static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
				     struct g2d_cmdlist_node *node)
{
	if (!list_empty(&file_priv->inuse_cmdlist)) {
		struct g2d_cmdlist_node *tail =
			list_last_entry(&file_priv->inuse_cmdlist,
					struct g2d_cmdlist_node, list);

		/* this links to base address of new cmdlist */
		tail->cmdlist->data[tail->cmdlist->last] = node->dma_addr;
	}

	list_add_tail(&node->list, &file_priv->inuse_cmdlist);

	if (node->event)
		list_add_tail(&node->event->base.link, &file_priv->event_list);
}
/*
 * Drop a reference on a userptr mapping and, when it is no longer used
 * (or @force is set during teardown), unmap, unpin and free it.
 *
 * Fix: the original refcount_dec() followed by refcount_read() was not
 * atomic (two droppers could both observe >0 or both observe 0), and
 * refcount_dec() WARNs when it reaches zero; refcount_dec_and_test() is
 * the correct primitive for "free on last put".
 */
static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
					void *obj,
					bool force)
{
	struct g2d_cmdlist_userptr *g2d_userptr = obj;

	if (!obj)
		return;

	if (!force) {
		/* atomically drop the reference; only the last put frees */
		if (!refcount_dec_and_test(&g2d_userptr->refcount))
			return;

		/* pooled mappings are kept alive for reuse until teardown */
		if (g2d_userptr->in_pool)
			return;
	}

	dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
			  DMA_BIDIRECTIONAL, 0);

	unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages,
				    true);
	kvfree(g2d_userptr->pages);

	if (!g2d_userptr->out_of_list)
		list_del_init(&g2d_userptr->list);

	sg_free_table(g2d_userptr->sgt);
	kfree(g2d_userptr->sgt);
	kfree(g2d_userptr);
}
/*
 * Pin and DMA-map a userspace buffer so the engine can access it.
 *
 * An existing mapping with matching address and size is reused with a
 * reference bump.  Otherwise the pages are pinned long-term, gathered
 * into an sg table and mapped for bidirectional DMA.
 *
 * Returns a pointer to the mapping's dma_addr (tracking object stored in
 * *@obj) or an ERR_PTR() on failure.
 */
static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
					unsigned long userptr,
					unsigned long size,
					struct drm_file *filp,
					void **obj)
{
	struct drm_exynos_file_private *file_priv = filp->driver_priv;
	struct g2d_cmdlist_userptr *g2d_userptr;
	struct sg_table *sgt;
	unsigned long start, end;
	unsigned int npages, offset;
	int ret;

	if (!size) {
		DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* check if userptr already exists in userptr_list. */
	list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
		if (g2d_userptr->userptr == userptr) {
			/*
			 * also check size because there could be same address
			 * and different size.
			 */
			if (g2d_userptr->size == size) {
				refcount_inc(&g2d_userptr->refcount);
				*obj = g2d_userptr;

				return &g2d_userptr->dma_addr;
			}

			/*
			 * at this moment, maybe g2d dma is accessing this
			 * g2d_userptr memory region so just remove this
			 * g2d_userptr object from userptr_list not to be
			 * referred again and also except it the userptr
			 * pool to be released after the dma access completion.
			 */
			g2d_userptr->out_of_list = true;
			g2d_userptr->in_pool = false;
			list_del_init(&g2d_userptr->list);

			break;
		}
	}

	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
	if (!g2d_userptr)
		return ERR_PTR(-ENOMEM);

	refcount_set(&g2d_userptr->refcount, 1);
	g2d_userptr->size = size;

	/* page-align the range; keep the sub-page offset for the sg table */
	start = userptr & PAGE_MASK;
	offset = userptr & ~PAGE_MASK;
	end = PAGE_ALIGN(userptr + size);
	npages = (end - start) >> PAGE_SHIFT;
	g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages),
					    GFP_KERNEL);
	if (!g2d_userptr->pages) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = pin_user_pages_fast(start, npages,
				  FOLL_WRITE | FOLL_LONGTERM,
				  g2d_userptr->pages);
	if (ret != npages) {
		DRM_DEV_ERROR(g2d->dev,
			      "failed to get user pages from userptr.\n");
		if (ret < 0)
			goto err_destroy_pages;
		/* partial pin: unpin only what was actually pinned */
		npages = ret;
		ret = -EFAULT;
		goto err_unpin_pages;
	}
	g2d_userptr->npages = npages;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table_from_pages(sgt,
					g2d_userptr->pages,
					npages, offset, size, GFP_KERNEL);
	if (ret < 0) {
		DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
		goto err_free_sgt;
	}

	g2d_userptr->sgt = sgt;

	ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
			      DMA_BIDIRECTIONAL, 0);
	if (ret) {
		DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
		goto err_sg_free_table;
	}

	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
	g2d_userptr->userptr = userptr;

	list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);

	/* account against the shared pool while there is room */
	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
		g2d->current_pool += npages << PAGE_SHIFT;
		g2d_userptr->in_pool = true;
	}

	*obj = g2d_userptr;

	return &g2d_userptr->dma_addr;

err_sg_free_table:
	sg_free_table(sgt);

err_free_sgt:
	kfree(sgt);

err_unpin_pages:
	unpin_user_pages(g2d_userptr->pages, npages);

err_destroy_pages:
	kvfree(g2d_userptr->pages);

err_free:
	kfree(g2d_userptr);

	return ERR_PTR(ret);
}
static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct g2d_cmdlist_userptr *g2d_userptr, *n;
list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
if (g2d_userptr->in_pool)
g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
g2d->current_pool = 0;
}
/* Map a cmdlist register offset onto the buffer slot it addresses. */
static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset)
{
	switch (reg_offset) {
	case G2D_SRC_BASE_ADDR:
	case G2D_SRC_STRIDE:
	case G2D_SRC_COLOR_MODE:
	case G2D_SRC_LEFT_TOP:
	case G2D_SRC_RIGHT_BOTTOM:
		return REG_TYPE_SRC;
	case G2D_SRC_PLANE2_BASE_ADDR:
		return REG_TYPE_SRC_PLANE2;
	case G2D_DST_BASE_ADDR:
	case G2D_DST_STRIDE:
	case G2D_DST_COLOR_MODE:
	case G2D_DST_LEFT_TOP:
	case G2D_DST_RIGHT_BOTTOM:
		return REG_TYPE_DST;
	case G2D_DST_PLANE2_BASE_ADDR:
		return REG_TYPE_DST_PLANE2;
	case G2D_PAT_BASE_ADDR:
		return REG_TYPE_PAT;
	case G2D_MSK_BASE_ADDR:
		return REG_TYPE_MSK;
	default:
		DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n",
			      reg_offset);
		return REG_TYPE_NONE;
	}
}
/* Bytes per pixel for a G2D color format (1 for A8/L8 and anything unknown). */
static unsigned long g2d_get_buf_bpp(unsigned int format)
{
	switch (format) {
	case G2D_FMT_XRGB8888:
	case G2D_FMT_ARGB8888:
		return 4;
	case G2D_FMT_RGB565:
	case G2D_FMT_XRGB1555:
	case G2D_FMT_ARGB1555:
	case G2D_FMT_XRGB4444:
	case G2D_FMT_ARGB4444:
		return 2;
	case G2D_FMT_PACKED_RGB888:
		return 3;
	default:
		return 1;
	}
}
/*
 * Validate a source/destination buffer description against the buffer's
 * actual size: rectangle dimensions must be within [G2D_LEN_MIN,
 * G2D_LEN_MAX] and the last byte the engine would touch must lie inside
 * the buffer.  Non-src/dst slots are accepted unconditionally.
 */
static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d,
					struct g2d_buf_desc *buf_desc,
					enum g2d_reg_type reg_type,
					unsigned long size)
{
	int width, height;
	unsigned long bpp, last_pos;

	/*
	 * check source and destination buffers only.
	 * so the others are always valid.
	 */
	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
		return true;

	/* This check also makes sure that right_x > left_x. */
	width = (int)buf_desc->right_x - (int)buf_desc->left_x;
	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
		DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width);
		return false;
	}

	/* This check also makes sure that bottom_y > top_y. */
	height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
		DRM_DEV_ERROR(g2d->dev,
			      "height[%d] is out of range!\n", height);
		return false;
	}

	bpp = g2d_get_buf_bpp(buf_desc->format);

	/* Compute the position of the last byte that the engine accesses. */
	last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
		(unsigned long)buf_desc->stride +
		(unsigned long)buf_desc->right_x * bpp - 1;

	/*
	 * Since right_x > left_x and bottom_y > top_y we already know
	 * that the first_pos < last_pos (first_pos being the position
	 * of the first byte the engine accesses), it just remains to
	 * check if last_pos is smaller then the buffer size.
	 */
	if (last_pos >= size) {
		DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
			      "is out of range [%lu]!\n", last_pos, size);
		return false;
	}

	return true;
}
/*
 * g2d_map_cmdlist_gem - resolve buffer handles of a cmdlist to DMA addresses
 *
 * The last map_nr (offset, handle) pairs of the cmdlist reference buffers
 * either by GEM handle or by a userspace pointer descriptor, depending on
 * the type recorded earlier by g2d_check_reg_offset(). Each handle is
 * resolved, validated against its buffer descriptor and patched in place
 * with the device address the engine will use.
 *
 * On failure map_nr is trimmed to the number of successfully mapped
 * entries so that g2d_unmap_cmdlist_gem() releases exactly those.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
			       struct g2d_cmdlist_node *node,
			       struct drm_device *drm_dev,
			       struct drm_file *file)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	struct g2d_buf_info *buf_info = &node->buf_info;
	int offset;
	int ret;
	int i;

	for (i = 0; i < buf_info->map_nr; i++) {
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		int reg_pos;
		unsigned long handle;
		dma_addr_t *addr;

		/* Pairs are stored back-to-front at the cmdlist tail. */
		reg_pos = cmdlist->last - 2 * (i + 1);

		offset = cmdlist->data[reg_pos];
		handle = cmdlist->data[reg_pos + 1];

		reg_type = g2d_get_reg_type(g2d, offset);
		if (reg_type == REG_TYPE_NONE) {
			ret = -EFAULT;
			goto err;
		}

		buf_desc = &buf_info->descs[reg_type];

		if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
			struct exynos_drm_gem *exynos_gem;

			exynos_gem = exynos_drm_gem_get(file, handle);
			if (!exynos_gem) {
				ret = -EFAULT;
				goto err;
			}

			if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
							 reg_type, exynos_gem->size)) {
				exynos_drm_gem_put(exynos_gem);
				ret = -EFAULT;
				goto err;
			}

			addr = &exynos_gem->dma_addr;
			buf_info->obj[reg_type] = exynos_gem;
		} else {
			/* Userptr case: 'handle' is a pointer to a descriptor. */
			struct drm_exynos_g2d_userptr g2d_userptr;

			if (copy_from_user(&g2d_userptr, (void __user *)handle,
					   sizeof(struct drm_exynos_g2d_userptr))) {
				ret = -EFAULT;
				goto err;
			}

			if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
							 reg_type,
							 g2d_userptr.size)) {
				ret = -EFAULT;
				goto err;
			}

			addr = g2d_userptr_get_dma_addr(g2d,
							g2d_userptr.userptr,
							g2d_userptr.size,
							file,
							&buf_info->obj[reg_type]);
			if (IS_ERR(addr)) {
				ret = -EFAULT;
				goto err;
			}
		}

		/* Patch the handle slot with the device address in place. */
		cmdlist->data[reg_pos + 1] = *addr;
		buf_info->reg_types[i] = reg_type;
	}

	return 0;

err:
	/* Record how many entries were mapped so unmap can undo them. */
	buf_info->map_nr = i;
	return ret;
}
/*
 * g2d_unmap_cmdlist_gem - undo the buffer mappings of g2d_map_cmdlist_gem()
 *
 * Drops the GEM reference or userptr DMA mapping for every entry recorded
 * in the node's buffer info, then clears the bookkeeping so the node can
 * be reused for the next submission.
 */
static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
				  struct g2d_cmdlist_node *node,
				  struct drm_file *filp)
{
	struct g2d_buf_info *buf_info = &node->buf_info;
	int i;

	for (i = 0; i < buf_info->map_nr; i++) {
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		void *obj;

		reg_type = buf_info->reg_types[i];

		buf_desc = &buf_info->descs[reg_type];
		obj = buf_info->obj[reg_type];

		if (buf_info->types[reg_type] == BUF_TYPE_GEM)
			exynos_drm_gem_put(obj);
		else
			/* Non-forced put: pooled userptr entries stay cached. */
			g2d_userptr_put_dma_addr(g2d, obj, false);

		buf_info->reg_types[i] = REG_TYPE_NONE;
		buf_info->obj[reg_type] = NULL;
		buf_info->types[reg_type] = 0;
		memset(buf_desc, 0x00, sizeof(*buf_desc));
	}

	buf_info->map_nr = 0;
}
/*
 * g2d_dma_start - start executing the first cmdlist of a runqueue node
 *
 * Marks the engine busy *before* programming the DMA registers so that the
 * IRQ handler and runqueue worker observe a consistent busy state, then
 * points the hardware at the cmdlist and kicks it off.
 */
static void g2d_dma_start(struct g2d_data *g2d,
			  struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node =
				list_first_entry(&runqueue_node->run_cmdlist,
						 struct g2d_cmdlist_node, list);

	set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
/*
 * g2d_get_runqueue_node - pop the next node off the device runqueue
 *
 * Returns the first queued runqueue node, removed from the list, or NULL
 * when the runqueue is empty. Caller must hold the runqueue lock.
 */
static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
{
	struct g2d_runqueue_node *node;

	if (list_empty(&g2d->runqueue))
		return NULL;

	node = list_first_entry(&g2d->runqueue,
				struct g2d_runqueue_node, list);
	list_del_init(&node->list);
	return node;
}
/*
 * g2d_free_runqueue_node - release a completed runqueue node
 *
 * Unmaps the buffers of every cmdlist on the node's run list, returns the
 * cmdlists to the device free list (under the cmdlist lock) and frees the
 * node itself back to its slab cache.
 */
static void g2d_free_runqueue_node(struct g2d_data *g2d,
				   struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node;

	mutex_lock(&g2d->cmdlist_mutex);
	/*
	 * commands in run_cmdlist have been completed so unmap all gem
	 * objects in each command node so that they are unreferenced.
	 */
	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);

	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
/**
 * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes
 * @g2d: G2D state object
 * @file: if not NULL, only remove items belonging to this DRM file
 *
 * Each removed node is fully released via g2d_free_runqueue_node().
 * Has to be called under runqueue lock.
 */
static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
{
	struct g2d_runqueue_node *node, *n;

	if (list_empty(&g2d->runqueue))
		return;

	list_for_each_entry_safe(node, n, &g2d->runqueue, list) {
		if (file && node->filp != file)
			continue;

		list_del_init(&node->list);
		g2d_free_runqueue_node(g2d, node);
	}
}
/*
 * g2d_runqueue_worker - retire the finished node and start the next one
 *
 * Runs whenever the engine may have gone idle (IRQ completion, resume).
 * Completes the node that just finished — dropping the runtime-PM reference
 * taken when it was started — and, unless the runqueue is suspended,
 * dequeues and starts the next node.
 */
static void g2d_runqueue_worker(struct work_struct *work)
{
	struct g2d_data *g2d = container_of(work, struct g2d_data,
					    runqueue_work);
	struct g2d_runqueue_node *runqueue_node;

	/*
	 * The engine is busy and the completion of the current node is going
	 * to poke the runqueue worker, so nothing to do here.
	 */
	if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags))
		return;

	mutex_lock(&g2d->runqueue_mutex);

	runqueue_node = g2d->runqueue_node;
	g2d->runqueue_node = NULL;

	if (runqueue_node) {
		/* Drop the PM reference taken when this node was started. */
		pm_runtime_mark_last_busy(g2d->dev);
		pm_runtime_put_autosuspend(g2d->dev);

		complete(&runqueue_node->complete);
		/* Async submitters never wait, so free on their behalf. */
		if (runqueue_node->async)
			g2d_free_runqueue_node(g2d, runqueue_node);
	}

	if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) {
		g2d->runqueue_node = g2d_get_runqueue_node(g2d);

		if (g2d->runqueue_node) {
			int ret;

			ret = pm_runtime_resume_and_get(g2d->dev);
			if (ret < 0) {
				dev_err(g2d->dev, "failed to enable G2D device.\n");
				goto out;
			}

			g2d_dma_start(g2d, g2d->runqueue_node);
		}
	}

out:
	mutex_unlock(&g2d->runqueue_mutex);
}
/*
 * g2d_finish_event - deliver the next pending completion event to userspace
 *
 * Called from the IRQ handler when a command list finishes. Takes the first
 * event queued on the current runqueue node, timestamps it and sends it to
 * the DRM file that requested it. Does nothing if no event was requested.
 */
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
	struct drm_device *drm_dev = g2d->drm_dev;
	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
	struct drm_exynos_pending_g2d_event *e;
	struct timespec64 now;

	if (list_empty(&runqueue_node->event_list))
		return;

	e = list_first_entry(&runqueue_node->event_list,
			     struct drm_exynos_pending_g2d_event, base.link);

	ktime_get_ts64(&now);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
	e->event.cmdlist_no = cmdlist_no;

	drm_send_event(drm_dev, &e->base);
}
/*
 * g2d_irq_handler - acknowledge and dispatch G2D interrupts
 *
 * GCMD_FIN signals completion of one command list: deliver its event and,
 * unless the whole chain also finished, release the hardware hold so DMA
 * continues with the next list. ACMD_FIN signals that all command lists
 * completed: mark the engine idle and poke the runqueue worker.
 */
static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
	struct g2d_data *g2d = dev_id;
	u32 pending;

	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
	/* Write back the pending bits to acknowledge them. */
	if (pending)
		writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);

	if (pending & G2D_INTP_GCMD_FIN) {
		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);

		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
						G2D_DMA_LIST_DONE_COUNT_OFFSET;

		g2d_finish_event(g2d, cmdlist_no);

		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
		if (!(pending & G2D_INTP_ACMD_FIN)) {
			writel_relaxed(G2D_DMA_CONTINUE,
					g2d->regs + G2D_DMA_COMMAND);
		}
	}

	if (pending & G2D_INTP_ACMD_FIN) {
		clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
		queue_work(g2d->g2d_workq, &g2d->runqueue_work);
	}

	return IRQ_HANDLED;
}
/**
 * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node
 * @g2d: G2D state object
 * @file: if not zero, only wait if the current runqueue node belongs
 *        to the DRM file
 *
 * Should the engine not become idle after a 100ms timeout, a hardware
 * reset is issued.
 */
static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file)
{
	struct device *dev = g2d->dev;

	struct g2d_runqueue_node *runqueue_node = NULL;
	unsigned int tries = 10;

	mutex_lock(&g2d->runqueue_mutex);

	/* If no node is currently processed, we have nothing to do. */
	if (!g2d->runqueue_node)
		goto out;

	runqueue_node = g2d->runqueue_node;

	/* Check if the currently processed item belongs to us. */
	if (file && runqueue_node->filp != file)
		goto out;

	mutex_unlock(&g2d->runqueue_mutex);

	/*
	 * Wait for the G2D engine to finish. The lock is dropped here so
	 * the runqueue worker can retire the node; busy-poll in 10ms steps.
	 */
	while (tries-- && (g2d->runqueue_node == runqueue_node))
		mdelay(10);

	mutex_lock(&g2d->runqueue_mutex);

	/* The worker retired the node while we waited — nothing to do. */
	if (g2d->runqueue_node != runqueue_node)
		goto out;

	dev_err(dev, "wait timed out, resetting engine...\n");
	g2d_hw_reset(g2d);

	/*
	 * After the hardware reset of the engine we are going to loose
	 * the IRQ which triggers the PM runtime put().
	 * So do this manually here.
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	complete(&runqueue_node->complete);
	if (runqueue_node->async)
		g2d_free_runqueue_node(g2d, runqueue_node);

out:
	mutex_unlock(&g2d->runqueue_mutex);
}
/*
 * g2d_check_reg_offset - validate the last @nr (offset, value) command pairs
 *
 * Commands are stored as (register offset, value) pairs at the tail of the
 * cmdlist. When @for_addr is true the pairs must all be buffer-address
 * registers (their buffer type — GEM vs userptr — is recorded for the later
 * mapping pass); when false, address registers are rejected and geometry
 * registers have their values captured into the per-buffer descriptors.
 *
 * Returns 0 if all pairs are acceptable, -EINVAL otherwise.
 */
static int g2d_check_reg_offset(struct g2d_data *g2d,
				struct g2d_cmdlist_node *node,
				int nr, bool for_addr)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	int reg_offset;
	int index;
	int i;

	for (i = 0; i < nr; i++) {
		struct g2d_buf_info *buf_info = &node->buf_info;
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		unsigned long value;

		/* Pairs are stored back-to-front at the cmdlist tail. */
		index = cmdlist->last - 2 * (i + 1);

		/* Strip the flag bits; keep only the register offset. */
		reg_offset = cmdlist->data[index] & ~0xfffff000;
		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
			goto err;
		/* Registers are word sized, so offsets must be word aligned. */
		if (reg_offset % 4)
			goto err;

		switch (reg_offset) {
		case G2D_SRC_BASE_ADDR:
		case G2D_SRC_PLANE2_BASE_ADDR:
		case G2D_DST_BASE_ADDR:
		case G2D_DST_PLANE2_BASE_ADDR:
		case G2D_PAT_BASE_ADDR:
		case G2D_MSK_BASE_ADDR:
			if (!for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			/* check userptr buffer type. */
			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
				buf_info->types[reg_type] = BUF_TYPE_USERPTR;
				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
			} else
				buf_info->types[reg_type] = BUF_TYPE_GEM;
			break;
		case G2D_SRC_STRIDE:
		case G2D_DST_STRIDE:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			buf_desc->stride = cmdlist->data[index + 1];
			break;
		case G2D_SRC_COLOR_MODE:
		case G2D_DST_COLOR_MODE:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			/* Low nibble carries the pixel format code. */
			buf_desc->format = value & 0xf;
			break;
		case G2D_SRC_LEFT_TOP:
		case G2D_DST_LEFT_TOP:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			/* Coordinates are packed as 13-bit x/y fields. */
			buf_desc->left_x = value & 0x1fff;
			buf_desc->top_y = (value & 0x1fff0000) >> 16;
			break;
		case G2D_SRC_RIGHT_BOTTOM:
		case G2D_DST_RIGHT_BOTTOM:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			buf_desc->right_x = value & 0x1fff;
			buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
			break;
		default:
			if (for_addr)
				goto err;
			break;
		}
	}

	return 0;

err:
	dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
	return -EINVAL;
}
/* ioctl functions */
/*
 * exynos_g2d_get_ver_ioctl - report the G2D hardware version to userspace
 *
 * Fills the request structure with the driver's compile-time major/minor
 * hardware version. Always succeeds.
 */
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
			     struct drm_file *file)
{
	struct drm_exynos_g2d_get_ver *ver = data;

	ver->major = G2D_HW_MAJOR_VER;
	ver->minor = G2D_HW_MINOR_VER;

	return 0;
}
int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_private *priv = drm_dev->dev_private;
struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
struct drm_exynos_pending_g2d_event *e;
struct g2d_cmdlist_node *node;
struct g2d_cmdlist *cmdlist;
int size;
int ret;
node = g2d_get_cmdlist(g2d);
if (!node)
return -ENOMEM;
/*
* To avoid an integer overflow for the later size computations, we
* enforce a maximum number of submitted commands here. This limit is
* sufficient for all conceivable usage cases of the G2D.
*/
if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
return -EINVAL;
}
node->event = NULL;
if (req->event_type != G2D_EVENT_NOT) {
e = kzalloc(sizeof(*node->event), GFP_KERNEL);
if (!e) {
ret = -ENOMEM;
goto err;
}
e->event.base.type = DRM_EXYNOS_G2D_EVENT;
e->event.base.length = sizeof(e->event);
e->event.user_data = req->user_data;
ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
if (ret) {
kfree(e);
goto err;
}
node->event = e;
}
cmdlist = node->cmdlist;
cmdlist->last = 0;
/*
* If don't clear SFR registers, the cmdlist is affected by register
* values of previous cmdlist. G2D hw executes SFR clear command and
* a next command at the same time then the next command is ignored and
* is executed rightly from next next command, so needs a dummy command
* to next command of SFR clear command.
*/
cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
cmdlist->data[cmdlist->last++] = 0;
/*
* 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG
* and GCF bit should be set to INTEN register if user wants
* G2D interrupt event once current command list execution is
* finished.
* Otherwise only ACF bit should be set to INTEN register so
* that one interrupt is occurred after all command lists
* have been completed.
*/
if (node->event) {
cmdlist->data[cmdlist->last++] = G2D_INTEN;
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
} else {
cmdlist->data[cmdlist->last++] = G2D_INTEN;
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
}
/*
* Check the size of cmdlist. The 2 that is added last comes from
* the implicit G2D_BITBLT_START that is appended once we have
* checked all the submitted commands.
*/
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(g2d->dev, "cmdlist size is too big\n");
ret = -EINVAL;
goto err_free_event;
}
cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd,
sizeof(*cmd) * req->cmd_nr)) {
ret = -EFAULT;
goto err_free_event;
}
cmdlist->last += req->cmd_nr * 2;
ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
node->buf_info.map_nr = req->cmd_buf_nr;
if (req->cmd_buf_nr) {
struct drm_exynos_g2d_cmd *cmd_buf;
cmd_buf = (struct drm_exynos_g2d_cmd *)
(unsigned long)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd_buf,
sizeof(*cmd_buf) * req->cmd_buf_nr)) {
ret = -EFAULT;
goto err_free_event;
}
cmdlist->last += req->cmd_buf_nr * 2;
ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
if (ret < 0)
goto err_unmap;
}
cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
/* head */
cmdlist->head = cmdlist->last / 2;
/* tail */
cmdlist->data[cmdlist->last] = 0;
g2d_add_cmdlist_to_inuse(file_priv, node);
return 0;
err_unmap:
g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
if (node->event)
drm_event_cancel_free(drm_dev, &node->event->base);
err:
g2d_put_cmdlist(g2d, node);
return ret;
}
/*
 * exynos_g2d_exec_ioctl - submit the file's in-use cmdlists for execution
 *
 * Moves all cmdlists (and pending events) of the calling file onto a fresh
 * runqueue node, queues it on the device runqueue and pokes the worker.
 * Synchronous submissions (req->async == 0) block until the node completes
 * and free it; asynchronous ones return immediately and the worker frees
 * the node on completion.
 *
 * Returns 0 on success, negative errno on failure.
 */
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
			  struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
	struct drm_exynos_g2d_exec *req = data;
	struct g2d_runqueue_node *runqueue_node;
	struct list_head *run_cmdlist;
	struct list_head *event_list;

	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
	if (!runqueue_node)
		return -ENOMEM;

	run_cmdlist = &runqueue_node->run_cmdlist;
	event_list = &runqueue_node->event_list;
	INIT_LIST_HEAD(run_cmdlist);
	INIT_LIST_HEAD(event_list);
	init_completion(&runqueue_node->complete);
	runqueue_node->async = req->async;

	/* Take ownership of everything the set_cmdlist ioctl queued up. */
	list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
	list_splice_init(&file_priv->event_list, event_list);

	if (list_empty(run_cmdlist)) {
		dev_err(g2d->dev, "there is no inuse cmdlist\n");
		kmem_cache_free(g2d->runqueue_slab, runqueue_node);
		return -EPERM;
	}

	mutex_lock(&g2d->runqueue_mutex);
	runqueue_node->pid = current->pid;
	runqueue_node->filp = file;
	list_add_tail(&runqueue_node->list, &g2d->runqueue);
	mutex_unlock(&g2d->runqueue_mutex);

	/* Let the runqueue know that there is work to do. */
	queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	if (req->async)
		goto out;

	wait_for_completion(&runqueue_node->complete);
	g2d_free_runqueue_node(g2d, runqueue_node);

out:
	return 0;
}
/*
 * g2d_open - per-file G2D state initialization
 *
 * Initializes the per-file lists used to track submitted cmdlists, pending
 * events and userptr mappings. Always succeeds.
 */
int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
	INIT_LIST_HEAD(&file_priv->event_list);
	INIT_LIST_HEAD(&file_priv->userptr_list);

	return 0;
}
/*
 * g2d_close - tear down all G2D state belonging to a DRM file
 *
 * Removes the file's queued runqueue nodes, waits for (or resets) any node
 * currently executing on its behalf, unmaps the buffers of stale cmdlists
 * and releases all pooled userptr mappings.
 */
void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	struct g2d_data *g2d;
	struct g2d_cmdlist_node *node, *n;

	/* The G2D component may never have been bound. */
	if (!priv->g2d_dev)
		return;

	g2d = dev_get_drvdata(priv->g2d_dev);

	/* Remove the runqueue nodes that belong to us. */
	mutex_lock(&g2d->runqueue_mutex);
	g2d_remove_runqueue_nodes(g2d, file);
	mutex_unlock(&g2d->runqueue_mutex);

	/*
	 * Wait for the runqueue worker to finish its current node.
	 * After this the engine should no longer be accessing any
	 * memory belonging to us.
	 */
	g2d_wait_finish(g2d, file);

	/*
	 * Even after the engine is idle, there might still be stale cmdlists
	 * (i.e. cmdlisst which we submitted but never executed) around, with
	 * their corresponding GEM/userptr buffers.
	 * Properly unmap these buffers here.
	 */
	mutex_lock(&g2d->cmdlist_mutex);
	list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
		g2d_unmap_cmdlist_gem(g2d, node, file);
		list_move_tail(&node->list, &g2d->free_cmdlist);
	}
	mutex_unlock(&g2d->cmdlist_mutex);

	/* release all g2d_userptr in pool. */
	g2d_userptr_free_all(g2d, file);
}
/*
 * g2d_bind - component bind callback: attach the G2D to the DRM device
 *
 * Allocates the DMA-visible cmdlist pool and registers the device with the
 * Exynos DRM DMA/IOMMU layer. On IOMMU failure the cmdlist pool is torn
 * down again. Returns 0 on success, negative errno on failure.
 */
static int g2d_bind(struct device *dev, struct device *master, void *data)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	int ret;

	g2d->drm_dev = drm_dev;

	/* allocate dma-aware cmdlist buffer. */
	ret = g2d_init_cmdlist(g2d);
	if (ret < 0) {
		dev_err(dev, "cmdlist init failed\n");
		return ret;
	}

	ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
	if (ret < 0) {
		dev_err(dev, "failed to enable iommu.\n");
		g2d_fini_cmdlist(g2d);
		return ret;
	}

	/* Publish the device so the ioctls can reach it. */
	priv->g2d_dev = dev;

	dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
			G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
	return 0;
}
/*
 * g2d_unbind - component unbind callback: detach the G2D from the DRM device
 *
 * Suspends the runqueue, waits for the engine to idle, unpublishes the
 * device (so new ioctls can no longer reach it), flushes the worker and
 * unregisters from the DMA/IOMMU layer — in that order.
 */
static void g2d_unbind(struct device *dev, struct device *master, void *data)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_private *priv = drm_dev->dev_private;

	/* Suspend operation and wait for engine idle. */
	set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	g2d_wait_finish(g2d, NULL);

	priv->g2d_dev = NULL;

	cancel_work_sync(&g2d->runqueue_work);
	exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}
/* Component framework hooks: called when the Exynos DRM master binds/unbinds. */
static const struct component_ops g2d_component_ops = {
	.bind	= g2d_bind,
	.unbind = g2d_unbind,
};
/*
 * g2d_probe - platform driver probe
 *
 * Allocates driver state, sets up the runqueue slab cache, workqueue,
 * mutexes, gate clock, runtime PM (2s autosuspend), MMIO registers and
 * IRQ, then registers with the component framework. Error paths unwind in
 * reverse order of setup.
 */
static int g2d_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct g2d_data *g2d;
	int ret;

	g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
	if (!g2d)
		return -ENOMEM;

	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
	if (!g2d->runqueue_slab)
		return -ENOMEM;

	g2d->dev = dev;

	g2d->g2d_workq = create_singlethread_workqueue("g2d");
	if (!g2d->g2d_workq) {
		dev_err(dev, "failed to create workqueue\n");
		ret = -EINVAL;
		goto err_destroy_slab;
	}

	INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
	INIT_LIST_HEAD(&g2d->free_cmdlist);
	INIT_LIST_HEAD(&g2d->runqueue);

	mutex_init(&g2d->cmdlist_mutex);
	mutex_init(&g2d->runqueue_mutex);

	g2d->gate_clk = devm_clk_get(dev, "fimg2d");
	if (IS_ERR(g2d->gate_clk)) {
		dev_err(dev, "failed to get gate clock\n");
		ret = PTR_ERR(g2d->gate_clk);
		goto err_destroy_workqueue;
	}

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_enable(dev);
	clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);

	g2d->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g2d->regs)) {
		ret = PTR_ERR(g2d->regs);
		goto err_put_clk;
	}

	g2d->irq = platform_get_irq(pdev, 0);
	if (g2d->irq < 0) {
		ret = g2d->irq;
		goto err_put_clk;
	}

	ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
								"drm_g2d", g2d);
	if (ret < 0) {
		dev_err(dev, "irq request failed\n");
		goto err_put_clk;
	}

	g2d->max_pool = MAX_POOL;

	platform_set_drvdata(pdev, g2d);

	ret = component_add(dev, &g2d_component_ops);
	if (ret < 0) {
		dev_err(dev, "failed to register drm g2d device\n");
		goto err_put_clk;
	}

	return 0;

err_put_clk:
	pm_runtime_disable(dev);
err_destroy_workqueue:
	destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
	kmem_cache_destroy(g2d->runqueue_slab);
	return ret;
}
/*
 * g2d_remove - platform driver remove: reverse of g2d_probe()
 *
 * Unregisters the component first (so unbind has already idled the engine
 * and flushed the worker), drops stale runqueue nodes and releases the
 * remaining resources.
 */
static void g2d_remove(struct platform_device *pdev)
{
	struct g2d_data *g2d = platform_get_drvdata(pdev);

	component_del(&pdev->dev, &g2d_component_ops);

	/* There should be no locking needed here. */
	g2d_remove_runqueue_nodes(g2d, NULL);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	g2d_fini_cmdlist(g2d);
	destroy_workqueue(g2d->g2d_workq);
	kmem_cache_destroy(g2d->runqueue_slab);
}
/* System sleep suspend: halt the runqueue and wait for the engine to idle. */
static int g2d_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	/*
	 * Suspend the runqueue worker operation and wait until the G2D
	 * engine is idle.
	 */
	set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	g2d_wait_finish(g2d, NULL);
	flush_work(&g2d->runqueue_work);

	return 0;
}
/* System sleep resume: re-enable the runqueue and restart pending work. */
static int g2d_resume(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	return 0;
}
/* Runtime PM suspend: gate the engine clock. */
static int g2d_runtime_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	clk_disable_unprepare(g2d->gate_clk);

	return 0;
}
/* Runtime PM resume: ungate the engine clock; propagates clock errors. */
static int g2d_runtime_resume(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(g2d->gate_clk);
	if (ret < 0)
		dev_warn(dev, "failed to enable clock.\n");

	return ret;
}
/* Combined system-sleep and runtime PM callbacks for the G2D device. */
static const struct dev_pm_ops g2d_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
	RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
/* Devicetree compatibles handled by this driver. */
static const struct of_device_id exynos_g2d_match[] = {
	{ .compatible = "samsung,exynos5250-g2d" },
	{ .compatible = "samsung,exynos4212-g2d" },
	{},
};
MODULE_DEVICE_TABLE(of, exynos_g2d_match);
/* Platform driver glue; registered by the Exynos DRM core. */
struct platform_driver g2d_driver = {
	.probe		= g2d_probe,
	.remove		= g2d_remove,
	.driver		= {
		.name	= "exynos-drm-g2d",
		.pm	= pm_ptr(&g2d_pm_ops),
		.of_match_table = exynos_g2d_match,
	},
};
|
/*
* Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
* Copyright (c) 2016-2017, Dave Watson <[email protected]>. All rights reserved.
* Copyright (c) 2016-2017, Lance Chao <[email protected]>. All rights reserved.
* Copyright (c) 2016, Fridolin Pokorny <[email protected]>. All rights reserved.
* Copyright (c) 2016, Nikos Mavrogiannopoulos <[email protected]>. All rights reserved.
* Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>
#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>
#include "tls.h"
/*
 * Per-call decryption parameters/results. The 'inargs' group is the set of
 * input flags; 'skb' carries the decrypted skb back to the caller.
 */
struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;		/* in: attempt zero-copy decryption */
	bool async;		/* in/out: async crypto allowed / was used */
	bool async_done;	/* out: async completions already ran (backlog) */
	u8 tail;		/* in: expected record tail byte (TLS 1.3, zc) */
	);

	struct sk_buff *skb;	/* out: skb holding the decrypted record */
};
/*
 * Scratch state placed after the aead_request; also reachable from the
 * async completion callback (see tls_decrypt_done()).
 */
struct tls_decrypt_ctx {
	struct sock *sk;		/* socket the request belongs to */
	u8 iv[TLS_MAX_IV_SIZE];		/* per-record IV */
	u8 aad[TLS_MAX_AAD_SIZE];	/* additional authenticated data */
	u8 tail;			/* record tail byte scratch */
	bool free_sgout;		/* free dst pages on async completion */
	struct scatterlist sg[];	/* in/out scatterlists, variable size */
};
/*
 * tls_err_abort - put the socket into a fatal error state
 * @sk:  socket to abort
 * @err: negative error code describing the failure
 *
 * Publishes the error and wakes any pollers/readers. The store to sk_err
 * must be visible before the wakeup, hence the write barrier.
 */
noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}
/*
 * __skb_nsg - count scatterlist entries needed for a region of an skb
 * @skb:    buffer to examine
 * @offset: byte offset into the skb where the region starts
 * @len:    length of the region in bytes
 * @recursion_level: current frag-list nesting depth
 *
 * Walks the linear part, page frags and (recursively) the frag list,
 * counting how many scatterlist slots the [offset, offset+len) region
 * would occupy. Returns the count, or -EMSGSIZE if frag lists nest
 * deeper than 24 levels.
 */
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	/* Guard against pathological frag-list nesting. */
	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	/* Linear (head) data: at most one entry. */
	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	/* Page fragments: one entry per frag overlapped by the region. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	/* Chained skbs: recurse with a relative offset. */
	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	/* The region must be fully consumed by the walk above. */
	BUG_ON(len);
	return elt;
}
/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	/* Start the recursive walk at nesting depth 0. */
	return __skb_nsg(skb, offset, len, 0);
}
/*
 * tls_padding_length - strip TLS 1.3 zero padding and find the content type
 *
 * TLS 1.3 records end in <content-type byte><zero padding>. Scan backwards
 * from the tag for the first non-zero byte, record it as the real content
 * type and return the number of padding bytes to trim. For older TLS
 * versions returns 0. In the zero-copy case the tail byte was already
 * captured in darg->tail, avoiding the skb walk.
 *
 * Returns the padding length (>= 0) or a negative error (-EBADMSG for a
 * record that is all padding).
 */
static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			offset--;
		}
		tlm->control = content_type;
	}
	return sub;
}
/*
 * tls_decrypt_done - completion callback for asynchronous record decryption
 * @data: the aead_request that completed
 * @err:  crypto layer result (0, -EBADMSG, -EINPROGRESS, ...)
 *
 * Recovers the tls_decrypt_ctx stored after the request, propagates any
 * error to the socket, releases destination pages for non-inplace
 * decryption, frees the request and signals the waiter once the last
 * outstanding request completes.
 */
static void tls_decrypt_done(void *data, int err)
{
	struct aead_request *aead_req = data;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	/* If requests get too backlogged crypto API returns -EBUSY and calls
	 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
	 * to make waiting for backlog to flush with crypto_wait_req() easier.
	 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
	 * -EINPROGRESS -> 0.
	 * We have a single struct crypto_async_request per direction, this
	 * scheme doesn't help us, so just ignore the first ->complete().
	 */
	if (err == -EINPROGRESS)
		return;

	/* dctx was placed right after the (aligned) aead_request. */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk),
				      LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if skb was not decrypted inplace */
	if (dctx->free_sgout) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	/* Last outstanding request wakes the synchronous waiter. */
	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
}
/*
 * tls_decrypt_async_wait - wait for all in-flight async decryptions
 *
 * decrypt_pending idles at 1; dropping our reference to 0 means no request
 * is outstanding. Otherwise sleep until the last completion fires, then
 * restore the idle count. Returns the recorded async error, if any.
 */
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);

	return ctx->async_wait.err;
}
/*
 * tls_do_decryption - run one AEAD decryption, synchronously or async
 *
 * Programs the request with the record's scatterlists, AAD and IV, then:
 *  - sync path (darg->async == false): waits for the result inline;
 *  - async path: submits with the tls_decrypt_done() callback. A return of
 *    -EINPROGRESS means the request is in flight (reported as 0); -EBUSY
 *    (crypto backlog) is resolved by waiting for all completions, after
 *    which darg->async_done is set and async mode is switched off.
 *
 * Returns 0 on success/accepted-async, negative errno on failure.
 */
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	/* Ciphertext covers the payload plus the authentication tag. */
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		DECLARE_CRYPTO_WAIT(wait);

		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		ret = crypto_aead_decrypt(aead_req);
		if (ret == -EINPROGRESS || ret == -EBUSY)
			ret = crypto_wait_req(ret, &wait);
		return ret;
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS)
		return 0;

	if (ret == -EBUSY) {
		ret = tls_decrypt_async_wait(ctx);
		darg->async_done = true;
		/* all completions have run, we're not doing async anymore */
		darg->async = false;
		return ret;
	}

	/* Completed synchronously (or failed): undo the pending count. */
	atomic_dec(&ctx->decrypt_pending);
	darg->async = false;

	return ret;
}
/*
 * tls_trim_both_msgs - shrink the open record to @target_size of plaintext
 *
 * Trims the plaintext message to @target_size bytes and the encrypted
 * message to the matching size including the record overhead (header,
 * IV, tag). A target of 0 frees both sides' pages entirely.
 */
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}
/*
 * tls_alloc_encrypted_msg - grow the open record's ciphertext buffer
 *
 * Allocates @len bytes of page memory for the encrypted side of the
 * currently open record. Returns 0 or a negative errno from sk_msg_alloc().
 */
static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}
/*
 * tls_clone_plaintext_msg - extend the plaintext sg by borrowing from msg_en
 *
 * Makes the plaintext message reach @required bytes by cloning page
 * references from the encrypted message, so plaintext and ciphertext share
 * the same offsets. Returns 0 or a negative errno from sk_msg_clone().
 */
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
/* Allocate and initialise a new TX record, with room behind the struct
 * for the AEAD request context.  Returns NULL on allocation failure.
 */
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	/* The AEAD request context lives directly behind the record. */
	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	/* Entry 0 of each AEAD sg list carries the AAD; entry 1 gets
	 * chained to the plaintext/ciphertext data later, in
	 * tls_push_record().
	 */
	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	rec->sk = sk;
	return rec;
}
/* Release both sg lists of @rec, then the record itself. */
static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_plaintext);
	sk_msg_free(sk, &rec->msg_encrypted);
	kfree(rec);
}
/* Free the open (partially built) TX record, if any. */
static void tls_free_open_rec(struct sock *sk)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_get_ctx(sk));

	if (!ctx->open_rec)
		return;

	tls_free_rec(sk, ctx->open_rec);
	ctx->open_rec = NULL;
}
/* Transmit encrypted records queued on ctx->tx_list.
 *
 * First finishes any partially sent record, then pushes every record
 * whose encryption has completed (tx_ready set), stopping at the first
 * record that is still being encrypted so records leave in order.
 * @flags == -1 means use the MSG_* flags saved in each record.  Any
 * error other than -EAGAIN aborts the socket with -EBADMSG.
 */
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		/* A previous push stopped mid-record; the partial record
		 * is always the head of tx_list.
		 */
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			/* Records must be sent in order; stop at the
			 * first one whose encryption has not finished.
			 */
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, -EBADMSG);

	return rc;
}
/* AEAD completion callback for TX encryption.
 *
 * Restores the record-header bytes that tls_do_encryption() hid from
 * the cipher, marks the record ready for transmission (or records the
 * error and aborts the socket), schedules the TX work if this record
 * is at the head of tx_list, and drops one reference on
 * encrypt_pending, waking any waiter on the last completion.
 */
static void tls_encrypt_done(void *data, int err)
{
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct tls_rec *rec = data;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct sock *sk;

	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
		return;

	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	/* Re-expose the TLS record header in the first data entry. */
	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;	/* do not transmit a failed record */

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec) {
			/* Schedule the transmission */
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	/* The last pending completion wakes tls_encrypt_async_wait(). */
	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
}
/* Wait until all in-flight TX encryptions have completed.
 *
 * encrypt_pending carries a persistent +1 reference (note the
 * atomic_inc() restoring it below, and the <1 warning in
 * tls_do_encryption()); dropping it here lets the final
 * tls_encrypt_done() signal the completion.  Returns the error, if
 * any, recorded by the async callbacks.
 */
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	/* Restore the persistent reference for the next waiter. */
	atomic_inc(&ctx->encrypt_pending);

	return ctx->async_wait.err;
}
/* Submit AEAD encryption of the open record.
 *
 * Builds the per-record IV (XORed with the sequence number, with a
 * leading constant byte for CCM ciphers), hides the plaintext TLS
 * header from the cipher by shifting the first ciphertext sg entry,
 * queues the record on tx_list and fires the request.  Returns 0 on
 * synchronous success, -EINPROGRESS when the crypto layer completes
 * asynchronously, or a negative error.  Unless the request failed,
 * the record is unhooked from ctx->open_rec and the TX record
 * sequence number is advanced.
 */
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);
	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	/* Skip the record header: it is sent in the clear, unencrypted;
	 * tls_encrypt_done() undoes this shift.
	 */
	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (rc == -EBUSY) {
		/* Request was backlogged: wait for pending crypto to
		 * drain, then treat this request as in progress.
		 */
		rc = tls_encrypt_async_wait(ctx);
		rc = rc ?: -EINPROGRESS;
	}
	if (!rc || rc != -EINPROGRESS) {
		/* Completed synchronously (or failed outright): the async
		 * callback will not run, so drop the pending reference
		 * and restore the header bytes here.
		 */
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption is not failure */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}
/* Split the open record @from at @split_point bytes of plaintext.
 *
 * The first @split_point bytes stay in @from (described by @msg_opl);
 * the remainder, together with any leftover apply_bytes, moves into a
 * freshly allocated record returned through @to, whose encrypted sg is
 * sized for the remainder plus @tx_overhead_size.  *orig_end saves the
 * original plaintext sg end index so tls_merge_open_record() can undo
 * the split.  Returns 0 or a negative errno.
 */
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	/* Walk the old plaintext until apply_bytes are consumed; when
	 * the boundary lands inside an entry, carve its tail into @tmp
	 * while taking an extra page reference for the new record.
	 */
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	/* Truncate the old record to the consumed prefix. */
	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	/* Copy the carved partial entry (if any), then the remaining
	 * whole entries, into the new record's plaintext sg.
	 */
	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
/* Undo tls_split_open_record(): fold @from's plaintext back onto @to.
 *
 * If the split carved a partial entry out of the same page, the two
 * halves are re-joined and the extra page reference taken by the split
 * is dropped.  @orig_end restores the sg end index saved at split
 * time.  @from's encrypted buffer replaces @to's, and @from itself is
 * freed.
 */
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	/* Re-join a split entry when old tail and new head are
	 * contiguous within the same page.
	 */
	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}
/* Close and encrypt the open record, then transmit ready records.
 *
 * If apply_bytes requires sending only part of the buffered plaintext,
 * or the encrypted buffer is too small for everything, the record is
 * split first and the remainder re-installed as ctx->open_rec after a
 * successful submission.  The AAD, the plaintext record header and
 * (for TLS 1.3) the trailing content-type byte are filled in before
 * handing the record to tls_do_encryption().  Returns the encryption
 * result (possibly -EINPROGRESS) or the result of tls_tx_records().
 */
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	/* Also force a split when the encrypted buffer cannot hold the
	 * whole (or applied part of the) record plus crypto overhead.
	 */
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	/* The sg ring wrapped: chain the tail back to the head. */
	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	/* Chain AAD in front of plaintext and ciphertext sg lists. */
	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	/* Write the cleartext TLS record header ahead of the data. */
	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		/* NOTE(review): switches subsequent sends to the
		 * async-capable path after a failed or backlogged
		 * encryption - presumably so they handle -EINPROGRESS;
		 * confirm intent against git history.
		 */
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		/* The unsent remainder becomes the new open record. */
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
/* Run the sk_msg BPF verdict program (if one is attached) on the open
 * record and act on its decision: __SK_PASS encrypts and sends via
 * tls_push_record(), __SK_REDIRECT hands the plaintext to another
 * socket, __SK_DROP/-default discards it with -EACCES.  Loops until
 * the open record is fully consumed.  *copied is adjusted down for
 * data that ends up not being sent.  Called with the socket lock held;
 * the lock is dropped and reacquired around the redirect send.
 */
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		/* No BPF policy to consult: push the record directly. */
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		/* delta = bytes consumed by the verdict program itself. */
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	/* Honour corking unless the record is full or sg space ran out. */
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		/* The redirected send may sleep; drop the socket lock. */
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		/* Record fully consumed (or apply window exhausted):
		 * re-run the verdict program for the next chunk.
		 */
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}
/* Flush the open record through the BPF TX verdict path if it holds
 * any buffered plaintext.  Returns 0 when there is nothing to push.
 */
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_get_ctx(sk));
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}
/* MSG_SPLICE_PAGES path: extract pages from the message iterator and
 * append them directly to the plaintext sg, charging the socket for
 * each chunk.  Stops after @try_to_copy bytes or when @msg_pl is full.
 * *copied is advanced by the bytes taken.  Returns 0, or a negative
 * error (-EIO for pages unsuitable for sendpage, reverting the
 * iterator for that chunk).
 */
static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
				 struct sk_msg *msg_pl, size_t try_to_copy,
				 ssize_t *copied)
{
	struct page *page = NULL, **pages = &page;

	do {
		ssize_t part;
		size_t off;

		/* Pull one page at a time out of the iterator. */
		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
		if (part <= 0)
			return part ?: -EIO;

		if (WARN_ON_ONCE(!sendpage_ok(page))) {
			iov_iter_revert(&msg->msg_iter, part);
			return -EIO;
		}

		sk_msg_page_add(msg_pl, page, part, off);
		msg_pl->sg.copybreak = 0;
		msg_pl->sg.curr = msg_pl->sg.end;
		sk_mem_charge(sk, part);
		*copied += part;
		try_to_copy -= part;
	} while (try_to_copy && !sk_msg_full(msg_pl));

	return 0;
}
/* Core sendmsg state machine; runs with the socket lock and the TLS
 * tx_lock held.
 *
 * Buffers user data into the open record's plaintext sg: zero-copy
 * from the iterator when the record will be pushed now and crypto is
 * synchronous, page-stealing for MSG_SPLICE_PAGES, plain copy
 * otherwise.  Full records (or the final chunk when !MSG_MORE) are
 * pushed through bpf_exec_tx_verdict().  Waits for sndbuf/memory as
 * needed, and for outstanding async encryptions before returning.
 * Returns the number of bytes accepted or a negative error.
 */
static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
				 size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	/* MSG_EOR contradicts MSG_MORE. */
	if (!eor && (msg->msg_flags & MSG_EOR))
		return -EINVAL;

	/* A cmsg may select a non-data record type, which flushes the
	 * currently open record first.
	 */
	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;

			if (sk_msg_full(msg_pl))
				full_record = true;

			if (full_record || eor)
				goto copied;
			continue;
		}

		/* Zero-copy straight from the user iterator when the
		 * record will be pushed right away and crypto completes
		 * synchronously (the pages must stay stable otherwise).
		 */
		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		/* Copy path: share pages with the encrypted sg, then
		 * copy the user data in.
		 */
		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
copied:
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			if (ctx->open_rec)
				tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc || eor) {
		int err;

		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
	return copied > 0 ? copied : ret;
}
/* TLS sendmsg() entry point: reject unsupported flags, then run the
 * sendmsg state machine with both the TX mutex and socket lock held.
 */
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int err;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
			       MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	/* The TX mutex is taken before the socket lock. */
	err = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (err)
		return err;

	lock_sock(sk);
	err = tls_sw_sendmsg_locked(sk, msg, size);
	release_sock(sk);

	mutex_unlock(&tls_ctx->tx_lock);
	return err;
}
/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 *
 * Pushes the open record, if it holds any plaintext, through the BPF
 * verdict path (retrying once on 0/-EAGAIN), then waits for pending
 * async encryptions and transmits whatever completed.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;

	/* Racy check before taking the locks; re-checked below. */
	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* same checks as in tls_sw_push_pending_record() */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;
	if (msg_pl->sg.size == 0)
		goto unlock;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	if (tls_encrypt_async_wait(ctx))
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
/* Wait until the strparser has a complete record for us.
 *
 * Returns 1 (after loading the record with tls_strp_msg_load()) when a
 * message is ready; 0 when the psock queue has data, the socket is
 * done, or RX was shut down; or a negative error (-EAGAIN for a
 * non-blocking socket, sock_error(), sk_wait_event() failure, or a
 * pending signal).  @released reports to the strparser whether the
 * socket lock was dropped while waiting.
 */
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		/* Error from the previous sk_wait_event() round. */
		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			/* New TCP data arrived: poke the strparser. */
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		/* sk_wait_event() releases the socket lock. */
		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}
/* Map @length bytes of user memory described by @from into the
 * scatterlist @to, appending at most @to_max_pages page entries
 * starting at index *pages_used (updated on return).  Pages are pinned
 * with iov_iter_get_pages2(); on failure the iterator is reverted by
 * the amount consumed.  Returns 0 or -EFAULT.
 */
static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		/* One sg entry per page; only the first page of a batch
		 * can start at a non-zero offset.
		 */
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}
/* Allocate a fresh paged skb large enough to receive @full_len bytes
 * of decrypted record, mirroring the header of the input @skb.  Its
 * strparser offset starts at zero.  Returns NULL on failure.
 */
static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct sk_buff *clr_skb;
	struct strp_msg *clr_rxm;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}
/* Decrypt handlers
*
* tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
* | Input | Output
* -------------------------------------------------------------------
* zc | Zero-copy decrypt allowed | Zero-copy performed
* async | Async decrypt allowed | Async crypto used / in progress
* skb | * | Output skb
*
* If ZC decryption was performed darg.skb will point to the input skb.
*/
/* This function decrypts the input skb into either out_iov or in out_sg
 * or in skb buffers itself. The input parameter 'darg->zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	/* Size the output sg list: for zero-copy, the caller's iovec or
	 * sg; otherwise a freshly allocated cleartext skb.
	 */
	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 *   aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		/* Full IV is derived from the stored salt+iv. */
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		/* Explicit IV travels on the wire after the header. */
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	/* Prepare sgout: entry 0 is always the AAD. */
	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			/* TLS 1.3 tail (content type) lands in dctx. */
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}
	dctx->free_sgout = !!pages;

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err) {
		if (darg->async_done)
			goto exit_free_skb;
		goto exit_free_pages;
	}

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		/* Completion frees the request; keep the input skb alive
		 * on async_hold until then.
		 */
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (unlikely(darg->async_done))
		return 0;

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}
/* Software decrypt handler (see the darg contract above).
 *
 * Decrypts into the caller's iovec (or into a fresh skb when zero-copy
 * is off), retrying without zero-copy when a TLS 1.3 record turns out
 * not to be application data - its real content type only becomes
 * known from the decrypted tail.  Trims any TLS 1.3 padding from the
 * resulting skb.
 */
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}
/* Device-offload decrypt handler (see the darg contract above).
 *
 * Returns 0 when RX offload is not configured or the device did not
 * decrypt this record (caller falls back to the software path), 1 when
 * the record arrived already decrypted - then only padding trimming
 * and the requested copy/detach are done here - or a negative error.
 */
static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	/* Device path never goes through async crypto. */
	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}
/* Decrypt a single record: try device offload first, fall back to the
 * software path, then strip the header and crypto overhead from the
 * strparser view and advance the RX record sequence number.
 */
static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	/* tls_decrypt_device() returns 0 when it did not handle the
	 * record, handing it over to the software path.
	 */
	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}
/* Decrypt the record currently held by the strparser directly into the
 * caller-supplied scatterlist (zero-copy, no msghdr involved).
 */
int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };
	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
/* Track the TLS record content type across a multi-record read.
 * On the first record, latch its type into *control and report it to
 * user space via a TLS_GET_RECORD_TYPE cmsg. Non-DATA record types must
 * not be delivered without their type, so a failed or truncated cmsg is
 * an error for those. Later records must match the latched type.
 * Return: 1 to keep processing this record, 0 if the record type differs
 * from the latched one (caller stops before it), negative error.
 */
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;
	if (!*control) {
		/* First record of this read - latch and report its type */
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;
		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		/* Only non-DATA types require successful cmsg delivery */
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}
	return 1;
}
/* Tell the strparser that the current record has been consumed, so it
 * can make the next one available.
 */
static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}
/* This function traverses the rx_list in the tls receive context to copy
 * the decrypted records into the buffer provided by the caller if zero
 * copy is not true. Further, the records are removed from the rx_list if
 * it is not a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek,
			   bool *more)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;
	/* First skip over 'skip' bytes of already-delivered data; the
	 * content type of skipped records is still checked/latched.
	 */
	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);
		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;
		if (skip < rxm->full_len)
			break;
		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}
	/* Copy up to 'len' bytes of decrypted records to the caller */
	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);
		tlm = tls_msg(skb);
		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;
		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto more;
		len = len - chunk;
		copied = copied + chunk;
		/* Consume the data from record if it is non-peek case*/
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;
			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}
		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;
		if (msg)
			msg->msg_flags |= MSG_EOR;
		next_skb = skb_peek_next(skb, &ctx->rx_list);
		/* Fully consumed record - unlink and free unless peeking */
		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}
		skb = next_skb;
	}
	err = 0;
out:
	/* Partial progress wins over an error indication */
	return copied ? : err;
more:
	/* Tell the caller more data remains queued but can't be returned
	 * right now (type change / cmsg failure / copy error).
	 */
	if (more)
		*more = true;
	goto out;
}
/* Periodically release the socket backlog during a long read so that TCP
 * processing (and hence the strparser) can make progress.
 * Flush only when more data is still wanted, and either we have consumed
 * SZ_128K since the last flush or the TCP receive queue has drained below
 * one maximum-size record.
 * Return: true if the socket lock was dropped and re-taken (backlog run).
 */
static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;
	/* Caller already has all the data it asked for - no need to flush */
	if (len_left <= decrypted)
		return false;
	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;
	*flushed_at = done;
	return sk_flush_backlog(sk);
}
/* Acquire the single-reader slot of the TLS RX path.
 * Only one reader may run at a time; if another reader is present, sleep
 * (bounded by the socket receive timeout) until it leaves.
 * Called with the socket lock held; sk_wait_event() drops and re-takes it.
 * Return: 0 on success, -EAGAIN on timeout, -ERESTARTSYS etc. on signal.
 */
static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;
	int ret;
	timeo = sock_rcvtimeo(sk, nonblock);
	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);
		/* Mark contention so the holder knows to wake us on release */
		ctx->reader_contended = 1;
		add_wait_queue(&ctx->wq, &wait);
		ret = sk_wait_event(sk, &timeo,
				    !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);
		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		if (ret < 0)
			return ret;
	}
	WRITE_ONCE(ctx->reader_present, 1);
	return 0;
}
/* Take the socket lock and the TLS RX single-reader slot together.
 * On failure the socket lock is released again, so the caller only ever
 * holds both or neither.
 * Return: 0 on success, negative error otherwise.
 */
static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
			      bool nonblock)
{
	int ret;

	lock_sock(sk);
	ret = tls_rx_reader_acquire(sk, ctx, nonblock);
	if (!ret)
		return 0;

	release_sock(sk);
	return ret;
}
/* Give up the TLS RX single-reader slot.
 * If another reader signalled contention, wake one waiter; only clear the
 * contended flag when nobody is actually sleeping, so a waiter that set
 * the flag but has not yet queued itself is not missed.
 */
static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			ctx->reader_contended = 0;
		WARN_ON_ONCE(!ctx->reader_present);
	}
	WRITE_ONCE(ctx->reader_present, 0);
}
/* Release the reader slot and then the socket lock (reverse of
 * tls_rx_reader_lock()).
 */
static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}
/* recvmsg() implementation for SW TLS RX.
 * Drains previously decrypted records from rx_list first, then decrypts
 * new records from the strparser until 'len' bytes are copied, the
 * rcvlowat target is met, or a non-DATA record forces a stop. Supports
 * zero-copy decrypt directly into the user iov, async crypto (TLS 1.2
 * DATA records only), BPF strparser redirection and psock ingress data.
 */
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	ssize_t decrypted = 0, async_copy_bytes = 0;
	struct sk_psock *psock;
	unsigned char control = 0;
	size_t flushed_at = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t peeked = 0;
	bool async = false;
	int target, err;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool rx_more = false;
	bool released = true;
	bool bpf_strp_enabled;
	bool zc_capable;
	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;
	psock = sk_psock_get(sk);
	bpf_strp_enabled = sk_psock_strp_enabled(psock);
	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto end;
	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
	if (err < 0)
		goto end;
	copied = err;
	/* Done if the request is satisfied, a control record was returned,
	 * or rx_list still holds data that couldn't be delivered.
	 */
	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
		goto end;
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;
	/* ZC decrypt into the user buffer requires a plain iovec, no peek
	 * and no BPF strparser, and a protocol that permits it.
	 */
	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		ctx->zc_capable;
	decrypted = 0;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;
		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			/* No TLS record available - fall back to psock
			 * ingress data if present, otherwise stop.
			 */
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}
		memset(&darg.inargs, 0, sizeof(darg.inargs));
		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));
		to_decrypt = rxm->full_len - prot->overhead_size;
		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;
		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;
		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}
		async |= darg.async;
		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */
		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
		if (err <= 0) {
			DEBUG_NET_WARN_ON_ONCE(darg.zc);
			tls_rx_rec_done(ctx);
put_on_rx_list_err:
			__skb_queue_tail(&ctx->rx_list, darg.skb);
			goto recv_end;
		}
		/* periodically flush backlog, and feed strparser */
		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
						  decrypted + copied,
						  &flushed_at);
		/* TLS 1.3 may have updated the length by more than overhead */
		rxm = strp_msg(darg.skb);
		chunk = rxm->full_len;
		tls_rx_rec_done(ctx);
		if (!darg.zc) {
			bool partially_consumed = chunk > len;
			struct sk_buff *skb = darg.skb;
			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
			if (async) {
				/* TLS 1.2-only, to_decrypt must be text len */
				chunk = min_t(int, to_decrypt, len);
				async_copy_bytes += chunk;
put_on_rx_list:
				decrypted += chunk;
				len -= chunk;
				__skb_queue_tail(&ctx->rx_list, skb);
				if (unlikely(control != TLS_RECORD_TYPE_DATA))
					break;
				continue;
			}
			if (bpf_strp_enabled) {
				/* sk_psock_tls_strp_read() may release the
				 * socket lock while running the BPF program.
				 */
				released = true;
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}
			if (partially_consumed)
				chunk = len;
			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto put_on_rx_list_err;
			if (is_peek) {
				peeked += chunk;
				goto put_on_rx_list;
			}
			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}
			consume_skb(skb);
		}
		decrypted += chunk;
		len -= chunk;
		/* Return full control message to userspace before trying
		 * to parse another message type
		 */
		msg->msg_flags |= MSG_EOR;
		if (control != TLS_RECORD_TYPE_DATA)
			break;
	}
recv_end:
	if (async) {
		int ret;
		/* Wait for all previously submitted records to be decrypted */
		ret = tls_decrypt_async_wait(ctx);
		__skb_queue_purge(&ctx->async_hold);
		if (ret) {
			if (err >= 0 || err == -EINPROGRESS)
				err = ret;
			goto end;
		}
		/* Drain records from the rx_list & copy if required */
		if (is_peek)
			err = process_rx_list(ctx, msg, &control, copied + peeked,
					      decrypted - peeked, is_peek, NULL);
		else
			err = process_rx_list(ctx, msg, &control, 0,
					      async_copy_bytes, is_peek, NULL);
		/* we could have copied less than we wanted, and possibly nothing */
		decrypted += max(err, 0) - async_copy_bytes;
	}
	copied += decrypted;
end:
	tls_rx_reader_unlock(sk, ctx);
	if (psock)
		sk_psock_put(sk, psock);
	/* Partial progress wins over an error indication */
	return copied ? : err;
}
/* splice_read() implementation for SW TLS RX.
 * Splices at most one decrypted DATA record into the pipe per call.
 * Prefers records already queued on rx_list; otherwise waits for and
 * decrypts the next record. Partially consumed or unusable records are
 * requeued at the head of rx_list.
 */
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;
	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;
	if (!skb_queue_empty(&ctx->rx_list)) {
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;
		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;
		memset(&darg.inargs, 0, sizeof(darg.inargs));
		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}
		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}
	rxm = strp_msg(skb);
	tlm = tls_msg(skb);
	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}
	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;
	/* chunk < full_len implies chunk == len here, so advancing by len
	 * consumes exactly the spliced bytes; keep the rest for later.
	 */
	if (chunk < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}
	consume_skb(skb);
splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;
splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}
/* read_sock() implementation for SW TLS RX (non-blocking).
 * Feeds decrypted DATA records to @read_actor, draining rx_list first and
 * then decrypting fresh records. Records @read_actor does not fully
 * consume (or non-DATA records) are requeued at the head of rx_list.
 * Not supported when a psock is attached.
 */
int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
		     sk_read_actor_t read_actor)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = NULL;
	struct sk_buff *skb = NULL;
	struct sk_psock *psock;
	size_t flushed_at = 0;
	bool released = true;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t decrypted;
	int err, used;
	psock = sk_psock_get(sk);
	if (psock) {
		sk_psock_put(sk, psock);
		return -EINVAL;
	}
	/* Caller already holds the socket lock - only take the reader slot */
	err = tls_rx_reader_acquire(sk, ctx, true);
	if (err < 0)
		return err;
	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto read_sock_end;
	decrypted = 0;
	do {
		if (!skb_queue_empty(&ctx->rx_list)) {
			skb = __skb_dequeue(&ctx->rx_list);
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
		} else {
			struct tls_decrypt_arg darg;
			err = tls_rx_rec_wait(sk, NULL, true, released);
			if (err <= 0)
				goto read_sock_end;
			memset(&darg.inargs, 0, sizeof(darg.inargs));
			err = tls_rx_one_record(sk, NULL, &darg);
			if (err < 0) {
				tls_err_abort(sk, -EBADMSG);
				goto read_sock_end;
			}
			/* periodically flush backlog, and feed strparser */
			released = tls_read_flush_backlog(sk, prot, INT_MAX,
							  0, decrypted,
							  &flushed_at);
			skb = darg.skb;
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
			decrypted += rxm->full_len;
			tls_rx_rec_done(ctx);
		}
		/* read_sock does not support reading control messages */
		if (tlm->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto read_sock_requeue;
		}
		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
		if (used <= 0) {
			if (!copied)
				err = used;
			goto read_sock_requeue;
		}
		copied += used;
		if (used < rxm->full_len) {
			/* Partially consumed - keep the remainder queued */
			rxm->offset += used;
			rxm->full_len -= used;
			if (!desc->count)
				goto read_sock_requeue;
		} else {
			consume_skb(skb);
			if (!desc->count)
				skb = NULL;
		}
	} while (skb);
read_sock_end:
	tls_rx_reader_release(sk, ctx);
	return copied ? : err;
read_sock_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto read_sock_end;
}
/* Poll helper: the socket is readable if the psock has ingress messages,
 * the strparser has a record ready, or decrypted records are queued.
 */
bool tls_sw_sock_is_readable(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;
	/* RCU protects the psock lookup; don't take a reference here */
	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !ingress_empty || tls_strp_msg_ready(ctx) ||
	       !skb_queue_empty(&ctx->rx_list);
}
/* strparser callback: parse the 5-byte TLS record header at the current
 * offset and return the total record length (payload + header).
 * Return: full record size in bytes, 0 if more data is needed to see the
 * whole header, or a negative error (which also aborts the connection).
 */
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;
	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;
	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}
	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;
	/* Byte 0 of the header is the record content type */
	strp->mark = header[0];
	/* Bytes 3-4: big-endian payload length */
	data_len = ((header[4] & 0xFF) | (header[3] << 8));
	cipher_overhead = prot->tag_size;
	/* TLS 1.2 (except ChaCha20-Poly1305) carries an explicit IV */
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;
	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}
	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}
	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;
read_failure:
	tls_err_abort(strp->sk, ret);
	return ret;
}
/* strparser callback: a full record is available - run the original
 * (pre-TLS) data-ready handler to wake readers/poll.
 */
void tls_rx_msg_ready(struct tls_strparser *strp)
{
	struct tls_sw_context_rx *ctx;
	ctx = container_of(strp, struct tls_sw_context_rx, strp);
	ctx->saved_data_ready(strp->sk);
}
/* sk_data_ready replacement installed by tls_sw_strparser_arm().
 * Feeds newly arrived TCP data to the TLS strparser and, if a psock has
 * queued ingress messages, also wakes the original data-ready handler.
 */
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	gfp_t alloc_save;
	trace_sk_data_ready(sk);
	/* May run from softirq context - force atomic allocations while
	 * the strparser runs, then restore the caller's setting.
	 */
	alloc_save = sk->sk_allocation;
	sk->sk_allocation = GFP_ATOMIC;
	tls_strp_data_ready(&ctx->strp);
	sk->sk_allocation = alloc_save;
	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}
/* Stop the delayed TX work during teardown.
 * BIT_TX_CLOSING makes the handler bail out if it runs; BIT_TX_SCHEDULED
 * prevents anyone from rescheduling it before the synchronous cancel.
 */
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}
/* Release all TX-side SW TLS resources: drain async encryption, push out
 * already-encrypted records, then free everything still on tx_list and
 * the AEAD transform.
 */
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	/* Wait for any pending async encryptions to complete */
	tls_encrypt_async_wait(ctx);
	tls_tx_records(sk, -1);
	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}
	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}
/* Free the SW TX context itself (resources must already be released). */
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	kfree(ctx);
}
/* Release RX-side SW TLS resources: queued decrypted records, the AEAD
 * transform, the strparser, and restore the original sk_data_ready.
 * A NULL aead_recv means RX was never fully set up - nothing to do.
 */
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	if (ctx->aead_recv) {
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}
/* Final strparser teardown, after it has been stopped. */
void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	tls_strp_done(&ctx->strp);
}
/* Free the SW RX context itself (resources must already be released). */
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	kfree(ctx);
}
/* Convenience wrapper: release RX resources then free the RX context. */
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;
	if (unlikely(!tls_ctx))
		return;
	ctx = tls_sw_ctx_tx(tls_ctx);
	/* Teardown in progress - tls_sw_cancel_work_tx() owns the work */
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;
	/* Someone else already consumed this scheduling of the work */
	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;
	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}
/* Check whether the first record on tx_list is encrypted and ready to be
 * transmitted. READ_ONCE pairs with the async-encrypt completion writer.
 */
static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;
	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;
	return READ_ONCE(rec->tx_ready);
}
/* write_space hook: kick the TX worker when encrypted records are waiting
 * and the work is not already scheduled.
 */
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
	/* Schedule the transmission if tx list is ready */
	if (tls_is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
/* Hook the TLS strparser into the socket: save the original data-ready
 * callback (restored on teardown) and install tls_data_ready().
 */
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}
/* Recompute whether RX zero-copy decrypt is usable: always for pre-1.3,
 * and for TLS 1.3 only when the peer promised not to pad records.
 */
void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
		tls_ctx->prot_info.version != TLS_1_3_VERSION;
}
/* Allocate (or reuse a pre-allocated) SW TX context and initialize its
 * wait state, record list and delayed TX work.
 * Return: the context, or NULL on allocation failure.
 */
static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
{
	struct tls_sw_context_tx *sw_ctx_tx;
	if (!ctx->priv_ctx_tx) {
		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
		if (!sw_ctx_tx)
			return NULL;
	} else {
		/* Context was pre-allocated by the caller (e.g. offload) */
		sw_ctx_tx = ctx->priv_ctx_tx;
	}
	crypto_init_wait(&sw_ctx_tx->async_wait);
	/* Starts at 1; the extra reference is dropped when waiting */
	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
	sw_ctx_tx->tx_work.sk = sk;
	return sw_ctx_tx;
}
/* Allocate (or reuse a pre-allocated) SW RX context and initialize its
 * wait state, reader waitqueue and record queues.
 * Return: the context, or NULL on allocation failure.
 */
static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
{
	struct tls_sw_context_rx *sw_ctx_rx;
	if (!ctx->priv_ctx_rx) {
		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
		if (!sw_ctx_rx)
			return NULL;
	} else {
		/* Context was pre-allocated by the caller (e.g. offload) */
		sw_ctx_rx = ctx->priv_ctx_rx;
	}
	crypto_init_wait(&sw_ctx_rx->async_wait);
	/* Starts at 1; the extra reference is dropped when waiting */
	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
	init_waitqueue_head(&sw_ctx_rx->wq);
	skb_queue_head_init(&sw_ctx_rx->rx_list);
	skb_queue_head_init(&sw_ctx_rx->async_hold);
	return sw_ctx_rx;
}
int init_prot_info(struct tls_prot_info *prot,
const struct tls_crypto_info *crypto_info,
const struct tls_cipher_desc *cipher_desc)
{
u16 nonce_size = cipher_desc->nonce;
if (crypto_info->version == TLS_1_3_VERSION) {
nonce_size = 0;
prot->aad_size = TLS_HEADER_SIZE;
prot->tail_size = 1;
} else {
prot->aad_size = TLS_AAD_SPACE_SIZE;
prot->tail_size = 0;
}
/* Sanity-check the sizes for stack allocations. */
if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
return -EINVAL;
prot->version = crypto_info->version;
prot->cipher_type = crypto_info->cipher_type;
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
prot->tag_size = cipher_desc->tag;
prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
prot->iv_size = cipher_desc->iv;
prot->salt_size = cipher_desc->salt;
prot->rec_seq_size = cipher_desc->rec_seq;
return 0;
}
/* Set up SW TLS crypto for one direction of @sk (@tx selects TX vs RX):
 * allocate the per-direction context, derive protocol parameters from the
 * negotiated cipher, program IV/salt/sequence material, allocate and key
 * the AEAD, and for RX initialize ZC capability, async capability and the
 * strparser.
 * Return: 0 on success, negative error (context freed) on failure.
 */
int tls_set_sw_offload(struct sock *sk, int tx)
{
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_crypto_info *crypto_info;
	char *iv, *rec_seq, *key, *salt;
	struct cipher_context *cctx;
	struct tls_prot_info *prot;
	struct crypto_aead **aead;
	struct tls_context *ctx;
	struct crypto_tfm *tfm;
	int rc = 0;
	ctx = tls_get_ctx(sk);
	prot = &ctx->prot_info;
	/* Pick the direction-specific context, key material and AEAD slot */
	if (tx) {
		ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
		if (!ctx->priv_ctx_tx)
			return -ENOMEM;
		sw_ctx_tx = ctx->priv_ctx_tx;
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		ctx->priv_ctx_rx = init_ctx_rx(ctx);
		if (!ctx->priv_ctx_rx)
			return -ENOMEM;
		sw_ctx_rx = ctx->priv_ctx_rx;
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}
	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto free_priv;
	}
	rc = init_prot_info(prot, crypto_info, cipher_desc);
	if (rc)
		goto free_priv;
	/* Locate the user-provided key material inside crypto_info */
	iv = crypto_info_iv(crypto_info, cipher_desc);
	key = crypto_info_key(crypto_info, cipher_desc);
	salt = crypto_info_salt(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
	/* Per-record nonce = salt || IV */
	memcpy(cctx->iv, salt, cipher_desc->salt);
	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
	memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);
	/* The AEAD may have been pre-allocated (e.g. by device offload) */
	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_priv;
		}
	}
	ctx->push_pending_record = tls_sw_push_pending_record;
	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
	if (rc)
		goto free_aead;
	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;
	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
		tls_update_rx_zc_capable(ctx);
		/* Async decrypt only for TLS 1.2 with an async AEAD impl */
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}
	goto out;
free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* digi00x.h - a part of driver for Digidesign Digi 002/003 family
*
* Copyright (c) 2014-2015 Takashi Sakamoto
*/
#ifndef SOUND_DIGI00X_H_INCLUDED
#define SOUND_DIGI00X_H_INCLUDED
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/firewire.h>
#include <sound/hwdep.h>
#include <sound/rawmidi.h>
#include "../lib.h"
#include "../iso-resources.h"
#include "../amdtp-stream.h"
/* Per-device state for a Digidesign Digi 002/003 family unit. */
struct snd_dg00x {
	struct snd_card *card;
	struct fw_unit *unit;
	struct mutex mutex;		/* serializes stream start/stop */
	spinlock_t lock;		/* protects async message state */
	/* Isochronous streams and their bus resources, both directions. */
	struct amdtp_stream tx_stream;
	struct fw_iso_resources tx_resources;
	struct amdtp_stream rx_stream;
	struct fw_iso_resources rx_resources;
	unsigned int substreams_counter;
	/* for uapi */
	int dev_lock_count;
	bool dev_lock_changed;
	wait_queue_head_t hwdep_wait;
	/* For asynchronous messages. */
	struct fw_address_handler async_handler;
	u32 msg;
	/* Console models have additional MIDI ports for control surface. */
	bool is_console;
	struct amdtp_domain domain;
};
/* Base of the device's register space and register offsets within it. */
#define DG00X_ADDR_BASE		0xffffe0000000ull
#define DG00X_OFFSET_STREAMING_STATE	0x0000
#define DG00X_OFFSET_STREAMING_SET	0x0004
/* unknown but address in host space	0x0008 */
/* For LSB of the address	0x000c */
/* unknown	0x0010 */
#define DG00X_OFFSET_MESSAGE_ADDR	0x0014
/* For LSB of the address	0x0018 */
/* unknown	0x001c */
/* unknown	0x0020 */
/* not used	0x0024--0x00ff */
#define DG00X_OFFSET_ISOC_CHANNELS	0x0100
/* unknown	0x0104 */
/* unknown	0x0108 */
/* unknown	0x010c */
#define DG00X_OFFSET_LOCAL_RATE	0x0110
#define DG00X_OFFSET_EXTERNAL_RATE	0x0114
#define DG00X_OFFSET_CLOCK_SOURCE	0x0118
#define DG00X_OFFSET_OPT_IFACE_MODE	0x011c
/* unknown	0x0120 */
/* Mixer control on/off	0x0124 */
/* unknown	0x0128 */
#define DG00X_OFFSET_DETECT_EXTERNAL	0x012c
/* unknown	0x0138 */
#define DG00X_OFFSET_MMC	0x0400
/* Sampling rates supported by the device, in register encoding order. */
enum snd_dg00x_rate {
	SND_DG00X_RATE_44100 = 0,
	SND_DG00X_RATE_48000,
	SND_DG00X_RATE_88200,
	SND_DG00X_RATE_96000,
	SND_DG00X_RATE_COUNT,
};
/* Clock sources selectable via DG00X_OFFSET_CLOCK_SOURCE. */
enum snd_dg00x_clock {
	SND_DG00X_CLOCK_INTERNAL = 0,
	SND_DG00X_CLOCK_SPDIF,
	SND_DG00X_CLOCK_ADAT,
	SND_DG00X_CLOCK_WORD,
	SND_DG00X_CLOCK_COUNT,
};
/* Signal format of the optical interface. */
enum snd_dg00x_optical_mode {
	SND_DG00X_OPT_IFACE_MODE_ADAT = 0,
	SND_DG00X_OPT_IFACE_MODE_SPDIF,
	SND_DG00X_OPT_IFACE_MODE_COUNT,
};
#define DOT_MIDI_IN_PORTS	1
#define DOT_MIDI_OUT_PORTS	2
/* amdtp-dot.c: DOT-encoded AMDTP stream handling. */
int amdtp_dot_init(struct amdtp_stream *s, struct fw_unit *unit,
		   enum amdtp_stream_direction dir);
int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate,
			     unsigned int pcm_channels);
void amdtp_dot_reset(struct amdtp_stream *s);
int amdtp_dot_add_pcm_hw_constraints(struct amdtp_stream *s,
				     struct snd_pcm_runtime *runtime);
void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port,
			    struct snd_rawmidi_substream *midi);
/* digi00x-transaction.c: asynchronous message handling. */
int snd_dg00x_transaction_register(struct snd_dg00x *dg00x);
int snd_dg00x_transaction_reregister(struct snd_dg00x *dg00x);
void snd_dg00x_transaction_unregister(struct snd_dg00x *dg00x);
/* digi00x-stream.c: stream/clock management. */
extern const unsigned int snd_dg00x_stream_rates[SND_DG00X_RATE_COUNT];
extern const unsigned int snd_dg00x_stream_pcm_channels[SND_DG00X_RATE_COUNT];
int snd_dg00x_stream_get_external_rate(struct snd_dg00x *dg00x,
				       unsigned int *rate);
int snd_dg00x_stream_get_local_rate(struct snd_dg00x *dg00x,
				    unsigned int *rate);
int snd_dg00x_stream_set_local_rate(struct snd_dg00x *dg00x, unsigned int rate);
int snd_dg00x_stream_get_clock(struct snd_dg00x *dg00x,
			       enum snd_dg00x_clock *clock);
int snd_dg00x_stream_check_external_clock(struct snd_dg00x *dg00x,
					  bool *detect);
int snd_dg00x_stream_init_duplex(struct snd_dg00x *dg00x);
int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate,
				    unsigned int frames_per_period,
				    unsigned int frames_per_buffer);
int snd_dg00x_stream_start_duplex(struct snd_dg00x *dg00x);
void snd_dg00x_stream_stop_duplex(struct snd_dg00x *dg00x);
void snd_dg00x_stream_update_duplex(struct snd_dg00x *dg00x);
void snd_dg00x_stream_destroy_duplex(struct snd_dg00x *dg00x);
void snd_dg00x_stream_lock_changed(struct snd_dg00x *dg00x);
int snd_dg00x_stream_lock_try(struct snd_dg00x *dg00x);
void snd_dg00x_stream_lock_release(struct snd_dg00x *dg00x);
/* digi00x-proc.c / digi00x-pcm.c / digi00x-midi.c / digi00x-hwdep.c. */
void snd_dg00x_proc_init(struct snd_dg00x *dg00x);
int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x);
int snd_dg00x_create_midi_devices(struct snd_dg00x *dg00x);
int snd_dg00x_create_hwdep_device(struct snd_dg00x *dg00x);
#endif
|
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2021 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/slab.h>
/* TTM manager alloc hook for the VMW system placement: allocate and
 * initialize a bare ttm_resource (system memory needs no backing range).
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
			     struct ttm_buffer_object *bo,
			     const struct ttm_place *place,
			     struct ttm_resource **res)
{
	struct ttm_resource *mem = kzalloc(sizeof(*mem), GFP_KERNEL);

	*res = mem;
	if (!mem)
		return -ENOMEM;

	ttm_resource_init(bo, place, mem);
	return 0;
}
/* TTM manager free hook: de-initialize and release a system resource. */
static void vmw_sys_man_free(struct ttm_resource_manager *man,
			     struct ttm_resource *res)
{
	ttm_resource_fini(man, res);
	kfree(res);
}
/* Function table for the VMW_PL_SYSTEM resource manager. */
static const struct ttm_resource_manager_func vmw_sys_manager_func = {
	.alloc = vmw_sys_man_alloc,
	.free = vmw_sys_man_free,
};
/* Create and register the TT-backed VMW_PL_SYSTEM resource manager on the
 * device's TTM instance and mark it usable.
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int vmw_sys_man_init(struct vmw_private *dev_priv)
{
	struct ttm_device *bdev = &dev_priv->bdev;
	struct ttm_resource_manager *man;

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return -ENOMEM;

	man->func = &vmw_sys_manager_func;
	man->use_tt = true;		/* backed by TT pages, no fixed aperture */

	ttm_resource_manager_init(man, bdev, 0);
	ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man);
	ttm_resource_manager_set_used(man, true);
	return 0;
}
/* Tear down the VMW_PL_SYSTEM manager: evict all resources, mark the
 * manager unused, clean it up, unregister it and free it - in that order.
 */
void vmw_sys_man_fini(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev,
							    VMW_PL_SYSTEM);
	ttm_resource_manager_evict_all(&dev_priv->bdev, man);
	ttm_resource_manager_set_used(man, false);
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL);
	kfree(man);
}
|
#ifndef __NET_SCHED_CODEL_IMPL_H
#define __NET_SCHED_CODEL_IMPL_H
/*
* Codel - The Controlled-Delay Active Queue Management algorithm
*
* Copyright (C) 2011-2012 Kathleen Nichols <[email protected]>
* Copyright (C) 2011-2012 Van Jacobson <[email protected]>
* Copyright (C) 2012 Michael D. Taht <[email protected]>
* Copyright (C) 2012,2015 Eric Dumazet <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The names of the authors may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2, in which case the provisions of the
* GPL apply INSTEAD OF those given above.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
/* Controlling Queue Delay (CoDel) algorithm
* =========================================
* Source : Kathleen Nichols and Van Jacobson
* http://queue.acm.org/detail.cfm?id=2209336
*
* Implemented on linux by Dave Taht and Eric Dumazet
*/
#include <net/inet_ecn.h>
/*
 * codel_params_init - Set the CoDel tunables to their defaults.
 *
 * Defaults follow the CoDel paper: 100 ms estimation interval and a 5 ms
 * target sojourn time. CE marking above a threshold is disabled by default
 * (CODEL_DISABLED_THRESHOLD) and ECN-marking instead of dropping is off.
 */
static void codel_params_init(struct codel_params *params)
{
	params->interval = MS2TIME(100);
	params->target = MS2TIME(5);
	params->ce_threshold = CODEL_DISABLED_THRESHOLD;
	params->ce_threshold_mask = 0;
	params->ce_threshold_selector = 0;
	params->ecn = false;
}
/* Reset all per-queue CoDel state (count, dropping flag, timestamps, ...). */
static void codel_vars_init(struct codel_vars *vars)
{
	memset(vars, 0, sizeof(*vars));
}
/* Reset the largest-packet-seen statistic; other stats fields are left to
 * the caller (they are typically embedded in a zeroed structure).
 */
static void codel_stats_init(struct codel_stats *stats)
{
	stats->maxpacket = 0;
}
/*
* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
* new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
*
* Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
*/
/*
 * codel_Newton_step - One Newton iteration refining 1/sqrt(count).
 *
 * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
 *
 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
 * rec_inv_sqrt stores only the top bits; it is widened to Q0.32 for the
 * computation and narrowed back at the end.
 */
static void codel_Newton_step(struct codel_vars *vars)
{
	u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
	u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;	/* invsqrt^2 in Q0.32 */
	u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);

	val >>= 2; /* avoid overflow in following multiply */
	val = (val * invsqrt) >> (32 - 2 + 1);	/* restore scale, incl. /2 */

	vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
}
/*
* CoDel control_law is t + interval/sqrt(count)
* We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
* both sqrt() and divide operation.
*/
/*
 * CoDel control_law is t + interval/sqrt(count)
 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
 * both sqrt() and divide operation.
 */
static codel_time_t codel_control_law(codel_time_t t,
				      codel_time_t interval,
				      u32 rec_inv_sqrt)
{
	/* reciprocal_scale() computes (interval * rec_inv_sqrt) >> 32 */
	return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
}
/*
 * codel_should_drop - Decide whether @skb has been queued too long.
 *
 * Updates vars->ldelay with the packet's sojourn time and tracks the
 * largest packet seen in @stats. Returns true only once the sojourn time
 * has stayed above params->target for at least params->interval while the
 * backlog exceeds one MTU; dipping below target resets the measurement
 * (vars->first_above_time).
 */
static bool codel_should_drop(const struct sk_buff *skb,
			      void *ctx,
			      struct codel_vars *vars,
			      struct codel_params *params,
			      struct codel_stats *stats,
			      codel_skb_len_t skb_len_func,
			      codel_skb_time_t skb_time_func,
			      u32 *backlog,
			      codel_time_t now)
{
	bool ok_to_drop;
	u32 skb_len;

	/* Empty queue: nothing is delayed, restart the above-target timer. */
	if (!skb) {
		vars->first_above_time = 0;
		return false;
	}

	skb_len = skb_len_func(skb);
	/* Sojourn time = now - enqueue timestamp. */
	vars->ldelay = now - skb_time_func(skb);

	if (unlikely(skb_len > stats->maxpacket))
		stats->maxpacket = skb_len;

	if (codel_time_before(vars->ldelay, params->target) ||
	    *backlog <= params->mtu) {
		/* went below - stay below for at least interval */
		vars->first_above_time = 0;
		return false;
	}
	ok_to_drop = false;
	if (vars->first_above_time == 0) {
		/* just went above from below. If we stay above
		 * for at least interval we'll say it's ok to drop
		 */
		vars->first_above_time = now + params->interval;
	} else if (codel_time_after(now, vars->first_above_time)) {
		ok_to_drop = true;
	}
	return ok_to_drop;
}
/*
 * codel_dequeue - Dequeue one packet, applying the CoDel drop/mark policy.
 *
 * Pulls packets via @dequeue_func and either returns one to transmit or
 * drops/ECN-marks packets according to the CoDel state machine:
 *  - in dropping state, packets are dropped at increasing frequency
 *    (interval/sqrt(count)) until the sojourn time falls below target;
 *  - on entering dropping state, the previous drop rate is reused when the
 *    queue went above target again shortly after the last cycle.
 * Additionally, independent of the drop state, packets whose sojourn time
 * exceeds params->ce_threshold may get CE-marked (optionally filtered by
 * the DSCP mask/selector). Returns the skb to send, or NULL if the queue
 * emptied (or every candidate was consumed).
 */
static struct sk_buff *codel_dequeue(void *ctx,
				     u32 *backlog,
				     struct codel_params *params,
				     struct codel_vars *vars,
				     struct codel_stats *stats,
				     codel_skb_len_t skb_len_func,
				     codel_skb_time_t skb_time_func,
				     codel_skb_drop_t drop_func,
				     codel_skb_dequeue_t dequeue_func)
{
	struct sk_buff *skb = dequeue_func(vars, ctx);
	codel_time_t now;
	bool drop;

	/* Queue empty: leave dropping state, nothing to police. */
	if (!skb) {
		vars->dropping = false;
		return skb;
	}
	now = codel_get_time();
	drop = codel_should_drop(skb, ctx, vars, params, stats,
				 skb_len_func, skb_time_func, backlog, now);
	if (vars->dropping) {
		if (!drop) {
			/* sojourn time below target - leave dropping state */
			vars->dropping = false;
		} else if (codel_time_after_eq(now, vars->drop_next)) {
			/* It's time for the next drop. Drop the current
			 * packet and dequeue the next. The dequeue might
			 * take us out of dropping state.
			 * If not, schedule the next drop.
			 * A large backlog might result in drop rates so high
			 * that the next drop should happen now,
			 * hence the while loop.
			 */
			while (vars->dropping &&
			       codel_time_after_eq(now, vars->drop_next)) {
				vars->count++; /* dont care of possible wrap
						* since there is no more divide
						*/
				codel_Newton_step(vars);
				if (params->ecn && INET_ECN_set_ce(skb)) {
					/* ECN mark instead of drop; keep skb. */
					stats->ecn_mark++;
					vars->drop_next =
						codel_control_law(vars->drop_next,
								  params->interval,
								  vars->rec_inv_sqrt);
					goto end;
				}
				stats->drop_len += skb_len_func(skb);
				drop_func(skb, ctx);
				stats->drop_count++;
				skb = dequeue_func(vars, ctx);
				if (!codel_should_drop(skb, ctx,
						       vars, params, stats,
						       skb_len_func,
						       skb_time_func,
						       backlog, now)) {
					/* leave dropping state */
					vars->dropping = false;
				} else {
					/* and schedule the next drop */
					vars->drop_next =
						codel_control_law(vars->drop_next,
								  params->interval,
								  vars->rec_inv_sqrt);
				}
			}
		}
	} else if (drop) {
		u32 delta;

		/* Entering dropping state: drop (or mark) this packet. */
		if (params->ecn && INET_ECN_set_ce(skb)) {
			stats->ecn_mark++;
		} else {
			stats->drop_len += skb_len_func(skb);
			drop_func(skb, ctx);
			stats->drop_count++;

			skb = dequeue_func(vars, ctx);
			drop = codel_should_drop(skb, ctx, vars, params,
						 stats, skb_len_func,
						 skb_time_func, backlog, now);
		}
		vars->dropping = true;
		/* if min went above target close to when we last went below it
		 * assume that the drop rate that controlled the queue on the
		 * last cycle is a good starting point to control it now.
		 */
		delta = vars->count - vars->lastcount;
		if (delta > 1 &&
		    codel_time_before(now - vars->drop_next,
				      16 * params->interval)) {
			vars->count = delta;
			/* we dont care if rec_inv_sqrt approximation
			 * is not very precise :
			 * Next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(vars);
		} else {
			vars->count = 1;
			vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		vars->lastcount = vars->count;
		vars->drop_next = codel_control_law(now, params->interval,
						    vars->rec_inv_sqrt);
	}
end:
	/* Optional CE-threshold marking, independent of the drop law. */
	if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) {
		bool set_ce = true;

		/* Restrict marking to packets matching the DSCP selector. */
		if (params->ce_threshold_mask) {
			int dsfield = skb_get_dsfield(skb);

			set_ce = (dsfield >= 0 &&
				  (((u8)dsfield & params->ce_threshold_mask) ==
				   params->ce_threshold_selector));
		}
		if (set_ce && INET_ECN_set_ce(skb))
			stats->ce_mark++;
	}
	return skb;
}
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* vimc-capture.c Virtual Media Controller Driver
*
* Copyright (C) 2015-2017 Helen Koike <[email protected]>
*/
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vimc-common.h"
#include "vimc-streamer.h"
/*
 * struct vimc_capture_device - State of one vimc capture video node.
 *
 * Embeds the generic vimc entity (@ved), the V4L2 video device (@vdev),
 * the active pixel format, the vb2 queue and the list of queued buffers.
 */
struct vimc_capture_device {
	struct vimc_ent_device ved;		/* generic vimc entity */
	struct video_device vdev;		/* /dev/videoX node */
	struct v4l2_pix_format format;		/* current capture format */
	struct vb2_queue queue;			/* videobuf2 queue */
	struct list_head buf_list;		/* buffers awaiting a frame */
	/*
	 * NOTE: in a real driver, a spin lock must be used to access the
	 * queue because the frames are generated from a hardware interruption
	 * and the isr is not allowed to sleep.
	 * Even if it is not necessary a spinlock in the vimc driver, we
	 * use it here as a code reference
	 */
	spinlock_t qlock;			/* protects buf_list */
	struct mutex lock;			/* serializes queue/ioctl ops */
	u32 sequence;				/* frame sequence counter */
	struct vimc_stream stream;		/* streamer pipeline state */
	struct media_pad pad;			/* single sink pad */
};
/* Format used at probe time and as fallback for unsupported requests. */
static const struct v4l2_pix_format fmt_default = {
	.width = 640,
	.height = 480,
	.pixelformat = V4L2_PIX_FMT_RGB24,
	.field = V4L2_FIELD_NONE,
	.colorspace = V4L2_COLORSPACE_SRGB,
};
/*
 * struct vimc_capture_buffer - Per-buffer state for the capture queue.
 */
struct vimc_capture_buffer {
	/*
	 * struct vb2_v4l2_buffer must be the first element
	 * the videobuf2 framework will allocate this struct based on
	 * buf_struct_size and use the first sizeof(struct vb2_buffer) bytes of
	 * memory as a vb2_buffer
	 */
	struct vb2_v4l2_buffer vb2;
	struct list_head list;		/* entry in vimc_capture_device.buf_list */
};
/* VIDIOC_QUERYCAP: report driver, card and bus identification strings. */
static int vimc_capture_querycap(struct file *file, void *priv,
				 struct v4l2_capability *cap)
{
	strscpy(cap->driver, VIMC_PDEV_NAME, sizeof(cap->driver));
	strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s", VIMC_PDEV_NAME);

	return 0;
}
/* vimc_ent_device::vdev_get_format hook: copy out the active pix format. */
static void vimc_capture_get_format(struct vimc_ent_device *ved,
				    struct v4l2_pix_format *fmt)
{
	struct vimc_capture_device *vcapture = container_of(ved, struct vimc_capture_device,
							    ved);

	*fmt = vcapture->format;
}
/* VIDIOC_G_FMT: return the currently configured capture format. */
static int vimc_capture_g_fmt_vid_cap(struct file *file, void *priv,
				      struct v4l2_format *f)
{
	struct vimc_capture_device *vcapture = video_drvdata(file);

	f->fmt.pix = vcapture->format;

	return 0;
}
/*
 * VIDIOC_TRY_FMT: clamp the requested format to what vimc supports,
 * without touching the device state. Width/height are clamped to the
 * vimc limits and forced even; unknown pixel formats fall back to the
 * default; bytesperline/sizeimage are derived from the pix map table.
 */
static int vimc_capture_try_fmt_vid_cap(struct file *file, void *priv,
					struct v4l2_format *f)
{
	struct v4l2_pix_format *format = &f->fmt.pix;
	const struct vimc_pix_map *vpix;

	/* ~1 keeps dimensions even, as required downstream. */
	format->width = clamp_t(u32, format->width, VIMC_FRAME_MIN_WIDTH,
				VIMC_FRAME_MAX_WIDTH) & ~1;
	format->height = clamp_t(u32, format->height, VIMC_FRAME_MIN_HEIGHT,
				 VIMC_FRAME_MAX_HEIGHT) & ~1;

	/* Don't accept a pixelformat that is not on the table */
	vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
	if (!vpix) {
		format->pixelformat = fmt_default.pixelformat;
		vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
	}
	/* TODO: Add support for custom bytesperline values */
	format->bytesperline = format->width * vpix->bpp;
	format->sizeimage = format->bytesperline * format->height;

	if (format->field == V4L2_FIELD_ANY)
		format->field = fmt_default.field;

	vimc_colorimetry_clamp(format);

	if (format->colorspace == V4L2_COLORSPACE_DEFAULT)
		format->colorspace = fmt_default.colorspace;

	return 0;
}
/*
 * VIDIOC_S_FMT: validate the request via try_fmt and commit it.
 * Refused with -EBUSY while buffers are in use (format must not change
 * under an active stream).
 */
static int vimc_capture_s_fmt_vid_cap(struct file *file, void *priv,
				      struct v4l2_format *f)
{
	struct vimc_capture_device *vcapture = video_drvdata(file);
	int ret;

	/* Do not change the format while stream is on */
	if (vb2_is_busy(&vcapture->queue))
		return -EBUSY;

	ret = vimc_capture_try_fmt_vid_cap(file, priv, f);
	if (ret)
		return ret;

	dev_dbg(vcapture->ved.dev, "%s: format update: "
		"old:%dx%d (0x%x, %d, %d, %d, %d) "
		"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcapture->vdev.name,
		/* old */
		vcapture->format.width, vcapture->format.height,
		vcapture->format.pixelformat, vcapture->format.colorspace,
		vcapture->format.quantization, vcapture->format.xfer_func,
		vcapture->format.ycbcr_enc,
		/* new */
		f->fmt.pix.width, f->fmt.pix.height,
		f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
		f->fmt.pix.quantization, f->fmt.pix.xfer_func,
		f->fmt.pix.ycbcr_enc);

	vcapture->format = f->fmt.pix;

	return 0;
}
static int vimc_capture_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct vimc_pix_map *vpix;
if (f->mbus_code) {
if (f->index > 0)
return -EINVAL;
vpix = vimc_pix_map_by_code(f->mbus_code);
} else {
vpix = vimc_pix_map_by_index(f->index);
}
if (!vpix)
return -EINVAL;
f->pixelformat = vpix->pixelformat;
return 0;
}
/*
 * VIDIOC_ENUM_FRAMESIZES: report the supported frame size range.
 * A single continuous range is advertised, so only index 0 is valid,
 * and only for pixel formats present in the vimc pix map table.
 */
static int vimc_capture_enum_framesizes(struct file *file, void *fh,
					struct v4l2_frmsizeenum *fsize)
{
	if (fsize->index)
		return -EINVAL;

	/* Only accept code in the pix map table */
	if (!vimc_pix_map_by_code(fsize->pixel_format))
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
	fsize->stepwise.min_width = VIMC_FRAME_MIN_WIDTH;
	fsize->stepwise.max_width = VIMC_FRAME_MAX_WIDTH;
	fsize->stepwise.min_height = VIMC_FRAME_MIN_HEIGHT;
	fsize->stepwise.max_height = VIMC_FRAME_MAX_HEIGHT;
	fsize->stepwise.step_width = 1;
	fsize->stepwise.step_height = 1;

	return 0;
}
/* File operations: standard vb2/v4l2 helpers; no custom open/close logic. */
static const struct v4l2_file_operations vimc_capture_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read           = vb2_fop_read,
	.poll		= vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap           = vb2_fop_mmap,
};
/* ioctl table: format handling is custom, buffer handling is stock vb2. */
static const struct v4l2_ioctl_ops vimc_capture_ioctl_ops = {
	.vidioc_querycap = vimc_capture_querycap,

	.vidioc_g_fmt_vid_cap = vimc_capture_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = vimc_capture_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = vimc_capture_try_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = vimc_capture_enum_fmt_vid_cap,
	.vidioc_enum_framesizes = vimc_capture_enum_framesizes,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_remove_bufs = vb2_ioctl_remove_bufs,
};
/*
 * Hand every buffer still on the capture list back to vb2 in @state
 * (QUEUED on a failed start, ERROR on stream stop), emptying the list.
 */
static void vimc_capture_return_all_buffers(struct vimc_capture_device *vcapture,
					    enum vb2_buffer_state state)
{
	struct vimc_capture_buffer *cur, *tmp;

	spin_lock(&vcapture->qlock);

	list_for_each_entry_safe(cur, tmp, &vcapture->buf_list, list) {
		list_del(&cur->list);
		vb2_buffer_done(&cur->vb2.vb2_buf, state);
	}

	spin_unlock(&vcapture->qlock);
}
/*
 * vb2_ops::start_streaming: reset the frame counter, start the media
 * pipeline and then the vimc streamer thread. On any failure the queued
 * buffers are returned to vb2 in the QUEUED state, as vb2 requires.
 */
static int vimc_capture_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq);
	int ret;

	vcapture->sequence = 0;

	/* Start the media pipeline */
	ret = video_device_pipeline_start(&vcapture->vdev, &vcapture->stream.pipe);
	if (ret) {
		vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	ret = vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 1);
	if (ret) {
		/* Unwind the pipeline start before giving the buffers back. */
		video_device_pipeline_stop(&vcapture->vdev);
		vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	return 0;
}
/*
 * Stop the stream engine. Any remaining buffers in the stream queue are
 * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
 */
static void vimc_capture_stop_streaming(struct vb2_queue *vq)
{
	struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq);

	vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 0);

	/* Stop the media pipeline */
	video_device_pipeline_stop(&vcapture->vdev);

	/* Release all active buffers */
	vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_ERROR);
}
/* vb2_ops::buf_queue: append the buffer to the list the streamer fills. */
static void vimc_capture_buf_queue(struct vb2_buffer *vb2_buf)
{
	struct vimc_capture_device *vcapture = vb2_get_drv_priv(vb2_buf->vb2_queue);
	struct vimc_capture_buffer *buf = container_of(vb2_buf,
						       struct vimc_capture_buffer,
						       vb2.vb2_buf);

	spin_lock(&vcapture->qlock);
	list_add_tail(&buf->list, &vcapture->buf_list);
	spin_unlock(&vcapture->qlock);
}
/*
 * vb2_ops::queue_setup: report the plane count and size for the current
 * format. When vb2 passes pre-set plane sizes (*nplanes != 0), only
 * validate that they are large enough for one image.
 */
static int vimc_capture_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
				    unsigned int *nplanes, unsigned int sizes[],
				    struct device *alloc_devs[])
{
	struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq);

	if (*nplanes)
		return sizes[0] < vcapture->format.sizeimage ? -EINVAL : 0;
	/* We don't support multiplanes for now */
	*nplanes = 1;
	sizes[0] = vcapture->format.sizeimage;

	return 0;
}
/* vb2_ops::buf_prepare: reject buffers too small for one full image. */
static int vimc_capture_buffer_prepare(struct vb2_buffer *vb)
{
	struct vimc_capture_device *vcapture = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = vcapture->format.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(vcapture->ved.dev, "%s: buffer too small (%lu < %lu)\n",
			vcapture->vdev.name, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}
	return 0;
}
/* videobuf2 queue callbacks for the capture node. */
static const struct vb2_ops vimc_capture_qops = {
	.start_streaming	= vimc_capture_start_streaming,
	.stop_streaming		= vimc_capture_stop_streaming,
	.buf_queue		= vimc_capture_buf_queue,
	.queue_setup		= vimc_capture_queue_setup,
	.buf_prepare		= vimc_capture_buffer_prepare,
};
/* Media entity callbacks: link validation shared with other vimc vdevs. */
static const struct media_entity_operations vimc_capture_mops = {
	.link_validate		= vimc_vdev_link_validate,
};
/* vimc_ent_type::release: free everything vimc_capture_add() allocated. */
static void vimc_capture_release(struct vimc_ent_device *ved)
{
	struct vimc_capture_device *vcapture =
		container_of(ved, struct vimc_capture_device, ved);

	media_entity_cleanup(vcapture->ved.ent);
	kfree(vcapture);
}
/* vimc_ent_type::unregister: take the video node away from userspace. */
static void vimc_capture_unregister(struct vimc_ent_device *ved)
{
	struct vimc_capture_device *vcapture =
		container_of(ved, struct vimc_capture_device, ved);

	vb2_video_unregister_device(&vcapture->vdev);
}
/*
 * vimc_ent_device::process_frame: consume one generated frame.
 *
 * Pops the oldest queued buffer, copies @frame into it, stamps
 * timestamp/sequence/field and completes it towards vb2. Returns
 * ERR_PTR(-EAGAIN) when no buffer is available (the frame is skipped),
 * NULL on success.
 */
static void *vimc_capture_process_frame(struct vimc_ent_device *ved,
					const void *frame)
{
	struct vimc_capture_device *vcapture = container_of(ved, struct vimc_capture_device,
							    ved);
	struct vimc_capture_buffer *vimc_buf;
	void *vbuf;

	spin_lock(&vcapture->qlock);

	/* Get the first entry of the list */
	vimc_buf = list_first_entry_or_null(&vcapture->buf_list,
					    typeof(*vimc_buf), list);
	if (!vimc_buf) {
		spin_unlock(&vcapture->qlock);
		return ERR_PTR(-EAGAIN);
	}

	/* Remove this entry from the list */
	list_del(&vimc_buf->list);

	spin_unlock(&vcapture->qlock);

	/* Fill the buffer */
	vimc_buf->vb2.vb2_buf.timestamp = ktime_get_ns();
	vimc_buf->vb2.sequence = vcapture->sequence++;
	vimc_buf->vb2.field = vcapture->format.field;

	vbuf = vb2_plane_vaddr(&vimc_buf->vb2.vb2_buf, 0);

	memcpy(vbuf, frame, vcapture->format.sizeimage);

	/* Set it as ready */
	vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
			      vcapture->format.sizeimage);
	vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);

	return NULL;
}
/*
 * vimc_capture_add - Instantiate one capture entity named @vcfg_name.
 *
 * Allocates the device structure, sets up the media entity (one sink pad),
 * the vb2 queue (allocator chosen by the module's vimc_allocator option),
 * the default format and the video_device, then registers the node.
 * Returns the embedded vimc_ent_device or an ERR_PTR; on error all
 * partially initialized state is unwound.
 */
static struct vimc_ent_device *vimc_capture_add(struct vimc_device *vimc,
						const char *vcfg_name)
{
	struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
	const struct vimc_pix_map *vpix;
	struct vimc_capture_device *vcapture;
	struct video_device *vdev;
	struct vb2_queue *q;
	int ret;

	/* Allocate the vimc_capture_device struct */
	vcapture = kzalloc(sizeof(*vcapture), GFP_KERNEL);
	if (!vcapture)
		return ERR_PTR(-ENOMEM);

	/* Initialize the media entity */
	vcapture->vdev.entity.name = vcfg_name;
	vcapture->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
	vcapture->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&vcapture->vdev.entity,
				     1, &vcapture->pad);
	if (ret)
		goto err_free_vcapture;

	/* Initialize the lock */
	mutex_init(&vcapture->lock);

	/* Initialize the vb2 queue */
	q = &vcapture->queue;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	/* USERPTR only works with the vmalloc allocator. */
	if (vimc_allocator == VIMC_ALLOCATOR_VMALLOC)
		q->io_modes |= VB2_USERPTR;
	q->drv_priv = vcapture;
	q->buf_struct_size = sizeof(struct vimc_capture_buffer);
	q->ops = &vimc_capture_qops;
	q->mem_ops = vimc_allocator == VIMC_ALLOCATOR_DMA_CONTIG
		   ? &vb2_dma_contig_memops : &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_reqbufs_allocation = 2;
	q->lock = &vcapture->lock;
	q->dev = v4l2_dev->dev;

	ret = vb2_queue_init(q);
	if (ret) {
		dev_err(vimc->mdev.dev, "%s: vb2 queue init failed (err=%d)\n",
			vcfg_name, ret);
		goto err_clean_m_ent;
	}

	/* Initialize buffer list and its lock */
	INIT_LIST_HEAD(&vcapture->buf_list);
	spin_lock_init(&vcapture->qlock);

	/* Set default frame format */
	vcapture->format = fmt_default;
	vpix = vimc_pix_map_by_pixelformat(vcapture->format.pixelformat);
	vcapture->format.bytesperline = vcapture->format.width * vpix->bpp;
	vcapture->format.sizeimage = vcapture->format.bytesperline *
				     vcapture->format.height;

	/* Fill the vimc_ent_device struct */
	vcapture->ved.ent = &vcapture->vdev.entity;
	vcapture->ved.process_frame = vimc_capture_process_frame;
	vcapture->ved.vdev_get_format = vimc_capture_get_format;
	vcapture->ved.dev = vimc->mdev.dev;

	/* Initialize the video_device struct */
	vdev = &vcapture->vdev;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
			  | V4L2_CAP_IO_MC;
	vdev->entity.ops = &vimc_capture_mops;
	vdev->release = video_device_release_empty;
	vdev->fops = &vimc_capture_fops;
	vdev->ioctl_ops = &vimc_capture_ioctl_ops;
	vdev->lock = &vcapture->lock;
	vdev->queue = q;
	vdev->v4l2_dev = v4l2_dev;
	vdev->vfl_dir = VFL_DIR_RX;
	strscpy(vdev->name, vcfg_name, sizeof(vdev->name));
	video_set_drvdata(vdev, &vcapture->ved);

	/* Register the video_device with the v4l2 and the media framework */
	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		dev_err(vimc->mdev.dev, "%s: video register failed (err=%d)\n",
			vcapture->vdev.name, ret);
		goto err_clean_m_ent;
	}

	return &vcapture->ved;

err_clean_m_ent:
	media_entity_cleanup(&vcapture->vdev.entity);
err_free_vcapture:
	kfree(vcapture);

	return ERR_PTR(ret);
}
/* Entity-type descriptor exported to the vimc core. */
const struct vimc_ent_type vimc_capture_type = {
	.add = vimc_capture_add,
	.unregister = vimc_capture_unregister,
	.release = vimc_capture_release
};
|
// SPDX-License-Identifier: GPL-2.0+
/*
 * Test cases for util_macros.h helpers.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/util_macros.h>
#define FIND_CLOSEST_RANGE_CHECK(from, to, array, exp_idx) \
{ \
int i; \
for (i = from; i <= to; i++) { \
int found = find_closest(i, array, ARRAY_SIZE(array)); \
KUNIT_ASSERT_EQ(ctx, exp_idx, found); \
} \
}
/*
 * Exhaustively check find_closest() on real-world driver tables and on
 * synthetic arithmetic progressions: for each value range, assert the
 * expected nearest-element index (midpoints round toward the lower index).
 */
static void test_find_closest(struct kunit *ctx)
{
	/* This will test a few arrays that are found in drivers */
	static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
	static const unsigned int ad7616_oversampling_avail[] = {
		1, 2, 4, 8, 16, 32, 64, 128,
	};
	static u32 wd_timeout_table[] = { 2, 4, 6, 8, 16, 32, 48, 64 };
	static int array_prog1a[] = { 1, 2, 3, 4, 5 };
	static u32 array_prog1b[] = { 2, 3, 4, 5, 6 };
	static int array_prog1mix[] = { -2, -1, 0, 1, 2 };
	static int array_prog2a[] = { 1, 3, 5, 7 };
	static u32 array_prog2b[] = { 2, 4, 6, 8 };
	static int array_prog3a[] = { 1, 4, 7, 10 };
	static u32 array_prog3b[] = { 2, 5, 8, 11 };
	static int array_prog4a[] = { 1, 5, 9, 13 };
	static u32 array_prog4b[] = { 2, 6, 10, 14 };

	FIND_CLOSEST_RANGE_CHECK(-3, 2, ina226_avg_tab, 0);
	FIND_CLOSEST_RANGE_CHECK(3, 10, ina226_avg_tab, 1);
	FIND_CLOSEST_RANGE_CHECK(11, 40, ina226_avg_tab, 2);
	FIND_CLOSEST_RANGE_CHECK(41, 96, ina226_avg_tab, 3);
	FIND_CLOSEST_RANGE_CHECK(97, 192, ina226_avg_tab, 4);
	FIND_CLOSEST_RANGE_CHECK(193, 384, ina226_avg_tab, 5);
	FIND_CLOSEST_RANGE_CHECK(385, 768, ina226_avg_tab, 6);
	FIND_CLOSEST_RANGE_CHECK(769, 2048, ina226_avg_tab, 7);

	/* The array that found the bug that caused this kunit to exist */
	FIND_CLOSEST_RANGE_CHECK(-3, 1, ad7616_oversampling_avail, 0);
	FIND_CLOSEST_RANGE_CHECK(2, 3, ad7616_oversampling_avail, 1);
	FIND_CLOSEST_RANGE_CHECK(4, 6, ad7616_oversampling_avail, 2);
	FIND_CLOSEST_RANGE_CHECK(7, 12, ad7616_oversampling_avail, 3);
	FIND_CLOSEST_RANGE_CHECK(13, 24, ad7616_oversampling_avail, 4);
	FIND_CLOSEST_RANGE_CHECK(25, 48, ad7616_oversampling_avail, 5);
	FIND_CLOSEST_RANGE_CHECK(49, 96, ad7616_oversampling_avail, 6);
	FIND_CLOSEST_RANGE_CHECK(97, 256, ad7616_oversampling_avail, 7);

	FIND_CLOSEST_RANGE_CHECK(-3, 3, wd_timeout_table, 0);
	FIND_CLOSEST_RANGE_CHECK(4, 5, wd_timeout_table, 1);
	FIND_CLOSEST_RANGE_CHECK(6, 7, wd_timeout_table, 2);
	FIND_CLOSEST_RANGE_CHECK(8, 12, wd_timeout_table, 3);
	FIND_CLOSEST_RANGE_CHECK(13, 24, wd_timeout_table, 4);
	FIND_CLOSEST_RANGE_CHECK(25, 40, wd_timeout_table, 5);
	FIND_CLOSEST_RANGE_CHECK(41, 56, wd_timeout_table, 6);
	FIND_CLOSEST_RANGE_CHECK(57, 128, wd_timeout_table, 7);

	/* One could argue that find_closest() should not be used for monotonic
	 * arrays (like 1,2,3,4,5), but even so, it should work as long as the
	 * array is sorted ascending. */
	FIND_CLOSEST_RANGE_CHECK(-3, 1, array_prog1a, 0);
	FIND_CLOSEST_RANGE_CHECK(2, 2, array_prog1a, 1);
	FIND_CLOSEST_RANGE_CHECK(3, 3, array_prog1a, 2);
	FIND_CLOSEST_RANGE_CHECK(4, 4, array_prog1a, 3);
	FIND_CLOSEST_RANGE_CHECK(5, 8, array_prog1a, 4);

	FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog1b, 0);
	FIND_CLOSEST_RANGE_CHECK(3, 3, array_prog1b, 1);
	FIND_CLOSEST_RANGE_CHECK(4, 4, array_prog1b, 2);
	FIND_CLOSEST_RANGE_CHECK(5, 5, array_prog1b, 3);
	FIND_CLOSEST_RANGE_CHECK(6, 8, array_prog1b, 4);

	FIND_CLOSEST_RANGE_CHECK(-4, -2, array_prog1mix, 0);
	FIND_CLOSEST_RANGE_CHECK(-1, -1, array_prog1mix, 1);
	FIND_CLOSEST_RANGE_CHECK(0, 0, array_prog1mix, 2);
	FIND_CLOSEST_RANGE_CHECK(1, 1, array_prog1mix, 3);
	FIND_CLOSEST_RANGE_CHECK(2, 5, array_prog1mix, 4);

	FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog2a, 0);
	FIND_CLOSEST_RANGE_CHECK(3, 4, array_prog2a, 1);
	FIND_CLOSEST_RANGE_CHECK(5, 6, array_prog2a, 2);
	FIND_CLOSEST_RANGE_CHECK(7, 10, array_prog2a, 3);

	FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog2b, 0);
	FIND_CLOSEST_RANGE_CHECK(4, 5, array_prog2b, 1);
	FIND_CLOSEST_RANGE_CHECK(6, 7, array_prog2b, 2);
	FIND_CLOSEST_RANGE_CHECK(8, 10, array_prog2b, 3);

	FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog3a, 0);
	FIND_CLOSEST_RANGE_CHECK(3, 5, array_prog3a, 1);
	FIND_CLOSEST_RANGE_CHECK(6, 8, array_prog3a, 2);
	FIND_CLOSEST_RANGE_CHECK(9, 20, array_prog3a, 3);

	FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog3b, 0);
	FIND_CLOSEST_RANGE_CHECK(4, 6, array_prog3b, 1);
	FIND_CLOSEST_RANGE_CHECK(7, 9, array_prog3b, 2);
	FIND_CLOSEST_RANGE_CHECK(10, 20, array_prog3b, 3);

	FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog4a, 0);
	FIND_CLOSEST_RANGE_CHECK(4, 7, array_prog4a, 1);
	FIND_CLOSEST_RANGE_CHECK(8, 11, array_prog4a, 2);
	FIND_CLOSEST_RANGE_CHECK(12, 20, array_prog4a, 3);

	FIND_CLOSEST_RANGE_CHECK(-3, 4, array_prog4b, 0);
	FIND_CLOSEST_RANGE_CHECK(5, 8, array_prog4b, 1);
	FIND_CLOSEST_RANGE_CHECK(9, 12, array_prog4b, 2);
	FIND_CLOSEST_RANGE_CHECK(13, 20, array_prog4b, 3);
}
/*
 * Assert that find_closest_descending() returns @exp_idx for every input
 * value in the inclusive range [@from, @to] over the descending @array.
 */
#define FIND_CLOSEST_DESC_RANGE_CHECK(from, to, array, exp_idx)	\
{								\
	int i;							\
	for (i = from; i <= to; i++) {				\
		int found = find_closest_descending(i, array,	\
						ARRAY_SIZE(array)); \
		KUNIT_ASSERT_EQ(ctx, exp_idx, found);		\
	}							\
}
/*
 * Mirror of test_find_closest() for find_closest_descending(): the same
 * tables reversed, with the expected indexes reversed accordingly.
 */
static void test_find_closest_descending(struct kunit *ctx)
{
	/* Same arrays as 'test_find_closest' but reversed */
	static const int ina226_avg_tab[] = { 1024, 512, 256, 128, 64, 16, 4, 1 };
	static const unsigned int ad7616_oversampling_avail[] = {
		128, 64, 32, 16, 8, 4, 2, 1
	};
	static u32 wd_timeout_table[] = { 64, 48, 32, 16, 8, 6, 4, 2 };
	static int array_prog1a[] = { 5, 4, 3, 2, 1 };
	static u32 array_prog1b[] = { 6, 5, 4, 3, 2 };
	static int array_prog1mix[] = { 2, 1, 0, -1, -2 };
	static int array_prog2a[] = { 7, 5, 3, 1 };
	static u32 array_prog2b[] = { 8, 6, 4, 2 };
	static int array_prog3a[] = { 10, 7, 4, 1 };
	static u32 array_prog3b[] = { 11, 8, 5, 2 };
	static int array_prog4a[] = { 13, 9, 5, 1 };
	static u32 array_prog4b[] = { 14, 10, 6, 2 };

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, ina226_avg_tab, 7);
	FIND_CLOSEST_DESC_RANGE_CHECK(3, 10, ina226_avg_tab, 6);
	FIND_CLOSEST_DESC_RANGE_CHECK(11, 40, ina226_avg_tab, 5);
	FIND_CLOSEST_DESC_RANGE_CHECK(41, 96, ina226_avg_tab, 4);
	FIND_CLOSEST_DESC_RANGE_CHECK(97, 192, ina226_avg_tab, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(193, 384, ina226_avg_tab, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(385, 768, ina226_avg_tab, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(769, 2048, ina226_avg_tab, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 1, ad7616_oversampling_avail, 7);
	FIND_CLOSEST_DESC_RANGE_CHECK(2, 3, ad7616_oversampling_avail, 6);
	FIND_CLOSEST_DESC_RANGE_CHECK(4, 6, ad7616_oversampling_avail, 5);
	FIND_CLOSEST_DESC_RANGE_CHECK(7, 12, ad7616_oversampling_avail, 4);
	FIND_CLOSEST_DESC_RANGE_CHECK(13, 24, ad7616_oversampling_avail, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(25, 48, ad7616_oversampling_avail, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(49, 96, ad7616_oversampling_avail, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(97, 256, ad7616_oversampling_avail, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, wd_timeout_table, 7);
	FIND_CLOSEST_DESC_RANGE_CHECK(4, 5, wd_timeout_table, 6);
	FIND_CLOSEST_DESC_RANGE_CHECK(6, 7, wd_timeout_table, 5);
	FIND_CLOSEST_DESC_RANGE_CHECK(8, 12, wd_timeout_table, 4);
	FIND_CLOSEST_DESC_RANGE_CHECK(13, 24, wd_timeout_table, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(25, 40, wd_timeout_table, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(41, 56, wd_timeout_table, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(57, 128, wd_timeout_table, 0);

	/* One could argue that find_closest_descending() should not be used
	 * for monotonic arrays (like 5,4,3,2,1), but even so, it should still
	 * work as long as the array is sorted descending. */
	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 1, array_prog1a, 4);
	FIND_CLOSEST_DESC_RANGE_CHECK(2, 2, array_prog1a, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(3, 3, array_prog1a, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(4, 4, array_prog1a, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(5, 8, array_prog1a, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog1b, 4);
	FIND_CLOSEST_DESC_RANGE_CHECK(3, 3, array_prog1b, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(4, 4, array_prog1b, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(5, 5, array_prog1b, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(6, 8, array_prog1b, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-4, -2, array_prog1mix, 4);
	FIND_CLOSEST_DESC_RANGE_CHECK(-1, -1, array_prog1mix, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(0, 0, array_prog1mix, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(1, 1, array_prog1mix, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(2, 5, array_prog1mix, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog2a, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(3, 4, array_prog2a, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(5, 6, array_prog2a, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(7, 10, array_prog2a, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog2b, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(4, 5, array_prog2b, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(6, 7, array_prog2b, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(8, 10, array_prog2b, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog3a, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(3, 5, array_prog3a, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(6, 8, array_prog3a, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(9, 20, array_prog3a, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog3b, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(4, 6, array_prog3b, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(7, 9, array_prog3b, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(10, 20, array_prog3b, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog4a, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(4, 7, array_prog4a, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(8, 11, array_prog4a, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(12, 20, array_prog4a, 0);

	FIND_CLOSEST_DESC_RANGE_CHECK(-3, 4, array_prog4b, 3);
	FIND_CLOSEST_DESC_RANGE_CHECK(5, 8, array_prog4b, 2);
	FIND_CLOSEST_DESC_RANGE_CHECK(9, 12, array_prog4b, 1);
	FIND_CLOSEST_DESC_RANGE_CHECK(13, 20, array_prog4b, 0);
}
/* Test case list for the util_macros.h suite. */
static struct kunit_case __refdata util_macros_test_cases[] = {
	KUNIT_CASE(test_find_closest),
	KUNIT_CASE(test_find_closest_descending),
	{}
};
/* Suite descriptor; registered below via kunit_test_suites(). */
static struct kunit_suite util_macros_test_suite = {
	.name = "util_macros.h",
	.test_cases = util_macros_test_cases,
};
/* Register the suite with KUnit and declare module metadata. */
kunit_test_suites(&util_macros_test_suite);

MODULE_AUTHOR("Alexandru Ardelean <[email protected]>");
MODULE_DESCRIPTION("Test cases for util_macros.h helpers");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/* sound/soc/rockchip/rk_spdif.c
*
 * ALSA SoC Audio Layer - Rockchip SPDIF Controller driver
*
* Copyright (c) 2014 Rockchip Electronics Co. Ltd.
* Author: Jianqun <[email protected]>
* Copyright (c) 2015 Collabora Ltd.
* Author: Sjoerd Simons <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <sound/pcm_params.h>
#include <sound/dmaengine_pcm.h>
#include "rockchip_spdif.h"
/* SoC variants; used as of_device_id match data below. */
enum rk_spdif_type {
	RK_SPDIF_RK3066,
	RK_SPDIF_RK3188,
	RK_SPDIF_RK3288,
	RK_SPDIF_RK3366,
};
/* RK3288 General Register File offset used for SPDIF routing. */
#define RK3288_GRF_SOC_CON2 0x24c

/* Per-device driver state. */
struct rk_spdif_dev {
	struct device *dev;				/* platform device */
	struct clk *mclk;				/* transfer/master clock */
	struct clk *hclk;				/* bus (AHB) clock */
	struct snd_dmaengine_dai_dma_data playback_dma_data; /* SPDIF is playback-only */
	struct regmap *regmap;				/* MMIO register map */
};
/*
 * Compatible strings and the layout variant each maps to.  Most later
 * SoCs reuse the RK3366 variant.
 */
static const struct of_device_id rk_spdif_match[] __maybe_unused = {
	{ .compatible = "rockchip,rk3066-spdif",
	  .data = (void *)RK_SPDIF_RK3066 },
	{ .compatible = "rockchip,rk3188-spdif",
	  .data = (void *)RK_SPDIF_RK3188 },
	{ .compatible = "rockchip,rk3228-spdif",
	  .data = (void *)RK_SPDIF_RK3366 },
	{ .compatible = "rockchip,rk3288-spdif",
	  .data = (void *)RK_SPDIF_RK3288 },
	{ .compatible = "rockchip,rk3328-spdif",
	  .data = (void *)RK_SPDIF_RK3366 },
	{ .compatible = "rockchip,rk3366-spdif",
	  .data = (void *)RK_SPDIF_RK3366 },
	{ .compatible = "rockchip,rk3368-spdif",
	  .data = (void *)RK_SPDIF_RK3366 },
	{ .compatible = "rockchip,rk3399-spdif",
	  .data = (void *)RK_SPDIF_RK3366 },
	{ .compatible = "rockchip,rk3568-spdif",
	  .data = (void *)RK_SPDIF_RK3366 },
	{},
};
MODULE_DEVICE_TABLE(of, rk_spdif_match);
/*
 * Runtime PM suspend: switch the regmap to cache-only before gating the
 * clocks so no register I/O can race the clock removal.
 */
static int __maybe_unused rk_spdif_runtime_suspend(struct device *dev)
{
	struct rk_spdif_dev *spdif = dev_get_drvdata(dev);

	regcache_cache_only(spdif->regmap, true);
	clk_disable_unprepare(spdif->mclk);
	clk_disable_unprepare(spdif->hclk);

	return 0;
}
/*
 * Runtime PM resume: re-enable both clocks, then replay the cached
 * register state.  Any failure rolls back clocks enabled so far.
 */
static int __maybe_unused rk_spdif_runtime_resume(struct device *dev)
{
	struct rk_spdif_dev *spdif = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(spdif->mclk);
	if (ret) {
		dev_err(spdif->dev, "mclk clock enable failed %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(spdif->hclk);
	if (ret) {
		clk_disable_unprepare(spdif->mclk);
		dev_err(spdif->dev, "hclk clock enable failed %d\n", ret);
		return ret;
	}

	/* Leave cache-only mode and write the cached state back to HW. */
	regcache_cache_only(spdif->regmap, false);
	regcache_mark_dirty(spdif->regmap);

	ret = regcache_sync(spdif->regmap);
	if (ret) {
		clk_disable_unprepare(spdif->mclk);
		clk_disable_unprepare(spdif->hclk);
	}

	return ret;
}
/*
 * hw_params: program the sample width and set the module clock to
 * 128x the sample rate.  Returns -EINVAL for unsupported formats.
 */
static int rk_spdif_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params,
			      struct snd_soc_dai *dai)
{
	struct rk_spdif_dev *spdif = snd_soc_dai_get_drvdata(dai);
	unsigned int val = SPDIF_CFGR_HALFWORD_ENABLE;
	int srate, mclk;
	int ret;

	srate = params_rate(params);
	mclk = srate * 128;	/* module clock is fixed at 128 * fs */

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		val |= SPDIF_CFGR_VDW_16;
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		val |= SPDIF_CFGR_VDW_20;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		val |= SPDIF_CFGR_VDW_24;
		break;
	default:
		return -EINVAL;
	}

	/* Set clock and calculate divider */
	ret = clk_set_rate(spdif->mclk, mclk);
	if (ret != 0) {
		dev_err(spdif->dev, "Failed to set module clock rate: %d\n",
			ret);
		return ret;
	}

	/* Note: SDPIF_CFGR_VDW_MASK spelling comes from the header. */
	ret = regmap_update_bits(spdif->regmap, SPDIF_CFGR,
				 SPDIF_CFGR_CLK_DIV_MASK |
				 SPDIF_CFGR_HALFWORD_ENABLE |
				 SDPIF_CFGR_VDW_MASK, val);

	return ret;
}
/*
 * Trigger: on start, enable the TX DMA request (with a 16-entry
 * threshold) before starting the transfer; on stop, disable DMA first
 * and then halt the transfer.
 */
static int rk_spdif_trigger(struct snd_pcm_substream *substream,
			    int cmd, struct snd_soc_dai *dai)
{
	struct rk_spdif_dev *spdif = snd_soc_dai_get_drvdata(dai);
	int ret;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR,
					 SPDIF_DMACR_TDE_ENABLE |
					 SPDIF_DMACR_TDL_MASK,
					 SPDIF_DMACR_TDE_ENABLE |
					 SPDIF_DMACR_TDL(16));
		if (ret != 0)
			return ret;

		ret = regmap_update_bits(spdif->regmap, SPDIF_XFER,
					 SPDIF_XFER_TXS_START,
					 SPDIF_XFER_TXS_START);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR,
					 SPDIF_DMACR_TDE_ENABLE,
					 SPDIF_DMACR_TDE_DISABLE);
		if (ret != 0)
			return ret;

		ret = regmap_update_bits(spdif->regmap, SPDIF_XFER,
					 SPDIF_XFER_TXS_START,
					 SPDIF_XFER_TXS_STOP);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* DAI probe: hand the playback DMA parameters to the ASoC core. */
static int rk_spdif_dai_probe(struct snd_soc_dai *dai)
{
	struct rk_spdif_dev *spdif = snd_soc_dai_get_drvdata(dai);

	snd_soc_dai_dma_data_set_playback(dai, &spdif->playback_dma_data);

	return 0;
}
/* DAI callbacks implemented by this driver. */
static const struct snd_soc_dai_ops rk_spdif_dai_ops = {
	.probe = rk_spdif_dai_probe,
	.hw_params = rk_spdif_hw_params,
	.trigger = rk_spdif_trigger,
};
/* Playback-only DAI: stereo, common SPDIF rates, 16/20/24-bit samples. */
static struct snd_soc_dai_driver rk_spdif_dai = {
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 2,
		.rates = (SNDRV_PCM_RATE_32000 |
			  SNDRV_PCM_RATE_44100 |
			  SNDRV_PCM_RATE_48000 |
			  SNDRV_PCM_RATE_96000 |
			  SNDRV_PCM_RATE_192000),
		.formats = (SNDRV_PCM_FMTBIT_S16_LE |
			    SNDRV_PCM_FMTBIT_S20_3LE |
			    SNDRV_PCM_FMTBIT_S24_LE),
	},
	.ops = &rk_spdif_dai_ops,
};
/* ASoC component; legacy DAI naming retained for existing card configs. */
static const struct snd_soc_component_driver rk_spdif_component = {
	.name = "rockchip-spdif",
	.legacy_dai_naming = 1,
};
/*
 * regmap writeable_reg callback: only the configuration, DMA control,
 * interrupt control, transfer control and sample-data registers may be
 * written.
 */
static bool rk_spdif_wr_reg(struct device *dev, unsigned int reg)
{
	return reg == SPDIF_CFGR ||
	       reg == SPDIF_DMACR ||
	       reg == SPDIF_INTCR ||
	       reg == SPDIF_XFER ||
	       reg == SPDIF_SMPDR;
}
/*
 * regmap readable_reg callback: the readable set adds the stream data
 * buffer and interrupt status registers, but not the write-only DMACR.
 */
static bool rk_spdif_rd_reg(struct device *dev, unsigned int reg)
{
	return reg == SPDIF_CFGR ||
	       reg == SPDIF_SDBLR ||
	       reg == SPDIF_INTCR ||
	       reg == SPDIF_INTSR ||
	       reg == SPDIF_XFER ||
	       reg == SPDIF_SMPDR;
}
/*
 * regmap volatile_reg callback: status and data registers change under
 * hardware control and must never be served from the register cache.
 */
static bool rk_spdif_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == SPDIF_INTSR ||
	       reg == SPDIF_SDBLR ||
	       reg == SPDIF_SMPDR;
}
/* MMIO regmap: 32-bit registers at 4-byte stride, flat cache. */
static const struct regmap_config rk_spdif_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = SPDIF_SMPDR,
	.writeable_reg = rk_spdif_wr_reg,
	.readable_reg = rk_spdif_rd_reg,
	.volatile_reg = rk_spdif_volatile_reg,
	.cache_type = REGCACHE_FLAT,
};
/*
 * Probe: apply the RK3288 GRF quirk, acquire clocks and registers, set
 * up the playback DMA parameters, then register the DAI and PCM.  If
 * runtime PM is not effective the device is resumed manually.
 */
static int rk_spdif_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct rk_spdif_dev *spdif;
	const struct of_device_id *match;
	struct resource *res;
	void __iomem *regs;
	int ret;

	match = of_match_node(rk_spdif_match, np);
	if (match->data == (void *)RK_SPDIF_RK3288) {
		struct regmap *grf;

		grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
		if (IS_ERR(grf)) {
			dev_err(&pdev->dev,
				"rockchip_spdif missing 'rockchip,grf'\n");
			return PTR_ERR(grf);
		}

		/* Select the 8 channel SPDIF solution on RK3288 as
		 * the 2 channel one does not appear to work
		 */
		regmap_write(grf, RK3288_GRF_SOC_CON2, BIT(1) << 16);
	}

	spdif = devm_kzalloc(&pdev->dev, sizeof(*spdif), GFP_KERNEL);
	if (!spdif)
		return -ENOMEM;

	spdif->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(spdif->hclk))
		return PTR_ERR(spdif->hclk);

	spdif->mclk = devm_clk_get(&pdev->dev, "mclk");
	if (IS_ERR(spdif->mclk))
		return PTR_ERR(spdif->mclk);

	regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs,
						  &rk_spdif_regmap_config);
	if (IS_ERR(spdif->regmap))
		return PTR_ERR(spdif->regmap);

	/* DMA pushes 32-bit samples into the sample data register. */
	spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR;
	spdif->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	spdif->playback_dma_data.maxburst = 4;

	spdif->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, spdif);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		/* Runtime PM unavailable: power the hardware up by hand. */
		ret = rk_spdif_runtime_resume(&pdev->dev);
		if (ret)
			goto err_pm_runtime;
	}

	ret = devm_snd_soc_register_component(&pdev->dev,
					      &rk_spdif_component,
					      &rk_spdif_dai, 1);
	if (ret) {
		dev_err(&pdev->dev, "Could not register DAI\n");
		goto err_pm_suspend;
	}

	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
	if (ret) {
		dev_err(&pdev->dev, "Could not register PCM\n");
		goto err_pm_suspend;
	}

	return 0;

err_pm_suspend:
	/* Undo the manual resume (or an active runtime resume). */
	if (!pm_runtime_status_suspended(&pdev->dev))
		rk_spdif_runtime_suspend(&pdev->dev);
err_pm_runtime:
	pm_runtime_disable(&pdev->dev);

	return ret;
}
/* Remove: disable runtime PM and power down if still active. */
static void rk_spdif_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		rk_spdif_runtime_suspend(&pdev->dev);
}
/* Runtime PM only; no system sleep callbacks. */
static const struct dev_pm_ops rk_spdif_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_spdif_runtime_suspend, rk_spdif_runtime_resume,
			   NULL)
};
/* Platform driver glue. */
static struct platform_driver rk_spdif_driver = {
	.probe = rk_spdif_probe,
	.remove = rk_spdif_remove,
	.driver = {
		.name = "rockchip-spdif",
		.of_match_table = of_match_ptr(rk_spdif_match),
		.pm = &rk_spdif_pm_ops,
	},
};
/* Module registration and metadata. */
module_platform_driver(rk_spdif_driver);
MODULE_ALIAS("platform:rockchip-spdif");
MODULE_DESCRIPTION("ROCKCHIP SPDIF transceiver Interface");
MODULE_AUTHOR("Sjoerd Simons <[email protected]>");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2009 - Maxim Levitsky
* SmartMedia/xD translation layer
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/hdreg.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sysfs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include "nand/raw/sm_common.h"
#include "sm_ftl.h"
/* Workqueue used to flush the one-block write cache from a timer. */
static struct workqueue_struct *cache_flush_workqueue;

/* Delay after the last write before the cache is flushed, in ms. */
static int cache_timeout = 1000;
module_param(cache_timeout, int, S_IRUGO);
/* Fix: the description string was missing its closing parenthesis. */
MODULE_PARM_DESC(cache_timeout,
	"Timeout (in ms) for cache flush (1000 ms default)");

/* Debug verbosity level (0 = quiet, 2 = verbose). */
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
/* ------------------- sysfs attributes ---------------------------------- */
/*
 * A device_attribute bundled with the backing string it exposes, so the
 * show callback can recover the data via container_of().
 */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;	/* attribute payload (owned by this struct) */
	int len;	/* payload length in bytes */
};
/*
 * sysfs show callback: copy the stored attribute payload into the sysfs
 * buffer and return its length.
 *
 * Uses memcpy() of the exact length rather than strncpy(): the copy is
 * a fixed-length byte copy, the destination is not expected to be
 * NUL-terminated, and strncpy() is deprecated for this use in the
 * kernel.
 */
static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct sm_sysfs_attribute *sm_attr =
		container_of(attr, struct sm_sysfs_attribute, dev_attr);

	memcpy(buf, sm_attr->data, sm_attr->len);
	return sm_attr->len;
}
/* Number of sysfs attributes exposed per device (currently: vendor). */
#define NUM_ATTRIBUTES 1
/* Offset of the vendor string within the CIS buffer. */
#define SM_CIS_VENDOR_OFFSET 0x59

/*
 * Build the sysfs attribute group exposing the vendor string read from
 * the CIS.  Returns NULL on allocation failure; partial allocations are
 * unwound through the error labels in reverse order.
 */
static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute_group *attr_group;
	struct attribute **attributes;
	struct sm_sysfs_attribute *vendor_attribute;
	char *vendor;

	/* Duplicate at most the remainder of the CIS small page. */
	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
	if (!vendor)
		goto error1;

	/* Initialize sysfs attributes */
	vendor_attribute =
		kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
	if (!vendor_attribute)
		goto error2;

	sysfs_attr_init(&vendor_attribute->dev_attr.attr);

	vendor_attribute->data = vendor;
	vendor_attribute->len = strlen(vendor);
	vendor_attribute->dev_attr.attr.name = "vendor";
	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
	vendor_attribute->dev_attr.show = sm_attr_show;

	/* Create array of pointers to the attributes */
	attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
			     GFP_KERNEL);
	if (!attributes)
		goto error3;
	attributes[0] = &vendor_attribute->dev_attr.attr;

	/* Finally create the attribute group */
	attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
	if (!attr_group)
		goto error4;
	attr_group->attrs = attributes;
	return attr_group;
error4:
	kfree(attributes);
error3:
	kfree(vendor_attribute);
error2:
	kfree(vendor);
error1:
	return NULL;
}
/*
 * Tear down everything sm_create_sysfs_attributes() allocated: each
 * attribute's payload and wrapper, the pointer array, and the group.
 */
static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute **attributes = ftl->disk_attributes->attrs;
	int i;

	for (i = 0; attributes[i] ; i++) {
		/* Walk back from attribute to the owning wrapper struct. */
		struct device_attribute *dev_attr = container_of(attributes[i],
						struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				     struct sm_sysfs_attribute, dev_attr);

		kfree(sm_attr->data);
		kfree(sm_attr);
	}

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
}
/* ----------------------- oob helpers -------------------------------------- */
/*
 * Decode the 10-bit logical block address from one 2-byte on-media LBA
 * field.  Returns -2 when the fixed bit pattern or parity check fails.
 */
static int sm_get_lba(uint8_t *lba)
{
	/* check fixed bits */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* check parity - endianness doesn't matter */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
}
/*
 * Read LBA associated with block
 * returns -1, if block is erased
 * returns -2 if error happens
 */
static int sm_read_lba(struct sm_oob *oob)
{
	static const uint32_t erased_pattern[4] = {
		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	uint16_t lba_test;
	int lba;

	/* First test for erased block */
	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
		return -1;

	/* Now check is both copies of the LBA differ too much */
	/* A single-bit difference is tolerated (power-of-2 XOR). */
	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t *)oob->lba_copy2;
	if (lba_test && !is_power_of_2(lba_test))
		return -2;

	/* And read it */
	lba = sm_get_lba(oob->lba_copy1);

	/* Fall back to the second copy if the first fails its checks. */
	if (lba == -2)
		lba = sm_get_lba(oob->lba_copy2);

	return lba;
}
/*
 * Encode an LBA (< 1000) into both on-media copies, including the fixed
 * 0x10 marker bits and the even-parity bit in the low bit of byte 1.
 */
static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
	uint8_t tmp[2];

	WARN_ON(lba >= 1000);

	tmp[0] = 0x10 | ((lba >> 7) & 0x07);
	tmp[1] = (lba << 1) & 0xFF;

	/* Set the parity bit so the 16-bit popcount becomes even. */
	if (hweight16(*(uint16_t *)tmp) & 0x01)
		tmp[1] |= 1;

	oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
	oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
}
/* Make offset from parts */
/*
 * Combine (zone, block, byte offset) into an absolute mtd offset.
 * A block of -1 propagates as offset -1 (hole in the FTL table).
 */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	/* Sanity-check alignment and ranges; these indicate driver bugs. */
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	if (block == -1)
		return -1;

	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}
/* Breaks offset into parts */
/*
 * Split an absolute byte offset into (zone, logical block, byte offset
 * within the block).  An out-of-range zone is reported as -1.
 */
static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
			    int *zone, int *block, int *boffset)
{
	u64 offset = loffset;
	*boffset = do_div(offset, ftl->block_size);
	*block = do_div(offset, ftl->max_lba);
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
/* ---------------------- low level IO ------------------------------------- */
/*
 * ECC-check and correct one sector (two small pages) in place against
 * the ECC bytes stored in the oob.  Returns -EIO on uncorrectable data.
 */
static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	uint8_t ecc[3];

	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;

	/* Second half of the sector uses the second ECC field. */
	buffer += SM_SMALL_PAGE;
	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;

	return 0;
}
/* Reads a sector + oob*/
/*
 * Read one sector plus its oob.  Retries up to twice (re-checking the
 * media in between) on errors, bad oob, or ECC failures.  buffer and
 * oob may each be NULL when the caller does not need them.
 */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops = { };
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	if (block == -1) {
		if (buffer)
			memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		 * won't help anyway
		 */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	 * despite card removal.....
	 */
	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors */
	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad" , block, zone);
		goto again;
	}

	/* Test ECC*/
	if (mtd_is_eccerr(ret) ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
/* Writes a sector to media */
/*
 * Write one sector plus oob.  Refuses to touch the CIS or block 0 of
 * zone 0, and bails out immediately while the media is unstable.
 */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops = { };
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	BUG_ON(ftl->readonly);

	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}
/* ------------------------ block IO ------------------------------------- */

/* Write a block using data and lba, and invalid sector bitmap */
/*
 * Write a whole block: sectors flagged in invalid_bitmap get their
 * data_status cleared.  On a sector write failure the block is erased
 * and the whole write retried once; a second failure marks it bad.
 */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" couldn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		if (ftl->smallpagenand) {
			ecc_sw_hamming_calculate(buf + boffset,
						 SM_SMALL_PAGE, oob.ecc1,
						 sm_order);

			ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
						 SM_SMALL_PAGE, oob.ecc2,
						 sm_order);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
				     buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails. try to erase the block */
			/* This is safe, because we never write in blocks
			 * that contain valuable data.
			 * This is intended to repair block that are marked
			 * as erased, but that isn't fully erased
			 */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}
/* Mark whole block at offset 'offs' as bad. */
/*
 * Best-effort: writes a 0xF0 block_status into every sector's oob.
 * Skipped entirely when the media is unstable or fails its recheck.
 */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	 * any bad blocks till fail completely
	 */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
/*
 * Erase a block within a zone
 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(ftl->readonly);

	/* The CIS and block 0 of zone 0 must never be erased. */
	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd_erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
			block, zone_num);
		goto error;
	}

	/* Return the block to the zone's free-block FIFO if requested. */
	if (put_free)
		kfifo_in(&zone->free_sectors,
			(const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}
/* Thoroughly test that block is valid. */
/*
 * Scan every sector's LBA: a valid block shows one LBA, a partially
 * erased ("sliced") block shows two and is erased here (returns 1),
 * three or more distinct LBAs is an error.  Returns 0 when valid,
 * negative on read failure or inconsistency.
 */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	/* lbas[0] = -3 is a sentinel that no real LBA can equal. */
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;

	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	 * accepted
	 */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		/* Record each newly-seen LBA value. */
		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
/* ----------------- media scanning --------------------------------- */
/* Pseudo CHS geometry per media size in MiB: {size, cyl, head, sec}. */
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },	/* sentinel */
};

/* Expected signature at the start of a valid CIS. */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough
 */
/*
 * Derive zone/block geometry and CHS values from the mtd device size.
 * Returns -ENODEV when the device's page/erase/oob sizes cannot match
 * any supported SmartMedia/xD layout.
 */
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;

		/* 2 MiB rom SmartMedia */
		} else {

			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
	}

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	 * sizes. SmartMedia cards exist up to 128 MiB and have same layout
	 */
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write,erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use OOB */
	if (!mtd_has_oob(mtd))
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	/* Unknown size: fall back to the largest table geometry. */
	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads =  33;
	ftl->sectors = 63;
	return 0;
}
/* Validate the CIS */
/*
 * Re-read the CIS sector at the location recorded in ftl and verify
 * its signature.  Returns 0 on a valid CIS, -EIO otherwise.
 */
static int sm_read_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;

	if (sm_read_sector(ftl,
		0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
		return -EIO;

	if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
		return -EIO;

	if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
			cis_signature, sizeof(cis_signature))) {
		return 0;
	}

	return -EIO;
}
/* Scan the media for the CIS */
/*
 * Locate the CIS: find the first valid block among the reserved blocks
 * at the start of zone 0, then the first valid sector in it, and try
 * the signature at page offset 0 and SM_SMALL_PAGE.
 */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
						boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	if (!cis_found) {
		/* Retry with the signature in the second small page. */
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}
/* Basic test to determine if underlying mtd device if functional */
/*
 * Re-read the CIS as a liveness probe.  On failure the FTL is latched
 * into the "unstable" state, which blocks all further writes.
 */
static int sm_recheck_media(struct sm_ftl *ftl)
{
	if (sm_read_cis(ftl)) {

		if (!ftl->unstable) {
			sm_printk("media unstable, not allowing writes");
			ftl->unstable = 1;
		}
		return -EIO;
	}
	return 0;
}
/* Initialize a FTL zone */
/*
 * Build the LBA->physical table and the free-block FIFO for one zone
 * by scanning every block's first-sector oob.  Handles erased blocks,
 * bad blocks, invalid LBAs and LBA collisions.  Finally the free-block
 * FIFO is rotated by a random amount for crude wear levelling.
 */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table */
	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	/* -1 (0xFFFF) marks an unmapped LBA. */
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for free sectors FIFO */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
			kfifo_free(&zone->free_sectors);
			kfree(zone->lba_to_phys_table);
			return -EIO;
		}

		/* Test to see if block is erased. It is enough to test
		 * first sector, because erase happens in one shot
		 */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
		 * very low probability of failure here
		 */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}


		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		 * lets leave that to recovery application
		 */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}


		/* If there is no collision,
		 * just put the sector in the FTL table
		 */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
					zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
		 * they hold different versions of same data. It not
		 * known which is more recent, thus just erase one of them
		 */
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
	 * work, but it can still can be (partially) read
	 */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	/* Rotate the FIFO by i entries. */
	while (i--) {
		len = kfifo_out(&zone->free_sectors,
			(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}
/* Get and automatically initialize an FTL mapping for one zone */
/*
 * Lazily initialize the zone on first access.  Returns the zone or an
 * ERR_PTR() from sm_init_zone().
 */
static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone;
	int error;

	BUG_ON(zone_num >= ftl->zone_count);
	zone = &ftl->zones[zone_num];

	if (!zone->initialized) {
		error = sm_init_zone(ftl, zone_num);

		if (error)
			return ERR_PTR(error);
	}
	return zone;
}
/* ----------------- cache handling ------------------------------------------*/
/* Reset the one-block write cache to its empty, unbound state. */
static void sm_cache_init(struct sm_ftl *ftl)
{
	/* The cache is not bound to any zone/block yet. */
	ftl->cache_zone = -1;
	ftl->cache_block = -1;
	/* All sectors invalid, nothing to flush. */
	ftl->cache_clean = 1;
	ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
	/* cache_data itself is deliberately left as-is (all stale). */
}
/* Store one sector into the cache, mark it valid and the cache dirty. */
static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
	int sector_num = boffset / SM_SECTOR_SIZE;

	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
	clear_bit(sector_num, &ftl->cache_data_invalid_bitmap);
	ftl->cache_clean = 0;
}
/* Fetch one sector from the cache; returns -1 if it is not cached. */
static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{
	int sector_num = boffset / SM_SECTOR_SIZE;

	if (test_bit(sector_num, &ftl->cache_data_invalid_bitmap))
		return -1;

	memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
	return 0;
}
/* Write the cache to hardware */
/*
 * Flush the dirty cache block: fill unwritten sectors from the old
 * physical block, write everything to a fresh block from the free FIFO,
 * update the FTL table, then erase and free the old block.  Caller must
 * hold ftl->mutex.
 */
static int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block*/
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
				clear_bit(sector_num,
					&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	 * but for such worn out media it doesn't worth the trouble,
	 * and the dangers
	 */
	if (kfifo_out(&zone->free_sectors,
				(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	/* A failed write picks a new free block and retries. */
	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
			goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}
/* flush timer, runs a second after last write */
/* Timer callback: defer the actual flush to the workqueue (can sleep). */
static void sm_cache_flush_timer(struct timer_list *t)
{
	struct sm_ftl *ftl = from_timer(ftl, t, timer);
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}
/* cache flush work, kicked by timer */
/*
 * Workqueue handler: flush the write cache under the FTL mutex.
 * (The redundant bare "return;" at the end of this void function was
 * removed; it served no purpose.)
 */
static void sm_cache_flush_work(struct work_struct *work)
{
	struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);

	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}
/* ---------------- outside interface -------------------------------------- */

/* outside interface: read a sector */
/*
 * Read one 512-byte sector: serve from the cache when possible, fall
 * back to media, and return 0xFF-fill for unmapped blocks.  A miss on
 * the cached block backfills the cache.
 */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	if (block == -1) {
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}
/* outside interface: write a sector */
/*
 * Write one 512-byte sector into the one-block cache, flushing the
 * cache first if it holds a different block.  The flush timer is
 * stopped for the duration and re-armed before returning.
 */
static int sm_write(struct mtd_blktrans_dev *dev,
				unsigned long sec_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, zone_num, block, boffset;

	BUG_ON(ftl->readonly);
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need in flush thread running now */
	del_timer(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* If entry is not in cache, flush it */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);
		if (error)
			goto unlock;

		ftl->cache_block = block;
		ftl->cache_zone = zone_num;
	}

	sm_cache_put(ftl, buf, boffset);
unlock:
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
	return error;
}
/* outside interface: write out any dirty cached data immediately */
static int sm_flush(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int ret;

	mutex_lock(&ftl->mutex);
	ret = sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);

	return ret;
}
/* outside interface: device is released */
static void sm_release(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;

	/* Kill the timer first so it cannot re-queue the work cancelled next. */
	del_timer_sync(&ftl->timer);
	cancel_work_sync(&ftl->flush_work);
	/* Final flush of any dirty cached data under the FTL mutex. */
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}
/* outside interface: report CHS geometry computed at probe time */
static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct sm_ftl *ftl = dev->priv;

	geo->cylinders = ftl->cylinders;
	geo->heads = ftl->heads;
	geo->sectors = ftl->sectors;

	return 0;
}
/* external interface: main initialization function */
static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *trans;
	struct sm_ftl *ftl;

	/* Allocate & initialize our private structure */
	ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
	if (!ftl)
		goto error1;

	mutex_init(&ftl->mutex);
	timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
	INIT_WORK(&ftl->flush_work, sm_cache_flush_work);

	/* Read media information */
	if (sm_get_media_info(ftl, mtd)) {
		dbg("found unsupported mtd device, aborting");
		goto error2;
	}

	/* Allocate temporary CIS buffer for read retry support */
	ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
	if (!ftl->cis_buffer)
		goto error2;

	/* Allocate zone array, it will be initialized on demand */
	ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
			     GFP_KERNEL);
	if (!ftl->zones)
		goto error3;

	/* Allocate the cache*/
	ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
	if (!ftl->cache_data)
		goto error4;

	sm_cache_init(ftl);

	/* Allocate upper layer structure and initialize it */
	trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!trans)
		goto error5;

	ftl->trans = trans;
	trans->priv = ftl;

	trans->tr = tr;
	trans->mtd = mtd;
	trans->devnum = -1;
	/* Total usable size expressed in 512-byte sectors, hence >> 9. */
	trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
	trans->readonly = ftl->readonly;

	if (sm_find_cis(ftl)) {
		dbg("CIS not found on mtd device, aborting");
		goto error6;
	}

	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
	if (!ftl->disk_attributes)
		goto error6;
	trans->disk_attributes = ftl->disk_attributes;

	sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
		  (int)(mtd->size / (1024 * 1024)), mtd->index);

	dbg("FTL layout:");
	dbg("%d zone(s), each consists of %d blocks (+%d spares)",
	    ftl->zone_count, ftl->max_lba,
	    ftl->zone_size - ftl->max_lba);
	dbg("each block consists of %d bytes",
	    ftl->block_size);

	/* Register device*/
	if (add_mtd_blktrans_dev(trans)) {
		dbg("error in mtdblktrans layer");
		goto error6;
	}
	return;
	/* Error unwind in reverse order of allocation. */
error6:
	kfree(trans);
error5:
	kfree(ftl->cache_data);
error4:
	kfree(ftl->zones);
error3:
	kfree(ftl->cis_buffer);
error2:
	kfree(ftl);
error1:
	return;
}
/* main interface: device {surprise,} removal */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int i;

	del_mtd_blktrans_dev(dev);
	ftl->trans = NULL;

	/* Per-zone tables exist only for zones that were lazily initialized. */
	for (i = 0 ; i < ftl->zone_count; i++) {

		if (!ftl->zones[i].initialized)
			continue;

		kfree(ftl->zones[i].lba_to_phys_table);
		kfifo_free(&ftl->zones[i].free_sectors);
	}

	sm_delete_sysfs_attributes(ftl);
	kfree(ftl->cis_buffer);
	kfree(ftl->zones);
	kfree(ftl->cache_data);
	kfree(ftl);
}
/* mtd block-translation hooks: exposes the FTL as "smblk" block devices. */
static struct mtd_blktrans_ops sm_ftl_ops = {
	.name		= "smblk",
	.major		= 0,			/* dynamic major */
	.part_bits	= SM_FTL_PARTN_BITS,
	.blksize	= SM_SECTOR_SIZE,
	.getgeo		= sm_getgeo,

	.add_mtd	= sm_add_mtd,
	.remove_dev	= sm_remove_dev,

	.readsect	= sm_read,
	.writesect	= sm_write,

	.flush		= sm_flush,
	.release	= sm_release,

	.owner		= THIS_MODULE,
};
/* Module entry: create the flush workqueue, then register with mtd_blktrans. */
static __init int sm_module_init(void)
{
	int error;

	cache_flush_workqueue = create_freezable_workqueue("smflush");
	if (!cache_flush_workqueue)
		return -ENOMEM;

	error = register_mtd_blktrans(&sm_ftl_ops);
	if (error)
		destroy_workqueue(cache_flush_workqueue);

	return error;
}
static void __exit sm_module_exit(void)
{
	/*
	 * NOTE(review): the workqueue is destroyed before the translation
	 * layer is deregistered. Deregistration calls sm_release(), which
	 * itself cancels timers/work, but confirm no timer can still fire
	 * and queue onto the already-destroyed workqueue in this window.
	 */
	destroy_workqueue(cache_flush_workqueue);
	deregister_mtd_blktrans(&sm_ftl_ops);
}
/* Module registration and metadata. */
module_init(sm_module_init);
module_exit(sm_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <[email protected]>");
MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Driver for the NXP SAA7164 PCIe bridge
*
* Copyright (c) 2010-2015 Steven Toth <[email protected]>
*/
/* TODO: Cleanup and shorten the namespace */
/* Some structures are passed directly to/from the firmware and
 * have strict alignment requirements. This is one of them.
 */
/* Top-level hardware descriptor reported by the SAA7164 firmware. */
struct tmComResHWDescr {
	u8 bLength;
	u8 bDescriptorType;
	u8 bDescriptorSubtype;
	u16 bcdSpecVersion;
	u32 dwClockFrequency;
	u32 dwClockUpdateRes;
	u8 bCapabilities;
	u32 dwDeviceRegistersLocation;
	u32 dwHostMemoryRegion;
	u32 dwHostMemoryRegionSize;
	u32 dwHostHibernatMemRegion;
	u32 dwHostHibernatMemRegionSize;
} __attribute__((packed));
/* This is DWORD aligned on windows but I can't find the right
 * gcc syntax to match the binary data from the device.
 * I've manually padded with Reserved[3] bytes to match the hardware,
 * but this could break if GCC decides to pack in a different way.
 */
struct tmComResInterfaceDescr {
	u8 bLength;
	u8 bDescriptorType;
	u8 bDescriptorSubtype;
	u8 bFlags;
	u8 bInterfaceType;
	u8 bInterfaceId;
	u8 bBaseInterface;
	u8 bInterruptId;
	u8 bDebugInterruptId;
	u8 BARLocation;
	u8 Reserved[3];
};

/* Command/response ring locations and their read/write cursors. */
struct tmComResBusDescr {
	u64 CommandRing;
	u64 ResponseRing;
	u32 CommandWrite;
	u32 CommandRead;
	u32 ResponseWrite;
	u32 ResponseRead;
};
/* Transport used to reach the device. */
enum tmBusType {
	NONE = 0,
	TYPE_BUS_PCI = 1,
	TYPE_BUS_PCIe = 2,
	TYPE_BUS_USB = 3,
	TYPE_BUS_I2C = 4
};

/* Host-side bookkeeping for the command ("Set") and response ("Get") rings. */
struct tmComResBusInfo {
	enum tmBusType Type;
	u16 m_wMaxReqSize;
	u8 __iomem *m_pdwSetRing;	/* command ring base (MMIO) */
	u32 m_dwSizeSetRing;
	u8 __iomem *m_pdwGetRing;	/* response ring base (MMIO) */
	u32 m_dwSizeGetRing;
	u32 m_dwSetWritePos;
	u32 m_dwSetReadPos;
	u32 m_dwGetWritePos;
	u32 m_dwGetReadPos;

	/* All access is protected */
	struct mutex lock;
};

/* On-the-wire message header exchanged with the firmware. */
struct tmComResInfo {
	u8 id;
	u8 flags;
	u16 size;
	u32 command;
	u16 controlselector;
	u8 seqno;
} __attribute__((packed));

/* UVC-style control request codes. */
enum tmComResCmd {
	SET_CUR = 0x01,
	GET_CUR = 0x81,
	GET_MIN = 0x82,
	GET_MAX = 0x83,
	GET_RES = 0x84,
	GET_LEN = 0x85,
	GET_INFO = 0x86,
	GET_DEF = 0x87
};

/* Per-sequence-number slot used to wait for a firmware response. */
struct cmd {
	u8 seqno;
	u32 inuse;
	u32 timeout;
	u32 signalled;
	struct mutex lock;
	wait_queue_head_t wait;
};

/* A raw firmware descriptor blob together with its path id and size. */
struct tmDescriptor {
	u32 pathid;
	u32 size;
	void *descriptor;
};
/* Common 4-byte header shared by unit/terminal descriptors. */
struct tmComResDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 unitid;
} __attribute__((packed));

/* Extended device descriptor: device identity plus GPIO capabilities. */
struct tmComResExtDevDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 unitid;
	u32 devicetype;
	u16 deviceid;
	u32 numgpiopins;
	u8 numgpiogroups;
	u8 controlsize;
} __attribute__((packed));

/* GPIO pin number / level pair. */
struct tmComResGPIO {
	u32 pin;
	u8 state;
} __attribute__((packed));

/* Path descriptor header. */
struct tmComResPathDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 pathid;
} __attribute__((packed));
/* terminaltype */
enum tmComResTermType {
	ITT_ANTENNA = 0x0203,
	LINE_CONNECTOR = 0x0603,
	SPDIF_CONNECTOR = 0x0605,
	COMPOSITE_CONNECTOR = 0x0401,
	SVIDEO_CONNECTOR = 0x0402,
	COMPONENT_CONNECTOR = 0x0403,
	STANDARD_DMA = 0xF101
};

/* Antenna (input) terminal descriptor. */
struct tmComResAntTermDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 terminalid;
	u16 terminaltype;
	u8 assocterminal;
	u8 iterminal;
	u8 controlsize;
} __attribute__((packed));

/* Tuner unit descriptor. */
struct tmComResTunerDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 unitid;
	u8 sourceid;
	u8 iunit;
	u32 tuningstandards;
	u8 controlsize;
	u32 controls;
} __attribute__((packed));

enum tmBufferFlag {
	/* the buffer does not contain any valid data */
	TM_BUFFER_FLAG_EMPTY,

	/* the buffer is filled with valid data */
	TM_BUFFER_FLAG_DONE,

	/* the buffer is the dummy buffer - TODO??? */
	TM_BUFFER_FLAG_DUMMY_BUFFER
};
/* DMA buffer bookkeeping: page table, state flag and statistics. */
struct tmBuffer {
	u64 *pagetablevirt;
	u64 pagetablephys;
	u16 offset;
	u8 *context;
	u64 timestamp;
	enum tmBufferFlag BufferFlag;
	u32 lostbuffers;
	u32 validbuffers;
	u64 *dummypagevirt;
	u64 dummypagephys;
	u64 *addressvirt;
};

/* Hardware stream geometry and the DMA page-table list describing it. */
struct tmHWStreamParameters {
	u32 bitspersample;
	u32 samplesperline;
	u32 numberoflines;
	u32 pitch;
	u32 linethreshold;
	u64 **pagetablelistvirt;
	u64 *pagetablelistphys;
	u32 numpagetables;
	u32 numpagetableentries;
};

struct tmStreamParameters {
	struct tmHWStreamParameters HWStreamParameters;
	u64 qwDummyPageTablePhys;
	u64 *pDummyPageTableVirt;
};

/* DMA terminal descriptor. */
struct tmComResDMATermDescrHeader {
	u8 len;
	u8 type;
	u8 subtyle;	/* NOTE(review): presumably a typo for "subtype";
			 * left unchanged because code elsewhere may
			 * reference this field name */
	u8 unitid;
	u16 terminaltype;
	u8 assocterminal;
	u8 sourceid;
	u8 iterminal;
	u32 BARLocation;
	u8 flags;
	u8 interruptid;
	u8 buffercount;
	u8 metadatasize;
	u8 numformats;
	u8 controlsize;
} __attribute__((packed));
/*
 *
 * Description:
 *  This is the transport stream format header.
 *
 * Settings:
 *  bLength - The size of this descriptor in bytes.
 *  bDescriptorType - CS_INTERFACE.
 *  bDescriptorSubtype - VS_FORMAT_MPEG2TS descriptor subtype.
 *  bFormatIndex - A non-zero constant that uniquely identifies the
 *   format.
 *  bDataOffset - Offset to TSP packet within MPEG-2 TS transport
 *   stride, in bytes.
 *  bPacketLength - Length of TSP packet, in bytes (typically 188).
 *  bStrideLength - Length of MPEG-2 TS transport stride.
 *  guidStrideFormat - A Globally Unique Identifier indicating the
 *   format of the stride data (if any). Set to zeros
 *   if there is no Stride Data, or if the Stride
 *   Data is to be ignored by the application.
 *
 */
struct tmComResTSFormatDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 bFormatIndex;
	u8 bDataOffset;
	u8 bPacketLength;
	u8 bStrideLength;
	u8 guidStrideFormat[16];
} __attribute__((packed));

/* Encoder related structures */

/* A/V Mux Selector */
struct tmComResSelDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 unitid;
	u8 nrinpins;
	u8 sourceid;
} __attribute__((packed));

/* A/V Audio processor definitions */
struct tmComResProcDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 unitid;
	u8 sourceid;
	u16 wreserved;
	u8 controlsize;
} __attribute__((packed));

/* Video bitrate control message */
#define EU_VIDEO_BIT_RATE_MODE_CONSTANT (0)
#define EU_VIDEO_BIT_RATE_MODE_VARIABLE_AVERAGE (1)
#define EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK (2)
struct tmComResEncVideoBitRate {
	u8 ucVideoBitRateMode;
	u32 dwVideoBitRate;
	u32 dwVideoBitRatePeak;
} __attribute__((packed));

/* Video Encoder Aspect Ratio message */
struct tmComResEncVideoInputAspectRatio {
	u8 width;
	u8 height;
} __attribute__((packed));

/* Video Encoder GOP IBP message */
/* 1. IPPPPPPPPPPPPPP */
/* 2. IBPBPBPBPBPBPBP */
/* 3. IBBPBBPBBPBBP */
#define SAA7164_ENCODER_DEFAULT_GOP_DIST (1)
#define SAA7164_ENCODER_DEFAULT_GOP_SIZE (15)
struct tmComResEncVideoGopStructure {
	u8 ucGOPSize;	/* GOP Size 12, 15 */
	u8 ucRefFrameDist; /* Reference Frame Distance */
} __attribute__((packed));
/* Encoder processor definition */
struct tmComResEncoderDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 unitid;
	u8 vsourceid;
	u8 asourceid;
	u8 iunit;
	u32 dwmControlCap;
	u32 dwmProfileCap;
	u32 dwmVidFormatCap;
	u8 bmVidBitrateCap;
	u16 wmVidResolutionsCap;
	u16 wmVidFrmRateCap;
	u32 dwmAudFormatCap;
	u8 bmAudBitrateCap;
} __attribute__((packed));

/* Audio processor definition */
struct tmComResAFeatureDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 unitid;
	u8 sourceid;
	u8 controlsize;
} __attribute__((packed));

/* Audio control messages */
struct tmComResAudioDefaults {
	u8 ucDecoderLevel;
	u8 ucDecoderFM_Level;
	u8 ucMonoLevel;
	u8 ucNICAM_Level;
	u8 ucSAP_Level;
	u8 ucADC_Level;
} __attribute__((packed));

/* Audio bitrate control message */
struct tmComResEncAudioBitRate {
	u8 ucAudioBitRateMode;
	u32 dwAudioBitRate;
	u32 dwAudioBitRatePeak;
} __attribute__((packed));

/* Tuner / AV Decoder messages */
struct tmComResTunerStandard {
	u8 std;
	u32 country;
} __attribute__((packed));

struct tmComResTunerStandardAuto {
	u8 mode;
} __attribute__((packed));

/* EEPROM definition for PS stream types */
struct tmComResPSFormatDescrHeader {
	u8 len;
	u8 type;
	u8 subtype;
	u8 bFormatIndex;
	u16 wPacketLength;
	u16 wPackLength;
	u8 bPackDataType;
} __attribute__((packed));

/* VBI control structure */
struct tmComResVBIFormatDescrHeader {
	u8 len;
	u8 type;
	u8 subtype; /* VS_FORMAT_VBI */
	u8 bFormatIndex;
	u32 VideoStandard; /* See KS_AnalogVideoStandard, NTSC = 1 */
	u8 StartLine; /* NTSC Start = 10 */
	u8 EndLine; /* NTSC = 21 */
	u8 FieldRate; /* 60 for NTSC */
	u8 bNumLines; /* Unused - scheduled for removal */
} __attribute__((packed));

/* Streaming probe/commit negotiation payload. */
struct tmComResProbeCommit {
	u16 bmHint;
	u8 bFormatIndex;
	u8 bFrameIndex;
} __attribute__((packed));

/* Firmware debug-level control. */
struct tmComResDebugSetLevel {
	u32 dwDebugLevel;
} __attribute__((packed));

/* Firmware debug data readback. */
struct tmComResDebugGetData {
	u32 dwResult;
	u8 ucDebugData[256];
} __attribute__((packed));

/* Firmware status / statistics snapshot. */
struct tmFwInfoStruct {
	u32 status;
	u32 mode;
	u32 devicespec;
	u32 deviceinst;
	u32 CPULoad;
	u32 RemainHeap;
	u32 CPUClock;
	u32 RAMSpeed;
} __attribute__((packed));
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common Device Tree Source for IGEPv2
*
* Copyright (C) 2014 Javier Martinez Canillas <[email protected]>
* Copyright (C) 2014 Enric Balletbo i Serra <[email protected]>
*/
#include "omap3-igep.dtsi"
#include "omap-gpmc-smsc9221.dtsi"
/ {
	leds {
		pinctrl-names = "default";
		pinctrl-0 = <&leds_pins>;
		compatible = "gpio-leds";

		boot {
			label = "omap3:green:boot";
			gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
			default-state = "on";
		};

		user0 {
			label = "omap3:red:user0";
			gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
			default-state = "off";
		};

		user1 {
			label = "omap3:red:user1";
			gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
			default-state = "off";
		};

		/*
		 * NOTE(review): node "user2" carries the label
		 * "omap3:green:user1" — confirm the label is intended.
		 */
		user2 {
			label = "omap3:green:user1";
			gpios = <&twl_gpio 19 GPIO_ACTIVE_LOW>;
		};
	};

	/* HS USB Port 1 Power */
	hsusb1_power: hsusb1_power_reg {
		compatible = "regulator-fixed";
		regulator-name = "hsusb1_vbus";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&twl_gpio 18 GPIO_ACTIVE_LOW>;	/* GPIO LEDA */
		startup-delay-us = <70000>;
	};

	/* HS USB Host PHY on PORT 1 */
	hsusb1_phy: hsusb1_phy {
		compatible = "usb-nop-xceiv";
		reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; /* gpio_24 */
		vcc-supply = <&hsusb1_power>;
		#phy-cells = <0>;
	};

	/* DVI framer: DPI video in from the DSS, out to the DVI connector. */
	tfp410: encoder {
		compatible = "ti,tfp410";
		powerdown-gpios = <&gpio6 10 GPIO_ACTIVE_LOW>; /* gpio_170 */

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			port@0 {
				reg = <0>;

				tfp410_in: endpoint {
					remote-endpoint = <&dpi_out>;
				};
			};

			port@1 {
				reg = <1>;

				tfp410_out: endpoint {
					remote-endpoint = <&dvi_connector_in>;
				};
			};
		};
	};

	dvi0: connector {
		compatible = "dvi-connector";
		label = "dvi";

		digital;
		/* EDID is read over i2c3 (see the eeprom@50 node). */
		ddc-i2c-bus = <&i2c3>;

		port {
			dvi_connector_in: endpoint {
				remote-endpoint = <&tfp410_out>;
			};
		};
	};
};
/* CORE pad multiplexing: DSS parallel video, UART2 and the ethernet IRQ pin. */
&omap3_pmx_core {
	pinctrl-names = "default";
	pinctrl-0 = <
		&tfp410_pins
		&dss_dpi_pins
	>;

	tfp410_pins: tfp410-pins {
		pinctrl-single,pins = <
			OMAP3_CORE1_IOPAD(0x21c6, PIN_OUTPUT | MUX_MODE4)   /* hdq_sio.gpio_170 */
		>;
	};

	dss_dpi_pins: dss-dpi-pins {
		pinctrl-single,pins = <
			OMAP3_CORE1_IOPAD(0x20d4, PIN_OUTPUT | MUX_MODE0)   /* dss_pclk.dss_pclk */
			OMAP3_CORE1_IOPAD(0x20d6, PIN_OUTPUT | MUX_MODE0)   /* dss_hsync.dss_hsync */
			OMAP3_CORE1_IOPAD(0x20d8, PIN_OUTPUT | MUX_MODE0)   /* dss_vsync.dss_vsync */
			OMAP3_CORE1_IOPAD(0x20da, PIN_OUTPUT | MUX_MODE0)   /* dss_acbias.dss_acbias */
			OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE0)   /* dss_data0.dss_data0 */
			OMAP3_CORE1_IOPAD(0x20de, PIN_OUTPUT | MUX_MODE0)   /* dss_data1.dss_data1 */
			OMAP3_CORE1_IOPAD(0x20e0, PIN_OUTPUT | MUX_MODE0)   /* dss_data2.dss_data2 */
			OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE0)   /* dss_data3.dss_data3 */
			OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE0)   /* dss_data4.dss_data4 */
			OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE0)   /* dss_data5.dss_data5 */
			OMAP3_CORE1_IOPAD(0x20e8, PIN_OUTPUT | MUX_MODE0)   /* dss_data6.dss_data6 */
			OMAP3_CORE1_IOPAD(0x20ea, PIN_OUTPUT | MUX_MODE0)   /* dss_data7.dss_data7 */
			OMAP3_CORE1_IOPAD(0x20ec, PIN_OUTPUT | MUX_MODE0)   /* dss_data8.dss_data8 */
			OMAP3_CORE1_IOPAD(0x20ee, PIN_OUTPUT | MUX_MODE0)   /* dss_data9.dss_data9 */
			OMAP3_CORE1_IOPAD(0x20f0, PIN_OUTPUT | MUX_MODE0)   /* dss_data10.dss_data10 */
			OMAP3_CORE1_IOPAD(0x20f2, PIN_OUTPUT | MUX_MODE0)   /* dss_data11.dss_data11 */
			OMAP3_CORE1_IOPAD(0x20f4, PIN_OUTPUT | MUX_MODE0)   /* dss_data12.dss_data12 */
			OMAP3_CORE1_IOPAD(0x20f6, PIN_OUTPUT | MUX_MODE0)   /* dss_data13.dss_data13 */
			OMAP3_CORE1_IOPAD(0x20f8, PIN_OUTPUT | MUX_MODE0)   /* dss_data14.dss_data14 */
			OMAP3_CORE1_IOPAD(0x20fa, PIN_OUTPUT | MUX_MODE0)   /* dss_data15.dss_data15 */
			OMAP3_CORE1_IOPAD(0x20fc, PIN_OUTPUT | MUX_MODE0)   /* dss_data16.dss_data16 */
			OMAP3_CORE1_IOPAD(0x20fe, PIN_OUTPUT | MUX_MODE0)   /* dss_data17.dss_data17 */
			OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE0)   /* dss_data18.dss_data18 */
			OMAP3_CORE1_IOPAD(0x2102, PIN_OUTPUT | MUX_MODE0)   /* dss_data19.dss_data19 */
			OMAP3_CORE1_IOPAD(0x2104, PIN_OUTPUT | MUX_MODE0)   /* dss_data20.dss_data20 */
			OMAP3_CORE1_IOPAD(0x2106, PIN_OUTPUT | MUX_MODE0)   /* dss_data21.dss_data21 */
			OMAP3_CORE1_IOPAD(0x2108, PIN_OUTPUT | MUX_MODE0)   /* dss_data22.dss_data22 */
			OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE0)   /* dss_data23.dss_data23 */
		>;
	};

	uart2_pins: uart2-pins {
		pinctrl-single,pins = <
			OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT | MUX_MODE0)    /* uart2_cts.uart2_cts */
			OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0)   /* uart2_rts .uart2_rts*/
			OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0)   /* uart2_tx.uart2_tx */
			OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0)    /* uart2_rx.uart2_rx */
		>;
	};

	smsc9221_pins: smsc9221-pins {
		pinctrl-single,pins = <
			OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT | MUX_MODE4)    /* mcspi1_cs2.gpio_176 */
		>;
	};
};
/* CORE2 (ETK) pad multiplexing: USB host ULPI, LED and MMC write-protect pins. */
&omap3_pmx_core2 {
	pinctrl-names = "default";
	pinctrl-0 = <
		&hsusbb1_pins
	>;

	hsusbb1_pins: hsusbb1-pins {
		pinctrl-single,pins = <
			OMAP3630_CORE2_IOPAD(0x25da, PIN_OUTPUT | MUX_MODE3)		/* etk_ctl.hsusb1_clk */
			OMAP3630_CORE2_IOPAD(0x25d8, PIN_OUTPUT | MUX_MODE3)		/* etk_clk.hsusb1_stp */
			OMAP3630_CORE2_IOPAD(0x25ec, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d8.hsusb1_dir */
			OMAP3630_CORE2_IOPAD(0x25ee, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d9.hsusb1_nxt */
			OMAP3630_CORE2_IOPAD(0x25dc, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d0.hsusb1_data0 */
			OMAP3630_CORE2_IOPAD(0x25de, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d1.hsusb1_data1 */
			OMAP3630_CORE2_IOPAD(0x25e0, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d2.hsusb1_data2 */
			/*
			 * NOTE(review): etk_d3 is labelled data7 and etk_d7
			 * data3 — presumably deliberate line swapping on the
			 * board; confirm against the schematics.
			 */
			OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d3.hsusb1_data7 */
			OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d4.hsusb1_data4 */
			OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d5.hsusb1_data5 */
			OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d6.hsusb1_data6 */
			OMAP3630_CORE2_IOPAD(0x25ea, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d7.hsusb1_data3 */
		>;
	};

	leds_pins: leds-pins {
		pinctrl-single,pins = <
			OMAP3630_CORE2_IOPAD(0x25f4, PIN_OUTPUT | MUX_MODE4)	/* etk_d12.gpio_26 */
			OMAP3630_CORE2_IOPAD(0x25f6, PIN_OUTPUT | MUX_MODE4)	/* etk_d13.gpio_27 */
			OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4)	/* etk_d14.gpio_28 */
		>;
	};

	/*
	 * NOTE(review): the label says write-protect while the node name says
	 * "cd" (card detect); the pin feeds wp-gpios in &mmc1, so the node
	 * name looks stale.
	 */
	mmc1_wp_pins: mmc1-cd-pins {
		pinctrl-single,pins = <
			OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4)	/* etk_d15.gpio_29 */
		>;
	};
};
&i2c3 {
	clock-frequency = <100000>;

	/*
	 * Display monitor features are burnt in the EEPROM
	 * as EDID data.
	 *
	 * NOTE(review): "ti,eeprom" is not a standard at24-style compatible —
	 * verify this matches a binding the kernel actually probes.
	 */
	eeprom@50 {
		compatible = "ti,eeprom";
		reg = <0x50>;
	};
};
&gpmc {
	ranges = <0 0 0x30000000 0x01000000>,	/* CS0: 16MB for NAND */
		 <5 0 0x2c000000 0x01000000>;	/* CS5: 16MB for ethernet */

	/*
	 * SMSC9221 LAN controller on GPMC CS5; base node comes from
	 * omap-gpmc-smsc9221.dtsi. NOTE(review): unit-address "gpmc" is not
	 * numeric as the DT spec expects — kept to match the included stub.
	 */
	ethernet@gpmc {
		pinctrl-names = "default";
		pinctrl-0 = <&smsc9221_pins>;
		reg = <5 0 0xff>;
		interrupt-parent = <&gpio6>;
		interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
	};
};
&uart2 {
	pinctrl-names = "default";
	pinctrl-0 = <&uart2_pins>;
};

&usbhshost {
	port1-mode = "ehci-phy";
};

&usbhsehci {
	phys = <&hsusb1_phy>;
};

&vpll2 {
	/* Needed for DSS */
	regulator-name = "vdds_dsi";
};

&dss {
	status = "okay";

	port {
		dpi_out: endpoint {
			remote-endpoint = <&tfp410_in>;
			data-lines = <24>;
		};
	};
};

&mmc1 {
	pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
	wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;	/* gpio_29 */
};
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015 Verifone Int.
*
* Author: Nicolas Saenz Julienne <[email protected]>
*
* This driver is based on the gpio-tps65912 implementation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65218.h>
/* Per-device state: handle to the MFD parent plus the registered gpio_chip. */
struct tps65218_gpio {
	struct tps65218 *tps65218;
	struct gpio_chip gpio_chip;
};
/* Read back the current level of GPO1/2/3 from the ENABLE2 register. */
static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
{
	struct tps65218_gpio *gpio = gpiochip_get_data(gc);
	unsigned int reg;
	int ret;

	ret = regmap_read(gpio->tps65218->regmap, TPS65218_REG_ENABLE2, &reg);
	if (ret)
		return ret;

	return !!(reg & (TPS65218_ENABLE2_GPIO1 << offset));
}
/* Drive GPO1/2/3 by setting or clearing its bit in ENABLE2. */
static void tps65218_gpio_set(struct gpio_chip *gc, unsigned offset,
			      int value)
{
	struct tps65218_gpio *gpio = gpiochip_get_data(gc);
	unsigned int mask = TPS65218_ENABLE2_GPIO1 << offset;

	if (!value)
		tps65218_clear_bits(gpio->tps65218, TPS65218_REG_ENABLE2,
				    mask, TPS65218_PROTECT_L1);
	else
		tps65218_set_bits(gpio->tps65218, TPS65218_REG_ENABLE2,
				  mask, mask, TPS65218_PROTECT_L1);
}
/* These pins are GPOs (output only): just latch the requested level. */
static int tps65218_gpio_output(struct gpio_chip *gc, unsigned offset,
				int value)
{
	tps65218_gpio_set(gc, offset, value);

	return 0;
}
/* Output-only hardware: switching to input is never allowed. */
static int tps65218_gpio_input(struct gpio_chip *gc, unsigned offset)
{
	return -EPERM;
}
/*
 * Claim a GPO line: validate the requested drive mode against the hardware
 * and take the pin away from the power sequencer / alternate function.
 */
static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
{
	struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
	struct tps65218 *tps65218 = tps65218_gpio->tps65218;
	int ret;

	/* None of the GPOs supports open-source drive. */
	if (gpiochip_line_is_open_source(gc, offset)) {
		dev_err(gc->parent, "can't work as open source\n");
		return -EINVAL;
	}

	switch (offset) {
	case 0:
		/* GPO1 can only be used open drain. */
		if (!gpiochip_line_is_open_drain(gc, offset)) {
			dev_err(gc->parent, "GPO1 works only as open drain\n");
			return -EINVAL;
		}

		/* Disable sequencer for GPO1 */
		ret = tps65218_clear_bits(tps65218, TPS65218_REG_SEQ7,
					  TPS65218_SEQ7_GPO1_SEQ_MASK,
					  TPS65218_PROTECT_L1);
		if (ret)
			return ret;

		/* Setup GPO1 */
		ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG1,
					  TPS65218_CONFIG1_IO1_SEL,
					  TPS65218_PROTECT_L1);
		if (ret)
			return ret;

		break;
	case 1:
		/*
		 * Setup GPO2.
		 * NOTE(review): this clears the same CONFIG1 IO1_SEL bit as
		 * the GPO1 path above — confirm against the TPS65218
		 * datasheet that IO1_SEL is the intended field for GPO2.
		 */
		ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG1,
					  TPS65218_CONFIG1_IO1_SEL,
					  TPS65218_PROTECT_L1);
		if (ret)
			return ret;

		break;

	case 2:
		/* GPO3 can only be used open drain. */
		if (!gpiochip_line_is_open_drain(gc, offset)) {
			dev_err(gc->parent, "GPO3 works only as open drain\n");
			return -EINVAL;
		}

		/* Disable sequencer for GPO3 */
		ret = tps65218_clear_bits(tps65218, TPS65218_REG_SEQ7,
					  TPS65218_SEQ7_GPO3_SEQ_MASK,
					  TPS65218_PROTECT_L1);
		if (ret)
			return ret;

		/* Setup GPO3 */
		ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG2,
					  TPS65218_CONFIG2_DC12_RST,
					  TPS65218_PROTECT_L1);
		if (ret)
			return ret;

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Apply pinconf-style drive configuration. Only open-drain/push-pull are
 * meaningful here; anything else is rejected with -ENOTSUPP.
 */
static int tps65218_gpio_set_config(struct gpio_chip *gc, unsigned offset,
				    unsigned long config)
{
	struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
	struct tps65218 *tps65218 = tps65218_gpio->tps65218;
	enum pin_config_param param = pinconf_to_config_param(config);

	switch (offset) {
	case 0:
	case 2:
		/* GPO1 and GPO3 are hardwired to be open drain */
		if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
			return 0;
		return -ENOTSUPP;
	case 1:
		/* GPO2 is push-pull by default, can be set as open drain. */
		if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
			return tps65218_clear_bits(tps65218,
						   TPS65218_REG_CONFIG1,
						   TPS65218_CONFIG1_GPO2_BUF,
						   TPS65218_PROTECT_L1);
		if (param == PIN_CONFIG_DRIVE_PUSH_PULL)
			return tps65218_set_bits(tps65218,
						 TPS65218_REG_CONFIG1,
						 TPS65218_CONFIG1_GPO2_BUF,
						 TPS65218_CONFIG1_GPO2_BUF,
						 TPS65218_PROTECT_L1);
		return -ENOTSUPP;
	default:
		break;
	}

	return -ENOTSUPP;
}
/* Base gpio_chip; copied per device in probe, which fills in .parent. */
static const struct gpio_chip template_chip = {
	.label			= "gpio-tps65218",
	.owner			= THIS_MODULE,
	.request		= tps65218_gpio_request,
	.direction_output	= tps65218_gpio_output,
	.direction_input	= tps65218_gpio_input,
	.get			= tps65218_gpio_get,
	.set			= tps65218_gpio_set,
	.set_config		= tps65218_gpio_set_config,
	.can_sleep		= true,		/* accessors may sleep (regmap I/O) */
	.ngpio			= 3,		/* GPO1..GPO3 */
	.base			= -1,		/* dynamic GPIO number allocation */
};
/* Bind to the tps65218 MFD cell and register the 3-line GPO chip. */
static int tps65218_gpio_probe(struct platform_device *pdev)
{
	struct tps65218_gpio *gpio;

	gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
	if (!gpio)
		return -ENOMEM;

	gpio->tps65218 = dev_get_drvdata(pdev->dev.parent);
	gpio->gpio_chip = template_chip;
	gpio->gpio_chip.parent = &pdev->dev;

	return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip, gpio);
}
/* Device-tree match table. */
static const struct of_device_id tps65218_dt_match[] = {
	{ .compatible = "ti,tps65218-gpio" },
	{ }
};
MODULE_DEVICE_TABLE(of, tps65218_dt_match);

/* Platform-device id table for non-DT instantiation by the MFD core. */
static const struct platform_device_id tps65218_gpio_id_table[] = {
	{ "tps65218-gpio", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, tps65218_gpio_id_table);
/* Platform driver glue; removal is handled by devm resources. */
static struct platform_driver tps65218_gpio_driver = {
	.driver = {
		.name = "tps65218-gpio",
		.of_match_table = tps65218_dt_match,
	},
	.probe = tps65218_gpio_probe,
	.id_table = tps65218_gpio_id_table,
};

module_platform_driver(tps65218_gpio_driver);

MODULE_AUTHOR("Nicolas Saenz Julienne <[email protected]>");
MODULE_DESCRIPTION("GPO interface for TPS65218 PMICs");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0
/*
* Xen mmu operations
*
* This file contains the various mmu fetch and update operations.
* The most important job they must perform is the mapping between the
* domain's pfn and the overall machine mfns.
*
* Xen allows guests to directly update the pagetable, in a controlled
* fashion. In other words, the guest modifies the same pagetable
* that the CPU actually uses, which eliminates the overhead of having
* a separate shadow pagetable.
*
* In order to allow this, it falls on the guest domain to map its
* notion of a "physical" pfn - which is just a domain-local linear
* address - into a real "machine address" which the CPU's MMU can
* use.
*
* A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
* inserted directly into the pagetable. When creating a new
* pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
* when reading the content back with __(pgd|pmd|pte)_val, it converts
* the mfn back into a pfn.
*
* The other constraint is that all pages which make up a pagetable
* must be mapped read-only in the guest. This prevents uncontrolled
* guest updates to the pagetable. Xen strictly enforces this, and
* will disallow any pagetable update which will end up mapping a
* pagetable page RW, and will disallow using any writable page as a
* pagetable.
*
* Naively, when loading %cr3 with the base of a new pagetable, Xen
* would need to validate the whole pagetable before going on.
* Naturally, this is quite slow. The solution is to "pin" a
* pagetable, which enforces all the constraints on the pagetable even
* when it is not actively in use. This means that Xen can be assured
* that it is still valid when you do load it into %cr3, and doesn't
* need to revalidate it.
*
* Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
*/
#include <linux/sched/mm.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/pgtable.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif
#include <trace/events/xen.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820/api.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/memtype.h>
#include <asm/smp.h>
#include <asm/tlb.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include "xen-ops.h"
/*
* Prototypes for functions called via PV_CALLEE_SAVE_REGS_THUNK() in order
* to avoid warnings with "-Wmissing-prototypes".
*/
pteval_t xen_pte_val(pte_t pte);
pgdval_t xen_pgd_val(pgd_t pgd);
pmdval_t xen_pmd_val(pmd_t pmd);
pudval_t xen_pud_val(pud_t pud);
p4dval_t xen_p4d_val(p4d_t p4d);
pte_t xen_make_pte(pteval_t pte);
pgd_t xen_make_pgd(pgdval_t pgd);
pmd_t xen_make_pmd(pmdval_t pmd);
pud_t xen_make_pud(pudval_t pud);
p4d_t xen_make_p4d(p4dval_t p4d);
pte_t xen_make_pte_init(pteval_t pte);
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
static DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
static DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */

/* Location and size of the initial page tables (set up during early boot). */
static phys_addr_t xen_pt_base, xen_pt_size __initdata;

/* Flipped on once struct pages exist; consulted by xen_page_pinned(). */
static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
/* Write-protect the PTE backing a lowmem virtual address via Xen. */
void make_lowmem_page_readonly(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte, newpte;

	pte = lookup_address(addr, &level);
	if (!pte)
		return;		/* vaddr missing */

	newpte = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(addr, newpte, 0))
		BUG();
}
/* Make the PTE backing a lowmem virtual address writable again via Xen. */
void make_lowmem_page_readwrite(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte, newpte;

	pte = lookup_address(addr, &level);
	if (!pte)
		return;		/* vaddr missing */

	newpte = pte_mkwrite_novma(*pte);

	if (HYPERVISOR_update_va_mapping(addr, newpte, 0))
		BUG();
}
/*
 * During early boot all page table pages are pinned, but we do not have struct
 * pages, so return true until struct pages are ready.
 */
static bool xen_page_pinned(void *ptr)
{
	if (!static_branch_likely(&xen_struct_pages_ready))
		return true;

	return PagePinned(virt_to_page(ptr));
}
/* Append one mmu_update to the current multicall batch, extending an
 * in-flight __HYPERVISOR_mmu_update entry when possible. */
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		/* Extended an existing entry: just bump its op count. */
		mcs.mc->args[1]++;
	} else {
		/* No extendable entry: start a fresh single-op multicall. */
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}
/* Same batching scheme as xen_extend_mmu_update(), but for mmuext ops. */
static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		/* Extended an existing entry: just bump its op count. */
		mcs.mc->args[1]++;
	} else {
		/* No extendable entry: start a fresh single-op multicall. */
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}
/*
 * Write a pmd entry via a batched mmu_update hypercall.  Needed when
 * the containing page table is pinned (read-only to the guest).
 * Preemption is disabled so the multicall batch is built and issued on
 * one CPU.
 */
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
struct mmu_update u;
preempt_disable();
xen_mc_batch();
/* ptr may be ioremapped for 64-bit pagetable setup */
u.ptr = arbitrary_virt_to_machine(ptr).maddr;
u.val = pmd_val_ma(val);
xen_extend_mmu_update(&u);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
/*
 * pv-ops set_pmd hook.  An unpinned page table page is still writable
 * by the guest, so a plain store suffices; a pinned one must go through
 * the hypervisor.
 */
static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
        trace_xen_mmu_set_pmd(ptr, val);

        if (xen_page_pinned(ptr))
                xen_set_pmd_hyper(ptr, val);
        else
                *ptr = val;     /* unpinned: direct write is fine */
}
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 *
 * @mfn is a machine frame number; the mapping is installed via
 * hypercall and the TLB entry for @vaddr is invalidated (UVMF_INVLPG).
 */
void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags),
UVMF_INVLPG))
BUG();
}
/*
 * Try to queue a pte write into the current lazy-MMU multicall batch.
 * Returns false (having done nothing) when not in lazy MMU mode; the
 * caller must then perform the update itself.
 */
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
struct mmu_update u;
if (xen_get_lazy_mode() != XEN_LAZY_MMU)
return false;
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
u.val = pte_val_ma(pteval);
xen_extend_mmu_update(&u);
xen_mc_issue(XEN_LAZY_MMU);
return true;
}
/*
 * Write a pte, preferring the batched lazy-MMU path and falling back to
 * a synchronous single-entry mmu_update hypercall.
 */
static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
if (!xen_batched_set_pte(ptep, pteval)) {
/*
 * Could call native_set_pte() here and trap and
 * emulate the PTE write, but a hypercall is much cheaper.
 */
struct mmu_update u;
u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
u.val = pte_val_ma(pteval);
HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}
}
/* pv-ops set_pte hook: trace, then defer to __xen_set_pte(). */
static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
trace_xen_mmu_set_pte(ptep, pteval);
__xen_set_pte(ptep, pteval);
}
/*
 * Begin a protection-changing pte update: simply read the current pte.
 * No atomic exchange is needed because the commit below asks Xen to
 * preserve the accessed/dirty bits (MMU_PT_UPDATE_PRESERVE_AD).
 */
static pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
/* Just return the pte as-is. We preserve the bits on commit */
trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
return *ptep;
}
/*
 * Commit the update started by xen_ptep_modify_prot_start(), letting
 * the hypervisor merge in any A/D bits set since the start.
 */
static void xen_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
struct mmu_update u;
trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
u.val = pte_val_ma(pte);
xen_extend_mmu_update(&u);
xen_mc_issue(XEN_LAZY_MMU);
}
/*
 * Translate the machine frame in a pte-style value to a pseudo-physical
 * frame, keeping the flag bits.  (Assumes pteval_t is equivalent to all
 * the other *val_t types.)  An mfn with no backing pfn yields a value
 * with _PAGE_PRESENT cleared.
 */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
        unsigned long mfn, pfn;
        pteval_t flags;

        if (!(val & _PAGE_PRESENT))
                return val;

        mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
        pfn = mfn_to_pfn(mfn);
        flags = val & PTE_FLAGS_MASK;

        if (unlikely(pfn == ~0))
                return flags & ~_PAGE_PRESENT;  /* no pfn: drop present bit */

        return ((pteval_t)pfn << PAGE_SHIFT) | flags;
}
/*
 * Translate the pseudo-physical frame in a pte-style value to a machine
 * frame, keeping the flag bits.
 */
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
        unsigned long pfn, mfn;
        pteval_t flags;

        if (!(val & _PAGE_PRESENT))
                return val;

        pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
        flags = val & PTE_FLAGS_MASK;
        mfn = __pfn_to_mfn(pfn);

        /*
         * If there's no mfn for the pfn, then just create an
         * empty non-present pte.  Unfortunately this loses
         * information about the original pfn, so
         * pte_mfn_to_pfn is asymmetric.
         */
        if (unlikely(mfn == INVALID_P2M_ENTRY)) {
                mfn = 0;
                flags = 0;
        } else {
                mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
        }

        return ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
/* pv-ops accessors: convert entries between pfn and mfn based forms. */
__visible pteval_t xen_pte_val(pte_t pte)
{
        return pte_mfn_to_pfn(pte.pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
        pgdval_t raw = pgd.pgd;

        return pte_mfn_to_pfn(raw);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

__visible pte_t xen_make_pte(pteval_t pte)
{
        return native_make_pte(pte_pfn_to_mfn(pte));
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
        return native_make_pgd(pte_pfn_to_mfn(pgd));
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
        pmdval_t raw = pmd.pmd;

        return pte_mfn_to_pfn(raw);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
/*
 * Write a pud entry via a batched mmu_update hypercall; the pinned-
 * pagetable counterpart of a plain store (see xen_set_pud() below).
 */
static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
struct mmu_update u;
preempt_disable();
xen_mc_batch();
/* ptr may be ioremapped for 64-bit pagetable setup */
u.ptr = arbitrary_virt_to_machine(ptr).maddr;
u.val = pud_val_ma(val);
xen_extend_mmu_update(&u);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
/*
 * pv-ops set_pud hook: direct store for unpinned page tables, hypercall
 * for pinned (read-only) ones.
 */
static void xen_set_pud(pud_t *ptr, pud_t val)
{
        trace_xen_mmu_set_pud(ptr, val);

        if (xen_page_pinned(ptr))
                xen_set_pud_hyper(ptr, val);
        else
                *ptr = val;     /* unpinned: direct write is fine */
}
/* More pv-ops accessors in the same pfn<->mfn pattern as above. */
__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
        return native_make_pmd(pte_pfn_to_mfn(pmd));
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

__visible pudval_t xen_pud_val(pud_t pud)
{
        pudval_t raw = pud.pud;

        return pte_mfn_to_pfn(raw);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
        return native_make_pud(pte_pfn_to_mfn(pud));
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
/*
 * Return the entry in the attached user pagetable that shadows @pgd,
 * or NULL when the entry is above USER_LIMIT or no user pagetable is
 * attached (it is stashed in page->private of the kernel pgd page).
 */
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
        pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
        unsigned int offset = pgd - pgd_page;
        pgd_t *user_ptr;

        if (offset >= pgd_index(USER_LIMIT))
                return NULL;    /* kernel-only portion of the pgd */

        user_ptr = (pgd_t *)virt_to_page(pgd_page)->private;
        if (user_ptr)
                user_ptr += offset;

        return user_ptr;
}
/* Queue a p4d write as an mmu_update in the current multicall batch. */
static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
struct mmu_update u;
u.ptr = virt_to_machine(ptr).maddr;
u.val = p4d_val_ma(val);
xen_extend_mmu_update(&u);
}
/*
 * Raw hypercall-based set_p4d, intended for use in early boot before
 * there's a page structure. This implies:
 * 1. The only existing pagetable is the kernel's
 * 2. It is always pinned
 * 3. It has no user pagetable attached to it
 */
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
preempt_disable();
xen_mc_batch();
__xen_set_p4d_hyper(ptr, val);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
/*
 * pv-ops set_p4d hook.  Also propagates the write to the shadowing
 * entry in the user pagetable, if one is attached.
 */
static void xen_set_p4d(p4d_t *ptr, p4d_t val)
{
pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
pgd_t pgd_val;
trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
*ptr = val;
if (user_ptr) {
WARN_ON(xen_page_pinned(user_ptr));
pgd_val.pgd = p4d_val_ma(val);
*user_ptr = pgd_val;
}
return;
}
/* If it's pinned, then we can at least batch the kernel and
user updates together. */
xen_mc_batch();
__xen_set_p4d_hyper(ptr, val);
if (user_ptr)
__xen_set_p4d_hyper((p4d_t *)user_ptr, val);
xen_mc_issue(XEN_LAZY_MMU);
}
#if CONFIG_PGTABLE_LEVELS >= 5
/* p4d accessors, only needed with 5-level paging. */
__visible p4dval_t xen_p4d_val(p4d_t p4d)
{
        p4dval_t raw = p4d.p4d;

        return pte_mfn_to_pfn(raw);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);

__visible p4d_t xen_make_p4d(p4dval_t p4d)
{
        return native_make_p4d(pte_pfn_to_mfn(p4d));
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
/*
 * Walk one pmd page, invoking @func on the pte page of every present
 * entry.  When @last is set, stop after the entry covering @limit.
 */
static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
bool last, unsigned long limit)
{
        int idx, count;

        count = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
        for (idx = 0; idx < count; idx++) {
                if (pmd_none(pmd[idx]))
                        continue;
                (*func)(mm, pmd_page(pmd[idx]), PT_PTE);
        }
}
/*
 * Walk one pud page, invoking @func on each present pmd page and
 * recursing into it.  @last limits the final sub-walk to @limit.
 */
static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
bool last, unsigned long limit)
{
        int idx, count;

        count = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
        for (idx = 0; idx < count; idx++) {
                pmd_t *pmd_tbl;

                if (pud_none(pud[idx]))
                        continue;

                pmd_tbl = pmd_offset(&pud[idx], 0);
                if (PTRS_PER_PMD > 1)
                        (*func)(mm, virt_to_page(pmd_tbl), PT_PMD);
                xen_pmd_walk(mm, pmd_tbl, func,
                             last && idx == count - 1, limit);
        }
}
/*
 * Walk one p4d entry, invoking @func on its pud page (when pud pages
 * are distinct) and recursing into the pud level.
 */
static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
bool last, unsigned long limit)
{
        pud_t *pud_tbl;

        if (p4d_none(*p4d))
                return;

        pud_tbl = pud_offset(p4d, 0);
        if (PTRS_PER_PUD > 1)
                (*func)(mm, virt_to_page(pud_tbl), PT_PUD);
        xen_pud_walk(mm, pud_tbl, func, last, limit);
}
/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * We must skip the Xen hole in the middle of the address space, just after
 * the big x86-64 virtual hole.
 */
static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
unsigned long limit)
{
int i, nr;
unsigned hole_low = 0, hole_high = 0;
/* The limit is the last byte to be touched */
limit--;
BUG_ON(limit >= FIXADDR_TOP);
/*
 * 64-bit has a great big hole in the middle of the address
 * space, which contains the Xen mappings.
 */
hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
hole_high = pgd_index(GUARD_HOLE_END_ADDR);
nr = pgd_index(limit) + 1;
for (i = 0; i < nr; i++) {
p4d_t *p4d;
/* Skip pgd slots inside the guard hole. */
if (i >= hole_low && i < hole_high)
continue;
if (pgd_none(pgd[i]))
continue;
p4d = p4d_offset(&pgd[i], 0);
xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
}
/* Do the top level last, so that the callbacks can use it as
a cue to do final things like tlb flushes. */
(*func)(mm, virt_to_page(pgd), PT_PGD);
}
/* Convenience wrapper: walk @mm's own pgd with __xen_pgd_walk(). */
static void xen_pgd_walk(struct mm_struct *mm,
void (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
unsigned long limit)
{
__xen_pgd_walk(mm, mm->pgd, func, limit);
}
/* If we're using split pte locks, then take the page's lock and
return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
spinlock_t *ptl = NULL;
#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
ptl = ptlock_ptr(page_ptdesc(page));
/* Nested under mm->page_table_lock for lockdep's benefit. */
spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif
return ptl;
}
/* Drop a ptlock taken by xen_pte_lock(); used as a multicall callback,
 * hence the void * argument. */
static void xen_pte_unlock(void *v)
{
spinlock_t *ptl = v;
spin_unlock(ptl);
}
static void xen_do_pin(unsigned level, unsigned long pfn)
{
struct mmuext_op op;
op.cmd = level;
op.arg1.mfn = pfn_to_mfn(pfn);
xen_extend_mmuext_op(&op);
}
/*
 * Pin-walk callback: mark @page pinned, queue a batched RO remapping of
 * it and, for split-ptlock pte pages, pin the L1 table while holding
 * the page's ptlock (released via a deferred multicall callback).
 * Pages already marked pinned are left untouched.
 */
static void xen_pin_page(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
unsigned pgfl = TestSetPagePinned(page);
if (!pgfl) {
void *pt = lowmem_page_address(page);
unsigned long pfn = page_to_pfn(page);
struct multicall_space mcs = __xen_mc_entry(0);
spinlock_t *ptl;
/*
 * We need to hold the pagetable lock between the time
 * we make the pagetable RO and when we actually pin
 * it. If we don't, then other users may come in and
 * attempt to update the pagetable by writing it,
 * which will fail because the memory is RO but not
 * pinned, so Xen won't do the trap'n'emulate.
 *
 * If we're using split pte locks, we can't hold the
 * entire pagetable's worth of locks during the
 * traverse, because we may wrap the preempt count (8
 * bits). The solution is to mark RO and pin each PTE
 * page while holding the lock. This means the number
 * of locks we end up holding is never more than a
 * batch size (~32 entries, at present).
 *
 * If we're not using split pte locks, we needn't pin
 * the PTE pages independently, because we're
 * protected by the overall pagetable lock.
 */
ptl = NULL;
if (level == PT_PTE)
ptl = xen_pte_lock(page, mm);
MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
pfn_pte(pfn, PAGE_KERNEL_RO),
level == PT_PGD ? UVMF_TLB_FLUSH : 0);
if (ptl) {
xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
/* Queue a deferred unlock for when this batch
is completed. */
xen_mc_callback(xen_pte_unlock, ptl);
}
}
}
/* This is called just after a mm has been created, but it has not
been used yet. We need to make sure that its pagetable is all
read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
pgd_t *user_pgd = xen_get_user_pgd(pgd);
trace_xen_mmu_pgd_pin(mm, pgd);
xen_mc_batch();
/* RO-map every page of the tree, then pin the kernel L4 ... */
__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
/* ... and do the same for the attached user pagetable, if any. */
if (user_pgd) {
xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
xen_do_pin(MMUEXT_PIN_L4_TABLE,
PFN_DOWN(__pa(user_pgd)));
}
xen_mc_issue(0);
}
/* Pin @mm's own pagetable. */
static void xen_pgd_pin(struct mm_struct *mm)
{
__xen_pgd_pin(mm, mm->pgd);
}
/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
if (!PagePinned(page)) {
__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
/* Remember to undo this in xen_mm_unpin_all(). */
SetPageSavePinned(page);
}
}
spin_unlock(&pgd_lock);
}
/* Walk callback that just records the pinned state in struct page. */
static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
SetPagePinned(page);
}
/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now once struct pages for allocated pages are
 * initialized. This happens only after memblock_free_all() is called.
 */
static void __init xen_after_bootmem(void)
{
static_branch_enable(&xen_struct_pages_ready);
#ifdef CONFIG_X86_VSYSCALL_EMULATION
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}
/*
 * Unpin-walk callback: the inverse of xen_pin_page().  Clears the
 * pinned flag, unpins split-ptlock pte pages under their ptlock, and
 * queues a batched remapping of the page back to RW.
 */
static void xen_unpin_page(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
unsigned pgfl = TestClearPagePinned(page);
if (pgfl) {
void *pt = lowmem_page_address(page);
unsigned long pfn = page_to_pfn(page);
spinlock_t *ptl = NULL;
struct multicall_space mcs;
/*
 * Do the converse to pin_page. If we're using split
 * pte locks, we must be holding the lock for while
 * the pte page is unpinned but still RO to prevent
 * concurrent updates from seeing it in this
 * partially-pinned state.
 */
if (level == PT_PTE) {
ptl = xen_pte_lock(page, mm);
if (ptl)
xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
}
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
pfn_pte(pfn, PAGE_KERNEL),
level == PT_PGD ? UVMF_TLB_FLUSH : 0);
if (ptl) {
/* unlock when batch completed */
xen_mc_callback(xen_pte_unlock, ptl);
}
}
}
/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
pgd_t *user_pgd = xen_get_user_pgd(pgd);
trace_xen_mmu_pgd_unpin(mm, pgd);
xen_mc_batch();
/* Unpin the L4 roots first, then walk the tree remapping RW —
 * the reverse order of __xen_pgd_pin(). */
xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
if (user_pgd) {
xen_do_pin(MMUEXT_UNPIN_TABLE,
PFN_DOWN(__pa(user_pgd)));
xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
}
__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
xen_mc_issue(0);
}
/* Unpin @mm's own pagetable. */
static void xen_pgd_unpin(struct mm_struct *mm)
{
__xen_pgd_unpin(mm, mm->pgd);
}
/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
/* Only pgds that xen_mm_pin_all() itself pinned. */
if (PageSavePinned(page)) {
BUG_ON(!PagePinned(page));
__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
ClearPageSavePinned(page);
}
}
spin_unlock(&pgd_lock);
}
/* pv-ops enter_mmap hook: pin the new mm's pagetable under its lock. */
static void xen_enter_mmap(struct mm_struct *mm)
{
spin_lock(&mm->page_table_lock);
xen_pgd_pin(mm);
spin_unlock(&mm->page_table_lock);
}
/*
 * Drop this CPU's reference to @info (an mm): switch away from it if it
 * is the loaded mm, and flush any pending multicalls that might still
 * reference its cr3.  Run locally or via IPI (see xen_drop_mm_ref()).
 */
static void drop_mm_ref_this_cpu(void *info)
{
struct mm_struct *mm = info;
if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
leave_mm();
/*
 * If this cpu still has a stale cr3 reference, then make sure
 * it has been flushed.
 */
if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
xen_mc_flush();
}
#ifdef CONFIG_SMP
/*
* Another cpu may still have their %cr3 pointing at the pagetable, so
* we need to repoint it somewhere else before we can unpin it.
*/
static void xen_drop_mm_ref(struct mm_struct *mm)
{
cpumask_var_t mask;
unsigned cpu;
drop_mm_ref_this_cpu(mm);
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
}
return;
}
/*
* It's possible that a vcpu may have a stale reference to our
* cr3, because its in lazy mode, and it hasn't yet flushed
* its set of pending hypercalls yet. In this case, we can
* look at its actual current cr3 value, and force it to flush
* if needed.
*/
cpumask_clear(mask);
for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpumask_set_cpu(cpu, mask);
}
smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
drop_mm_ref_this_cpu(mm);
}
#endif
/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
get_cpu(); /* make sure we don't move around */
xen_drop_mm_ref(mm);
put_cpu();
spin_lock(&mm->page_table_lock);
/* pgd may not be pinned in the error exit path of execve */
if (xen_page_pinned(mm->pgd))
xen_pgd_unpin(mm);
spin_unlock(&mm->page_table_lock);
}
static void xen_post_allocator_init(void);
/*
 * Synchronously pin/unpin the page table page at @pfn (@cmd is the
 * MMUEXT_* command) via a single non-batched hypercall.
 */
static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
struct mmuext_op op;
op.cmd = cmd;
op.arg1.mfn = pfn_to_mfn(pfn);
if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
BUG();
}
/*
 * Clear pmd entries of the kernel-image (__ka) mapping in the range
 * [vaddr, vaddr_end], except those covering the kernel text up to the
 * end of the brk area.
 */
static void __init xen_cleanhighmap(unsigned long vaddr,
unsigned long vaddr_end)
{
unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
/* NOTE: The loop is more greedy than the cleanup_highmap variant.
 * We include the PMD passed in on _both_ boundaries. */
for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
pmd++, vaddr += PMD_SIZE) {
if (pmd_none(*pmd))
continue;
if (vaddr < (unsigned long) _text || vaddr > kernel_end)
set_pmd(pmd, __pmd(0));
}
/* In case we did something silly, we should crash in this function
 * instead of somewhere later and be confusing. */
xen_mc_flush();
}
/*
 * Make every page in [paddr, paddr + size) writable again and return
 * the physical range to memblock.
 */
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
        unsigned long offset;

        for (offset = 0; offset < size; offset += PAGE_SIZE)
                make_lowmem_page_readwrite(__va(paddr + offset));

        memblock_phys_free(paddr, size);
}
/*
 * Release one page table page of the initial p2m mapping: optionally
 * unpin it, clear its pinned flag, and free it back as RW memory.
 */
static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
if (unpin)
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
ClearPagePinned(virt_to_page(__va(pa)));
xen_free_ro_pages(pa, PAGE_SIZE);
}
/*
 * Free everything mapped by one pmd entry: either a large-page leaf,
 * or each page referenced by its pte table plus the pte table page
 * itself (cleared first, then released via
 * xen_cleanmfnmap_free_pgtbl()).
 */
static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
{
unsigned long pa;
pte_t *pte_tbl;
int i;
if (pmd_leaf(*pmd)) {
pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PMD_SIZE);
return;
}
pte_tbl = pte_offset_kernel(pmd, 0);
for (i = 0; i < PTRS_PER_PTE; i++) {
if (pte_none(pte_tbl[i]))
continue;
pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
xen_free_ro_pages(pa, PAGE_SIZE);
}
set_pmd(pmd, __pmd(0));
xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
}
/*
 * Free everything mapped by one pud entry: a large-page leaf, or each
 * present pmd entry (recursively) plus the pmd table page itself.
 */
static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
{
unsigned long pa;
pmd_t *pmd_tbl;
int i;
if (pud_leaf(*pud)) {
pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PUD_SIZE);
return;
}
pmd_tbl = pmd_offset(pud, 0);
for (i = 0; i < PTRS_PER_PMD; i++) {
if (pmd_none(pmd_tbl[i]))
continue;
xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
}
set_pud(pud, __pud(0));
xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
}
/*
 * Free everything mapped by one p4d entry: a large-page leaf, or each
 * present pud entry (recursively) plus the pud table page itself.
 */
static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
{
unsigned long pa;
pud_t *pud_tbl;
int i;
if (p4d_leaf(*p4d)) {
pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, P4D_SIZE);
return;
}
pud_tbl = pud_offset(p4d, 0);
for (i = 0; i < PTRS_PER_PUD; i++) {
if (pud_none(pud_tbl[i]))
continue;
xen_cleanmfnmap_pud(pud_tbl + i, unpin);
}
set_p4d(p4d, __p4d(0));
xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
}
/*
 * Since it is well isolated we can (and since it is perhaps large we should)
 * also free the page tables mapping the initial P->M table.
 */
static void __init xen_cleanmfnmap(unsigned long vaddr)
{
pgd_t *pgd;
p4d_t *p4d;
bool unpin;
/* NOTE(review): only the mapping at 2*PGDIR_SIZE needs its page
 * tables unpinned, per this check — confirm against the p2m setup
 * code elsewhere in the file. */
unpin = (vaddr == 2 * PGDIR_SIZE);
vaddr &= PMD_MASK;
pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(pgd, 0);
if (!p4d_none(*p4d))
xen_cleanmfnmap_p4d(p4d, unpin);
}
/*
 * Free the initial p2m list provided by the hypervisor (after the p2m
 * data has been revectored to xen_p2m_addr elsewhere).  Handles both a
 * list living in the kernel-image (__ka) mapping and one mapped below
 * it.
 */
static void __init xen_pagetable_p2m_free(void)
{
unsigned long size;
unsigned long addr;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
/* No memory or already called. */
if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
return;
/* using __ka address and sticking INVALID_P2M_ENTRY! */
memset((void *)xen_start_info->mfn_list, 0xff, size);
addr = xen_start_info->mfn_list;
/*
 * We could be in __ka space.
 * We roundup to the PMD, which means that if anybody at this stage is
 * using the __ka address of xen_start_info or
 * xen_start_info->shared_info they are in going to crash. Fortunately
 * we have already revectored in xen_setup_kernel_pagetable.
 */
size = roundup(size, PMD_SIZE);
if (addr >= __START_KERNEL_map) {
xen_cleanhighmap(addr, addr + size);
/* Recompute the exact (page-aligned) size for the free. */
size = PAGE_ALIGN(xen_start_info->nr_pages *
sizeof(unsigned long));
memblock_free((void *)addr, size);
} else {
xen_cleanmfnmap(addr);
}
}
/*
 * Erase leftover __ka pmd entries covering the initial Xen-provided
 * page table frames, then revector pt_base to the direct mapping.
 */
static void __init xen_pagetable_cleanhighmap(void)
{
unsigned long size;
unsigned long addr;
/* At this stage, cleanup_highmap has already cleaned __ka space
 * from _brk_limit way up to the max_pfn_mapped (which is the end of
 * the ramdisk). We continue on, erasing PMD entries that point to page
 * tables - do note that they are accessible at this stage via __va.
 * As Xen is aligning the memory end to a 4MB boundary, for good
 * measure we also round up to PMD_SIZE * 2 - which means that if
 * anybody is using __ka address to the initial boot-stack - and try
 * to use it - they are going to crash. The xen_start_info has been
 * taken care of already in xen_setup_kernel_pagetable. */
addr = xen_start_info->pt_base;
size = xen_start_info->nr_pt_frames * PAGE_SIZE;
xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
}
/*
 * Move the p2m data into a vmalloc-backed tree, free the initial list
 * and its mappings, and point xen_start_info at the new location.
 */
static void __init xen_pagetable_p2m_setup(void)
{
xen_vmalloc_p2m_tree();
xen_pagetable_p2m_free();
xen_pagetable_cleanhighmap();
/* And revector! Bye bye old array */
xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
}
/*
 * Main pagetable-initialization hook: switch the set_pte pv-op to the
 * hypercall-based variant, run the generic paging_init(), then finish
 * the Xen-specific p2m and remap setup.
 */
static void __init xen_pagetable_init(void)
{
/*
 * The majority of further PTE writes is to pagetables already
 * announced as such to Xen. Hence it is more efficient to use
 * hypercalls for these updates.
 */
pv_ops.mmu.set_pte = __xen_set_pte;
paging_init();
xen_post_allocator_init();
xen_pagetable_p2m_setup();
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
/* Remap memory freed due to conflicts with E820 map */
xen_remap_memory();
xen_setup_mfn_list_list();
}
/* pv-ops write_cr2: store the fault address in the shared vcpu info. */
static noinstr void xen_write_cr2(unsigned long cr2)
{
this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}
/* Flush this CPU's TLB via a (possibly batched) mmuext hypercall. */
static noinline void xen_flush_tlb(void)
{
struct mmuext_op *op;
struct multicall_space mcs;
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
/* Invalidate the TLB entry for one user address on this CPU. */
static void xen_flush_tlb_one_user(unsigned long addr)
{
struct mmuext_op *op;
struct multicall_space mcs;
trace_xen_mmu_flush_tlb_one_user(addr);
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_INVLPG_LOCAL;
op->arg1.linear_addr = addr & PAGE_MASK;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(XEN_LAZY_MMU);
preempt_enable();
}
/*
 * Flush TLBs on a set of CPUs with one hypercall.  Uses a full flush,
 * or a single-page invalidation when the range covers at most one page.
 */
static void xen_flush_tlb_multi(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
struct {
struct mmuext_op op;
DECLARE_BITMAP(mask, NR_CPUS);
} *args;
struct multicall_space mcs;
/* Only reserve space for as many mask words as CPUs can exist. */
const size_t mc_entry_size = sizeof(args->op) +
sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
trace_xen_mmu_flush_tlb_multi(cpus, info->mm, info->start, info->end);
if (cpumask_empty(cpus))
return; /* nothing to do */
mcs = xen_mc_entry(mc_entry_size);
args = mcs.args;
args->op.arg2.vcpumask = to_cpumask(args->mask);
/* Remove any offline CPUs */
cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
if (info->end != TLB_FLUSH_ALL &&
(info->end - info->start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
args->op.arg1.linear_addr = info->start;
}
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
xen_mc_issue(XEN_LAZY_MMU);
}
/* pv-ops read_cr3: return this CPU's cached cr3 (see xen_cr3 above). */
static unsigned long xen_read_cr3(void)
{
return this_cpu_read(xen_cr3);
}
/* Multicall callback: record the cr3 Xen has actually switched to. */
static void set_current_cr3(void *v)
{
this_cpu_write(xen_current_cr3, (unsigned long)v);
}
/*
 * Queue a base-pointer switch (kernel or user pagetable) into the
 * current multicall batch.  A zero @cr3 detaches the user pagetable
 * (only valid when !@kernel).  xen_current_cr3 is updated via a
 * deferred callback once the batch is actually submitted.
 */
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
struct mmuext_op op;
unsigned long mfn;
trace_xen_mmu_write_cr3(kernel, cr3);
if (cr3)
mfn = pfn_to_mfn(PFN_DOWN(cr3));
else
mfn = 0;
WARN_ON(mfn == 0 && kernel);
op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
op.arg1.mfn = mfn;
xen_extend_mmuext_op(&op);
if (kernel) {
this_cpu_write(xen_cr3, cr3);
/* Update xen_current_cr3 once the batch has actually
been submitted. */
xen_mc_callback(set_current_cr3, (void *)cr3);
}
}
/*
 * pv-ops write_cr3: switch both the kernel base pointer and the
 * attached user pagetable (or detach it) in one batch.
 */
static void xen_write_cr3(unsigned long cr3)
{
pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
BUG_ON(preemptible());
xen_mc_batch(); /* disables interrupts */
/* Update while interrupts are disabled, so its atomic with
respect to ipis */
this_cpu_write(xen_cr3, cr3);
__xen_write_cr3(true, cr3);
if (user_pgd)
__xen_write_cr3(false, __pa(user_pgd));
else
__xen_write_cr3(false, 0);
xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */
}
/*
 * At the start of the day - when Xen launches a guest, it has already
 * built pagetables for the guest. We diligently look over them
 * in xen_setup_kernel_pagetable and graft them as appropriate into the
 * init_top_pgt and its friends. Then when we are happy we load
 * the new init_top_pgt - and continue on.
 *
 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
 * up the rest of the pagetables. When it has completed it loads the cr3.
 * N.B. that baremetal would start at 'start_kernel' (and the early
 * #PF handler would create bootstrap pagetables) - so we are running
 * with the same assumptions as what to do when write_cr3 is executed
 * at this point.
 *
 * Since there are no user-page tables at all, we have two variants
 * of xen_write_cr3 - the early bootup (this one), and the late one
 * (xen_write_cr3). The reason we have to do that is that in 64-bit
 * the Linux kernel and user-space are both in ring 3 while the
 * hypervisor is in ring 0.
 */
static void __init xen_write_cr3_init(unsigned long cr3)
{
BUG_ON(preemptible());
xen_mc_batch(); /* disables interrupts */
/* Update while interrupts are disabled, so its atomic with
respect to ipis */
this_cpu_write(xen_cr3, cr3);
__xen_write_cr3(true, cr3);
xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */
}
/*
 * pv-ops pgd_alloc hook: allocate the companion user pagetable page
 * and stash it in page->private of the kernel pgd page (where
 * xen_get_user_pgd() finds it).  Returns 0 on success, -ENOMEM when
 * the user page could not be allocated (page->private is then 0).
 */
static int xen_pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd = mm->pgd;
struct page *page = virt_to_page(pgd);
pgd_t *user_pgd;
int ret = -ENOMEM;
BUG_ON(PagePinned(virt_to_page(pgd)));
BUG_ON(page->private != 0);
user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
page->private = (unsigned long)user_pgd;
if (user_pgd != NULL) {
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* The vsyscall page must be visible in the user pagetable. */
user_pgd[pgd_index(VSYSCALL_ADDR)] =
__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
#endif
ret = 0;
}
BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
return ret;
}
/* pv-ops pgd_free hook: release the companion user pagetable, if any. */
static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        unsigned long user_pgd = (unsigned long)xen_get_user_pgd(pgd);

        if (user_pgd)
                free_page(user_pgd);
}
/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * If there is no MFN for this PFN then this page is initially
 * ballooned out so clear the PTE (as in decrease_reservation() in
 * drivers/xen/balloon.c).
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for these is unnecessary and expensive. At
 * this point it is rarely possible to tell if a page is pinned, so
 * mostly write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
/* Early-ioremap ptes must go via the hypercall path. */
if (unlikely(is_early_ioremap_ptep(ptep)))
__xen_set_pte(ptep, pte);
else
native_set_pte(ptep, pte);
}
/*
 * Init-time make_pte: like xen_make_pte(), but additionally forces
 * pages of the initial p2m list (when mapped outside the kernel image
 * range) to be read-only.
 */
__visible pte_t xen_make_pte_init(pteval_t pte)
{
unsigned long pfn;
/*
 * Pages belonging to the initial p2m list mapped outside the default
 * address range must be mapped read-only. This region contains the
 * page tables for mapping the p2m list, too, and page tables MUST be
 * mapped read-only.
 */
pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
if (xen_start_info->mfn_list < __START_KERNEL_map &&
pfn >= xen_start_info->first_p2m_pfn &&
pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
pte &= ~_PAGE_RW;
pte = pte_pfn_to_mfn(pte);
return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
/* Early in boot, while setting up the initial pagetable, assume
everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
BUG_ON(mem_map); /* should only be used early */
#endif
/* New pte pages must be RO and pinned before use. */
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}
/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
BUG_ON(mem_map); /* should only be used early */
#endif
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}
/* Early release_pte assumes that all pts are pinned, since there's
only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
/* Early counterpart of xen_release_pmd(): just restore RW mapping. */
static void __init xen_release_pmd_init(unsigned long pfn)
{
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
/* Queue a pin/unpin mmuext op for @pfn into the current batch. */
static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
struct multicall_space mcs;
struct mmuext_op *op;
mcs = __xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = cmd;
op->arg1.mfn = pfn_to_mfn(pfn);
MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}
/* Queue a batched remapping of @pfn's direct-map page with @prot. */
static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
struct multicall_space mcs;
unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
pfn_pte(pfn, prot), 0);
}
/* This needs to make sure the new pte page is pinned iff its being
attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
unsigned level)
{
bool pinned = xen_page_pinned(mm->pgd);
trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
if (pinned) {
struct page *page = pfn_to_page(pfn);
/* "pinned" now means: was this page already marked pinned? */
pinned = false;
if (static_branch_likely(&xen_struct_pages_ready)) {
pinned = PagePinned(page);
SetPagePinned(page);
}
xen_mc_batch();
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
/* With split ptlocks, pte pages get an explicit L1 pin
 * unless they were already pinned. */
if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS) &&
!pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(XEN_LAZY_MMU);
}
}
/* pv-ops alloc_pte hook: a new pte page is being attached to @mm. */
static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}
/* pv-ops alloc_pmd hook: a new pmd page is being attached to @mm. */
static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}
/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);
	bool pinned = PagePinned(page);

	trace_xen_mmu_release_ptpage(pfn, level, pinned);

	if (pinned) {
		xen_mc_batch();

		/* Unpin individually-pinned L1s before making them writable. */
		if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS))
			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

		/* Restore a normal writable kernel mapping. */
		__set_pfn_prot(pfn, PAGE_KERNEL);

		xen_mc_issue(XEN_LAZY_MMU);

		ClearPagePinned(page);
	}
}
/* pv-ops release_pte hook: a pte page is being freed. */
static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}
/* pv-ops release_pmd hook: a pmd page is being freed. */
static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}
/* pv-ops alloc_pud hook: a new pud page is being attached to @mm. */
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}
/* pv-ops release_pud hook: a pud page is being freed. */
static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void * __init __ka(phys_addr_t paddr)
{
	return (void *)(paddr + __START_KERNEL_map);
}
/*
 * Convert a machine address to a (guest pseudo-)physical address by
 * translating the frame number through the m2p table.  Offset bits
 * within the page are discarded.
 */
static unsigned long __init m2p(phys_addr_t maddr)
{
	unsigned long pfn;

	pfn = mfn_to_pfn((maddr & XEN_PTE_MFN_MASK) >> PAGE_SHIFT);
	return pfn << PAGE_SHIFT;
}
/* Convert a machine address to kernel virtual (via the __ka mapping). */
static void * __init m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}
/* Set the page permissions on an identity-mapped pages */
/*
 * @flags are UVMF_* TLB-flush flags passed straight to the
 * update_va_mapping hypercall.  Any failure here is fatal.
 */
static void __init set_page_prot_flags(void *addr, pgprot_t prot,
				       unsigned long flags)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
		BUG();
}
/*
 * Set the page permissions on an identity-mapped page without any TLB
 * flush (UVMF_NONE).
 *
 * Fix: the original wrote "return set_page_prot_flags(...)" — returning
 * the value of a void expression from a void function is a C constraint
 * violation (only valid in C++ / as a GNU extension), so drop the return.
 */
static void __init set_page_prot(void *addr, pgprot_t prot)
{
	set_page_prot_flags(addr, prot, UVMF_NONE);
}
/*
 * Query the hypervisor for the location/size of the machine-to-physical
 * table.  On failure fall back to the ABI-defined default number of
 * entries (the default table base is assumed in that case).
 */
void __init xen_setup_machphys_mapping(void)
{
	struct xen_machphys_mapping mapping;

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr = mapping.max_mfn + 1;
	} else {
		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
	}
}
/*
 * Rewrite every entry of one pagetable page in place from pfn-based to
 * mfn-based form.  All levels share the same entry layout, so each page
 * can be processed as an array of ptes.
 */
static void __init convert_pfn_mfn(void *v)
{
	pte_t *entry = v;
	int idx;

	for (idx = 0; idx < PTRS_PER_PTE; idx++)
		entry[idx] = xen_make_pte(entry[idx].pte);
}
/*
 * If @addr is the first or last frame of the Xen-provided pagetable
 * region [*pt_base, *pt_end), make it writable, clear it and shrink the
 * region so the frame can be given back to the kernel.
 */
static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
				 unsigned long addr)
{
	if (*pt_base == PFN_DOWN(__pa(addr))) {
		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
		clear_page((void *)addr);
		(*pt_base)++;
	}
	if (*pt_end == PFN_DOWN(__pa(addr))) {
		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
		clear_page((void *)addr);
		(*pt_end)--;
	}
}
/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
 * kernel has a physical mapping to start with - but that's enough to
 * get __va working.  We need to fill in the rest of the physical
 * mapping once some sort of allocator has been set up.
 *
 * @pgd:     the Xen-provided top-level pagetable (machine addresses).
 * @max_pfn: highest pfn of the domain (unused here beyond the signature).
 */
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;
	unsigned long addr[3];
	unsigned long pt_base, pt_end;
	unsigned i;

	/* max_pfn_mapped is the last pfn mapped in the initial memory
	 * mappings. Considering that on Xen after the kernel mappings we
	 * have the mappings of some pages that don't exist in pfn space, we
	 * set max_pfn_mapped to the last real pfn mapped. */
	if (xen_start_info->mfn_list < __START_KERNEL_map)
		max_pfn_mapped = xen_start_info->first_p2m_pfn;
	else
		max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

	pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
	pt_end = pt_base + xen_start_info->nr_pt_frames;

	/* Zap identity mapping */
	init_top_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	/* L4[273] -> level3_ident_pgt  */
	/* L4[511] -> level3_kernel_pgt */
	convert_pfn_mfn(init_top_pgt);

	/* L3_i[0] -> level2_ident_pgt */
	convert_pfn_mfn(level3_ident_pgt);
	/* L3_k[510] -> level2_kernel_pgt */
	/* L3_k[511] -> level2_fixmap_pgt */
	convert_pfn_mfn(level3_kernel_pgt);

	/* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
	convert_pfn_mfn(level2_fixmap_pgt);

	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	/* Remember the Xen frames so they can be released afterwards. */
	addr[0] = (unsigned long)pgd;
	addr[1] = (unsigned long)l3;
	addr[2] = (unsigned long)l2;

	/* Graft it onto L4[273][0]. Note that we creating an aliasing problem:
	 * Both L4[273][0] and L4[511][510] have entries that point to the same
	 * L2 (PMD) tables. Meaning that if you modify it in __va space
	 * it will be also modified in the __ka space! (But if you just
	 * modify the PMD table to point to other PTE's or none, then you
	 * are OK - which is what cleanup_highmap does) */
	copy_page(level2_ident_pgt, l2);
	/* Graft it onto L4[511][510] */
	copy_page(level2_kernel_pgt, l2);

	/*
	 * Zap execute permission from the ident map. Due to the sharing of
	 * L1 entries we need to do this in the L2.
	 */
	if (__supported_pte_mask & _PAGE_NX) {
		for (i = 0; i < PTRS_PER_PMD; ++i) {
			if (pmd_none(level2_ident_pgt[i]))
				continue;
			level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
		}
	}

	/* Copy the initial P->M table mappings if necessary. */
	i = pgd_index(xen_start_info->mfn_list);
	if (i && i < pgd_index(__START_KERNEL_map))
		init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];

	/* Make pagetable pieces RO */
	set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
	for (i = 0; i < FIXMAP_PMD_NUM; i++) {
		set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
			      PAGE_KERNEL_RO);
	}

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_top_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_VSYSCALL_EMULATION
	/* Pin user vsyscall L3 */
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
			  PFN_DOWN(__pa_symbol(level3_user_vsyscall)));
#endif

	/*
	 * At this stage there can be no user pgd, and no page structure to
	 * attach it to, so make sure we just set kernel pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(init_top_pgt));
	xen_mc_issue(XEN_LAZY_CPU);

	/* We can't that easily rip out L3 and L2, as the Xen pagetables are
	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for
	 * the initial domain. For guests using the toolstack, they are in:
	 * [L4], [L3], [L2], [L1], [L1], order ... So for dom0 we can only
	 * rip out the [L4] (pgd), but for guests we shave off three pages.
	 */
	for (i = 0; i < ARRAY_SIZE(addr); i++)
		check_pt_base(&pt_base, &pt_end, addr[i]);

	/* Our (by three pages) smaller Xen pagetable that we are using */
	xen_pt_base = PFN_PHYS(pt_base);
	xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
	memblock_reserve(xen_pt_base, xen_pt_size);

	/* Revector the xen_start_info */
	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
}
/*
 * Read one unsigned long from physical address @addr via a transient
 * early read-only mapping.
 */
static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
{
	unsigned long value;
	unsigned long *mapped = early_memremap_ro(addr, sizeof(value));

	value = *mapped;
	early_memunmap(mapped, sizeof(value));

	return value;
}
/*
 * Translate a virtual address to a physical one without relying on mapped
 * page tables. Don't rely on big pages being aligned in (guest) physical
 * space!
 *
 * Walks the current pagetable (from CR3) by reading each level through
 * xen_read_phys_ulong().  Returns 0 if any level is not present.
 */
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
	phys_addr_t pa;
	pgd_t pgd;
	pud_t pud;
	pmd_t pmd;
	pte_t pte;

	pa = read_cr3_pa();
	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
						       sizeof(pgd)));
	if (!pgd_present(pgd))
		return 0;

	pa = pgd_val(pgd) & PTE_PFN_MASK;
	pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
						       sizeof(pud)));
	if (!pud_present(pud))
		return 0;
	pa = pud_val(pud) & PTE_PFN_MASK;
	if (pud_leaf(pud))
		/* 1 GB leaf mapping: add the offset below the pud level. */
		return pa + (vaddr & ~PUD_MASK);

	pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
						       sizeof(pmd)));
	if (!pmd_present(pmd))
		return 0;
	pa = pmd_val(pmd) & PTE_PFN_MASK;
	if (pmd_leaf(pmd))
		/* 2 MB leaf mapping: add the offset below the pmd level. */
		return pa + (vaddr & ~PMD_MASK);

	pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
						       sizeof(pte)));
	if (!pte_present(pte))
		return 0;
	pa = pte_pfn(pte) << PAGE_SHIFT;

	return pa | (vaddr & ~PAGE_MASK);
}
/*
 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
 * this area.
 *
 * Builds a fresh pagetable hierarchy (pud/pmd/pte pages taken from the
 * front of the new area) mapping the relocated list at 2 * PGDIR_SIZE,
 * copies the list over, then releases the frames of the old list.
 */
void __init xen_relocate_p2m(void)
{
	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
	pte_t *pt;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	unsigned long *new_p2m;

	/* Frames needed: the list itself plus one page per table at each level. */
	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
	n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
	n_frames = n_pte + n_pt + n_pmd + n_pud;

	new_area = xen_find_free_area(PFN_PHYS(n_frames));
	if (!new_area) {
		xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
		BUG();
	}

	/*
	 * Setup the page tables for addressing the new p2m list.
	 * We have asked the hypervisor to map the p2m list at the user address
	 * PUD_SIZE. It may have done so, or it may have used a kernel space
	 * address depending on the Xen version.
	 * To avoid any possible virtual address collision, just use
	 * 2 * PUD_SIZE for the new area.
	 */
	pud_phys = new_area;
	pmd_phys = pud_phys + PFN_PHYS(n_pud);
	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;

	pgd = __va(read_cr3_pa());
	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
		pud = early_memremap(pud_phys, PAGE_SIZE);
		clear_page(pud);
		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
				idx_pmd++) {
			pmd = early_memremap(pmd_phys, PAGE_SIZE);
			clear_page(pmd);
			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
					idx_pt++) {
				pt = early_memremap(pt_phys, PAGE_SIZE);
				clear_page(pt);
				for (idx_pte = 0;
				     idx_pte < min(n_pte, PTRS_PER_PTE);
				     idx_pte++) {
					pt[idx_pte] = pfn_pte(p2m_pfn,
							      PAGE_KERNEL);
					p2m_pfn++;
				}
				n_pte -= PTRS_PER_PTE;
				early_memunmap(pt, PAGE_SIZE);
				/* Each table page must be RO and pinned before linking. */
				make_lowmem_page_readonly(__va(pt_phys));
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
						  PFN_DOWN(pt_phys));
				pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
				pt_phys += PAGE_SIZE;
			}
			n_pt -= PTRS_PER_PMD;
			early_memunmap(pmd, PAGE_SIZE);
			make_lowmem_page_readonly(__va(pmd_phys));
			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
					  PFN_DOWN(pmd_phys));
			pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
			pmd_phys += PAGE_SIZE;
		}
		n_pmd -= PTRS_PER_PUD;
		early_memunmap(pud, PAGE_SIZE);
		make_lowmem_page_readonly(__va(pud_phys));
		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
		/* Hook the new pud in at pgd slot 2 (i.e. 2 * PGDIR_SIZE). */
		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
		pud_phys += PAGE_SIZE;
	}

	/* Now copy the old p2m info to the new area. */
	memcpy(new_p2m, xen_p2m_addr, size);
	xen_p2m_addr = new_p2m;

	/* Release the old p2m list and set new list info. */
	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
	BUG_ON(!p2m_pfn);
	p2m_pfn_end = p2m_pfn + PFN_DOWN(size);

	if (xen_start_info->mfn_list < __START_KERNEL_map) {
		pfn = xen_start_info->first_p2m_pfn;
		pfn_end = xen_start_info->first_p2m_pfn +
			  xen_start_info->nr_p2m_frames;
		set_pgd(pgd + 1, __pgd(0));
	} else {
		pfn = p2m_pfn;
		pfn_end = p2m_pfn_end;
	}

	memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
	while (pfn < pfn_end) {
		/* Frames still backing the (just relocated) list stay RO. */
		if (pfn == p2m_pfn) {
			pfn = p2m_pfn_end;
			continue;
		}
		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		pfn++;
	}

	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
	xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
	xen_start_info->nr_p2m_frames = n_frames;
}
/*
 * Reserve the pages Xen hands us at start of day (start_info, xenstore
 * ring, domU console ring) so the kernel's allocator never reuses them.
 */
void __init xen_reserve_special_pages(void)
{
	phys_addr_t paddr;

	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
	if (xen_start_info->store_mfn) {
		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
		memblock_reserve(paddr, PAGE_SIZE);
	}
	if (!xen_initial_domain()) {
		/* Only non-dom0 guests have a console ring page. */
		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
		memblock_reserve(paddr, PAGE_SIZE);
	}
}
/* Verify the initial pagetable area doesn't conflict with the E820 map. */
void __init xen_pt_check_e820(void)
{
	xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
}
/*
 * Backing page for fixmap slots that must not map real hardware under Xen
 * (local APIC, IO-APIC); filled with 0xff in xen_init_mmu_ops().
 */
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
/*
 * pv-ops set_fixmap hook.  Decides per fixmap index whether @phys is a
 * pfn (local memory), an mfn (hardware/foreign frame) or should be
 * redirected to the dummy page, then installs the pte via hypercall.
 */
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;
	unsigned long vaddr;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_VSYSCALL_EMULATION
	case VSYSCALL_PAGE:
#endif
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

#ifdef CONFIG_X86_IO_APIC
	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
		/*
		 * We just don't map the IO APIC - all access is via
		 * hypercalls.  Keep the address in the pte for reference.
		 */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, prot);
		break;
	}

	vaddr = __fix_to_virt(idx);
	if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG))
		BUG();

#ifdef CONFIG_X86_VSYSCALL_EMULATION
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx == VSYSCALL_PAGE)
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
#endif
}
/* pv-ops lazy_mode.enter hook: switch to batched MMU updates. */
static void xen_enter_lazy_mmu(void)
{
	enter_lazy(XEN_LAZY_MMU);
}
/*
 * pv-ops lazy_mode.flush hook: push out any batched MMU updates by
 * briefly leaving and re-entering lazy MMU mode.
 */
static void xen_flush_lazy_mmu(void)
{
	preempt_disable();

	if (xen_get_lazy_mode() == XEN_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}
/*
 * Switch the pv-ops MMU hooks from their early-boot variants to the
 * normal runtime implementations once the page allocator is up.
 *
 * Fix: the write_cr3 assignment used a redundant address-of operator
 * ("&xen_write_cr3"), inconsistent with every other function-pointer
 * assignment here — a function designator decays to a pointer anyway.
 */
static void __init xen_post_allocator_init(void)
{
	pv_ops.mmu.set_pte = xen_set_pte;
	pv_ops.mmu.set_pmd = xen_set_pmd;
	pv_ops.mmu.set_pud = xen_set_pud;
	pv_ops.mmu.set_p4d = xen_set_p4d;

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_ops.mmu.alloc_pte = xen_alloc_pte;
	pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
	pv_ops.mmu.release_pte = xen_release_pte;
	pv_ops.mmu.release_pmd = xen_release_pmd;
	pv_ops.mmu.alloc_pud = xen_alloc_pud;
	pv_ops.mmu.release_pud = xen_release_pud;
	pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);

	pv_ops.mmu.write_cr3 = xen_write_cr3;
}
/*
 * pv-ops lazy_mode.leave hook: flush the multicall queue before
 * actually leaving lazy MMU mode.
 */
static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	leave_lazy(XEN_LAZY_MMU);
	preempt_enable();
}
/*
 * Early-boot pv-ops MMU hook table.  Several entries deliberately point
 * at *_init variants; xen_post_allocator_init() replaces them with the
 * runtime implementations later.
 */
static const typeof(pv_ops) xen_mmu_ops __initconst = {
	.mmu = {
		.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
		.write_cr2 = xen_write_cr2,

		.read_cr3 = xen_read_cr3,
		.write_cr3 = xen_write_cr3_init,

		.flush_tlb_user = xen_flush_tlb,
		.flush_tlb_kernel = xen_flush_tlb,
		.flush_tlb_one_user = xen_flush_tlb_one_user,
		.flush_tlb_multi = xen_flush_tlb_multi,
		.tlb_remove_table = tlb_remove_table,

		.pgd_alloc = xen_pgd_alloc,
		.pgd_free = xen_pgd_free,

		.alloc_pte = xen_alloc_pte_init,
		.release_pte = xen_release_pte_init,
		.alloc_pmd = xen_alloc_pmd_init,
		.release_pmd = xen_release_pmd_init,

		.set_pte = xen_set_pte_init,
		.set_pmd = xen_set_pmd_hyper,

		.ptep_modify_prot_start = xen_ptep_modify_prot_start,
		.ptep_modify_prot_commit = xen_ptep_modify_prot_commit,

		.pte_val = PV_CALLEE_SAVE(xen_pte_val),
		.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

		.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
		.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

		.set_pud = xen_set_pud_hyper,

		.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
		.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

		.pud_val = PV_CALLEE_SAVE(xen_pud_val),
		.make_pud = PV_CALLEE_SAVE(xen_make_pud),
		.set_p4d = xen_set_p4d_hyper,

		/* pud pages use the same early alloc/release path as pmds. */
		.alloc_pud = xen_alloc_pmd_init,
		.release_pud = xen_release_pmd_init,

#if CONFIG_PGTABLE_LEVELS >= 5
		.p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
		.make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
#endif

		.enter_mmap = xen_enter_mmap,
		.exit_mmap = xen_exit_mmap,

		.lazy_mode = {
			.enter = xen_enter_lazy_mmu,
			.leave = xen_leave_lazy_mmu,
			.flush = xen_flush_lazy_mmu,
		},

		.set_fixmap = xen_set_fixmap,
	},
};
/* Install the Xen MMU pv-ops and prepare the dummy fixmap backing page. */
void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_init = xen_pagetable_init;
	x86_init.hyper.init_after_bootmem = xen_after_bootmem;

	pv_ops.mmu = xen_mmu_ops.mmu;

	/* 0xff so a stray read of the dummy APIC page is conspicuous. */
	memset(dummy_mapping, 0xff, PAGE_SIZE);
}
/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

/* A pte with no flags set; used when zapping existing mappings. */
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
/*
 * Invalidate the mappings of 2^@order pages starting at @vaddr and mark
 * their p2m entries invalid.  If supplied, @in_frames receives the old
 * mfns and @out_frames the pfns of the range.
 */
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			      unsigned long *in_frames,
			      unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		/* Record the mfn before the mapping is torn down. */
		if (in_frames)
			in_frames[i] = virt_to_mfn((void *)vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn((void *)vaddr);
	}
	xen_mc_issue(0);
}
/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		/* Defer the TLB flush to the very last update in the batch. */
		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
				mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn((void *)vaddr), mfn);
	}

	xen_mc_issue(0);
}
/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid        = DOMID_SELF
		}
	};

	/* In and out must describe the same total number of frames. */
	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	/* XENMEM_exchange is all-or-nothing for our purposes. */
	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}
/*
 * Exchange the (possibly discontiguous) machine frames backing
 * [pstart, pstart + 2^order pages) for one machine-contiguous extent,
 * e.g. for DMA.  On success *dma_handle holds the machine address.
 * Returns 0 on success, -ENOMEM on failure or oversized @order.
 */
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long  flags;
	int            success;
	unsigned long vstart = (unsigned long)phys_to_virt(pstart);

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn((void *)vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		/* Exchange failed: put the original frames back. */
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	*dma_handle = virt_to_machine(vstart).maddr;
	return success ? 0 : -ENOMEM;
}
/*
 * Undo xen_create_contiguous_region(): give the contiguous machine
 * extent back to the hypervisor in exchange for ordinary single frames.
 */
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long  flags;
	int success;
	unsigned long vstart;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	vstart = (unsigned long)phys_to_virt(pstart);
	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn((void *)vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
					0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		/* Exchange failed: restore the original contiguous extent. */
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
/* Flush the TLBs of all vcpus via a single MMUEXT_TLB_FLUSH_ALL multicall. */
static noinline void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}
/* Number of mmu_update entries submitted per hypercall in xen_remap_pfn(). */
#define REMAP_BATCH_SIZE 16

/* Per-walk state shared with remap_area_pfn_pte_fn(). */
struct remap_data {
	xen_pfn_t *pfn;			/* current frame (or array cursor) */
	bool contiguous;		/* single run of frames vs. array */
	bool no_translate;		/* use MMU_PT_UPDATE_NO_TRANSLATE */
	pgprot_t prot;			/* protection for the new ptes */
	struct mmu_update *mmu_update;	/* next free slot in the batch */
};
/*
 * apply_to_page_range() callback: queue one mmu_update entry that points
 * @ptep at the next frame described by the remap_data in @data.
 */
static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));

	/*
	 * If we have a contiguous range, just update the pfn itself,
	 * else update pointer to be "next pfn".
	 */
	if (rmd->contiguous)
		(*rmd->pfn)++;
	else
		rmd->pfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->ptr |= rmd->no_translate ?
		MMU_PT_UPDATE_NO_TRANSLATE :
		MMU_NORMAL_PT_UPDATE;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}
/*
 * Map @nr foreign/machine frames into @vma starting at @addr.
 *
 * @pfn:      first frame (contiguous mode) or array of frames.
 * @err_ptr:  if non-NULL, per-frame error results; also selects
 *            discontiguous mode (one entry of @pfn per page).
 * Returns the number of frames mapped, or a negative errno if the whole
 * operation failed before any batch could complete.
 */
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
		  unsigned int domid, bool no_translate)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	rmd.pfn = pfn;
	rmd.prot = prot;
	/*
	 * We use the err_ptr to indicate if there we are doing a contiguous
	 * mapping or a discontiguous mapping.
	 */
	rmd.contiguous = !err_ptr;
	rmd.no_translate = no_translate;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;

		range = (unsigned long)batch << PAGE_SHIFT;

		/* Fill mmu_update[] for this batch via the pte callback. */
		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_pfn_pte_fn, &rmd);
		if (err)
			goto out;

		/*
		 * We record the error for each page that gives an error, but
		 * continue mapping until the whole set is done
		 */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				/* i == index + done: the frame that failed. */
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}
EXPORT_SYMBOL_GPL(xen_remap_pfn);
#ifdef CONFIG_VMCORE_INFO
/*
 * Address of the vmcoreinfo note for crash dumps: PV domains must report
 * the machine address, everything else the (pseudo-)physical address.
 */
phys_addr_t paddr_vmcoreinfo_note(void)
{
	if (xen_pv_domain())
		return virt_to_machine(vmcoreinfo_note).maddr;
	else
		return __pa(vmcoreinfo_note);
}
#endif /* CONFIG_VMCORE_INFO */
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTRFS_SPACE_INFO_H
#define BTRFS_SPACE_INFO_H
#include <trace/events/btrfs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/lockdep.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include "volumes.h"
struct btrfs_fs_info;
struct btrfs_block_group;
/*
 * Different levels at which to flush space when doing space reservations.
 *
 * The higher the level, the more methods we try to reclaim space.
 */
enum btrfs_reserve_flush_enum {
	/* If we are in the transaction, we can't flush anything. */
	BTRFS_RESERVE_NO_FLUSH,

	/*
	 * Flush space by:
	 * - Running delayed inode items
	 * - Allocating a new chunk
	 */
	BTRFS_RESERVE_FLUSH_LIMIT,

	/*
	 * Flush space by:
	 * - Running delayed inode items
	 * - Running delayed refs
	 * - Running delalloc and waiting for ordered extents
	 * - Allocating a new chunk
	 * - Committing transaction
	 */
	BTRFS_RESERVE_FLUSH_EVICT,

	/*
	 * Flush space by above mentioned methods and by:
	 * - Running delayed iputs
	 * - Committing transaction
	 *
	 * Can be interrupted by a fatal signal.
	 */
	BTRFS_RESERVE_FLUSH_DATA,
	BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE,
	BTRFS_RESERVE_FLUSH_ALL,

	/*
	 * Pretty much the same as FLUSH_ALL, but can also steal space from
	 * global rsv.
	 *
	 * Can be interrupted by a fatal signal.
	 */
	BTRFS_RESERVE_FLUSH_ALL_STEAL,

	/*
	 * This is for btrfs_use_block_rsv only.  We have exhausted our block
	 * rsv and our global block rsv.  This can happen for things like
	 * delalloc where we are overwriting a lot of extents with a single
	 * extent and didn't reserve enough space.  Alternatively it can happen
	 * with delalloc where we reserve one extent's worth for a large extent
	 * but fragmentation leads to multiple extents being created.  This
	 * will give us the reservation in the case of
	 *
	 * if (num_bytes < (space_info->total_bytes -
	 *		    btrfs_space_info_used(space_info, false))
	 *
	 * Which ignores bytes_may_use.  This is potentially dangerous, but our
	 * reservation system is generally pessimistic so is able to absorb this
	 * style of mistake.
	 */
	BTRFS_RESERVE_FLUSH_EMERGENCY,
};
/*
 * Individual space-flushing steps, in escalation order.  Values start at
 * 1 — presumably so 0 can mean "no flushing in progress"; confirm against
 * the users of this enum.
 */
enum btrfs_flush_state {
	FLUSH_DELAYED_ITEMS_NR	= 1,
	FLUSH_DELAYED_ITEMS	= 2,
	FLUSH_DELAYED_REFS_NR	= 3,
	FLUSH_DELAYED_REFS	= 4,
	FLUSH_DELALLOC		= 5,
	FLUSH_DELALLOC_WAIT	= 6,
	FLUSH_DELALLOC_FULL	= 7,
	ALLOC_CHUNK		= 8,
	ALLOC_CHUNK_FORCE	= 9,
	RUN_DELAYED_IPUTS	= 10,
	COMMIT_TRANS		= 11,
};
/*
 * Accounting for one allocation class (data/metadata/system, per @flags).
 * Byte counters are protected by @lock unless noted otherwise.
 */
struct btrfs_space_info {
	struct btrfs_fs_info *fs_info;
	spinlock_t lock;

	u64 total_bytes;	/* total bytes in the space,
				   this doesn't take mirrors into account */
	u64 bytes_used;		/* total bytes used,
				   this doesn't take mirrors into account */
	u64 bytes_pinned;	/* total bytes pinned, will be freed when the
				   transaction finishes */
	u64 bytes_reserved;	/* total bytes the allocator has reserved for
				   current allocations */
	u64 bytes_may_use;	/* number of bytes that may be used for
				   delalloc/allocations */
	u64 bytes_readonly;	/* total bytes that are read only */
	u64 bytes_zone_unusable;	/* total bytes that are unusable until
					   resetting the device zone */

	u64 max_extent_size;	/* This will hold the maximum extent size of
				   the space info if we had an ENOSPC in the
				   allocator. */
	/* Chunk size in bytes */
	u64 chunk_size;

	/*
	 * Once a block group drops below this threshold (percents) we'll
	 * schedule it for reclaim.
	 */
	int bg_reclaim_threshold;

	int clamp;		/* Used to scale our threshold for preemptive
				   flushing. The value is >> clamp, so turns
				   out to be a 2^clamp divisor. */

	unsigned int full:1;	/* indicates that we cannot allocate any more
				   chunks for this space */
	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */

	unsigned int flush:1;		/* set if we are trying to make space */

	unsigned int force_alloc;	/* set if we need to force a chunk
					   alloc for this space */

	u64 disk_used;		/* total bytes used on disk */
	u64 disk_total;		/* total bytes on disk, takes mirrors into
				   account */

	u64 flags;		/* BTRFS_BLOCK_GROUP_* class of this info */

	struct list_head list;
	/* Protected by the spinlock 'lock'. */
	struct list_head ro_bgs;
	struct list_head priority_tickets;
	struct list_head tickets;

	/*
	 * Size of space that needs to be reclaimed in order to satisfy pending
	 * tickets
	 */
	u64 reclaim_size;

	/*
	 * tickets_id just indicates the next ticket will be handled, so note
	 * it's not stored per ticket.
	 */
	u64 tickets_id;

	struct rw_semaphore groups_sem;
	/* for block groups in our same type */
	struct list_head block_groups[BTRFS_NR_RAID_TYPES];

	struct kobject kobj;
	struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];

	/*
	 * Monotonically increasing counter of block group reclaim attempts
	 * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_count
	 */
	u64 reclaim_count;

	/*
	 * Monotonically increasing counter of reclaimed bytes
	 * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_bytes
	 */
	u64 reclaim_bytes;

	/*
	 * Monotonically increasing counter of reclaim errors
	 * Exposed in /sys/fs/<uuid>/allocation/<type>/reclaim_errors
	 */
	u64 reclaim_errors;

	/*
	 * If true, use the dynamic relocation threshold, instead of the
	 * fixed bg_reclaim_threshold.
	 */
	bool dynamic_reclaim;

	/*
	 * Periodically check all block groups against the reclaim
	 * threshold in the cleaner thread.
	 */
	bool periodic_reclaim;

	/*
	 * Periodic reclaim should be a no-op if a space_info hasn't
	 * freed any space since the last time we tried.
	 */
	bool periodic_reclaim_ready;

	/*
	 * Net bytes freed or allocated since the last reclaim pass.
	 */
	s64 reclaimable_bytes;
};
/*
 * One pending space reservation waiting on a space_info's ticket lists;
 * the waiter sleeps on @wait until granted or failed with @error.
 */
struct reserve_ticket {
	u64 bytes;		/* bytes still needed by this reservation */
	int error;		/* result reported back to the waiter */
	bool steal;		/* may steal from the global reserve */
	struct list_head list;
	wait_queue_head_t wait;
};
static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info)
{
return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
}
/*
 * Declare a helper that updates the given space_info byte counter while
 * holding its lock, emits the matching tracepoints, and clamps at zero
 * (with a WARN) instead of underflowing when subtracting.
 */
#define DECLARE_SPACE_INFO_UPDATE(name, trace_name)			\
static inline void							\
btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info,		\
			       struct btrfs_space_info *sinfo,		\
			       s64 bytes)				\
{									\
	const u64 abs_bytes = (bytes < 0) ? -bytes : bytes;		\
	lockdep_assert_held(&sinfo->lock);				\
	trace_update_##name(fs_info, sinfo, sinfo->name, bytes);	\
	trace_btrfs_space_reservation(fs_info, trace_name,		\
				      sinfo->flags, abs_bytes,		\
				      bytes > 0);			\
	if (bytes < 0 && sinfo->name < -bytes) {			\
		WARN_ON(1);						\
		sinfo->name = 0;					\
		return;							\
	}								\
	sinfo->name += bytes;						\
}

DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
/* Creation, registration and lookup of space_info structures. */
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group);
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags);

/* Accounting queries and diagnostics. */
u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info,
			  bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups);

/* Reservation, ticket granting and overcommit decisions. */
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush);
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info);
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 const struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush);
/*
 * Return @num_bytes of bytes_may_use to @space_info and immediately try
 * to satisfy waiting reservation tickets with the freed space.
 */
static inline void btrfs_space_info_free_bytes_may_use(
				struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				u64 num_bytes)
{
	spin_lock(&space_info->lock);
	btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush);
void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info);
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);

/* Periodic / dynamic block group reclaim. */
void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes);
void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info);
void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info);
#endif /* BTRFS_SPACE_INFO_H */
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* BCH Error Location Module
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __ELM_H
#define __ELM_H
/* BCH error-correction strengths supported by the ELM block */
enum bch_ecc {
	BCH4_ECC = 0,	/* 4-bit correction */
	BCH8_ECC,	/* 8-bit correction */
	BCH16_ECC,	/* 16-bit correction */
};
/* ELM support 8 error syndrome process */
#define ERROR_VECTOR_MAX 8
/**
 * struct elm_errorvec - error vector for elm
 * @error_reported: set true for vectors where an error is reported
 * @error_uncorrectable: set true if the errors in this vector cannot be
 *	corrected (note: a flag, not a count, despite the old wording)
 * @error_count: number of correctable errors in the sector
 * @error_loc: buffer for error location
 *
 */
struct elm_errorvec {
	bool error_reported;
	bool error_uncorrectable;
	int error_count;
	int error_loc[16];
};
#if IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)
void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
struct elm_errorvec *err_vec);
int elm_config(struct device *dev, enum bch_ecc bch_type,
int ecc_steps, int ecc_step_size, int ecc_syndrome_size);
#else
/* Stub for builds without CONFIG_MTD_NAND_OMAP_BCH: nothing to decode. */
static inline void
elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
			  struct elm_errorvec *err_vec)
{
}
/* Stub for builds without CONFIG_MTD_NAND_OMAP_BCH: ELM is unavailable. */
static inline int elm_config(struct device *dev, enum bch_ecc bch_type,
			     int ecc_steps, int ecc_step_size,
			     int ecc_syndrome_size)
{
	return -ENOSYS;
}
#endif /* CONFIG_MTD_NAND_OMAP_BCH */
#endif /* __ELM_H */
|
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_imu.h"
#include "amdgpu_dpm.h"
#include "imu_v11_0_3.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_imu.bin");
/*
 * Request the IMU firmware image for the current GC IP version and, when
 * firmware is loaded through the PSP, register the IRAM and DRAM sections
 * in the ucode table so the PSP can upload them.
 *
 * Returns 0 on success or a negative error code; on failure the firmware
 * reference is released.
 */
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[30];
	int err;
	const struct imu_firmware_header_v1_0 *imu_hdr;
	struct amdgpu_firmware_info *info = NULL;

	DRM_DEBUG("\n");

	/* derive the "gc_11_x_y"-style prefix from the GC hardware IP version */
	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
	err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
	if (err)
		goto out;

	imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
	//adev->gfx.imu_feature_version = le32_to_cpu(imu_hdr->ucode_feature_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		/* instruction RAM section */
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_IMU_I];
		info->ucode_id = AMDGPU_UCODE_ID_IMU_I;
		info->fw = adev->gfx.imu_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes), PAGE_SIZE);
		/* data RAM section */
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_IMU_D];
		info->ucode_id = AMDGPU_UCODE_ID_IMU_D;
		info->fw = adev->gfx.imu_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes), PAGE_SIZE);
	} else
		adev->gfx.imu_fw_version = le32_to_cpu(imu_hdr->header.ucode_version);

out:
	if (err) {
		dev_err(adev->dev,
			"gfx11: Failed to load firmware \"%s_imu.bin\"\n",
			ucode_prefix);
		amdgpu_ucode_release(&adev->gfx.imu_fw);
	}

	return err;
}
/*
 * Direct (non-PSP) firmware load: stream the IMU firmware into the IMU
 * instruction RAM and data RAM through the indexed RAM_ADDR/RAM_DATA
 * register pairs.
 *
 * Returns 0 on success, -EINVAL if no firmware was previously requested.
 */
static int imu_v11_0_load_microcode(struct amdgpu_device *adev)
{
	const struct imu_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.imu_fw)
		return -EINVAL;

	hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
	//amdgpu_ucode_print_rlc_hdr(&hdr->header);

	/* IRAM payload starts right after the common ucode header */
	fw_data = (const __le32 *)(adev->gfx.imu_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->imu_iram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regGFX_IMU_I_RAM_ADDR, 0);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, regGFX_IMU_I_RAM_DATA, le32_to_cpup(fw_data++));

	/* presumably tags the upload with the fw version — NOTE(review): confirm */
	WREG32_SOC15(GC, 0, regGFX_IMU_I_RAM_ADDR, adev->gfx.imu_fw_version);

	/* DRAM payload immediately follows the IRAM section */
	fw_data = (const __le32 *)(adev->gfx.imu_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			le32_to_cpu(hdr->imu_iram_ucode_size_bytes));
	fw_size = le32_to_cpu(hdr->imu_dram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regGFX_IMU_D_RAM_ADDR, 0);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, regGFX_IMU_D_RAM_DATA, le32_to_cpup(fw_data++));

	WREG32_SOC15(GC, 0, regGFX_IMU_D_RAM_ADDR, adev->gfx.imu_fw_version);

	return 0;
}
static int imu_v11_0_wait_for_reset_status(struct amdgpu_device *adev)
{
int i, imu_reg_val = 0;
for (i = 0; i < adev->usec_timeout; i++) {
imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_GFX_RESET_CTRL);
if ((imu_reg_val & 0x1f) == 0x1f)
break;
udelay(1);
}
if (i >= adev->usec_timeout) {
dev_err(adev->dev, "init imu: IMU start timeout\n");
return -ETIMEDOUT;
}
return 0;
}
/*
 * Pre-start IMU configuration: open up the C2PMSG register access masks,
 * optionally flag debug mode via C2PMSG_16, and set feature-disable bits
 * in SCRATCH_10.
 */
static void imu_v11_0_setup(struct amdgpu_device *adev)
{
	int imu_reg_val;

	//enable IMU debug mode
	WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_ACCESS_CTRL0, 0xffffff);
	WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_ACCESS_CTRL1, 0xffff);

	if (adev->gfx.imu.mode == DEBUG_MODE) {
		imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_16);
		imu_reg_val |= 0x1;	/* bit 0 presumably signals debug mode to the IMU */
		WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_16, imu_reg_val);
	}

	//disable imu Rtavfs, SmsRepair, DfllBTC, and ClkB
	imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10);
	imu_reg_val |= 0x10007;
	WREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10, imu_reg_val);
}
/*
 * Release the IMU core from reset (clear GFX_IMU_CORE_CTRL.CRESET) and
 * wait for it to report ready. On APUs, additionally ask the SMU to
 * power up gfx via the IMU.
 *
 * Returns 0 on success or -ETIMEDOUT from the reset-status poll.
 */
static int imu_v11_0_start(struct amdgpu_device *adev)
{
	int imu_reg_val;

	//Start IMU by set GFX_IMU_CORE_CTRL.CRESET = 0
	imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_CORE_CTRL);
	imu_reg_val &= 0xfffffffe;
	WREG32_SOC15(GC, 0, regGFX_IMU_CORE_CTRL, imu_reg_val);

	if (adev->flags & AMD_IS_APU)
		amdgpu_dpm_set_gfx_power_up_by_imu(adev);

	return imu_v11_0_wait_for_reset_status(adev);
}
/*
 * Golden register settings programmed into the IMU RLC RAM for GC 11.0.0.
 * Each entry is (hwip, instance, register, data, addr_mask); the addr_mask
 * is OR'ed into the register offset when written — see program_imu_rlc_ram().
 */
static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11[] =
{
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_RD_COMBINE_FLUSH, 0x00055555, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_WR_COMBINE_FLUSH, 0x00055555, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_DRAM_COMBINE_FLUSH, 0x00555555, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC2, 0x00001ffe, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_CREDITS , 0x003f3fff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_TAG_RESERVE1, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE0, 0x00041000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE1, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE0, 0x00040000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE1, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC, 0x00000017, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_ENABLE, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_CREDITS , 0x003f3fbf, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE0, 0x10201000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE1, 0x00000080, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE0, 0x1d041040, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE1, 0x80000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_IO_PRIORITY, 0x88888888, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MAM_CTRL, 0x0000d800, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ARB_FINAL, 0x000003f7, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ENABLE, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0x000fffff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MISC, 0x0c48bff0, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SA_UNIT_DISABLE, 0x00fffc01, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_PRIM_CONFIG, 0x000fffe1, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_RB_BACKEND_DISABLE, 0x0fffff01, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000500, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_START, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_END, 0x000fffff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_TOP_OF_DRAM_SLOT1, 0xff800000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_LOWER_TOP_OF_DRAM2, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_UPPER_TOP_OF_DRAM2, 0x00000fff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, 0x00001ffc, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000501, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL, 0x00080603, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL2, 0x00000003, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL3, 0x00100003, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL5, 0x00003fe0, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000545, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_0, 0x13455431, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_1, 0x13455431, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_2, 0x76027602, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_3, 0x76207620, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000345, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCUTCL2_HARVEST_BYPASS_GROUPS, 0x0000003e, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_BASE, 0x00006000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_TOP, 0x000061ff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BASE, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BOT, 0x00000002, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_TOP, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA0_UCODE_SELFLOAD_CONTROL, 0x00000210, 0),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA1_UCODE_SELFLOAD_CONTROL, 0x00000210, 0),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPC_PSP_DEBUG, CPC_PSP_DEBUG__GPA_OVERRIDE_MASK, 0),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0)
};
/*
 * Golden register settings programmed into the IMU RLC RAM for GC 11.0.2.
 * Same entry format as imu_rlc_ram_golden_11 above.
 */
static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_2[] =
{
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MISC, 0x0c48bff0, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_CREDITS, 0x003f3fbf, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE0, 0x10200800, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE1, 0x00000088, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE0, 0x1d041040, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE1, 0x80000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_IO_PRIORITY, 0x88888888, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MAM_CTRL, 0x0000d800, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ARB_FINAL, 0x000007ef, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_DRAM_PAGE_BURST, 0x20080200, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ENABLE, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0x000fffff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_RD_COMBINE_FLUSH, 0x00055555, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_WR_COMBINE_FLUSH, 0x00055555, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_DRAM_COMBINE_FLUSH, 0x00555555, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC2, 0x00001ffe, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_CREDITS, 0x003f3fff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_TAG_RESERVE1, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE0, 0x00041000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE1, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE0, 0x00040000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE1, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC, 0x00000017, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_ENABLE, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SA_UNIT_DISABLE, 0x00fffc01, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_PRIM_CONFIG, 0x000fffe1, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_RB_BACKEND_DISABLE, 0x00000f01, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL1_PIPE_STEER, 0x000000e4, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCH_PIPE_STEER, 0x000000e4, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_0, 0x01231023, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000243, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCUTCL2_HARVEST_BYPASS_GROUPS, 0x00000002, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000500, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_START, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_END, 0x000001ff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_BASE, 0x00006000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_TOP, 0x000061ff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_TOP_OF_DRAM_SLOT1, 0xff800000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_LOWER_TOP_OF_DRAM2, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_UPPER_TOP_OF_DRAM2, 0x00000fff, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BASE, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BOT, 0x00000002, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_TOP, 0x00000000, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, 0x00001ffc, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00002825, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000501, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL, 0x00080603, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL2, 0x00000003, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL3, 0x00100003, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL5, 0x00003fe0, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000001, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA0_UCODE_SELFLOAD_CONTROL, 0x00000210, 0),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA1_UCODE_SELFLOAD_CONTROL, 0x00000210, 0),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPC_PSP_DEBUG, CPC_PSP_DEBUG__GPA_OVERRIDE_MASK, 0),
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0)
};
/*
 * Stream a table of golden register settings into the IMU RLC RAM.
 *
 * For each entry, the absolute register offset (with the entry's
 * addr_mask OR'ed into it) and the data word are written through the
 * RLC_RAM_ADDR/RLC_RAM_DATA window. The AGP and FB-location registers
 * get runtime values (VRAM layout) instead of the table data. A final
 * all-zero record terminates the list.
 */
static void program_imu_rlc_ram(struct amdgpu_device *adev,
				const struct imu_rlc_ram_golden *regs,
				const u32 array_size)
{
	const struct imu_rlc_ram_golden *entry;
	u32 reg, data;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		/* resolve IP-relative offset to an absolute register offset */
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
		reg |= entry->addr_mask;	/* addr_mask occupies high bits of the RAM word */
		data = entry->data;
		/* AGP/FB registers are overridden with the live VRAM layout */
		if (entry->reg == regGCMC_VM_AGP_BASE)
			data = 0x00ffffff;
		else if (entry->reg == regGCMC_VM_AGP_TOP)
			data = 0x0;
		else if (entry->reg == regGCMC_VM_FB_LOCATION_BASE)
			data = adev->gmc.vram_start >> 24;
		else if (entry->reg == regGCMC_VM_FB_LOCATION_TOP)
			data = adev->gmc.vram_end >> 24;

		WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
		WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, reg);
		WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, data);
	}
	//Indicate the latest entry
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, 0);
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, 0);
}
/*
 * Program the per-IP-version golden register list into the RLC RAM and
 * mark the RAM contents valid. BUG()s on an unhandled GC version so a
 * new ASIC cannot silently start with unprogrammed golden settings.
 */
static void imu_v11_0_program_rlc_ram(struct amdgpu_device *adev)
{
	u32 reg_data;

	/* index 0x2 selects the RLC RAM write window — TODO confirm */
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
		program_imu_rlc_ram(adev, imu_rlc_ram_golden_11,
				(const u32)ARRAY_SIZE(imu_rlc_ram_golden_11));
		break;
	case IP_VERSION(11, 0, 2):
		program_imu_rlc_ram(adev, imu_rlc_ram_golden_11_0_2,
				(const u32)ARRAY_SIZE(imu_rlc_ram_golden_11_0_2));
		break;
	case IP_VERSION(11, 0, 3):
		imu_v11_0_3_program_rlc_ram(adev);
		break;
	default:
		BUG();
		break;
	}

	//Indicate the contents of the RAM are valid
	reg_data = RREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX);
	reg_data |= GFX_IMU_RLC_RAM_INDEX__RAM_VALID_MASK;
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, reg_data);
}
/* IMU callback table used by the gfx v11 code */
const struct amdgpu_imu_funcs gfx_v11_0_imu_funcs = {
	.init_microcode = imu_v11_0_init_microcode,
	.load_microcode = imu_v11_0_load_microcode,
	.setup_imu = imu_v11_0_setup,
	.start_imu = imu_v11_0_start,
	.program_rlc_ram = imu_v11_0_program_rlc_ram,
	.wait_for_reset_status = imu_v11_0_wait_for_reset_status,
};
|
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "fw/api/coex.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-debug.h"
/*
 * Channel-inhibition bitmasks sent to the BT coex firmware, indexed by
 * 2.4 GHz channel number (entry 0 is a dummy). Per channel:
 * 20MHz / 40MHz below / 40Mhz above
 */
static const __le64 iwl_ci_mask[][3] = {
	/* dummy entry for channel 0 */
	{cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
	{
		cpu_to_le64(0x0000001FFFULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x00007FFFFFULL),
	},
	{
		cpu_to_le64(0x000000FFFFULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x0003FFFFFFULL),
	},
	{
		cpu_to_le64(0x000003FFFCULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x000FFFFFFCULL),
	},
	{
		cpu_to_le64(0x00001FFFE0ULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x007FFFFFE0ULL),
	},
	{
		cpu_to_le64(0x00007FFF80ULL),
		cpu_to_le64(0x00007FFFFFULL),
		cpu_to_le64(0x01FFFFFF80ULL),
	},
	{
		cpu_to_le64(0x0003FFFC00ULL),
		cpu_to_le64(0x0003FFFFFFULL),
		cpu_to_le64(0x0FFFFFFC00ULL),
	},
	{
		cpu_to_le64(0x000FFFF000ULL),
		cpu_to_le64(0x000FFFFFFCULL),
		cpu_to_le64(0x3FFFFFF000ULL),
	},
	{
		cpu_to_le64(0x007FFF8000ULL),
		cpu_to_le64(0x007FFFFFE0ULL),
		cpu_to_le64(0xFFFFFF8000ULL),
	},
	{
		cpu_to_le64(0x01FFFE0000ULL),
		cpu_to_le64(0x01FFFFFF80ULL),
		cpu_to_le64(0xFFFFFE0000ULL),
	},
	{
		cpu_to_le64(0x0FFFF00000ULL),
		cpu_to_le64(0x0FFFFFFC00ULL),
		cpu_to_le64(0x0ULL),
	},
	{
		cpu_to_le64(0x3FFFC00000ULL),
		cpu_to_le64(0x3FFFFFF000ULL),
		cpu_to_le64(0x0)
	},
	{
		cpu_to_le64(0xFFFE000000ULL),
		cpu_to_le64(0xFFFFFF8000ULL),
		cpu_to_le64(0x0)
	},
	{
		cpu_to_le64(0xFFF8000000ULL),
		cpu_to_le64(0xFFFFFE0000ULL),
		cpu_to_le64(0x0)
	},
	{
		cpu_to_le64(0xFE00000000ULL),
		cpu_to_le64(0x0ULL),
		cpu_to_le64(0x0ULL)
	},
};
/*
 * Look up which BT coex LUT applies to @vif's current channel.
 *
 * Returns BT_COEX_INVALID_LUT when the vif has no channel context or is
 * not on 2.4 GHz; otherwise the LUT the firmware reported for the vif's
 * PHY context (primary or secondary channel), defaulting to
 * BT_COEX_TX_DIS_LUT. May be called without mvm->mutex — see the racy
 * Tx-path note below.
 */
static enum iwl_bt_coex_lut_type
iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
{
	struct ieee80211_chanctx_conf *chanctx_conf;
	enum iwl_bt_coex_lut_type ret;
	u16 phy_ctx_id;
	u32 primary_ch_phy_id, secondary_ch_phy_id;

	/*
	 * Checking that we hold mvm->mutex is a good idea, but the rate
	 * control can't acquire the mutex since it runs in Tx path.
	 * So this is racy in that case, but in the worst case, the AMPDU
	 * size limit will be wrong for a short time which is not a big
	 * issue.
	 */
	rcu_read_lock();

	chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);

	if (!chanctx_conf ||
	    chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
		rcu_read_unlock();
		return BT_COEX_INVALID_LUT;
	}

	ret = BT_COEX_TX_DIS_LUT;

	/* the PHY context id lives at the start of the chanctx drv_priv */
	phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
	primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
	secondary_ch_phy_id =
		le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);

	if (primary_ch_phy_id == phy_ctx_id)
		ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
	else if (secondary_ch_phy_id == phy_ctx_id)
		ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
	/* else - default = TX TX disallowed */

	rcu_read_unlock();

	return ret;
}
/*
 * Send the initial BT_CONFIG command to the firmware.
 *
 * In forced-antenna mode only the coex mode field is set; otherwise
 * normal network coex (BT_COEX_NW) is requested with the optional
 * modules (SYNC2SCO, MPLUT, high-band retention) enabled as applicable.
 * The cached BT notification/CI state is cleared before sending.
 * Caller must hold mvm->mutex.
 */
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm)
{
	struct iwl_bt_coex_cmd bt_cmd = {};
	u32 mode;

	lockdep_assert_held(&mvm->mutex);

	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
		switch (mvm->bt_force_ant_mode) {
		case BT_FORCE_ANT_BT:
			mode = BT_COEX_BT;
			break;
		case BT_FORCE_ANT_WIFI:
			mode = BT_COEX_WIFI;
			break;
		default:
			WARN_ON(1);
			mode = 0;
		}

		bt_cmd.mode = cpu_to_le32(mode);
		goto send_cmd;
	}

	bt_cmd.mode = cpu_to_le32(BT_COEX_NW);

	if (IWL_MVM_BT_COEX_SYNC2SCO)
		bt_cmd.enabled_modules |=
			cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);

	if (iwl_mvm_is_mplut_supported(mvm))
		bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);

	bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);

send_cmd:
	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));

	return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
}
/*
 * Ask the firmware to enable or disable reduced Tx power for @sta_id.
 *
 * Returns 0 without sending anything on AX210+ devices, when the station
 * is unknown, or when the requested state is already set. The update
 * command is sent asynchronously.
 */
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
				       bool enable)
{
	struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
	struct iwl_mvm_sta *mvmsta;
	u32 value;

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 0;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (!mvmsta)
		return 0;

	/* nothing to do */
	if (mvmsta->bt_reduced_txpower == enable)
		return 0;

	/* payload encodes the station id plus the enable bit */
	value = mvmsta->deflink.sta_id;
	if (enable)
		value |= BT_REDUCED_TX_POWER_BIT;

	IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
		       enable ? "en" : "dis", sta_id);

	cmd.reduced_txp = cpu_to_le32(value);
	mvmsta->bt_reduced_txpower = enable;

	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP,
				    CMD_ASYNC, sizeof(cmd), &cmd);
}
/* State accumulated while iterating interfaces for a BT notification */
struct iwl_bt_iterator_data {
	struct iwl_bt_coex_prof_old_notif *notif;	/* notification being processed */
	struct iwl_mvm *mvm;
	struct ieee80211_chanctx_conf *primary;		/* primary 2.4 GHz channel ctx */
	struct ieee80211_chanctx_conf *secondary;	/* secondary 2.4 GHz channel ctx */
	bool primary_ll;	/* primary belongs to a low-latency vif */
	u8 primary_load;	/* TCM load of the primary ctx's vif */
	u8 secondary_load;	/* TCM load of the secondary ctx's vif */
};
/*
 * Arm (enable=true) or disarm (enable=false) the beacon-filter RSSI
 * thresholds that drive the reduced-Tx-power decision; @rssi is recorded
 * as the last coex event value.
 */
static inline
void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif_link_info *link_info,
				       bool enable, int rssi)
{
	link_info->bf_data.last_bt_coex_event = rssi;
	link_info->bf_data.bt_coex_max_thold =
		enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
	link_info->bf_data.bt_coex_min_thold =
		enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
}
#define MVM_COEX_TCM_PERIOD (HZ * 10)
/*
 * Periodically (at most once per MVM_COEX_TCM_PERIOD) re-evaluate which
 * channel context should be primary based on traffic load. A low-latency
 * primary always keeps its role; otherwise the busier context wins.
 */
static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
					 struct iwl_bt_iterator_data *data)
{
	unsigned long ts = jiffies;

	/* rate limit: skip unless a full period has elapsed */
	if (!time_after(ts, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD))
		return;

	mvm->bt_coex_last_tcm_ts = ts;

	/* We assume here that we don't have more than 2 vifs on 2.4GHz */

	/* a low-latency primary is never demoted; otherwise promote the
	 * secondary when it carries strictly more load
	 */
	if (!data->primary_ll && data->primary_load < data->secondary_load)
		swap(data->primary, data->secondary);
}
/*
 * This function receives the LB link id and checks if eSR should be
 * enabled or disabled (due to BT coex).
 *
 * Returns true when eSR may be used, false when it must not:
 *  - BT off (wifi_loss_low_rssi == BT_OFF): always allowed;
 *  - otherwise, if this link is the primary one: not allowed;
 *  - otherwise the firmware-reported expected wifi-loss rate (picked by
 *    RSSI bucket, with hysteresis between the enable/disable thresholds)
 *    is compared against IWL_MVM_BT_COEX_WIFI_LOSS_THRESH.
 */
bool
iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   s32 link_rssi,
				   bool primary)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	/* wifi-loss reporting exists in newer notification versions only */
	bool have_wifi_loss_rate =
		iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
					BT_PROFILE_NOTIFICATION, 0) > 4 ||
		iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
					PROFILE_NOTIF, 0) >= 1;
	u8 wifi_loss_mid_high_rssi;
	u8 wifi_loss_low_rssi;
	u8 wifi_loss_rate;

	if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
				    PROFILE_NOTIF, 0) >= 1) {
		/* For now, we consider 2.4 GHz band / ANT_A only */
		wifi_loss_mid_high_rssi =
			mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0];
		wifi_loss_low_rssi =
			mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0];
	} else {
		wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
		wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi;
	}

	/* BT is off - eSR is always fine */
	if (wifi_loss_low_rssi == BT_OFF)
		return true;

	if (primary)
		return false;

	/* The feature is not supported */
	if (!have_wifi_loss_rate)
		return true;

	/*
	 * In case we don't know the RSSI - take the lower wifi loss,
	 * so we will more likely enter eSR, and if RSSI is low -
	 * we will get an update on this and exit eSR.
	 */
	if (!link_rssi)
		wifi_loss_rate = wifi_loss_mid_high_rssi;
	else if (mvmvif->esr_active)
		/* RSSI needs to get really low to disable eSR... */
		wifi_loss_rate =
			link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
				wifi_loss_low_rssi :
				wifi_loss_mid_high_rssi;
	else
		/* ...And really high before we enable it back */
		wifi_loss_rate =
			link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
				wifi_loss_low_rssi :
				wifi_loss_mid_high_rssi;

	return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
}
/*
 * Re-evaluate eSR for @link_id after a coex/RSSI update, and exit eSR
 * (staying on the primary link) when coex no longer allows it. Only
 * relevant for authorized MLD interfaces with a valid link.
 */
void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     int link_id)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];

	if (!ieee80211_vif_is_mld(vif) ||
	    !iwl_mvm_vif_from_mac80211(vif)->authorized ||
	    WARN_ON(!link))
		return;

	if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif,
						(s8)link->beacon_stats.avg_signal,
						link_id == iwl_mvm_get_primary_link(vif)))
		/* In case we decided to exit eSR - stay with the primary */
		iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX,
				 iwl_mvm_get_primary_link(vif));
}
/*
 * Apply the latest BT notification to one link of @vif: update SMPS
 * requests according to BT activity, re-evaluate eSR, maintain the
 * primary/secondary channel-context bookkeeping used for the CI
 * command, and manage reduced Tx power plus its RSSI monitoring.
 * Caller must hold mvm->mutex.
 */
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif,
				      struct iwl_bt_iterator_data *data,
				      unsigned int link_id)
{
	/* default smps_mode is AUTOMATIC - only used for client modes */
	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 bt_activity_grading, min_ag_for_static_smps;
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct iwl_mvm_vif_link_info *link_info;
	struct ieee80211_bss_conf *link_conf;
	int ave_rssi;

	lockdep_assert_held(&mvm->mutex);

	link_info = mvmvif->link[link_id];
	if (!link_info)
		return;

	link_conf = rcu_dereference(vif->link_conf[link_id]);
	/* This can happen due to races: if we receive the notification
	 * and have the mutex held, while mac80211 is stuck on our mutex
	 * in the middle of removing the link.
	 */
	if (!link_conf)
		return;

	chanctx_conf = rcu_dereference(link_conf->chanctx_conf);

	/* If channel context is invalid or not on 2.4GHz .. */
	if ((!chanctx_conf ||
	     chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
		if (vif->type == NL80211_IFTYPE_STATION) {
			/* ... relax constraints and disable rssi events */
			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
					    smps_mode, link_id);
			iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
						    false);
			iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false,
							  0);
		}
		return;
	}

	iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);

	/* newer coex schema tolerates more BT activity before static SMPS */
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
		min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
	else
		min_ag_for_static_smps = BT_HIGH_TRAFFIC;

	bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
	if (bt_activity_grading >= min_ag_for_static_smps)
		smps_mode = IEEE80211_SMPS_STATIC;
	else if (bt_activity_grading >= BT_LOW_TRAFFIC)
		smps_mode = IEEE80211_SMPS_DYNAMIC;

	/* relax SMPS constraints for next association */
	if (!vif->cfg.assoc)
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	if (link_info->phy_ctxt &&
	    (mvm->last_bt_notif.rrc_status & BIT(link_info->phy_ctxt->id)))
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	IWL_DEBUG_COEX(data->mvm,
		       "mac %d link %d: bt_activity_grading %d smps_req %d\n",
		       mvmvif->id, link_info->fw_link_id,
		       bt_activity_grading, smps_mode);

	if (vif->type == NL80211_IFTYPE_STATION)
		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
				    smps_mode, link_id);

	/* low latency is always primary */
	if (iwl_mvm_vif_low_latency(mvmvif)) {
		data->primary_ll = true;

		data->secondary = data->primary;
		data->primary = chanctx_conf;
	}

	if (vif->type == NL80211_IFTYPE_AP) {
		if (!mvmvif->ap_ibss_active)
			return;

		if (chanctx_conf == data->primary)
			return;

		if (!data->primary_ll) {
			/*
			 * downgrade the current primary no matter what its
			 * type is.
			 */
			data->secondary = data->primary;
			data->primary = chanctx_conf;
		} else {
			/* there is low latency vif - we will be secondary */
			data->secondary = chanctx_conf;
		}

		/* FIXME: TCM load per interface? or need something per link? */
		if (data->primary == chanctx_conf)
			data->primary_load = mvm->tcm.result.load[mvmvif->id];
		else if (data->secondary == chanctx_conf)
			data->secondary_load = mvm->tcm.result.load[mvmvif->id];
		return;
	}

	/*
	 * STA / P2P Client, try to be primary if first vif. If we are in low
	 * latency mode, we are already in primary and just don't do much
	 */
	if (!data->primary || data->primary == chanctx_conf)
		data->primary = chanctx_conf;
	else if (!data->secondary)
		/* if secondary is not NULL, it might be a GO */
		data->secondary = chanctx_conf;

	/* FIXME: TCM load per interface? or need something per link? */
	if (data->primary == chanctx_conf)
		data->primary_load = mvm->tcm.result.load[mvmvif->id];
	else if (data->secondary == chanctx_conf)
		data->secondary_load = mvm->tcm.result.load[mvmvif->id];
	/*
	 * don't reduce the Tx power if one of these is true:
	 *  we are in LOOSE
	 *  BT is inactive
	 *  we are not associated
	 */
	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
	    le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF ||
	    !vif->cfg.assoc) {
		iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false);
		iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, 0);
		return;
	}

	/* try to get the avg rssi from fw */
	ave_rssi = link_info->bf_data.ave_beacon_signal;

	/* if the RSSI isn't valid, fake it is very low */
	if (!ave_rssi)
		ave_rssi = -100;
	if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
		if (iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
						true))
			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
	} else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
		if (iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
						false))
			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
	}

	/* Begin to monitor the RSSI: it may influence the reduced Tx power */
	iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, true, ave_rssi);
}
/* must be called under rcu_read_lock */
static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
				      struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_bt_iterator_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	unsigned int link_id;

	lockdep_assert_held(&mvm->mutex);

	/* only station interfaces and active APs are of interest */
	if (vif->type == NL80211_IFTYPE_AP) {
		if (!mvmvif->ap_ibss_active)
			return;
	} else if (vif->type != NL80211_IFTYPE_STATION) {
		return;
	}

	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
		iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
/* must be called under rcu_read_lock */
/*
 * Per-interface iterator: for every link of a station interface whose
 * channel context is on the 2.4 GHz band, re-evaluate the link's eSR
 * state against the latest BT-coex data (via
 * iwl_mvm_bt_coex_update_link_esr).
 */
static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = _data;
lockdep_assert_held(&mvm->mutex);
/* only client interfaces are affected here */
if (vif->type != NL80211_IFTYPE_STATION)
return;
for (int link_id = 0;
link_id < IEEE80211_MLD_MAX_NUM_LINKS;
link_id++) {
/*
 * Both dereferences are safe without rcu_read_lock because
 * mvm->mutex is held (asserted above).
 */
struct ieee80211_bss_conf *link_conf =
rcu_dereference_check(vif->link_conf[link_id],
lockdep_is_held(&mvm->mutex));
struct ieee80211_chanctx_conf *chanctx_conf =
rcu_dereference_check(link_conf->chanctx_conf,
lockdep_is_held(&mvm->mutex));
/* skip links that are inactive or not on 2.4 GHz */
if ((!chanctx_conf ||
chanctx_conf->def.chan->band != NL80211_BAND_2GHZ))
continue;
iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
}
}
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
{
struct iwl_bt_iterator_data data = {
.mvm = mvm,
.notif = &mvm->last_bt_notif,
};
struct iwl_bt_coex_ci_cmd cmd = {};
u8 ci_bw_idx;
/* Ignore updates if we are in force mode */
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
return;
rcu_read_lock();
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
rcu_read_unlock();
return;
}
iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
if (data.primary) {
struct ieee80211_chanctx_conf *chan = data.primary;
if (WARN_ON(!chan->def.chan)) {
rcu_read_unlock();
return;
}
if (chan->def.width < NL80211_CHAN_WIDTH_40) {
ci_bw_idx = 0;
} else {
if (chan->def.center_freq1 >
chan->def.chan->center_freq)
ci_bw_idx = 2;
else
ci_bw_idx = 1;
}
cmd.bt_primary_ci =
iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
cmd.primary_ch_phy_id =
cpu_to_le32(*((u16 *)data.primary->drv_priv));
}
if (data.secondary) {
struct ieee80211_chanctx_conf *chan = data.secondary;
if (WARN_ON(!data.secondary->def.chan)) {
rcu_read_unlock();
return;
}
if (chan->def.width < NL80211_CHAN_WIDTH_40) {
ci_bw_idx = 0;
} else {
if (chan->def.center_freq1 >
chan->def.chan->center_freq)
ci_bw_idx = 2;
else
ci_bw_idx = 1;
}
cmd.bt_secondary_ci =
iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
cmd.secondary_ch_phy_id =
cpu_to_le32(*((u16 *)data.secondary->drv_priv));
}
rcu_read_unlock();
/* Don't spam the fw with the same command over and over */
if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
sizeof(cmd), &cmd))
IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
}
}
/*
 * RX handler for the legacy BT-coex profile notification: log its
 * fields, cache it in mvm->last_bt_notif for later RSSI decisions, and
 * run the coex handling pass.
 */
void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_prof_old_notif *notif = (void *)pkt->data;
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
le32_to_cpu(notif->primary_ch_lut));
IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
le32_to_cpu(notif->secondary_ch_lut));
IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
le32_to_cpu(notif->bt_activity_grading));
/* remember this notification for future use: rssi fluctuations */
memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
iwl_mvm_bt_coex_notif_handle(mvm);
}
/*
 * RX handler for the (newer) BT-coex profile notification: cache the
 * wifi-loss data and re-evaluate every active interface's links via
 * iwl_mvm_bt_coex_notif_iterator.
 */
void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
const struct iwl_rx_packet *pkt = rxb_addr(rxb);
const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
lockdep_assert_held(&mvm->mutex);
/* keep the latest notification for the per-link update below */
mvm->last_bt_wifi_loss = *notif;
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_coex_notif_iterator,
mvm);
}
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
return;
/*
* Rssi update while not associated - can happen since the statistics
* are handled asynchronously
*/
if (mvmvif->deflink.ap_sta_id == IWL_INVALID_STA)
return;
/* No BT - reports should be disabled */
if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF)
return;
IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
/*
* Check if rssi is good enough for reduced Tx power, but not in loose
* scheme.
*/
if (rssi_event == RSSI_EVENT_LOW ||
iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
ret = iwl_mvm_bt_coex_reduced_txp(mvm,
mvmvif->deflink.ap_sta_id,
false);
else
ret = iwl_mvm_bt_coex_reduced_txp(mvm,
mvmvif->deflink.ap_sta_id,
true);
if (ret)
IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
}
/* A-MPDU aggregation time limits: the default, and the reduced value
 * used while BT traffic is high under a tight coex scheme.
 */
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)

/*
 * Return the aggregation time limit to use for @sta, shortening it only
 * when BT traffic is high and the coex LUT is neither loose nor invalid.
 */
u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->deflink.phy_ctxt;
	enum iwl_bt_coex_lut_type lut_type;

	/* this phy is in TTC - no limiting needed */
	if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id))
		return LINK_QUAL_AGG_TIME_LIMIT_DEF;

	if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
	    BT_HIGH_TRAFFIC)
		return LINK_QUAL_AGG_TIME_LIMIT_DEF;

	lut_type = iwl_get_coex_type(mvm, mvmsta->vif);

	/* tight coex, high bt traffic, reduce AGG time limit */
	if (lut_type != BT_COEX_LOOSE_LUT && lut_type != BT_COEX_INVALID_LUT)
		return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;

	return LINK_QUAL_AGG_TIME_LIMIT_DEF;
}
/*
 * Decide whether MIMO (using both antennas) is allowed for @sta given
 * the current BT activity. Returns true when BT poses no constraint,
 * false only in the loose coex scheme with high BT traffic.
 */
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->deflink.phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
/* this phy is in TTC - BT doesn't constrain us */
if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id))
return true;
/* BT isn't busy enough to matter */
if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
BT_HIGH_TRAFFIC)
return true;
/*
 * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
 * since BT is already killed.
 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
 * we Tx.
 * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
 */
lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
return lut_type != BT_COEX_LOOSE_LUT;
}
/*
 * An antenna mask is usable if it includes a non-shared antenna, or if
 * BT activity is below the high-traffic grading.
 */
bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
{
	return (ant & mvm->cfg->non_shared_ant) ||
	       le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
		       BT_HIGH_TRAFFIC;
}
/*
 * The BT-shared antenna is available to WiFi only while BT activity is
 * below the high-traffic grading.
 */
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
{
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
/*
 * Transmit power control is relevant to BT coex only on 2.4 GHz and
 * only while BT shows at least low activity.
 */
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
				    enum nl80211_band band)
{
	if (band != NL80211_BAND_2GHZ)
		return false;

	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
	       BT_LOW_TRAFFIC;
}
/*
 * Pick the antenna mask to use for single-antenna transmission: prefer
 * the non-shared antenna when the firmware supports coex schema 2 and
 * that antenna is enabled, otherwise fall back to the first enabled one.
 */
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants)
{
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
(mvm->cfg->non_shared_ant & enabled_ants))
return mvm->cfg->non_shared_ant;
return first_antenna(enabled_ants);
}
/*
 * Return the BT-coex Tx priority (0 = lowest, 3 = highest) for a frame,
 * based on its type, its access category and whether MPLUT is
 * supported. Only meaningful on 2.4 GHz; other bands always get 0. A
 * non-zero mvm->bt_tx_prio forces a fixed priority (value - 1).
 */
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac)
{
__le16 fc = hdr->frame_control;
bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
if (info->band != NL80211_BAND_2GHZ)
return 0;
/* debug/test override of the priority */
if (unlikely(mvm->bt_tx_prio))
return mvm->bt_tx_prio - 1;
if (likely(ieee80211_is_data(fc))) {
if (likely(ieee80211_is_data_qos(fc))) {
/* QoS data: priority tracks the access category */
switch (ac) {
case IEEE80211_AC_BE:
return mplut_enabled ? 1 : 0;
case IEEE80211_AC_VI:
return mplut_enabled ? 2 : 3;
case IEEE80211_AC_VO:
return 3;
default:
return 0;
}
} else if (is_multicast_ether_addr(hdr->addr1)) {
/* non-QoS multicast data gets top priority */
return 3;
} else
return 0;
} else if (ieee80211_is_mgmt(fc)) {
/* management frames matter, except disassoc */
return ieee80211_is_disassoc(fc) ? 0 : 3;
} else if (ieee80211_is_ctl(fc)) {
/* ignore cfend and cfendack frames as we never send those */
return 3;
}
return 0;
}
/*
 * Called when an interface changes (added/removed/reconfigured):
 * re-run the BT-coex notification handling so the CI command and
 * primary/secondary selection reflect the new interface set.
 */
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
iwl_mvm_bt_coex_notif_handle(mvm);
}
|
/*
* Copyright (C) 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _sdma1_4_2_2_SH_MASK_HEADER
#define _sdma1_4_2_2_SH_MASK_HEADER
// addressBlock: sdma1_sdma1dec
//SDMA1_UCODE_ADDR
#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0
#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL
//SDMA1_UCODE_DATA
#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0
#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
//SDMA1_VM_CNTL
#define SDMA1_VM_CNTL__CMD__SHIFT 0x0
#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL
//SDMA1_VM_CTX_LO
#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2
#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_VM_CTX_HI
#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0
#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_ACTIVE_FCN_ID
#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
//SDMA1_VM_CTX_CNTL
#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0
#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4
#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L
#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L
//SDMA1_VIRT_RESET_REQ
#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0
#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f
#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L
//SDMA1_VF_ENABLE
#define SDMA1_VF_ENABLE__VF_ENABLE__SHIFT 0x0
#define SDMA1_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
//SDMA1_CONTEXT_REG_TYPE0
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL__SHIFT 0x0
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE__SHIFT 0x1
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI__SHIFT 0x2
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR__SHIFT 0x3
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI__SHIFT 0x4
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR__SHIFT 0x5
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI__SHIFT 0x6
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL__SHIFT 0xa
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR__SHIFT 0xb
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET__SHIFT 0xc
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO__SHIFT 0xd
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI__SHIFT 0xe
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE__SHIFT 0xf
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL__SHIFT 0x10
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS__SHIFT 0x11
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL__SHIFT 0x12
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL__SHIFT 0x13
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL_MASK 0x00000001L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_MASK 0x00000002L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI_MASK 0x00000004L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_MASK 0x00000008L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI_MASK 0x00000010L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_MASK 0x00000020L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI_MASK 0x00000040L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL_MASK 0x00000400L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR_MASK 0x00000800L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET_MASK 0x00001000L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO_MASK 0x00002000L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI_MASK 0x00004000L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE_MASK 0x00008000L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL_MASK 0x00010000L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS_MASK 0x00020000L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL_MASK 0x00040000L
#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL_MASK 0x00080000L
//SDMA1_CONTEXT_REG_TYPE1
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS__SHIFT 0x8
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG__SHIFT 0x9
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK__SHIFT 0xa
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET__SHIFT 0xb
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO__SHIFT 0xc
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI__SHIFT 0xd
#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN__SHIFT 0xf
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT__SHIFT 0x10
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG__SHIFT 0x11
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL__SHIFT 0x14
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS_MASK 0x00000100L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG_MASK 0x00000200L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK_MASK 0x00000400L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET_MASK 0x00000800L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO_MASK 0x00001000L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI_MASK 0x00002000L
#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN_MASK 0x00008000L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT_MASK 0x00010000L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG_MASK 0x00020000L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL_MASK 0x00100000L
#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
//SDMA1_CONTEXT_REG_TYPE2
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0__SHIFT 0x0
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1__SHIFT 0x1
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2__SHIFT 0x2
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3__SHIFT 0x3
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4__SHIFT 0x4
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5__SHIFT 0x5
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6__SHIFT 0x6
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7__SHIFT 0x7
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8__SHIFT 0x8
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL__SHIFT 0x9
#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0_MASK 0x00000001L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1_MASK 0x00000002L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2_MASK 0x00000004L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3_MASK 0x00000008L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4_MASK 0x00000010L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5_MASK 0x00000020L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6_MASK 0x00000040L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7_MASK 0x00000080L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8_MASK 0x00000100L
#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL_MASK 0x00000200L
#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
//SDMA1_CONTEXT_REG_TYPE3
#define SDMA1_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
#define SDMA1_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
//SDMA1_PUB_REG_TYPE0
#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR__SHIFT 0x0
#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA__SHIFT 0x1
#define SDMA1_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL__SHIFT 0x4
#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO__SHIFT 0x5
#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI__SHIFT 0x6
#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID__SHIFT 0x7
#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL__SHIFT 0x8
#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ__SHIFT 0x9
#define SDMA1_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0__SHIFT 0xb
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1__SHIFT 0xc
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2__SHIFT 0xd
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3__SHIFT 0xe
#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0__SHIFT 0xf
#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1__SHIFT 0x10
#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2__SHIFT 0x11
#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3__SHIFT 0x12
#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL__SHIFT 0x13
#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a
#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL__SHIFT 0x1b
#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c
#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d
#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e
#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f
#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR_MASK 0x00000001L
#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA_MASK 0x00000002L
#define SDMA1_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL_MASK 0x00000010L
#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO_MASK 0x00000020L
#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI_MASK 0x00000040L
#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID_MASK 0x00000080L
#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL_MASK 0x00000100L
#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ_MASK 0x00000200L
#define SDMA1_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0_MASK 0x00000800L
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1_MASK 0x00001000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2_MASK 0x00002000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3_MASK 0x00004000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0_MASK 0x00008000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1_MASK 0x00010000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2_MASK 0x00020000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3_MASK 0x00040000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL_MASK 0x00080000L
#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL_MASK 0x08000000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L
#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L
//SDMA1_PUB_REG_TYPE1
#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x0
#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x2
#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3
#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4
#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5
#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6
#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL__SHIFT 0x7
#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8
#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9
#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL__SHIFT 0xa
#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb
#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM__SHIFT 0xc
#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM__SHIFT 0xd
#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12
#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13
#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14
#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15
#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16
#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17
#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18
#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19
#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a
#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b
#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c
#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d
#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS__SHIFT 0x1e
#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1f
#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000001L
#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000004L
#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L
#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L
#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L
#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L
#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL_MASK 0x00000080L
#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L
#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L
#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL_MASK 0x00000400L
#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L
#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM_MASK 0x00001000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM_MASK 0x00002000L
#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS_MASK 0x40000000L
#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS_MASK 0x80000000L
//SDMA1_PUB_REG_TYPE2
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x0
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x1
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x2
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x3
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x4
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x5
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x6
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT__SHIFT 0x7
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE__SHIFT 0x8
#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE__SHIFT 0x9
#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa
#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb
#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc
#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd
#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe
#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM__SHIFT 0xf
#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14
#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15
#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE__SHIFT 0x16
#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL__SHIFT 0x17
#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT__SHIFT 0x18
#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT__SHIFT 0x19
#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b
#define SDMA1_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c
#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL__SHIFT 0x1e
#define SDMA1_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000001L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000002L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000004L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000008L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000010L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000020L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000040L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT_MASK 0x00000080L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE_MASK 0x00000100L
#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE_MASK 0x00000200L
#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L
#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L
#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM_MASK 0x00008000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE_MASK 0x00400000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL_MASK 0x00800000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT_MASK 0x01000000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT_MASK 0x02000000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L
#define SDMA1_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL_MASK 0x40000000L
#define SDMA1_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
//SDMA1_PUB_REG_TYPE3
#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0
#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1
#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x3
#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L
#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L
//SDMA1_MMHUB_CNTL
#define SDMA1_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
#define SDMA1_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
//SDMA1_CONTEXT_GROUP_BOUNDARY
#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
/*
 * SDMA1 per-register bitfield definitions (auto-generated register header).
 *
 * Convention: for each register FOO and field BAR,
 *   FOO__BAR__SHIFT is the field's starting bit position, and
 *   FOO__BAR_MASK   is the contiguous mask covering the field, so that
 *   field value = (reg & FOO__BAR_MASK) >> FOO__BAR__SHIFT.
 * Gaps between consecutive fields (e.g. EDC_COUNTER bit 1, CNTL bits 6-16)
 * are reserved bits in the hardware register.
 * Do not hand-edit individual values; they mirror the hardware spec.
 */
//SDMA1_POWER_CNTL
#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
#define SDMA1_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
#define SDMA1_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
//SDMA1_CLK_CTRL
#define SDMA1_CLK_CTRL__ON_DELAY__SHIFT 0x0
#define SDMA1_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
#define SDMA1_CLK_CTRL__RESERVED__SHIFT 0xc
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
#define SDMA1_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
#define SDMA1_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
#define SDMA1_CLK_CTRL__RESERVED_MASK 0x00FFF000L
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
//SDMA1_CNTL
#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0
#define SDMA1_CNTL__UTC_L1_ENABLE__SHIFT 0x1
#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
#define SDMA1_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L
#define SDMA1_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
#define SDMA1_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
//SDMA1_CHICKEN_BITS
#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
//SDMA1_GB_ADDR_CONFIG
#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
//SDMA1_GB_ADDR_CONFIG_READ
#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
//SDMA1_RB_RPTR_FETCH_HI
#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL
#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
//SDMA1_RB_RPTR_FETCH
#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
//SDMA1_IB_OFFSET_FETCH
#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
//SDMA1_PROGRAM
#define SDMA1_PROGRAM__STREAM__SHIFT 0x0
#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL
//SDMA1_STATUS_REG
#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0
#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1
#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2
#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3
#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9
#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa
#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc
#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe
#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a
#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e
#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L
#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L
#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L
#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L
#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L
#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L
#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L
#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L
#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L
#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
//SDMA1_STATUS1_REG
#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xf
#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
#define SDMA1_STATUS1_REG__EX_START_MASK 0x00008000L
#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
//SDMA1_RD_BURST_CNTL
#define SDMA1_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
#define SDMA1_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
//SDMA1_HBM_PAGE_CONFIG
#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
//SDMA1_UCODE_CHECKSUM
#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0
#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
//SDMA1_F32_CNTL
#define SDMA1_F32_CNTL__HALT__SHIFT 0x0
#define SDMA1_F32_CNTL__STEP__SHIFT 0x1
#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L
#define SDMA1_F32_CNTL__STEP_MASK 0x00000002L
//SDMA1_FREEZE
#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0
#define SDMA1_FREEZE__FREEZE__SHIFT 0x4
#define SDMA1_FREEZE__FROZEN__SHIFT 0x5
#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6
#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L
#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L
#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L
#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L
//SDMA1_PHASE0_QUANTUM
#define SDMA1_PHASE0_QUANTUM__UNIT__SHIFT 0x0
#define SDMA1_PHASE0_QUANTUM__VALUE__SHIFT 0x8
#define SDMA1_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
#define SDMA1_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
#define SDMA1_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
#define SDMA1_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
//SDMA1_PHASE1_QUANTUM
#define SDMA1_PHASE1_QUANTUM__UNIT__SHIFT 0x0
#define SDMA1_PHASE1_QUANTUM__VALUE__SHIFT 0x8
#define SDMA1_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
#define SDMA1_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
#define SDMA1_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
#define SDMA1_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
//SDMA1_EDC_CONFIG
#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1
#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
//SDMA1_BA_THRESHOLD
#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0
#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
//SDMA1_ID
#define SDMA1_ID__DEVICE_ID__SHIFT 0x0
#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL
//SDMA1_VERSION
#define SDMA1_VERSION__MINVER__SHIFT 0x0
#define SDMA1_VERSION__MAJVER__SHIFT 0x8
#define SDMA1_VERSION__REV__SHIFT 0x10
#define SDMA1_VERSION__MINVER_MASK 0x0000007FL
#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L
#define SDMA1_VERSION__REV_MASK 0x003F0000L
//SDMA1_EDC_COUNTER
#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0
#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16
#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17
#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18
#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L
#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L
#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L
#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L
#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L
//SDMA1_EDC_COUNTER_CLEAR
#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
//SDMA1_STATUS2_REG
#define SDMA1_STATUS2_REG__ID__SHIFT 0x0
#define SDMA1_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10
#define SDMA1_STATUS2_REG__ID_MASK 0x00000007L
#define SDMA1_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
//SDMA1_ATOMIC_CNTL
#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
//SDMA1_ATOMIC_PREOP_LO
#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
//SDMA1_ATOMIC_PREOP_HI
#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
//SDMA1_UTCL1_CNTL
#define SDMA1_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
#define SDMA1_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
#define SDMA1_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
#define SDMA1_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
#define SDMA1_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
#define SDMA1_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
//SDMA1_UTCL1_WATERMK
#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9
#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11
#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19
#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL
#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L
#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L
#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L
//SDMA1_UTCL1_RD_STATUS
#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
//SDMA1_UTCL1_WR_STATUS
#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
//SDMA1_UTCL1_INV0
#define SDMA1_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
#define SDMA1_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
#define SDMA1_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
#define SDMA1_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
#define SDMA1_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
#define SDMA1_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
#define SDMA1_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
#define SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
#define SDMA1_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
#define SDMA1_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
#define SDMA1_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
#define SDMA1_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
#define SDMA1_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
#define SDMA1_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
#define SDMA1_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
#define SDMA1_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
#define SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
#define SDMA1_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
//SDMA1_UTCL1_INV1
#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
//SDMA1_UTCL1_INV2
#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
//SDMA1_UTCL1_RD_XNACK0
#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
//SDMA1_UTCL1_RD_XNACK1
#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
//SDMA1_UTCL1_WR_XNACK0
#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
//SDMA1_UTCL1_WR_XNACK1
#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
//SDMA1_UTCL1_TIMEOUT
#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
//SDMA1_UTCL1_PAGE
#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
//SDMA1_POWER_CNTL_IDLE
#define SDMA1_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
#define SDMA1_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
#define SDMA1_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
#define SDMA1_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
#define SDMA1_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
#define SDMA1_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
//SDMA1_RELAX_ORDERING_LUT
#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
//SDMA1_CHICKEN_BITS_2
#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
//SDMA1_STATUS3_REG
#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
#define SDMA1_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
#define SDMA1_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
//SDMA1_PHYSICAL_ADDR_LO
#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
//SDMA1_PHYSICAL_ADDR_HI
#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
//SDMA1_PHASE2_QUANTUM
#define SDMA1_PHASE2_QUANTUM__UNIT__SHIFT 0x0
#define SDMA1_PHASE2_QUANTUM__VALUE__SHIFT 0x8
#define SDMA1_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
#define SDMA1_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
#define SDMA1_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
#define SDMA1_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
//SDMA1_ERROR_LOG
#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0
#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10
#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L
//SDMA1_PUB_DUMMY_REG0
#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
//SDMA1_PUB_DUMMY_REG1
#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
//SDMA1_PUB_DUMMY_REG2
#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
//SDMA1_PUB_DUMMY_REG3
#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
//SDMA1_F32_COUNTER
#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0
#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
//SDMA1_UNBREAKABLE
#define SDMA1_UNBREAKABLE__VALUE__SHIFT 0x0
#define SDMA1_UNBREAKABLE__VALUE_MASK 0x00000001L
//SDMA1_PERFMON_CNTL
#define SDMA1_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
#define SDMA1_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
#define SDMA1_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
#define SDMA1_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
#define SDMA1_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
#define SDMA1_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
#define SDMA1_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
#define SDMA1_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
#define SDMA1_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
#define SDMA1_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
#define SDMA1_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
#define SDMA1_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
//SDMA1_PERFCOUNTER0_RESULT
#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
//SDMA1_PERFCOUNTER1_RESULT
#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
//SDMA1_PERFCOUNTER_TAG_DELAY_RANGE
#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
//SDMA1_CRD_CNTL
#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
//SDMA1_GPU_IOV_VIOLATION_LOG
#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
//SDMA1_ULV_CNTL
#define SDMA1_ULV_CNTL__HYSTERESIS__SHIFT 0x0
#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
#define SDMA1_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
#define SDMA1_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
#define SDMA1_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
#define SDMA1_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
#define SDMA1_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
#define SDMA1_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
#define SDMA1_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
//SDMA1_EA_DBIT_ADDR_DATA
#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
//SDMA1_EA_DBIT_ADDR_INDEX
#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
//SDMA1_GPU_IOV_VIOLATION_LOG2
#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL
//SDMA1_GFX_RB_CNTL
#define SDMA1_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_GFX_RB_BASE
#define SDMA1_GFX_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_GFX_RB_BASE_HI
#define SDMA1_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_GFX_RB_RPTR
#define SDMA1_GFX_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_GFX_RB_RPTR_HI
#define SDMA1_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_GFX_RB_WPTR
#define SDMA1_GFX_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_GFX_RB_WPTR_HI
#define SDMA1_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_GFX_RB_WPTR_POLL_CNTL
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_GFX_RB_RPTR_ADDR_HI
#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_GFX_RB_RPTR_ADDR_LO
#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_GFX_IB_CNTL
#define SDMA1_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_GFX_IB_RPTR
#define SDMA1_GFX_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_GFX_IB_OFFSET
#define SDMA1_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_GFX_IB_BASE_LO
#define SDMA1_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_GFX_IB_BASE_HI
#define SDMA1_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_GFX_IB_SIZE
#define SDMA1_GFX_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_GFX_SKIP_CNTL
#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_GFX_CONTEXT_STATUS
#define SDMA1_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_GFX_DOORBELL
#define SDMA1_GFX_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_GFX_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_GFX_CONTEXT_CNTL
#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
//SDMA1_GFX_STATUS
#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_GFX_DOORBELL_LOG
#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_GFX_WATERMARK
#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_GFX_DOORBELL_OFFSET
#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_GFX_CSA_ADDR_LO
#define SDMA1_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_GFX_CSA_ADDR_HI
#define SDMA1_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_GFX_IB_SUB_REMAIN
#define SDMA1_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_GFX_PREEMPT
#define SDMA1_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_GFX_DUMMY_REG
#define SDMA1_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_GFX_RB_WPTR_POLL_ADDR_HI
#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_GFX_RB_WPTR_POLL_ADDR_LO
#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_GFX_RB_AQL_CNTL
#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_GFX_MINOR_PTR_UPDATE
#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_GFX_MIDCMD_DATA0
#define SDMA1_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_DATA1
#define SDMA1_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_DATA2
#define SDMA1_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_DATA3
#define SDMA1_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_DATA4
#define SDMA1_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_DATA5
#define SDMA1_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_DATA6
#define SDMA1_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_DATA7
#define SDMA1_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_DATA8
#define SDMA1_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_GFX_MIDCMD_CNTL
#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_PAGE_RB_CNTL
#define SDMA1_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_PAGE_RB_BASE
#define SDMA1_PAGE_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_PAGE_RB_BASE_HI
#define SDMA1_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_PAGE_RB_RPTR
#define SDMA1_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_PAGE_RB_RPTR_HI
#define SDMA1_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_PAGE_RB_WPTR
#define SDMA1_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_PAGE_RB_WPTR_HI
#define SDMA1_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_PAGE_RB_WPTR_POLL_CNTL
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_PAGE_RB_RPTR_ADDR_HI
#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_PAGE_RB_RPTR_ADDR_LO
#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_PAGE_IB_CNTL
#define SDMA1_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_PAGE_IB_RPTR
#define SDMA1_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_PAGE_IB_OFFSET
#define SDMA1_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_PAGE_IB_BASE_LO
#define SDMA1_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_PAGE_IB_BASE_HI
#define SDMA1_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_PAGE_IB_SIZE
#define SDMA1_PAGE_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_PAGE_SKIP_CNTL
#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_PAGE_CONTEXT_STATUS
#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_PAGE_DOORBELL
#define SDMA1_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_PAGE_STATUS
#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_PAGE_DOORBELL_LOG
#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_PAGE_WATERMARK
#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_PAGE_DOORBELL_OFFSET
#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_PAGE_CSA_ADDR_LO
#define SDMA1_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_PAGE_CSA_ADDR_HI
#define SDMA1_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_PAGE_IB_SUB_REMAIN
#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_PAGE_PREEMPT
#define SDMA1_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_PAGE_DUMMY_REG
#define SDMA1_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI
#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO
#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_PAGE_RB_AQL_CNTL
#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_PAGE_MINOR_PTR_UPDATE
#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_PAGE_MIDCMD_DATA0
#define SDMA1_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_DATA1
#define SDMA1_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_DATA2
#define SDMA1_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_DATA3
#define SDMA1_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_DATA4
#define SDMA1_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_DATA5
#define SDMA1_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_DATA6
#define SDMA1_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_DATA7
#define SDMA1_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_DATA8
#define SDMA1_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_PAGE_MIDCMD_CNTL
#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_RLC0_RB_CNTL
#define SDMA1_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_RLC0_RB_BASE
#define SDMA1_RLC0_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC0_RB_BASE_HI
#define SDMA1_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_RLC0_RB_RPTR
#define SDMA1_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC0_RB_RPTR_HI
#define SDMA1_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC0_RB_WPTR
#define SDMA1_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC0_RB_WPTR_HI
#define SDMA1_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC0_RB_WPTR_POLL_CNTL
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_RLC0_RB_RPTR_ADDR_HI
#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC0_RB_RPTR_ADDR_LO
#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC0_IB_CNTL
#define SDMA1_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_RLC0_IB_RPTR
#define SDMA1_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC0_IB_OFFSET
#define SDMA1_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC0_IB_BASE_LO
#define SDMA1_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_RLC0_IB_BASE_HI
#define SDMA1_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC0_IB_SIZE
#define SDMA1_RLC0_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC0_SKIP_CNTL
#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_RLC0_CONTEXT_STATUS
#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_RLC0_DOORBELL
#define SDMA1_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_RLC0_STATUS
#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_RLC0_DOORBELL_LOG
#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_RLC0_WATERMARK
#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_RLC0_DOORBELL_OFFSET
#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_RLC0_CSA_ADDR_LO
#define SDMA1_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC0_CSA_ADDR_HI
#define SDMA1_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC0_IB_SUB_REMAIN
#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC0_PREEMPT
#define SDMA1_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_RLC0_DUMMY_REG
#define SDMA1_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI
#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO
#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC0_RB_AQL_CNTL
#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_RLC0_MINOR_PTR_UPDATE
#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_RLC0_MIDCMD_DATA0
#define SDMA1_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_DATA1
#define SDMA1_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_DATA2
#define SDMA1_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_DATA3
#define SDMA1_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_DATA4
#define SDMA1_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_DATA5
#define SDMA1_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_DATA6
#define SDMA1_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_DATA7
#define SDMA1_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_DATA8
#define SDMA1_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_RLC0_MIDCMD_CNTL
#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_RLC1_RB_CNTL
#define SDMA1_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_RLC1_RB_BASE
#define SDMA1_RLC1_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC1_RB_BASE_HI
#define SDMA1_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_RLC1_RB_RPTR
#define SDMA1_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC1_RB_RPTR_HI
#define SDMA1_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC1_RB_WPTR
#define SDMA1_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC1_RB_WPTR_HI
#define SDMA1_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC1_RB_WPTR_POLL_CNTL
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_RLC1_RB_RPTR_ADDR_HI
#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC1_RB_RPTR_ADDR_LO
#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC1_IB_CNTL
#define SDMA1_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_RLC1_IB_RPTR
#define SDMA1_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC1_IB_OFFSET
#define SDMA1_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC1_IB_BASE_LO
#define SDMA1_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_RLC1_IB_BASE_HI
#define SDMA1_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC1_IB_SIZE
#define SDMA1_RLC1_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC1_SKIP_CNTL
#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_RLC1_CONTEXT_STATUS
#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_RLC1_DOORBELL
#define SDMA1_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_RLC1_STATUS
#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_RLC1_DOORBELL_LOG
#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_RLC1_WATERMARK
#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_RLC1_DOORBELL_OFFSET
#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_RLC1_CSA_ADDR_LO
#define SDMA1_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC1_CSA_ADDR_HI
#define SDMA1_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC1_IB_SUB_REMAIN
#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC1_PREEMPT
#define SDMA1_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_RLC1_DUMMY_REG
#define SDMA1_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI
#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO
#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC1_RB_AQL_CNTL
#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_RLC1_MINOR_PTR_UPDATE
#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_RLC1_MIDCMD_DATA0
#define SDMA1_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_DATA1
#define SDMA1_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_DATA2
#define SDMA1_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_DATA3
#define SDMA1_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_DATA4
#define SDMA1_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_DATA5
#define SDMA1_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_DATA6
#define SDMA1_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_DATA7
#define SDMA1_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_DATA8
#define SDMA1_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_RLC1_MIDCMD_CNTL
#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_RLC2_RB_CNTL
#define SDMA1_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_RLC2_RB_BASE
#define SDMA1_RLC2_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC2_RB_BASE_HI
#define SDMA1_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_RLC2_RB_RPTR
#define SDMA1_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC2_RB_RPTR_HI
#define SDMA1_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC2_RB_WPTR
#define SDMA1_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC2_RB_WPTR_HI
#define SDMA1_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC2_RB_WPTR_POLL_CNTL
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_RLC2_RB_RPTR_ADDR_HI
#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC2_RB_RPTR_ADDR_LO
#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC2_IB_CNTL
#define SDMA1_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_RLC2_IB_RPTR
#define SDMA1_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC2_IB_OFFSET
#define SDMA1_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC2_IB_BASE_LO
#define SDMA1_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_RLC2_IB_BASE_HI
#define SDMA1_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC2_IB_SIZE
#define SDMA1_RLC2_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC2_SKIP_CNTL
#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_RLC2_CONTEXT_STATUS
#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_RLC2_DOORBELL
#define SDMA1_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_RLC2_STATUS
#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_RLC2_DOORBELL_LOG
#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_RLC2_WATERMARK
#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_RLC2_DOORBELL_OFFSET
#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_RLC2_CSA_ADDR_LO
#define SDMA1_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC2_CSA_ADDR_HI
#define SDMA1_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC2_IB_SUB_REMAIN
#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC2_PREEMPT
#define SDMA1_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_RLC2_DUMMY_REG
#define SDMA1_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI
#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO
#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC2_RB_AQL_CNTL
#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_RLC2_MINOR_PTR_UPDATE
#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_RLC2_MIDCMD_DATA0
#define SDMA1_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_DATA1
#define SDMA1_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_DATA2
#define SDMA1_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_DATA3
#define SDMA1_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_DATA4
#define SDMA1_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_DATA5
#define SDMA1_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_DATA6
#define SDMA1_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_DATA7
#define SDMA1_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_DATA8
#define SDMA1_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_RLC2_MIDCMD_CNTL
#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_RLC3_RB_CNTL
#define SDMA1_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_RLC3_RB_BASE
#define SDMA1_RLC3_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC3_RB_BASE_HI
#define SDMA1_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_RLC3_RB_RPTR
#define SDMA1_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC3_RB_RPTR_HI
#define SDMA1_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC3_RB_WPTR
#define SDMA1_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC3_RB_WPTR_HI
#define SDMA1_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC3_RB_WPTR_POLL_CNTL
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_RLC3_RB_RPTR_ADDR_HI
#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC3_RB_RPTR_ADDR_LO
#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC3_IB_CNTL
#define SDMA1_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_RLC3_IB_RPTR
#define SDMA1_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC3_IB_OFFSET
#define SDMA1_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC3_IB_BASE_LO
#define SDMA1_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_RLC3_IB_BASE_HI
#define SDMA1_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC3_IB_SIZE
#define SDMA1_RLC3_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC3_SKIP_CNTL
#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_RLC3_CONTEXT_STATUS
#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_RLC3_DOORBELL
#define SDMA1_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_RLC3_STATUS
#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_RLC3_DOORBELL_LOG
#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_RLC3_WATERMARK
#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_RLC3_DOORBELL_OFFSET
#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_RLC3_CSA_ADDR_LO
#define SDMA1_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC3_CSA_ADDR_HI
#define SDMA1_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC3_IB_SUB_REMAIN
#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC3_PREEMPT
#define SDMA1_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_RLC3_DUMMY_REG
#define SDMA1_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI
#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO
#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC3_RB_AQL_CNTL
#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_RLC3_MINOR_PTR_UPDATE
#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_RLC3_MIDCMD_DATA0
#define SDMA1_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_DATA1
#define SDMA1_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_DATA2
#define SDMA1_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_DATA3
#define SDMA1_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_DATA4
#define SDMA1_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_DATA5
#define SDMA1_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_DATA6
#define SDMA1_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_DATA7
#define SDMA1_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_DATA8
#define SDMA1_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_RLC3_MIDCMD_CNTL
#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_RLC4_RB_CNTL
#define SDMA1_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_RLC4_RB_BASE
#define SDMA1_RLC4_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC4_RB_BASE_HI
#define SDMA1_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_RLC4_RB_RPTR
#define SDMA1_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC4_RB_RPTR_HI
#define SDMA1_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC4_RB_WPTR
#define SDMA1_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC4_RB_WPTR_HI
#define SDMA1_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC4_RB_WPTR_POLL_CNTL
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_RLC4_RB_RPTR_ADDR_HI
#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC4_RB_RPTR_ADDR_LO
#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC4_IB_CNTL
#define SDMA1_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_RLC4_IB_RPTR
#define SDMA1_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC4_IB_OFFSET
#define SDMA1_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC4_IB_BASE_LO
#define SDMA1_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_RLC4_IB_BASE_HI
#define SDMA1_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC4_IB_SIZE
#define SDMA1_RLC4_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC4_SKIP_CNTL
#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_RLC4_CONTEXT_STATUS
#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_RLC4_DOORBELL
#define SDMA1_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_RLC4_STATUS
#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_RLC4_DOORBELL_LOG
#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_RLC4_WATERMARK
#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_RLC4_DOORBELL_OFFSET
#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_RLC4_CSA_ADDR_LO
#define SDMA1_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC4_CSA_ADDR_HI
#define SDMA1_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC4_IB_SUB_REMAIN
#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC4_PREEMPT
#define SDMA1_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_RLC4_DUMMY_REG
#define SDMA1_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI
#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO
#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC4_RB_AQL_CNTL
#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_RLC4_MINOR_PTR_UPDATE
#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_RLC4_MIDCMD_DATA0
#define SDMA1_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_DATA1
#define SDMA1_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_DATA2
#define SDMA1_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_DATA3
#define SDMA1_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_DATA4
#define SDMA1_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_DATA5
#define SDMA1_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_DATA6
#define SDMA1_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_DATA7
#define SDMA1_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_DATA8
#define SDMA1_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_RLC4_MIDCMD_CNTL
#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_RLC5_RB_CNTL
#define SDMA1_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_RLC5_RB_BASE
#define SDMA1_RLC5_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC5_RB_BASE_HI
#define SDMA1_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_RLC5_RB_RPTR
#define SDMA1_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC5_RB_RPTR_HI
#define SDMA1_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC5_RB_WPTR
#define SDMA1_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC5_RB_WPTR_HI
#define SDMA1_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC5_RB_WPTR_POLL_CNTL
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_RLC5_RB_RPTR_ADDR_HI
#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC5_RB_RPTR_ADDR_LO
#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC5_IB_CNTL
#define SDMA1_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_RLC5_IB_RPTR
#define SDMA1_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC5_IB_OFFSET
#define SDMA1_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC5_IB_BASE_LO
#define SDMA1_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_RLC5_IB_BASE_HI
#define SDMA1_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC5_IB_SIZE
#define SDMA1_RLC5_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC5_SKIP_CNTL
#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_RLC5_CONTEXT_STATUS
#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_RLC5_DOORBELL
#define SDMA1_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_RLC5_STATUS
#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_RLC5_DOORBELL_LOG
#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_RLC5_WATERMARK
#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_RLC5_DOORBELL_OFFSET
#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_RLC5_CSA_ADDR_LO
#define SDMA1_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC5_CSA_ADDR_HI
#define SDMA1_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC5_IB_SUB_REMAIN
#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC5_PREEMPT
#define SDMA1_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_RLC5_DUMMY_REG
#define SDMA1_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI
#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO
#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC5_RB_AQL_CNTL
#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_RLC5_MINOR_PTR_UPDATE
#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_RLC5_MIDCMD_DATA0
#define SDMA1_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_DATA1
#define SDMA1_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_DATA2
#define SDMA1_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_DATA3
#define SDMA1_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_DATA4
#define SDMA1_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_DATA5
#define SDMA1_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_DATA6
#define SDMA1_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_DATA7
#define SDMA1_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_DATA8
#define SDMA1_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_RLC5_MIDCMD_CNTL
#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_RLC6_RB_CNTL
#define SDMA1_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_RLC6_RB_BASE
#define SDMA1_RLC6_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC6_RB_BASE_HI
#define SDMA1_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_RLC6_RB_RPTR
#define SDMA1_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC6_RB_RPTR_HI
#define SDMA1_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC6_RB_WPTR
#define SDMA1_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC6_RB_WPTR_HI
#define SDMA1_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC6_RB_WPTR_POLL_CNTL
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_RLC6_RB_RPTR_ADDR_HI
#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC6_RB_RPTR_ADDR_LO
#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC6_IB_CNTL
#define SDMA1_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_RLC6_IB_RPTR
#define SDMA1_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC6_IB_OFFSET
#define SDMA1_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC6_IB_BASE_LO
#define SDMA1_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_RLC6_IB_BASE_HI
#define SDMA1_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC6_IB_SIZE
#define SDMA1_RLC6_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC6_SKIP_CNTL
#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_RLC6_CONTEXT_STATUS
#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_RLC6_DOORBELL
#define SDMA1_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_RLC6_STATUS
#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_RLC6_DOORBELL_LOG
#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_RLC6_WATERMARK
#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_RLC6_DOORBELL_OFFSET
#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_RLC6_CSA_ADDR_LO
#define SDMA1_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC6_CSA_ADDR_HI
#define SDMA1_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC6_IB_SUB_REMAIN
#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC6_PREEMPT
#define SDMA1_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_RLC6_DUMMY_REG
#define SDMA1_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI
#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO
#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC6_RB_AQL_CNTL
#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_RLC6_MINOR_PTR_UPDATE
#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_RLC6_MIDCMD_DATA0
#define SDMA1_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_DATA1
#define SDMA1_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_DATA2
#define SDMA1_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_DATA3
#define SDMA1_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_DATA4
#define SDMA1_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_DATA5
#define SDMA1_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_DATA6
#define SDMA1_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_DATA7
#define SDMA1_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_DATA8
#define SDMA1_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_RLC6_MIDCMD_CNTL
#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
//SDMA1_RLC7_RB_CNTL
#define SDMA1_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define SDMA1_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
#define SDMA1_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
#define SDMA1_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
#define SDMA1_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
#define SDMA1_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
#define SDMA1_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
//SDMA1_RLC7_RB_BASE
#define SDMA1_RLC7_RB_BASE__ADDR__SHIFT 0x0
#define SDMA1_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC7_RB_BASE_HI
#define SDMA1_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
//SDMA1_RLC7_RB_RPTR
#define SDMA1_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC7_RB_RPTR_HI
#define SDMA1_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC7_RB_WPTR
#define SDMA1_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
#define SDMA1_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC7_RB_WPTR_HI
#define SDMA1_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
#define SDMA1_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
//SDMA1_RLC7_RB_WPTR_POLL_CNTL
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
//SDMA1_RLC7_RB_RPTR_ADDR_HI
#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC7_RB_RPTR_ADDR_LO
#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC7_IB_CNTL
#define SDMA1_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
#define SDMA1_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
#define SDMA1_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
#define SDMA1_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
//SDMA1_RLC7_IB_RPTR
#define SDMA1_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
#define SDMA1_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC7_IB_OFFSET
#define SDMA1_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
//SDMA1_RLC7_IB_BASE_LO
#define SDMA1_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
#define SDMA1_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
//SDMA1_RLC7_IB_BASE_HI
#define SDMA1_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC7_IB_SIZE
#define SDMA1_RLC7_IB_SIZE__SIZE__SHIFT 0x0
#define SDMA1_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC7_SKIP_CNTL
#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
//SDMA1_RLC7_CONTEXT_STATUS
#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
#define SDMA1_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
#define SDMA1_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
//SDMA1_RLC7_DOORBELL
#define SDMA1_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
#define SDMA1_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
#define SDMA1_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
#define SDMA1_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
//SDMA1_RLC7_STATUS
#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
//SDMA1_RLC7_DOORBELL_LOG
#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
#define SDMA1_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
#define SDMA1_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
//SDMA1_RLC7_WATERMARK
#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
//SDMA1_RLC7_DOORBELL_OFFSET
#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
//SDMA1_RLC7_CSA_ADDR_LO
#define SDMA1_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC7_CSA_ADDR_HI
#define SDMA1_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC7_IB_SUB_REMAIN
#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
//SDMA1_RLC7_PREEMPT
#define SDMA1_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
#define SDMA1_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
//SDMA1_RLC7_DUMMY_REG
#define SDMA1_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
#define SDMA1_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
//SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI
#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
//SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO
#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
//SDMA1_RLC7_RB_AQL_CNTL
#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
//SDMA1_RLC7_MINOR_PTR_UPDATE
#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
//SDMA1_RLC7_MIDCMD_DATA0
#define SDMA1_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_DATA1
#define SDMA1_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_DATA2
#define SDMA1_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_DATA3
#define SDMA1_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_DATA4
#define SDMA1_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_DATA5
#define SDMA1_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_DATA6
#define SDMA1_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_DATA7
#define SDMA1_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_DATA8
#define SDMA1_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
//SDMA1_RLC7_MIDCMD_CNTL
#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
* Copyright (c) 2011 Jiri Pirko <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_team.h>
/* Per-team private state for round-robin mode: a running tx counter. */
struct rr_priv {
unsigned int sent_packets;
};
/* View the team's generic mode_priv area as our rr_priv state. */
static struct rr_priv *rr_priv(struct team *team)
{
return (struct rr_priv *) &team->mode_priv;
}
/*
 * rr_transmit() - transmit one skb on the next port in round-robin order.
 *
 * Picks the port indexed by the (post-incremented) sent_packets counter,
 * then falls forward to the first txable port from there.  Returns true
 * when the skb was handed to the device queue; on every failure path the
 * skb is freed and false is returned.
 */
static bool rr_transmit(struct team *team, struct sk_buff *skb)
{
	struct team_port *port;
	unsigned int idx;

	idx = team_num_to_port_index(team, rr_priv(team)->sent_packets++);
	port = team_get_port_by_index_rcu(team, idx);
	if (likely(port))
		port = team_get_first_port_txable_rcu(team, port);
	if (unlikely(!port)) {
		dev_kfree_skb_any(skb);
		return false;
	}
	/* team_dev_queue_xmit() returns non-zero on failure. */
	return team_dev_queue_xmit(team, port, skb) == 0;
}
/* Mode callbacks: only transmit is custom; port handling uses the
 * generic team_modeop_* helpers. */
static const struct team_mode_ops rr_mode_ops = {
.transmit = rr_transmit,
.port_enter = team_modeop_port_enter,
.port_change_dev_addr = team_modeop_port_change_dev_addr,
};
/* Mode descriptor registered with the team core under "roundrobin". */
static const struct team_mode rr_mode = {
.kind = "roundrobin",
.owner = THIS_MODULE,
.priv_size = sizeof(struct rr_priv),
.ops = &rr_mode_ops,
.lag_tx_type = NETDEV_LAG_TX_TYPE_ROUNDROBIN,
};
/* Module load: make the "roundrobin" mode available to team instances. */
static int __init rr_init_module(void)
{
return team_mode_register(&rr_mode);
}
/* Module unload: withdraw the mode from the team core. */
static void __exit rr_cleanup_module(void)
{
team_mode_unregister(&rr_mode);
}
module_init(rr_init_module);
module_exit(rr_cleanup_module);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Round-robin mode for team");
MODULE_ALIAS_TEAM_MODE("roundrobin");
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#ifndef __LOONGSON_MODULE_H__
#define __LOONGSON_MODULE_H__
/* Module-scope vblank enable flag — presumably a module parameter defined
 * in the driver core; TODO confirm against the defining file. */
extern int loongson_vblank;
/* PCI driver instance for the lsdc display controller. */
extern struct pci_driver lsdc_pci_driver;
#endif
|
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/perf_event.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "perf-sys.h"
#include "trace_helpers.h"
/* The two programs (and their attach links) loaded from the companion
 * *_kern.o object; filled in by main(). */
static struct bpf_program *progs[2];
static struct bpf_link *links[2];
/* On failure of @condition: print the enclosing function name, perror()
 * the last error, and return -1 from the enclosing function. */
#define CHECK_PERROR_RET(condition) ({ \
	int __ret = !!(condition); \
	if (__ret) { \
		printf("FAIL: %s:\n", __func__); \
		perror(" "); \
		return -1; \
	} \
})
/* Silently return -1 from the enclosing function when @condition holds. */
#define CHECK_AND_RET(condition) ({ \
	int __ret = !!(condition); \
	if (__ret) \
		return -1; \
})
/* Widen a pointer to the __u64 representation the bpf syscall ABI uses. */
static __u64 ptr_to_u64(void *ptr)
{
return (__u64) (unsigned long) ptr;
}
#define PMU_TYPE_FILE "/sys/bus/event_source/devices/%s/type"

/*
 * Read the perf PMU type id for @event_type ("kprobe"/"uprobe") from sysfs.
 * Returns the numeric type on success, -1 on failure (with a message).
 */
static int bpf_find_probe_type(const char *event_type)
{
	char buf[256];
	int fd, ret;

	ret = snprintf(buf, sizeof(buf), PMU_TYPE_FILE, event_type);
	CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));

	fd = open(buf, O_RDONLY);
	CHECK_PERROR_RET(fd < 0);

	/* Leave room for the terminator: read() does not NUL-terminate,
	 * and strtol() below needs a proper C string. */
	ret = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	CHECK_PERROR_RET(ret < 0);
	buf[ret] = '\0';

	errno = 0;
	ret = (int)strtol(buf, NULL, 10);
	CHECK_PERROR_RET(errno);
	return ret;
}
#define PMU_RETPROBE_FILE "/sys/bus/event_source/devices/%s/format/retprobe"

/*
 * Read the retprobe config bit position for @event_type from the PMU
 * format file (contents look like "config:<bit>").  Returns the bit
 * number on success, -1 on failure.
 */
static int bpf_get_retprobe_bit(const char *event_type)
{
	char buf[256];
	int fd, ret;

	ret = snprintf(buf, sizeof(buf), PMU_RETPROBE_FILE, event_type);
	CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));

	fd = open(buf, O_RDONLY);
	CHECK_PERROR_RET(fd < 0);

	/* read() does not NUL-terminate; strlen()/strtol() below need it. */
	ret = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	CHECK_PERROR_RET(ret < 0);
	buf[ret] = '\0';

	CHECK_PERROR_RET(strlen(buf) < strlen("config:"));

	errno = 0;
	ret = (int)strtol(buf + strlen("config:"), NULL, 10);
	CHECK_PERROR_RET(errno);
	return ret;
}
/*
 * Query the kernel about links[link_idx] via bpf_task_fd_query() and
 * verify the reported probe name, fd type, offset and address.
 * Returns 0 on success, -1 on any mismatch or query failure.
 */
static int test_debug_fs_kprobe(int link_idx, const char *fn_name,
				__u32 expected_fd_type)
{
	__u64 offset, addr;
	__u32 buf_len, prog_id, type;
	char name_buf[256];
	int ret, event_fd;

	buf_len = sizeof(name_buf);
	event_fd = bpf_link__fd(links[link_idx]);
	ret = bpf_task_fd_query(getpid(), event_fd, 0, name_buf, &buf_len,
				&prog_id, &type, &offset, &addr);
	if (ret < 0) {
		printf("FAIL: %s, for event_fd idx %d, fn_name %s\n",
		       __func__, link_idx, fn_name);
		perror(" :");
		return -1;
	}

	/* A tracefs-created kprobe must report a zero offset and address. */
	if (strcmp(name_buf, fn_name) != 0 ||
	    type != expected_fd_type ||
	    offset != 0x0 || addr != 0x0) {
		printf("FAIL: bpf_trace_event_query(event_fd[%d]):\n",
		       link_idx);
		printf("buf: %s, fd_type: %u, probe_offset: 0x%llx,"
		       " probe_addr: 0x%llx\n",
		       name_buf, type, offset, addr);
		return -1;
	}
	return 0;
}
/*
 * Create a k/uprobe perf event directly via perf_event_open() (no tracefs),
 * attach progs[0] to it, and return what bpf_task_fd_query() reports
 * through the out parameters.  Probes by @name+@offset when @name is set,
 * otherwise by absolute @addr.  Returns 0 on success, -1 on failure.
 */
static int test_nondebug_fs_kuprobe_common(const char *event_type,
	const char *name, __u64 offset, __u64 addr, bool is_return,
	char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
	__u64 *probe_offset, __u64 *probe_addr)
{
	int is_return_bit = bpf_get_retprobe_bit(event_type);
	int type = bpf_find_probe_type(event_type);
	struct perf_event_attr attr = {};
	struct bpf_link *link;
	int fd, err = -1;

	if (type < 0 || is_return_bit < 0) {
		printf("FAIL: %s incorrect type (%d) or is_return_bit (%d)\n",
		       __func__, type, is_return_bit);
		return err;
	}

	attr.sample_period = 1;
	attr.wakeup_events = 1;
	if (is_return)
		attr.config |= 1 << is_return_bit;

	/* config1/config2 carry name+offset or (NULL, addr) per perf ABI. */
	if (name) {
		attr.config1 = ptr_to_u64((void *)name);
		attr.config2 = offset;
	} else {
		attr.config1 = 0;
		attr.config2 = addr;
	}
	attr.size = sizeof(attr);
	attr.type = type;

	fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0) {
		/* was unchecked: attach would have been fed a bad fd */
		printf("FAIL: %s:\n", __func__);
		perror(" ");
		return -1;
	}

	link = bpf_program__attach_perf_event(progs[0], fd);
	if (libbpf_get_error(link)) {
		printf("ERROR: bpf_program__attach_perf_event failed\n");
		link = NULL;
		close(fd);
		goto cleanup;
	}

	/* was CHECK_PERROR_RET: returning here leaked the link and fd */
	if (bpf_task_fd_query(getpid(), fd, 0, buf, buf_len,
			      prog_id, fd_type, probe_offset,
			      probe_addr) < 0) {
		printf("FAIL: %s:\n", __func__);
		perror(" ");
		goto cleanup;
	}
	err = 0;
cleanup:
	bpf_link__destroy(link);
	return err;
}
/*
 * Wrapper around test_nondebug_fs_kuprobe_common(): create the probe,
 * then validate every field bpf_task_fd_query() reported — fd type
 * (retprobe vs probe), name/offset when probing by name, or buf_len==0
 * and matching address when probing by address.
 * Returns 0 on success, -1 on any mismatch (with a FAIL message).
 */
static int test_nondebug_fs_probe(const char *event_type, const char *name,
__u64 offset, __u64 addr, bool is_return,
__u32 expected_fd_type,
__u32 expected_ret_fd_type,
char *buf, __u32 buf_len)
{
__u64 probe_offset, probe_addr;
__u32 prog_id, fd_type;
int err;
err = test_nondebug_fs_kuprobe_common(event_type, name,
offset, addr, is_return,
buf, &buf_len, &prog_id,
&fd_type, &probe_offset,
&probe_addr);
if (err < 0) {
printf("FAIL: %s, "
"for name %s, offset 0x%llx, addr 0x%llx, is_return %d\n",
__func__, name ? name : "", offset, addr, is_return);
perror(" :");
return -1;
}
/* fd_type must match the return/non-return flavor requested. */
if ((is_return && fd_type != expected_ret_fd_type) ||
(!is_return && fd_type != expected_fd_type)) {
printf("FAIL: %s, incorrect fd_type %u\n",
__func__, fd_type);
return -1;
}
if (name) {
/* Probing by name: the kernel must echo the name and offset. */
if (strcmp(name, buf) != 0) {
printf("FAIL: %s, incorrect buf %s\n", __func__, buf);
return -1;
}
if (probe_offset != offset) {
printf("FAIL: %s, incorrect probe_offset 0x%llx\n",
__func__, probe_offset);
return -1;
}
} else {
/* Probing by address: no name comes back, only the address. */
if (buf_len != 0) {
printf("FAIL: %s, incorrect buf %p\n",
__func__, buf);
return -1;
}
if (probe_addr != addr) {
printf("FAIL: %s, incorrect probe_addr 0x%llx\n",
__func__, probe_addr);
return -1;
}
}
return 0;
}
/*
 * Create a u[ret]probe on @binary_path+@offset through the tracefs
 * uprobe_events interface, open it as a tracepoint perf event, attach
 * progs[0], and verify bpf_task_fd_query() reports the expected path,
 * fd type and offset.  Returns 0 on success, negative on failure.
 */
static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
{
	char buf[256], event_alias[sizeof("test_1234567890")];
	const char *event_type = "uprobe";
	struct perf_event_attr attr = {};
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	int err = -1, res, kfd, efd;
	struct bpf_link *link;
	ssize_t bytes;

	/* Register a uniquely named probe ("test_<pid>") in tracefs. */
	snprintf(buf, sizeof(buf), "/sys/kernel/tracing/%s_events",
		 event_type);
	kfd = open(buf, O_WRONLY | O_TRUNC, 0);
	CHECK_PERROR_RET(kfd < 0);

	res = snprintf(event_alias, sizeof(event_alias), "test_%d", getpid());
	CHECK_PERROR_RET(res < 0 || res >= sizeof(event_alias));

	res = snprintf(buf, sizeof(buf), "%c:%ss/%s %s:0x%lx",
		       is_return ? 'r' : 'p', event_type, event_alias,
		       binary_path, offset);
	CHECK_PERROR_RET(res < 0 || res >= sizeof(buf));
	CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0);
	close(kfd);
	kfd = -1;

	/* Read back the tracepoint id assigned to the new event. */
	snprintf(buf, sizeof(buf), "/sys/kernel/tracing/events/%ss/%s/id",
		 event_type, event_alias);
	efd = open(buf, O_RDONLY, 0);
	CHECK_PERROR_RET(efd < 0);
	bytes = read(efd, buf, sizeof(buf));
	CHECK_PERROR_RET(bytes <= 0 || bytes >= sizeof(buf));
	close(efd);
	buf[bytes] = '\0';

	/* Open the tracepoint as a perf event and attach the program. */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	CHECK_PERROR_RET(kfd < 0);	/* was unchecked */

	link = bpf_program__attach_perf_event(progs[0], kfd);
	if (libbpf_get_error(link)) {
		printf("ERROR: bpf_program__attach_perf_event failed\n");
		link = NULL;
		close(kfd);
		goto cleanup;
	}

	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), kfd, 0, buf, &len,
				&prog_id, &fd_type, &probe_offset,
				&probe_addr);
	if (err < 0) {
		printf("FAIL: %s, binary_path %s\n", __func__, binary_path);
		perror(" :");
		goto cleanup;	/* was: return -1, leaking link + kfd */
	}
	err = -1;
	if ((is_return && fd_type != BPF_FD_TYPE_URETPROBE) ||
	    (!is_return && fd_type != BPF_FD_TYPE_UPROBE)) {
		printf("FAIL: %s, incorrect fd_type %u\n", __func__,
		       fd_type);
		goto cleanup;	/* was: return -1, leaking link + kfd */
	}
	if (strcmp(binary_path, buf) != 0) {
		printf("FAIL: %s, incorrect buf %s\n", __func__, buf);
		goto cleanup;	/* was: return -1, leaking link + kfd */
	}
	if (probe_offset != offset) {
		printf("FAIL: %s, incorrect probe_offset 0x%llx\n", __func__,
		       probe_offset);
		goto cleanup;	/* was: return -1, leaking link + kfd */
	}
	err = 0;
cleanup:
	bpf_link__destroy(link);
	return err;
}
/*
 * Load <argv[0]>_kern.o, attach its two programs, then exercise
 * bpf_task_fd_query() against kprobes/kretprobes created through both
 * tracefs and raw perf_event_open(), and against uprobes on this very
 * binary.  Returns 0 on success, negative on any failure.
 */
int main(int argc, char **argv)
{
	extern char __executable_start;
	char filename[256], buf[256];
	__u64 uprobe_file_offset;
	struct bpf_program *prog;
	struct bpf_object *obj;
	int i = 0, err = -1;

	if (load_kallsyms()) {
		printf("failed to process /proc/kallsyms\n");
		return err;
	}

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		fprintf(stderr, "ERROR: opening BPF object file failed\n");
		return err;
	}

	/* load BPF program */
	if (bpf_object__load(obj)) {
		fprintf(stderr, "ERROR: loading BPF object file failed\n");
		goto cleanup;
	}

	/* Attach every program; remember the links for cleanup. */
	bpf_object__for_each_program(prog, obj) {
		if (i == 2) {	/* guard the fixed-size progs[]/links[] */
			fprintf(stderr, "ERROR: only two programs expected\n");
			goto cleanup;
		}
		progs[i] = prog;
		links[i] = bpf_program__attach(progs[i]);
		if (libbpf_get_error(links[i])) {
			fprintf(stderr, "ERROR: bpf_program__attach failed\n");
			links[i] = NULL;
			goto cleanup;
		}
		i++;
	}

	/* test two functions in the corresponding *_kern.c file */
	CHECK_AND_RET(test_debug_fs_kprobe(0, "blk_mq_start_request",
					   BPF_FD_TYPE_KPROBE));
	CHECK_AND_RET(test_debug_fs_kprobe(1, "__blk_account_io_done",
					   BPF_FD_TYPE_KRETPROBE));

	/* test nondebug fs kprobe */
	CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x0, 0x0,
					     false, BPF_FD_TYPE_KPROBE,
					     BPF_FD_TYPE_KRETPROBE,
					     buf, sizeof(buf)));
#ifdef __x86_64__
	/* set a kprobe on "bpf_check + 0x5", which is x64 specific */
	CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x5, 0x0,
					     false, BPF_FD_TYPE_KPROBE,
					     BPF_FD_TYPE_KRETPROBE,
					     buf, sizeof(buf)));
#endif
	CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x0, 0x0,
					     true, BPF_FD_TYPE_KPROBE,
					     BPF_FD_TYPE_KRETPROBE,
					     buf, sizeof(buf)));
	CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
					     ksym_get_addr("bpf_check"), false,
					     BPF_FD_TYPE_KPROBE,
					     BPF_FD_TYPE_KRETPROBE,
					     buf, sizeof(buf)));
	CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
					     ksym_get_addr("bpf_check"), false,
					     BPF_FD_TYPE_KPROBE,
					     BPF_FD_TYPE_KRETPROBE,
					     NULL, 0));
	CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
					     ksym_get_addr("bpf_check"), true,
					     BPF_FD_TYPE_KPROBE,
					     BPF_FD_TYPE_KRETPROBE,
					     buf, sizeof(buf)));
	/* was "0, 0": pass NULL explicitly, matching the no-buf call above */
	CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
					     ksym_get_addr("bpf_check"), true,
					     BPF_FD_TYPE_KPROBE,
					     BPF_FD_TYPE_KRETPROBE,
					     NULL, 0));

	/* test nondebug fs uprobe */
	/* the calculation of uprobe file offset is based on gcc 7.3.1 on x64
	 * and the default linker script, which defines __executable_start as
	 * the start of the .text section. The calculation could be different
	 * on different systems with different compilers. The right way is
	 * to parse the ELF file. We took a shortcut here.
	 */
	uprobe_file_offset = (unsigned long)main - (unsigned long)&__executable_start;
	CHECK_AND_RET(test_nondebug_fs_probe("uprobe", (char *)argv[0],
					     uprobe_file_offset, 0x0, false,
					     BPF_FD_TYPE_UPROBE,
					     BPF_FD_TYPE_URETPROBE,
					     buf, sizeof(buf)));
	CHECK_AND_RET(test_nondebug_fs_probe("uprobe", (char *)argv[0],
					     uprobe_file_offset, 0x0, true,
					     BPF_FD_TYPE_UPROBE,
					     BPF_FD_TYPE_URETPROBE,
					     buf, sizeof(buf)));

	/* test debug fs uprobe */
	CHECK_AND_RET(test_debug_fs_uprobe((char *)argv[0], uprobe_file_offset,
					   false));
	CHECK_AND_RET(test_debug_fs_uprobe((char *)argv[0], uprobe_file_offset,
					   true));
	err = 0;

cleanup:
	for (i--; i >= 0; i--)
		bpf_link__destroy(links[i]);

	bpf_object__close(obj);
	return err;
}
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/jack.h>
#include <sound/pcm_params.h>
#include <sound/pcm.h>
#include <sound/soc-dapm.h>
#include <sound/soc.h>
#include <sound/tlv.h>
#include "wcd-clsh-v2.h"
#include "wcd-mbhc-v2.h"
#include "wcd937x.h"
/* Variant ids — values as reported by the chip id register. */
enum {
CHIPID_WCD9370 = 0,
CHIPID_WCD9375 = 5,
};
/* Z value defined in milliohm */
#define WCD937X_ZDET_VAL_32 (32000)
#define WCD937X_ZDET_VAL_400 (400000)
#define WCD937X_ZDET_VAL_1200 (1200000)
#define WCD937X_ZDET_VAL_100K (100000000)
/* Z floating defined in ohms */
#define WCD937X_ZDET_FLOATING_IMPEDANCE (0x0FFFFFFE)
#define WCD937X_ZDET_NUM_MEASUREMENTS (900)
/* Extract the 2-bit C1 and 14-bit X1 fields from a raw zdet result. */
#define WCD937X_MBHC_GET_C1(c) (((c) & 0xC000) >> 14)
#define WCD937X_MBHC_GET_X1(x) ((x) & 0x3FFF)
/* Z value compared in milliOhm */
#define WCD937X_MBHC_IS_SECOND_RAMP_REQUIRED(z) (((z) > 400000) || ((z) < 32000))
#define WCD937X_MBHC_ZDET_CONST (86 * 16384)
#define WCD937X_MBHC_MOISTURE_RREF R_24_KOHM
#define WCD_MBHC_HS_V_MAX 1600
#define EAR_RX_PATH_AUX 1
#define WCD937X_MBHC_MAX_BUTTONS 8
/* PCM rates/formats advertised by the DAIs. */
#define WCD937X_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000 |\
SNDRV_PCM_RATE_384000)
/* Fractional Rates */
#define WCD937X_FRAC_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 |\
SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_352800)
#define WCD937X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |\
SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
/* Bit positions used in wcd937x_priv->status_mask. */
enum {
ALLOW_BUCK_DISABLE,
HPH_COMP_DELAY,
HPH_PA_DELAY,
AMIC2_BCS_ENABLE,
};
/* DAI indices. */
enum {
AIF1_PB = 0,
AIF1_CAP,
NUM_CODEC_DAIS,
};
/* Per-codec driver state, stored as component drvdata. */
struct wcd937x_priv {
struct sdw_slave *tx_sdw_dev;
struct wcd937x_sdw_priv *sdw_priv[NUM_CODEC_DAIS];
struct device *txdev;
struct device *rxdev;
struct device_node *rxnode;
struct device_node *txnode;
struct regmap *regmap;
/* micb setup lock */
struct mutex micb_lock;
/* mbhc module */
struct wcd_mbhc *wcd_mbhc;
struct wcd_mbhc_config mbhc_cfg;
struct wcd_mbhc_intr intr_ids;
struct wcd_clsh_ctrl *clsh_info;
struct irq_domain *virq;
struct regmap_irq_chip *wcd_regmap_irq_chip;
struct regmap_irq_chip_data *irq_chip;
struct regulator_bulk_data supplies[WCD937X_MAX_BULK_SUPPLY];
struct regulator *buck_supply;
struct snd_soc_jack *jack;
/* bitmask of ALLOW_BUCK_DISABLE/HPH_*_DELAY/AMIC2_BCS_ENABLE flags */
unsigned long status_mask;
/* per-micbias enable and pull-up reference counts */
s32 micb_ref[WCD937X_MAX_MICBIAS];
s32 pullup_ref[WCD937X_MAX_MICBIAS];
u32 hph_mode;
int ear_rx_path;
u32 micb1_mv;
u32 micb2_mv;
u32 micb3_mv;
/* PDM watchdog interrupt numbers for the three output paths */
int hphr_pdm_wd_int;
int hphl_pdm_wd_int;
int aux_pdm_wd_int;
bool comp1_enable;
bool comp2_enable;
struct gpio_desc *us_euro_gpio;
struct gpio_desc *reset_gpio;
atomic_t rx_clk_cnt;
atomic_t ana_clk_count;
};
/* Gain TLV mappings exposed on the mixer controls. */
static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(ear_pa_gain, 600, -1800);
static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
/* Tuning parameters for one impedance-detection (zdet) ramp. */
struct wcd937x_mbhc_zdet_param {
u16 ldo_ctl;
u16 noff;
u16 nshift;
u16 btn5;
u16 btn6;
u16 btn7;
};
/* Map each generic MBHC field id to its wcd937x register and bitmask;
 * consumed by the shared wcd-mbhc-v2 framework. */
static const struct wcd_mbhc_field wcd_mbhc_fields[WCD_MBHC_REG_FUNC_MAX] = {
WCD_MBHC_FIELD(WCD_MBHC_L_DET_EN, WCD937X_ANA_MBHC_MECH, 0x80),
WCD_MBHC_FIELD(WCD_MBHC_GND_DET_EN, WCD937X_ANA_MBHC_MECH, 0x40),
WCD_MBHC_FIELD(WCD_MBHC_MECH_DETECTION_TYPE, WCD937X_ANA_MBHC_MECH, 0x20),
WCD_MBHC_FIELD(WCD_MBHC_MIC_CLAMP_CTL, WCD937X_MBHC_NEW_PLUG_DETECT_CTL, 0x30),
WCD_MBHC_FIELD(WCD_MBHC_ELECT_DETECTION_TYPE, WCD937X_ANA_MBHC_ELECT, 0x08),
WCD_MBHC_FIELD(WCD_MBHC_HS_L_DET_PULL_UP_CTRL, WCD937X_MBHC_NEW_INT_MECH_DET_CURRENT, 0x1F),
WCD_MBHC_FIELD(WCD_MBHC_HS_L_DET_PULL_UP_COMP_CTRL, WCD937X_ANA_MBHC_MECH, 0x04),
WCD_MBHC_FIELD(WCD_MBHC_HPHL_PLUG_TYPE, WCD937X_ANA_MBHC_MECH, 0x10),
WCD_MBHC_FIELD(WCD_MBHC_GND_PLUG_TYPE, WCD937X_ANA_MBHC_MECH, 0x08),
WCD_MBHC_FIELD(WCD_MBHC_SW_HPH_LP_100K_TO_GND, WCD937X_ANA_MBHC_MECH, 0x01),
WCD_MBHC_FIELD(WCD_MBHC_ELECT_SCHMT_ISRC, WCD937X_ANA_MBHC_ELECT, 0x06),
WCD_MBHC_FIELD(WCD_MBHC_FSM_EN, WCD937X_ANA_MBHC_ELECT, 0x80),
WCD_MBHC_FIELD(WCD_MBHC_INSREM_DBNC, WCD937X_MBHC_NEW_PLUG_DETECT_CTL, 0x0F),
WCD_MBHC_FIELD(WCD_MBHC_BTN_DBNC, WCD937X_MBHC_NEW_CTL_1, 0x03),
WCD_MBHC_FIELD(WCD_MBHC_HS_VREF, WCD937X_MBHC_NEW_CTL_2, 0x03),
WCD_MBHC_FIELD(WCD_MBHC_HS_COMP_RESULT, WCD937X_ANA_MBHC_RESULT_3, 0x08),
WCD_MBHC_FIELD(WCD_MBHC_IN2P_CLAMP_STATE, WCD937X_ANA_MBHC_RESULT_3, 0x10),
WCD_MBHC_FIELD(WCD_MBHC_MIC_SCHMT_RESULT, WCD937X_ANA_MBHC_RESULT_3, 0x20),
WCD_MBHC_FIELD(WCD_MBHC_HPHL_SCHMT_RESULT, WCD937X_ANA_MBHC_RESULT_3, 0x80),
WCD_MBHC_FIELD(WCD_MBHC_HPHR_SCHMT_RESULT, WCD937X_ANA_MBHC_RESULT_3, 0x40),
WCD_MBHC_FIELD(WCD_MBHC_OCP_FSM_EN, WCD937X_HPH_OCP_CTL, 0x10),
WCD_MBHC_FIELD(WCD_MBHC_BTN_RESULT, WCD937X_ANA_MBHC_RESULT_3, 0x07),
WCD_MBHC_FIELD(WCD_MBHC_BTN_ISRC_CTL, WCD937X_ANA_MBHC_ELECT, 0x70),
WCD_MBHC_FIELD(WCD_MBHC_ELECT_RESULT, WCD937X_ANA_MBHC_RESULT_3, 0xFF),
WCD_MBHC_FIELD(WCD_MBHC_MICB_CTRL, WCD937X_ANA_MICB2, 0xC0),
WCD_MBHC_FIELD(WCD_MBHC_HPH_CNP_WG_TIME, WCD937X_HPH_CNP_WG_TIME, 0xFF),
WCD_MBHC_FIELD(WCD_MBHC_HPHR_PA_EN, WCD937X_ANA_HPH, 0x40),
WCD_MBHC_FIELD(WCD_MBHC_HPHL_PA_EN, WCD937X_ANA_HPH, 0x80),
WCD_MBHC_FIELD(WCD_MBHC_HPH_PA_EN, WCD937X_ANA_HPH, 0xC0),
WCD_MBHC_FIELD(WCD_MBHC_SWCH_LEVEL_REMOVE, WCD937X_ANA_MBHC_RESULT_3, 0x10),
WCD_MBHC_FIELD(WCD_MBHC_ANC_DET_EN, WCD937X_MBHC_CTL_BCS, 0x02),
WCD_MBHC_FIELD(WCD_MBHC_FSM_STATUS, WCD937X_MBHC_NEW_FSM_STATUS, 0x01),
WCD_MBHC_FIELD(WCD_MBHC_MUX_CTL, WCD937X_MBHC_NEW_CTL_2, 0x70),
WCD_MBHC_FIELD(WCD_MBHC_MOISTURE_STATUS, WCD937X_MBHC_NEW_FSM_STATUS, 0x20),
WCD_MBHC_FIELD(WCD_MBHC_HPHR_GND, WCD937X_HPH_PA_CTL2, 0x40),
WCD_MBHC_FIELD(WCD_MBHC_HPHL_GND, WCD937X_HPH_PA_CTL2, 0x10),
WCD_MBHC_FIELD(WCD_MBHC_HPHL_OCP_DET_EN, WCD937X_HPH_L_TEST, 0x01),
WCD_MBHC_FIELD(WCD_MBHC_HPHR_OCP_DET_EN, WCD937X_HPH_R_TEST, 0x01),
WCD_MBHC_FIELD(WCD_MBHC_HPHL_OCP_STATUS, WCD937X_DIGITAL_INTR_STATUS_0, 0x80),
WCD_MBHC_FIELD(WCD_MBHC_HPHR_OCP_STATUS, WCD937X_DIGITAL_INTR_STATUS_0, 0x20),
WCD_MBHC_FIELD(WCD_MBHC_ADC_EN, WCD937X_MBHC_NEW_CTL_1, 0x08),
WCD_MBHC_FIELD(WCD_MBHC_ADC_COMPLETE, WCD937X_MBHC_NEW_FSM_STATUS, 0x40),
WCD_MBHC_FIELD(WCD_MBHC_ADC_TIMEOUT, WCD937X_MBHC_NEW_FSM_STATUS, 0x80),
WCD_MBHC_FIELD(WCD_MBHC_ADC_RESULT, WCD937X_MBHC_NEW_ADC_RESULT, 0xFF),
WCD_MBHC_FIELD(WCD_MBHC_MICB2_VOUT, WCD937X_ANA_MICB2, 0x3F),
WCD_MBHC_FIELD(WCD_MBHC_ADC_MODE, WCD937X_MBHC_NEW_CTL_1, 0x10),
WCD_MBHC_FIELD(WCD_MBHC_DETECTION_DONE, WCD937X_MBHC_NEW_CTL_1, 0x04),
WCD_MBHC_FIELD(WCD_MBHC_ELECT_ISRC_EN, WCD937X_ANA_MBHC_ZDET, 0x02),
};
/* IRQ bit layout across the three interrupt status registers
 * (register index, bit) per wcd937x interrupt. */
static const struct regmap_irq wcd937x_irqs[WCD937X_NUM_IRQS] = {
REGMAP_IRQ_REG(WCD937X_IRQ_MBHC_BUTTON_PRESS_DET, 0, BIT(0)),
REGMAP_IRQ_REG(WCD937X_IRQ_MBHC_BUTTON_RELEASE_DET, 0, BIT(1)),
REGMAP_IRQ_REG(WCD937X_IRQ_MBHC_ELECT_INS_REM_DET, 0, BIT(2)),
REGMAP_IRQ_REG(WCD937X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, 0, BIT(3)),
REGMAP_IRQ_REG(WCD937X_IRQ_MBHC_SW_DET, 0, BIT(4)),
REGMAP_IRQ_REG(WCD937X_IRQ_HPHR_OCP_INT, 0, BIT(5)),
REGMAP_IRQ_REG(WCD937X_IRQ_HPHR_CNP_INT, 0, BIT(6)),
REGMAP_IRQ_REG(WCD937X_IRQ_HPHL_OCP_INT, 0, BIT(7)),
REGMAP_IRQ_REG(WCD937X_IRQ_HPHL_CNP_INT, 1, BIT(0)),
REGMAP_IRQ_REG(WCD937X_IRQ_EAR_CNP_INT, 1, BIT(1)),
REGMAP_IRQ_REG(WCD937X_IRQ_EAR_SCD_INT, 1, BIT(2)),
REGMAP_IRQ_REG(WCD937X_IRQ_AUX_CNP_INT, 1, BIT(3)),
REGMAP_IRQ_REG(WCD937X_IRQ_AUX_SCD_INT, 1, BIT(4)),
REGMAP_IRQ_REG(WCD937X_IRQ_HPHL_PDM_WD_INT, 1, BIT(5)),
REGMAP_IRQ_REG(WCD937X_IRQ_HPHR_PDM_WD_INT, 1, BIT(6)),
REGMAP_IRQ_REG(WCD937X_IRQ_AUX_PDM_WD_INT, 1, BIT(7)),
REGMAP_IRQ_REG(WCD937X_IRQ_LDORT_SCD_INT, 2, BIT(0)),
REGMAP_IRQ_REG(WCD937X_IRQ_MBHC_MOISTURE_INT, 2, BIT(1)),
REGMAP_IRQ_REG(WCD937X_IRQ_HPHL_SURGE_DET_INT, 2, BIT(2)),
REGMAP_IRQ_REG(WCD937X_IRQ_HPHR_SURGE_DET_INT, 2, BIT(3)),
};
/*
 * regmap-irq post-handler: write 0 to all three INTR_CLEAR registers
 * after the chip's interrupts have been serviced.  Always reports the
 * interrupt as handled.
 */
static int wcd937x_handle_post_irq(void *data)
{
	struct wcd937x_priv *wcd937x = data;

	if (!wcd937x)
		return IRQ_HANDLED;

	regmap_write(wcd937x->regmap, WCD937X_DIGITAL_INTR_CLEAR_0, 0);
	regmap_write(wcd937x->regmap, WCD937X_DIGITAL_INTR_CLEAR_1, 0);
	regmap_write(wcd937x->regmap, WCD937X_DIGITAL_INTR_CLEAR_2, 0);

	return IRQ_HANDLED;
}
/* Single config register block handed to regmap-irq. */
static const u32 wcd937x_config_regs[] = {
WCD937X_DIGITAL_INTR_LEVEL_0,
};
/* regmap-irq chip description: 3 status/mask/clear register banks,
 * ack-to-clear semantics, runtime-PM aware, with the post-IRQ hook
 * above clearing residual status. */
static const struct regmap_irq_chip wcd937x_regmap_irq_chip = {
.name = "wcd937x",
.irqs = wcd937x_irqs,
.num_irqs = ARRAY_SIZE(wcd937x_irqs),
.num_regs = 3,
.status_base = WCD937X_DIGITAL_INTR_STATUS_0,
.mask_base = WCD937X_DIGITAL_INTR_MASK_0,
.ack_base = WCD937X_DIGITAL_INTR_CLEAR_0,
.use_ack = 1,
.clear_ack = 1,
.config_base = wcd937x_config_regs,
.num_config_bases = ARRAY_SIZE(wcd937x_config_regs),
.num_config_regs = 1,
.runtime_pm = true,
.handle_post_irq = wcd937x_handle_post_irq,
.irq_drv_data = NULL,
};
/* Pulse the reset GPIO: assert for 20-30us, deassert, then allow another
 * 20-30us before the caller touches the part.  Line polarity is handled
 * by gpiod per the DT flags, not here. */
static void wcd937x_reset(struct wcd937x_priv *wcd937x)
{
gpiod_set_value(wcd937x->reset_gpio, 1);
usleep_range(20, 30);
gpiod_set_value(wcd937x->reset_gpio, 0);
usleep_range(20, 30);
}
/* One-time register init after reset: apply efuse-derived trims, bring up
 * sleep/bias/LDO blocks (order and delays matter), and set surge/micbias
 * test defaults.  Register field meanings follow the vendor sequence;
 * individual bit semantics are not documented here — TODO confirm against
 * the wcd937x datasheet. */
static void wcd937x_io_init(struct regmap *regmap)
{
u32 val = 0, temp = 0, temp1 = 0;
/* efuse reg 29 low nibble carries a sleep-control trim value */
regmap_read(regmap, WCD937X_DIGITAL_EFUSE_REG_29, &val);
val = val & 0x0F;
regmap_read(regmap, WCD937X_DIGITAL_EFUSE_REG_16, &temp);
regmap_read(regmap, WCD937X_DIGITAL_EFUSE_REG_17, &temp1);
/* use the fused trim only on parts that advertise it; else default 0x0e */
if (temp == 0x02 || temp1 > 0x09)
regmap_update_bits(regmap, WCD937X_SLEEP_CTL, 0x0E, val);
else
regmap_update_bits(regmap, WCD937X_SLEEP_CTL, 0x0e, 0x0e);
regmap_update_bits(regmap, WCD937X_SLEEP_CTL, 0x80, 0x80);
usleep_range(1000, 1010);
regmap_update_bits(regmap, WCD937X_SLEEP_CTL, 0x40, 0x40);
usleep_range(1000, 1010);
regmap_update_bits(regmap, WCD937X_LDORXTX_CONFIG, BIT(4), 0x00);
regmap_update_bits(regmap, WCD937X_BIAS_VBG_FINE_ADJ, 0xf0, BIT(7))
;
regmap_update_bits(regmap, WCD937X_ANA_BIAS, BIT(7), BIT(7));
/* pre-charge the analog bias, then release after 10ms */
regmap_update_bits(regmap, WCD937X_ANA_BIAS, BIT(6), BIT(6));
usleep_range(10000, 10010);
regmap_update_bits(regmap, WCD937X_ANA_BIAS, BIT(6), 0x00);
regmap_update_bits(regmap, WCD937X_HPH_SURGE_HPHLR_SURGE_EN, 0xff, 0xd9);
regmap_update_bits(regmap, WCD937X_MICB1_TEST_CTL_1, 0xff, 0xfa);
regmap_update_bits(regmap, WCD937X_MICB2_TEST_CTL_1, 0xff, 0xfa);
regmap_update_bits(regmap, WCD937X_MICB3_TEST_CTL_1, 0xff, 0xfa);
regmap_update_bits(regmap, WCD937X_MICB1_TEST_CTL_2, 0x38, 0x00);
regmap_update_bits(regmap, WCD937X_MICB2_TEST_CTL_2, 0x38, 0x00);
regmap_update_bits(regmap, WCD937X_MICB3_TEST_CTL_2, 0x38, 0x00);
/* Set Bandgap Fine Adjustment to +5mV for Tanggu SMIC part */
regmap_read(regmap, WCD937X_DIGITAL_EFUSE_REG_16, &val);
if (val == 0x01) {
regmap_update_bits(regmap, WCD937X_BIAS_VBG_FINE_ADJ, 0xF0, 0xB0);
} else if (val == 0x02) {
regmap_update_bits(regmap, WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0x1F, 0x04);
regmap_update_bits(regmap, WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0x1F, 0x04);
regmap_update_bits(regmap, WCD937X_BIAS_VBG_FINE_ADJ, 0xF0, 0xB0);
regmap_update_bits(regmap, WCD937X_HPH_NEW_INT_RDAC_GAIN_CTL, 0xF0, 0x50);
}
}
/* Enable the RX clock chain once: no-op if the rx_clk_cnt counter is
 * already non-zero, otherwise switch on the digital/analog clock bits,
 * RX supplies and RX0-2 paths in sequence and bump the counter. */
static int wcd937x_rx_clk_enable(struct snd_soc_component *component)
{
struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
if (atomic_read(&wcd937x->rx_clk_cnt))
return 0;
snd_soc_component_update_bits(component, WCD937X_DIGITAL_CDC_DIG_CLK_CTL, BIT(3), BIT(3));
snd_soc_component_update_bits(component, WCD937X_DIGITAL_CDC_ANA_CLK_CTL, BIT(0), BIT(0));
snd_soc_component_update_bits(component, WCD937X_ANA_RX_SUPPLIES, BIT(0), BIT(0));
snd_soc_component_update_bits(component, WCD937X_DIGITAL_CDC_RX0_CTL, BIT(6), 0x00);
snd_soc_component_update_bits(component, WCD937X_DIGITAL_CDC_RX1_CTL, BIT(6), 0x00);
snd_soc_component_update_bits(component, WCD937X_DIGITAL_CDC_RX2_CTL, BIT(6), 0x00);
snd_soc_component_update_bits(component, WCD937X_DIGITAL_CDC_ANA_CLK_CTL, BIT(1), BIT(1));
atomic_inc(&wcd937x->rx_clk_cnt);
return 0;
}
/* Counterpart of wcd937x_rx_clk_enable(): drop the counter and clear the
 * RX supply/clock bits.  Logs (but tolerates) an unbalanced call when the
 * counter is already zero; always returns 0. */
static int wcd937x_rx_clk_disable(struct snd_soc_component *component)
{
struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
if (!atomic_read(&wcd937x->rx_clk_cnt)) {
dev_err(component->dev, "clk already disabled\n");
return 0;
}
atomic_dec(&wcd937x->rx_clk_cnt);
snd_soc_component_update_bits(component, WCD937X_ANA_RX_SUPPLIES, BIT(0), 0x00);
snd_soc_component_update_bits(component, WCD937X_DIGITAL_CDC_ANA_CLK_CTL, BIT(1), 0x00);
snd_soc_component_update_bits(component, WCD937X_DIGITAL_CDC_ANA_CLK_CTL, BIT(0), 0x00);
return 0;
}
/* DAPM event handler for the HPHL DAC widget.
 * PRE_PMU: enable RX clocks, HPHL clock/gain bits, and flag a compander
 * settle delay.  POST_PMU: program RDAC HD2 control for the class-H mode
 * in use, switch the compander path per comp1/comp2_enable (waiting out
 * the flagged delay), and clear the HPH timer bit.  POST_PMD: restore
 * RDAC HD2 control to its default. */
static int wcd937x_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
int hph_mode = wcd937x->hph_mode;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
wcd937x_rx_clk_enable(component);
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
BIT(0), BIT(0));
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_HPH_GAIN_CTL,
BIT(2), BIT(2));
snd_soc_component_update_bits(component,
WCD937X_HPH_RDAC_CLK_CTL1,
BIT(7), 0x00);
set_bit(HPH_COMP_DELAY, &wcd937x->status_mask);
break;
case SND_SOC_DAPM_POST_PMU:
/* HD2 setting depends on the active class-H/AB mode */
if (hph_mode == CLS_AB_HIFI || hph_mode == CLS_H_HIFI)
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_L,
0x0f, BIT(1));
else if (hph_mode == CLS_H_LOHIFI)
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_L,
0x0f, 0x06);
if (wcd937x->comp1_enable) {
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_COMP_CTL_0,
BIT(1), BIT(1));
snd_soc_component_update_bits(component,
WCD937X_HPH_L_EN,
BIT(5), 0x00);
/* the right compander is enabled here too when requested */
if (wcd937x->comp2_enable) {
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_COMP_CTL_0,
BIT(0), BIT(0));
snd_soc_component_update_bits(component,
WCD937X_HPH_R_EN, BIT(5), 0x00);
}
/* give the compander time to settle after PRE_PMU */
if (test_bit(HPH_COMP_DELAY, &wcd937x->status_mask)) {
usleep_range(5000, 5110);
clear_bit(HPH_COMP_DELAY, &wcd937x->status_mask);
}
} else {
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_COMP_CTL_0,
BIT(1), 0x00);
snd_soc_component_update_bits(component,
WCD937X_HPH_L_EN,
BIT(5), BIT(5));
}
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_HPH_TIMER1,
BIT(1), 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_L,
0x0f, BIT(0));
break;
}
return 0;
}
/* DAPM event handler for the HPHR DAC widget — mirror image of the HPHL
 * handler: R-channel clock/gain/HD2/compander bits, same flow and delays. */
static int wcd937x_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
int hph_mode = wcd937x->hph_mode;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
wcd937x_rx_clk_enable(component);
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_DIG_CLK_CTL, BIT(1), BIT(1));
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_HPH_GAIN_CTL, BIT(3), BIT(3));
snd_soc_component_update_bits(component,
WCD937X_HPH_RDAC_CLK_CTL1, BIT(7), 0x00);
set_bit(HPH_COMP_DELAY, &wcd937x->status_mask);
break;
case SND_SOC_DAPM_POST_PMU:
/* HD2 setting depends on the active class-H/AB mode */
if (hph_mode == CLS_AB_HIFI || hph_mode == CLS_H_HIFI)
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_R,
0x0f, BIT(1));
else if (hph_mode == CLS_H_LOHIFI)
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_R,
0x0f, 0x06);
if (wcd937x->comp2_enable) {
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_COMP_CTL_0,
BIT(0), BIT(0));
snd_soc_component_update_bits(component,
WCD937X_HPH_R_EN, BIT(5), 0x00);
/* the left compander is enabled here too when requested */
if (wcd937x->comp1_enable) {
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_COMP_CTL_0,
BIT(1), BIT(1));
snd_soc_component_update_bits(component,
WCD937X_HPH_L_EN,
BIT(5), 0x00);
}
/* give the compander time to settle after PRE_PMU */
if (test_bit(HPH_COMP_DELAY, &wcd937x->status_mask)) {
usleep_range(5000, 5110);
clear_bit(HPH_COMP_DELAY, &wcd937x->status_mask);
}
} else {
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_COMP_CTL_0,
BIT(0), 0x00);
snd_soc_component_update_bits(component,
WCD937X_HPH_R_EN,
BIT(5), BIT(5));
}
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_HPH_TIMER1,
BIT(1), 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_R,
0x0f, BIT(0));
break;
}
return 0;
}
/* DAPM event handler for the EAR DAC widget.
 * PRE_PMU: enable RX clocks, gain/clock bits, mode-dependent HD2 control
 * and (optionally) compander 1, then disable the flyback bit and tell the
 * class-H controller an EAR DAC is coming up.  POST_PMD: undo the HD2 and
 * compander settings. */
static int wcd937x_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
int hph_mode = wcd937x->hph_mode;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
wcd937x_rx_clk_enable(component);
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_HPH_GAIN_CTL,
BIT(2), BIT(2));
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
BIT(0), BIT(0));
/* EAR shares the left-channel RDAC HD2 control */
if (hph_mode == CLS_AB_HIFI || hph_mode == CLS_H_HIFI)
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_L,
0x0f, BIT(1));
else if (hph_mode == CLS_H_LOHIFI)
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_L,
0x0f, 0x06);
if (wcd937x->comp1_enable)
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_COMP_CTL_0,
BIT(1), BIT(1));
usleep_range(5000, 5010);
snd_soc_component_update_bits(component, WCD937X_FLYBACK_EN, BIT(2), 0x00);
wcd_clsh_ctrl_set_state(wcd937x->clsh_info,
WCD_CLSH_EVENT_PRE_DAC,
WCD_CLSH_STATE_EAR,
hph_mode);
break;
case SND_SOC_DAPM_POST_PMD:
if (hph_mode == CLS_AB_HIFI || hph_mode == CLS_H_LOHIFI ||
hph_mode == CLS_H_HIFI)
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_RDAC_HD2_CTL_L,
0x0f, BIT(0));
if (wcd937x->comp1_enable)
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_COMP_CTL_0,
BIT(1), 0x00);
break;
}
return 0;
}
/* DAPM event handler for the AUX DAC widget.
 * PRE_PMU: enable RX clocks, the AUX analog/digital clock bits and AUX
 * gain, then notify the class-H controller.  POST_PMD: drop the AUX
 * analog clock bit again. */
static int wcd937x_codec_aux_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
int hph_mode = wcd937x->hph_mode;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
wcd937x_rx_clk_enable(component);
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_ANA_CLK_CTL,
BIT(2), BIT(2));
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
BIT(2), BIT(2));
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_AUX_GAIN_CTL,
BIT(0), BIT(0));
wcd_clsh_ctrl_set_state(wcd937x->clsh_info,
WCD_CLSH_EVENT_PRE_DAC,
WCD_CLSH_STATE_AUX,
hph_mode);
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_CDC_ANA_CLK_CTL,
BIT(2), 0x00);
break;
}
return 0;
}
/* DAPM event handler powering the HPHR PA.
 * PRE_PMU: tell class-H, enable the PA bit and arm a power-up delay plus
 * the PDM watchdog.  POST_PMU: wait out the ramp (shorter with the
 * compander on), clear the HPH timer, set class-AB supply bits when
 * applicable, and enable the watchdog IRQ.  PRE_PMD/POST_PMD: reverse the
 * sequence with MBHC notifications around the PA-off ramp. */
static int wcd937x_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
int hph_mode = wcd937x->hph_mode;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
wcd_clsh_ctrl_set_state(wcd937x->clsh_info,
WCD_CLSH_EVENT_PRE_DAC,
WCD_CLSH_STATE_HPHR,
hph_mode);
snd_soc_component_update_bits(component, WCD937X_ANA_HPH,
BIT(4), BIT(4));
usleep_range(100, 110);
set_bit(HPH_PA_DELAY, &wcd937x->status_mask);
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_PDM_WD_CTL1,
0x07, 0x03);
break;
case SND_SOC_DAPM_POST_PMU:
/* PA ramp: 7ms with the compander enabled, 20ms without */
if (test_bit(HPH_PA_DELAY, &wcd937x->status_mask)) {
if (wcd937x->comp2_enable)
usleep_range(7000, 7100);
else
usleep_range(20000, 20100);
clear_bit(HPH_PA_DELAY, &wcd937x->status_mask);
}
snd_soc_component_update_bits(component,
WCD937X_HPH_NEW_INT_HPH_TIMER1,
BIT(1), BIT(1));
if (hph_mode == CLS_AB || hph_mode == CLS_AB_HIFI)
snd_soc_component_update_bits(component,
WCD937X_ANA_RX_SUPPLIES,
BIT(1), BIT(1));
enable_irq(wcd937x->hphr_pdm_wd_int);
break;
case SND_SOC_DAPM_PRE_PMD:
disable_irq_nosync(wcd937x->hphr_pdm_wd_int);
set_bit(HPH_PA_DELAY, &wcd937x->status_mask);
wcd_mbhc_event_notify(wcd937x->wcd_mbhc, WCD_EVENT_PRE_HPHR_PA_OFF);
break;
case SND_SOC_DAPM_POST_PMD:
/* same ramp timing applies on the way down */
if (test_bit(HPH_PA_DELAY, &wcd937x->status_mask)) {
if (wcd937x->comp2_enable)
usleep_range(7000, 7100);
else
usleep_range(20000, 20100);
clear_bit(HPH_PA_DELAY, &wcd937x->status_mask);
}
wcd_mbhc_event_notify(wcd937x->wcd_mbhc, WCD_EVENT_POST_HPHR_PA_OFF);
snd_soc_component_update_bits(component,
WCD937X_DIGITAL_PDM_WD_CTL1, 0x07, 0x00);
snd_soc_component_update_bits(component, WCD937X_ANA_HPH,
BIT(4), 0x00);
wcd_clsh_ctrl_set_state(wcd937x->clsh_info,
WCD_CLSH_EVENT_POST_PA,
WCD_CLSH_STATE_HPHR,
hph_mode);
break;
}
return 0;
}
/*
 * DAPM event handler for the HPHL (left headphone) power amplifier.
 * Mirror image of the HPHR handler: uses ANA_HPH BIT(5), PDM_WD_CTL0,
 * the comp1 (left) compander and the HPHL watchdog interrupt.
 */
static int wcd937x_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
					struct snd_kcontrol *kcontrol,
					int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	int hph_mode = wcd937x->hph_mode;
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		wcd_clsh_ctrl_set_state(wcd937x->clsh_info,
					WCD_CLSH_EVENT_PRE_DAC,
					WCD_CLSH_STATE_HPHL,
					hph_mode);
		/* HPHL PA enable */
		snd_soc_component_update_bits(component, WCD937X_ANA_HPH,
					      BIT(5), BIT(5));
		usleep_range(100, 110);
		set_bit(HPH_PA_DELAY, &wcd937x->status_mask);
		/* Arm the HPHL PDM watchdog */
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_PDM_WD_CTL0, 0x07, 0x03);
		break;
	case SND_SOC_DAPM_POST_PMU:
		if (test_bit(HPH_PA_DELAY, &wcd937x->status_mask)) {
			/* PA ramp: longer without the compander */
			if (!wcd937x->comp1_enable)
				usleep_range(20000, 20100);
			else
				usleep_range(7000, 7100);
			clear_bit(HPH_PA_DELAY, &wcd937x->status_mask);
		}
		snd_soc_component_update_bits(component,
					      WCD937X_HPH_NEW_INT_HPH_TIMER1,
					      BIT(1), BIT(1));
		if (hph_mode == CLS_AB || hph_mode == CLS_AB_HIFI)
			snd_soc_component_update_bits(component,
						      WCD937X_ANA_RX_SUPPLIES,
						      BIT(1), BIT(1));
		enable_irq(wcd937x->hphl_pdm_wd_int);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		disable_irq_nosync(wcd937x->hphl_pdm_wd_int);
		set_bit(HPH_PA_DELAY, &wcd937x->status_mask);
		wcd_mbhc_event_notify(wcd937x->wcd_mbhc, WCD_EVENT_PRE_HPHL_PA_OFF);
		break;
	case SND_SOC_DAPM_POST_PMD:
		if (test_bit(HPH_PA_DELAY, &wcd937x->status_mask)) {
			if (!wcd937x->comp1_enable)
				usleep_range(20000, 20100);
			else
				usleep_range(7000, 7100);
			clear_bit(HPH_PA_DELAY, &wcd937x->status_mask);
		}
		wcd_mbhc_event_notify(wcd937x->wcd_mbhc, WCD_EVENT_POST_HPHL_PA_OFF);
		/* Disarm watchdog, then drop the PA enable bit */
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_PDM_WD_CTL0, 0x07, 0x00);
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_HPH, BIT(5), 0x00);
		wcd_clsh_ctrl_set_state(wcd937x->clsh_info,
					WCD_CLSH_EVENT_POST_PA,
					WCD_CLSH_STATE_HPHL,
					hph_mode);
		break;
	}
	return 0;
}
/*
 * DAPM event handler for the AUX power amplifier.
 * Programs the AUX PDM watchdog on power-up, enables the class-AB supply
 * bit when an AB mode is selected, and tears the watchdog down after the
 * class-H controller has seen the POST_PA event.
 */
static int wcd937x_codec_enable_aux_pa(struct snd_soc_dapm_widget *w,
				       struct snd_kcontrol *kcontrol,
				       int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	int hph_mode = wcd937x->hph_mode;
	u8 val;
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* Enable + timeout select + hold-off for the AUX watchdog */
		val = WCD937X_DIGITAL_PDM_WD_CTL2_EN |
		      WCD937X_DIGITAL_PDM_WD_CTL2_TIMEOUT_SEL |
		      WCD937X_DIGITAL_PDM_WD_CTL2_HOLD_OFF;
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_PDM_WD_CTL2,
					      WCD937X_DIGITAL_PDM_WD_CTL2_MASK,
					      val);
		break;
	case SND_SOC_DAPM_POST_PMU:
		usleep_range(1000, 1010);
		if (hph_mode == CLS_AB || hph_mode == CLS_AB_HIFI)
			snd_soc_component_update_bits(component,
						      WCD937X_ANA_RX_SUPPLIES,
						      BIT(1), BIT(1));
		enable_irq(wcd937x->aux_pdm_wd_int);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		disable_irq_nosync(wcd937x->aux_pdm_wd_int);
		break;
	case SND_SOC_DAPM_POST_PMD:
		usleep_range(2000, 2010);
		wcd_clsh_ctrl_set_state(wcd937x->clsh_info,
					WCD_CLSH_EVENT_POST_PA,
					WCD_CLSH_STATE_AUX,
					hph_mode);
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_PDM_WD_CTL2,
					      WCD937X_DIGITAL_PDM_WD_CTL2_MASK,
					      0x00);
		break;
	}
	return 0;
}
/*
 * DAPM event handler for the earpiece power amplifier.
 * The EAR output can be routed through either the HPHL or AUX path
 * (EAR_PATH_CTL mux); the handler snapshots the mux at power-up and
 * programs/enables the matching PDM watchdog and interrupt for the rest
 * of the power cycle.
 */
static int wcd937x_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
				       struct snd_kcontrol *kcontrol,
				       int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	int hph_mode = wcd937x->hph_mode;
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* Enable watchdog interrupt for HPHL or AUX depending on mux value */
		wcd937x->ear_rx_path = snd_soc_component_read(component,
							      WCD937X_DIGITAL_CDC_EAR_PATH_CTL);
		if (wcd937x->ear_rx_path & EAR_RX_PATH_AUX)
			snd_soc_component_update_bits(component,
						      WCD937X_DIGITAL_PDM_WD_CTL2,
						      BIT(0), BIT(0));
		else
			snd_soc_component_update_bits(component,
						      WCD937X_DIGITAL_PDM_WD_CTL0,
						      0x07, 0x03);
		/* Without the compander, force the EAR compander override bit */
		if (!wcd937x->comp1_enable)
			snd_soc_component_update_bits(component,
						      WCD937X_ANA_EAR_COMPANDER_CTL,
						      BIT(7), BIT(7));
		break;
	case SND_SOC_DAPM_POST_PMU:
		usleep_range(6000, 6010);
		if (hph_mode == CLS_AB || hph_mode == CLS_AB_HIFI)
			snd_soc_component_update_bits(component,
						      WCD937X_ANA_RX_SUPPLIES,
						      BIT(1), BIT(1));
		if (wcd937x->ear_rx_path & EAR_RX_PATH_AUX)
			enable_irq(wcd937x->aux_pdm_wd_int);
		else
			enable_irq(wcd937x->hphl_pdm_wd_int);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		if (wcd937x->ear_rx_path & EAR_RX_PATH_AUX)
			disable_irq_nosync(wcd937x->aux_pdm_wd_int);
		else
			disable_irq_nosync(wcd937x->hphl_pdm_wd_int);
		break;
	case SND_SOC_DAPM_POST_PMD:
		if (!wcd937x->comp1_enable)
			snd_soc_component_update_bits(component,
						      WCD937X_ANA_EAR_COMPANDER_CTL,
						      BIT(7), 0x00);
		usleep_range(7000, 7010);
		wcd_clsh_ctrl_set_state(wcd937x->clsh_info,
					WCD_CLSH_EVENT_POST_PA,
					WCD_CLSH_STATE_EAR,
					hph_mode);
		snd_soc_component_update_bits(component, WCD937X_FLYBACK_EN,
					      BIT(2), BIT(2));
		/* Disarm whichever watchdog was armed at PRE_PMU */
		if (wcd937x->ear_rx_path & EAR_RX_PATH_AUX)
			snd_soc_component_update_bits(component,
						      WCD937X_DIGITAL_PDM_WD_CTL2,
						      BIT(0), 0x00);
		else
			snd_soc_component_update_bits(component,
						      WCD937X_DIGITAL_PDM_WD_CTL0,
						      0x07, 0x00);
		break;
	}
	return 0;
}
/*
 * DAPM event handler for the RX1 path.
 * Only the power-down edge needs work: stop the shared RX clocks and gate
 * the RX0 digital path clock (DIG_CLK_CTL bit 0).
 */
static int wcd937x_enable_rx1(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol,
			      int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);

	if (event != SND_SOC_DAPM_POST_PMD)
		return 0;

	wcd937x_rx_clk_disable(component);
	snd_soc_component_update_bits(component,
				      WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
				      BIT(0), 0x00);
	return 0;
}
/*
 * DAPM event handler for the RX2 path.
 * On power-down, stop the shared RX clocks and gate the RX1 digital path
 * clock (DIG_CLK_CTL bit 1).
 */
static int wcd937x_enable_rx2(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);

	if (event != SND_SOC_DAPM_POST_PMD)
		return 0;

	wcd937x_rx_clk_disable(component);
	snd_soc_component_update_bits(component,
				      WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
				      BIT(1), 0x00);
	return 0;
}
/*
 * DAPM event handler for the RX3 path.
 * On power-down, wait ~6 ms before stopping the shared RX clocks and
 * gating the RX2 digital path clock (DIG_CLK_CTL bit 2).
 */
static int wcd937x_enable_rx3(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol,
			      int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);

	if (event != SND_SOC_DAPM_POST_PMD)
		return 0;

	usleep_range(6000, 6010);
	wcd937x_rx_clk_disable(component);
	snd_soc_component_update_bits(component,
				      WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
				      BIT(2), 0x00);
	return 0;
}
/*
 * Convert a micbias voltage in millivolts to the VOUT_CTL register code.
 * The hardware supports 1000..2850 mV in 50 mV steps (code 0 = 1000 mV).
 * Returns the register code, or -EINVAL for an out-of-range request.
 */
static int wcd937x_get_micb_vout_ctl_val(u32 micb_mv)
{
	if (micb_mv >= 1000 && micb_mv <= 2850)
		return (micb_mv - 1000) / 50;

	pr_err("Unsupported micbias voltage (%u mV)\n", micb_mv);
	return -EINVAL;
}
/*
 * TX SoundWire control DAPM hook.
 * On power-up of an ADC widget at shift 1 (AMIC2 path) when AMIC3 is not
 * selected, latch the button-current-source (BCS) flag so headset-mic
 * button detection can be configured elsewhere.
 */
static int wcd937x_tx_swr_ctrl(struct snd_soc_dapm_widget *w,
			       struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	bool use_amic3 = snd_soc_component_read(component, WCD937X_TX_NEW_TX_CH2_SEL) & BIT(7);

	if (event != SND_SOC_DAPM_PRE_PMU)
		return 0;

	/* Enable BCS for Headset mic */
	if (strnstr(w->name, "ADC", sizeof("ADC")) &&
	    w->shift == 1 && !use_amic3)
		set_bit(AMIC2_BCS_ENABLE, &wcd937x->status_mask);

	return 0;
}
/*
 * DAPM event handler for an analog ADC.
 * Bumps the shared analog-clock refcount and ungates the TX analog clocks
 * on power-up; clears the AMIC2 BCS flag (if it was set for this widget)
 * and gates ANA_CLK_CTL bit 3 on power-down.  Note: the refcount is
 * decremented later in wcd937x_enable_req(), not here.
 */
static int wcd937x_codec_enable_adc(struct snd_soc_dapm_widget *w,
				    struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		atomic_inc(&wcd937x->ana_clk_count);
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_DIG_CLK_CTL, BIT(7), BIT(7));
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_ANA_CLK_CTL, BIT(3), BIT(3));
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_ANA_CLK_CTL, BIT(4), BIT(4));
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* shift 1 == AMIC2; drop the BCS flag set in wcd937x_tx_swr_ctrl() */
		if (w->shift == 1 && test_bit(AMIC2_BCS_ENABLE, &wcd937x->status_mask))
			clear_bit(AMIC2_BCS_ENABLE, &wcd937x->status_mask);
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_ANA_CLK_CTL, BIT(3), 0x00);
		break;
	}
	return 0;
}
/*
 * DAPM event handler for the TX request path.
 * Power-up programs the CDC request control and the TX channel enable
 * bits in the documented order (CH2 bit 6 is pulsed around the CH1
 * enable).  Power-down reverses the channel enables, then releases the
 * analog clock: the shared refcount (taken in wcd937x_codec_enable_adc)
 * is dropped here and ANA_CLK_CTL bit 4 is gated once no user remains.
 */
static int wcd937x_enable_req(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_REQ_CTL, BIT(1), BIT(1));
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_REQ_CTL, BIT(0), 0x00);
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH2, BIT(6), BIT(6));
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH3_HPF, BIT(6), BIT(6));
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_DIG_CLK_CTL, 0x70, 0x70);
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH1, BIT(7), BIT(7));
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH2, BIT(6), 0x00);
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH2, BIT(7), BIT(7));
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH3, BIT(7), BIT(7));
		break;
	case SND_SOC_DAPM_POST_PMD:
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH1, BIT(7), 0x00);
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH2, BIT(7), 0x00);
		snd_soc_component_update_bits(component,
					      WCD937X_ANA_TX_CH3, BIT(7), 0x00);
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_DIG_CLK_CTL, BIT(4), 0x00);
		/* Release shared analog clock; gate it when the last user leaves */
		atomic_dec(&wcd937x->ana_clk_count);
		if (atomic_read(&wcd937x->ana_clk_count) <= 0) {
			snd_soc_component_update_bits(component,
						      WCD937X_DIGITAL_CDC_ANA_CLK_CTL,
						      BIT(4), 0x00);
			atomic_set(&wcd937x->ana_clk_count, 0);
		}
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
					      BIT(7), 0x00);
		break;
	}
	return 0;
}
/*
 * DAPM event handler for a digital microphone.
 * Each DMIC control register serves a pair of mics (widget shifts 0/1,
 * 2/3, 4/5).  On power-up the handler enables the TX digital clock and
 * programs the DMIC clock divider/enable bits.  There is no power-down
 * case here.
 */
static int wcd937x_codec_enable_dmic(struct snd_soc_dapm_widget *w,
				     struct snd_kcontrol *kcontrol,
				     int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	u16 dmic_clk_reg;
	switch (w->shift) {
	case 0:
	case 1:
		dmic_clk_reg = WCD937X_DIGITAL_CDC_DMIC1_CTL;
		break;
	case 2:
	case 3:
		dmic_clk_reg = WCD937X_DIGITAL_CDC_DMIC2_CTL;
		break;
	case 4:
	case 5:
		dmic_clk_reg = WCD937X_DIGITAL_CDC_DMIC3_CTL;
		break;
	default:
		dev_err(component->dev, "Invalid DMIC Selection\n");
		return -EINVAL;
	}
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		snd_soc_component_update_bits(component,
					      WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
					      BIT(7), BIT(7));
		/* clock select, then enable, then rate field */
		snd_soc_component_update_bits(component,
					      dmic_clk_reg, 0x07, BIT(1));
		snd_soc_component_update_bits(component,
					      dmic_clk_reg, BIT(3), BIT(3));
		snd_soc_component_update_bits(component,
					      dmic_clk_reg, 0x70, BIT(5));
		break;
	}
	return 0;
}
/*
 * Reference-counted control of the three micbias supplies.
 *
 * @component: codec component
 * @micb_num:  MIC_BIAS_1..MIC_BIAS_3
 * @req:       MICB_ENABLE / MICB_DISABLE / MICB_PULLUP_ENABLE /
 *             MICB_PULLUP_DISABLE
 * @is_dapm:   true when called from a DAPM widget (adds the DAPM-specific
 *             MBHC notifications for MIC_BIAS_2)
 *
 * micb_ref[] counts full-enable users and pullup_ref[] counts pull-up
 * users per supply; the register field (bits 7:6 of the ANA_MICBn
 * register) is only changed on 0<->1 transitions so nested users do not
 * fight each other.  MBHC is notified around MIC_BIAS_2 transitions so
 * headset detection can track the supply.  Returns 0 or -EINVAL.
 */
static int wcd937x_micbias_control(struct snd_soc_component *component,
				   int micb_num, int req, bool is_dapm)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	int micb_index = micb_num - 1;
	u16 micb_reg;
	if (micb_index < 0 || (micb_index > WCD937X_MAX_MICBIAS - 1)) {
		dev_err(component->dev, "Invalid micbias index, micb_ind:%d\n", micb_index);
		return -EINVAL;
	}
	switch (micb_num) {
	case MIC_BIAS_1:
		micb_reg = WCD937X_ANA_MICB1;
		break;
	case MIC_BIAS_2:
		micb_reg = WCD937X_ANA_MICB2;
		break;
	case MIC_BIAS_3:
		micb_reg = WCD937X_ANA_MICB3;
		break;
	default:
		dev_err(component->dev, "Invalid micbias number: %d\n", micb_num);
		return -EINVAL;
	}
	mutex_lock(&wcd937x->micb_lock);
	switch (req) {
	case MICB_PULLUP_ENABLE:
		wcd937x->pullup_ref[micb_index]++;
		/* Pull-up mode (BIT(7)) only if nobody holds a full enable */
		if (wcd937x->pullup_ref[micb_index] == 1 &&
		    wcd937x->micb_ref[micb_index] == 0)
			snd_soc_component_update_bits(component, micb_reg,
						      0xc0, BIT(7));
		break;
	case MICB_PULLUP_DISABLE:
		/*
		 * Fix: drop a pull-up reference.  This previously incremented
		 * the counter, so it could never reach zero and the pull-up
		 * was never switched off.
		 */
		if (wcd937x->pullup_ref[micb_index] > 0)
			wcd937x->pullup_ref[micb_index]--;
		if (wcd937x->pullup_ref[micb_index] == 0 &&
		    wcd937x->micb_ref[micb_index] == 0)
			snd_soc_component_update_bits(component, micb_reg,
						      0xc0, 0x00);
		break;
	case MICB_ENABLE:
		wcd937x->micb_ref[micb_index]++;
		atomic_inc(&wcd937x->ana_clk_count);
		if (wcd937x->micb_ref[micb_index] == 1) {
			/* First user: clocks on, test-control prep, then enable */
			snd_soc_component_update_bits(component,
						      WCD937X_DIGITAL_CDC_DIG_CLK_CTL,
						      0xf0, 0xf0);
			snd_soc_component_update_bits(component,
						      WCD937X_DIGITAL_CDC_ANA_CLK_CTL,
						      BIT(4), BIT(4));
			snd_soc_component_update_bits(component,
						      WCD937X_MICB1_TEST_CTL_2,
						      BIT(0), BIT(0));
			snd_soc_component_update_bits(component,
						      WCD937X_MICB2_TEST_CTL_2,
						      BIT(0), BIT(0));
			snd_soc_component_update_bits(component,
						      WCD937X_MICB3_TEST_CTL_2,
						      BIT(0), BIT(0));
			snd_soc_component_update_bits(component,
						      micb_reg, 0xc0, BIT(6));
			if (micb_num == MIC_BIAS_2)
				wcd_mbhc_event_notify(wcd937x->wcd_mbhc,
						      WCD_EVENT_POST_MICBIAS_2_ON);
			if (micb_num == MIC_BIAS_2 && is_dapm)
				wcd_mbhc_event_notify(wcd937x->wcd_mbhc,
						      WCD_EVENT_POST_DAPM_MICBIAS_2_ON);
		}
		break;
	case MICB_DISABLE:
		atomic_dec(&wcd937x->ana_clk_count);
		if (wcd937x->micb_ref[micb_index] > 0)
			wcd937x->micb_ref[micb_index]--;
		/* Last full-enable user gone: fall back to pull-up or off */
		if (wcd937x->micb_ref[micb_index] == 0 &&
		    wcd937x->pullup_ref[micb_index] > 0)
			snd_soc_component_update_bits(component, micb_reg,
						      0xc0, BIT(7));
		else if (wcd937x->micb_ref[micb_index] == 0 &&
			 wcd937x->pullup_ref[micb_index] == 0) {
			if (micb_num == MIC_BIAS_2)
				wcd_mbhc_event_notify(wcd937x->wcd_mbhc,
						      WCD_EVENT_PRE_MICBIAS_2_OFF);
			snd_soc_component_update_bits(component, micb_reg,
						      0xc0, 0x00);
			if (micb_num == MIC_BIAS_2)
				wcd_mbhc_event_notify(wcd937x->wcd_mbhc,
						      WCD_EVENT_POST_MICBIAS_2_OFF);
		}
		if (is_dapm && micb_num == MIC_BIAS_2)
			wcd_mbhc_event_notify(wcd937x->wcd_mbhc,
					      WCD_EVENT_POST_DAPM_MICBIAS_2_OFF);
		/* Gate the shared analog clock when the last user leaves */
		if (atomic_read(&wcd937x->ana_clk_count) <= 0) {
			snd_soc_component_update_bits(component,
						      WCD937X_DIGITAL_CDC_ANA_CLK_CTL,
						      BIT(4), 0x00);
			atomic_set(&wcd937x->ana_clk_count, 0);
		}
		break;
	}
	mutex_unlock(&wcd937x->micb_lock);
	return 0;
}
/*
 * Shared DAPM body for the micbias supply widgets; the widget shift
 * encodes the micbias number.  Full enable on PRE_PMU, 1 ms settle after
 * PMU, disable on POST_PMD.
 */
static int __wcd937x_codec_enable_micbias(struct snd_soc_dapm_widget *w,
					  int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	int micb_num = w->shift;
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		wcd937x_micbias_control(component, micb_num,
					MICB_ENABLE, true);
		break;
	case SND_SOC_DAPM_POST_PMU:
		/* allow the supply to settle */
		usleep_range(1000, 1100);
		break;
	case SND_SOC_DAPM_POST_PMD:
		wcd937x_micbias_control(component, micb_num,
					MICB_DISABLE, true);
		break;
	}
	return 0;
}
/* DAPM callback wrapper matching the snd_soc_dapm event signature. */
static int wcd937x_codec_enable_micbias(struct snd_soc_dapm_widget *w,
					struct snd_kcontrol *kcontrol,
					int event)
{
	return __wcd937x_codec_enable_micbias(w, event);
}
/*
 * Shared DAPM body for the micbias pull-up widgets; same structure as
 * __wcd937x_codec_enable_micbias() but requests pull-up mode instead of
 * a full enable.
 */
static int __wcd937x_codec_enable_micbias_pullup(struct snd_soc_dapm_widget *w,
						 int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	int micb_num = w->shift;
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		wcd937x_micbias_control(component, micb_num, MICB_PULLUP_ENABLE, true);
		break;
	case SND_SOC_DAPM_POST_PMU:
		/* allow the supply to settle */
		usleep_range(1000, 1100);
		break;
	case SND_SOC_DAPM_POST_PMD:
		wcd937x_micbias_control(component, micb_num, MICB_PULLUP_DISABLE, true);
		break;
	}
	return 0;
}
/* DAPM callback wrapper matching the snd_soc_dapm event signature. */
static int wcd937x_codec_enable_micbias_pullup(struct snd_soc_dapm_widget *w,
					       struct snd_kcontrol *kcontrol,
					       int event)
{
	return __wcd937x_codec_enable_micbias_pullup(w, event);
}
/*
 * Add or remove a channel from a SoundWire port configuration.
 * port_idx is 1-based into port_config[]; ch_id indexes ch_info[], which
 * supplies both the hardware port number and the channel mask bit(s).
 * Always returns 0.
 */
static int wcd937x_connect_port(struct wcd937x_sdw_priv *wcd, u8 port_idx, u8 ch_id, bool enable)
{
	const struct wcd937x_sdw_ch_info *ch_info = &wcd->ch_info[ch_id];
	struct sdw_port_config *cfg = &wcd->port_config[port_idx - 1];

	cfg->num = ch_info->port_num;
	if (enable)
		cfg->ch_mask |= ch_info->ch_mask;
	else
		cfg->ch_mask &= ~ch_info->ch_mask;

	return 0;
}
/* ALSA control get: report the current headphone class-H/AB mode. */
static int wcd937x_rx_hph_mode_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
	struct wcd937x_priv *priv = snd_soc_component_get_drvdata(comp);

	ucontrol->value.integer.value[0] = priv->hph_mode;

	return 0;
}
/*
 * ALSA control put: select the headphone class-H/AB mode.
 * A zero selection falls back to CLS_AB.  Returns 1 on change, 0 when the
 * value is unchanged, -EINVAL for an unknown mode.
 */
static int wcd937x_rx_hph_mode_put(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	u32 mode_val = ucontrol->value.enumerated.item[0];

	if (!mode_val)
		mode_val = CLS_AB;

	if (mode_val == wcd937x->hph_mode)
		return 0;

	switch (mode_val) {
	case CLS_H_NORMAL:
	case CLS_H_HIFI:
	case CLS_H_LP:
	case CLS_AB:
	case CLS_H_LOHIFI:
	case CLS_H_ULP:
	case CLS_AB_LP:
	case CLS_AB_HIFI:
		wcd937x->hph_mode = mode_val;
		return 1;
	default:
		dev_dbg(component->dev, "%s: Invalid HPH Mode\n", __func__);
		return -EINVAL;
	}
}
/*
 * ALSA control get for the HPH companders.  The mixer-control shift
 * selects the channel: non-zero = HPHR (comp2), zero = HPHL (comp1).
 */
static int wcd937x_get_compander(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;

	if (mc->shift)
		ucontrol->value.integer.value[0] = wcd937x->comp2_enable;
	else
		ucontrol->value.integer.value[0] = wcd937x->comp1_enable;

	return 0;
}
/*
 * ALSA control put for the HPH companders.  Updates the cached enable
 * flag (comp2 for HPHR, comp1 for HPHL) and mirrors the state into the
 * corresponding SoundWire port channel mask.  Returns 1 on change, 0 when
 * the value is already current.
 */
static int wcd937x_set_compander(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	struct wcd937x_sdw_priv *wcd = wcd937x->sdw_priv[AIF1_PB];
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int value = ucontrol->value.integer.value[0];
	int portidx;

	if (mc->shift) {
		if (value == wcd937x->comp2_enable)
			return 0;
		wcd937x->comp2_enable = value;
	} else {
		if (value == wcd937x->comp1_enable)
			return 0;
		wcd937x->comp1_enable = value;
	}

	/* Keep the SoundWire port channel mask in sync with the control */
	portidx = wcd->ch_info[mc->reg].port_num;
	wcd937x_connect_port(wcd, portidx, mc->reg, value);

	return 1;
}
/*
 * ALSA control get for a SoundWire port enable.  The mixer control packs
 * the DAI id in .shift and the channel index in .reg.
 */
static int wcd937x_get_swr_port(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(comp);
	struct wcd937x_sdw_priv *wcd = wcd937x->sdw_priv[mixer->shift];
	int portidx = wcd->ch_info[mixer->reg].port_num;

	ucontrol->value.integer.value[0] = wcd->port_enable[portidx];

	return 0;
}
/*
 * ALSA control put for a SoundWire port enable.  The port channel mask is
 * refreshed even when the enable state is unchanged (the connect call is
 * made on both paths); returns 1 only when the state actually flipped.
 */
static int wcd937x_set_swr_port(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(comp);
	struct wcd937x_sdw_priv *wcd = wcd937x->sdw_priv[mixer->shift];
	int ch_idx = mixer->reg;
	int portidx = wcd->ch_info[ch_idx].port_num;
	bool enable = ucontrol->value.integer.value[0];
	bool changed = (enable != wcd->port_enable[portidx]);

	wcd->port_enable[portidx] = enable;
	wcd937x_connect_port(wcd, portidx, ch_idx, enable);

	return changed ? 1 : 0;
}
/* Selection texts for the headphone class-H/AB mode kcontrol; item order
 * mirrors the CLS_* enum (item 1 is an unused/invalid slot). */
static const char * const rx_hph_mode_mux_text[] = {
	"CLS_H_NORMAL", "CLS_H_INVALID", "CLS_H_HIFI", "CLS_H_LP", "CLS_AB",
	"CLS_H_LOHIFI", "CLS_H_ULP", "CLS_AB_LP", "CLS_AB_HIFI",
};
static const struct soc_enum rx_hph_mode_mux_enum =
	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(rx_hph_mode_mux_text), rx_hph_mode_mux_text);
/* MBHC related */
/* MBHC callback: gate/ungate the MBHC RCO clock. */
static void wcd937x_mbhc_clk_setup(struct snd_soc_component *component,
				   bool enable)
{
	snd_soc_component_write_field(component, WCD937X_MBHC_NEW_CTL_1,
				      WCD937X_MBHC_CTL_RCO_EN_MASK, enable);
}
/* MBHC callback: enable/disable the MBHC analog bias. */
static void wcd937x_mbhc_mbhc_bias_control(struct snd_soc_component *component,
					   bool enable)
{
	snd_soc_component_write_field(component, WCD937X_ANA_MBHC_ELECT,
				      WCD937X_ANA_MBHC_BIAS_EN, enable);
}
/*
 * MBHC callback: program the button-detect voltage thresholds.
 * Each button register takes a 6-bit code; btn_high[] values are in mV
 * and are scaled by 2/25 here (btn_low and is_micbias are unused by this
 * codec).
 */
static void wcd937x_mbhc_program_btn_thr(struct snd_soc_component *component,
					 int *btn_low, int *btn_high,
					 int num_btn, bool is_micbias)
{
	int i, vth;
	if (num_btn > WCD_MBHC_DEF_BUTTONS) {
		dev_err(component->dev, "%s: invalid number of buttons: %d\n",
			__func__, num_btn);
		return;
	}
	for (i = 0; i < num_btn; i++) {
		/* mV -> 6-bit register code */
		vth = ((btn_high[i] * 2) / 25) & 0x3F;
		snd_soc_component_write_field(component, WCD937X_ANA_MBHC_BTN0 + i,
					      WCD937X_MBHC_BTN_VTH_MASK, vth);
	}
}
/*
 * MBHC callback: report whether a micbias is fully enabled.
 * Only MIC_BIAS_2 is meaningful for headset detection; every other
 * supply reports false.
 */
static bool wcd937x_mbhc_micb_en_status(struct snd_soc_component *component, int micb_num)
{
	u8 val;
	if (micb_num == MIC_BIAS_2) {
		val = snd_soc_component_read_field(component,
						   WCD937X_ANA_MICB2,
						   WCD937X_ANA_MICB2_ENABLE_MASK);
		if (val == WCD937X_MICB_ENABLE)
			return true;
	}
	return false;
}
/*
 * MBHC callback: set the HPHL mechanical-detect pull-up current.
 * Out-of-range requests fall back to the 2 uA default.
 */
static void wcd937x_mbhc_hph_l_pull_up_control(struct snd_soc_component *component,
					       int pull_up_cur)
{
	/* Default pull up current to 2uA */
	if (pull_up_cur > HS_PULLUP_I_OFF || pull_up_cur < HS_PULLUP_I_3P0_UA)
		pull_up_cur = HS_PULLUP_I_2P0_UA;
	snd_soc_component_write_field(component,
				      WCD937X_MBHC_NEW_INT_MECH_DET_CURRENT,
				      WCD937X_HSDET_PULLUP_C_MASK, pull_up_cur);
}
/* MBHC callback: micbias request path (non-DAPM) into the shared
 * reference-counted controller. */
static int wcd937x_mbhc_request_micbias(struct snd_soc_component *component,
					int micb_num, int req)
{
	return wcd937x_micbias_control(component, micb_num, req, false);
}
/*
 * MBHC callback: control the MICB2 ramp generator.
 * Enable order is shift-control first then ramp enable; disable reverses
 * the order.
 */
static void wcd937x_mbhc_micb_ramp_control(struct snd_soc_component *component,
					   bool enable)
{
	if (enable) {
		snd_soc_component_write_field(component, WCD937X_ANA_MICB2_RAMP,
					      WCD937X_RAMP_SHIFT_CTRL_MASK, 0x0C);
		snd_soc_component_write_field(component, WCD937X_ANA_MICB2_RAMP,
					      WCD937X_RAMP_EN_MASK, 1);
	} else {
		snd_soc_component_write_field(component, WCD937X_ANA_MICB2_RAMP,
					      WCD937X_RAMP_EN_MASK, 0);
		snd_soc_component_write_field(component, WCD937X_ANA_MICB2_RAMP,
					      WCD937X_RAMP_SHIFT_CTRL_MASK, 0);
	}
}
/*
 * Adjust a micbias output voltage without a slow ramp.
 * If the supply is currently enabled it is dropped to pull-up mode while
 * the VOUT code changes, then re-enabled with a 2 ms settle.  Returns 0,
 * or -EINVAL for an unsupported voltage.
 */
static int wcd937x_mbhc_micb_adjust_voltage(struct snd_soc_component *component,
					    int req_volt, int micb_num)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	int cur_vout_ctl, req_vout_ctl, micb_reg, micb_en, ret = 0;
	switch (micb_num) {
	case MIC_BIAS_1:
		micb_reg = WCD937X_ANA_MICB1;
		break;
	case MIC_BIAS_2:
		micb_reg = WCD937X_ANA_MICB2;
		break;
	case MIC_BIAS_3:
		micb_reg = WCD937X_ANA_MICB3;
		break;
	default:
		return -EINVAL;
	}
	mutex_lock(&wcd937x->micb_lock);
	/*
	 * If requested micbias voltage is same as current micbias
	 * voltage, then just return. Otherwise, adjust voltage as
	 * per requested value. If micbias is already enabled, then
	 * to avoid slow micbias ramp-up or down enable pull-up
	 * momentarily, change the micbias value and then re-enable
	 * micbias.
	 */
	micb_en = snd_soc_component_read_field(component, micb_reg,
					       WCD937X_MICB_EN_MASK);
	cur_vout_ctl = snd_soc_component_read_field(component, micb_reg,
						    WCD937X_MICB_VOUT_MASK);
	req_vout_ctl = wcd937x_get_micb_vout_ctl_val(req_volt);
	if (req_vout_ctl < 0) {
		ret = -EINVAL;
		goto exit;
	}
	if (cur_vout_ctl == req_vout_ctl) {
		ret = 0;
		goto exit;
	}
	if (micb_en == WCD937X_MICB_ENABLE)
		snd_soc_component_write_field(component, micb_reg,
					      WCD937X_MICB_EN_MASK,
					      WCD937X_MICB_PULL_UP);
	snd_soc_component_write_field(component, micb_reg,
				      WCD937X_MICB_VOUT_MASK,
				      req_vout_ctl);
	if (micb_en == WCD937X_MICB_ENABLE) {
		snd_soc_component_write_field(component, micb_reg,
					      WCD937X_MICB_EN_MASK,
					      WCD937X_MICB_ENABLE);
		/*
		 * Add 2ms delay as per HW requirement after enabling
		 * micbias
		 */
		usleep_range(2000, 2100);
	}
exit:
	mutex_unlock(&wcd937x->micb_lock);
	return ret;
}
/*
 * MBHC callback: raise MIC_BIAS_2 to the threshold-mic detection voltage
 * (or restore the DT-configured level when req_en is false).  Only
 * MIC_BIAS_2 participates; other supplies return -EINVAL.
 */
static int wcd937x_mbhc_micb_ctrl_threshold_mic(struct snd_soc_component *component,
						int micb_num, bool req_en)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	int micb_mv;
	if (micb_num != MIC_BIAS_2)
		return -EINVAL;
	/*
	 * If device tree micbias level is already above the minimum
	 * voltage needed to detect threshold microphone, then do
	 * not change the micbias, just return.
	 */
	if (wcd937x->micb2_mv >= WCD_MBHC_THR_HS_MICB_MV)
		return 0;
	micb_mv = req_en ? WCD_MBHC_THR_HS_MICB_MV : wcd937x->micb2_mv;
	return wcd937x_mbhc_micb_adjust_voltage(component, micb_mv, MIC_BIAS_2);
}
/*
 * Read one impedance-detection (Z-det) measurement and convert it to
 * milliohms.
 *
 * The hardware ramps a current source; RESULT_2 bit 7 flags completion
 * and the combined RESULT_2:RESULT_1 word carries the x1 count and c1
 * range code.  *zdet is written in milliohms, or with
 * WCD937X_ZDET_FLOATING_IMPEDANCE when x1 is below the per-noff minimum
 * code.  On a ramp error (c1 or x1 zero) *zdet is left untouched.  The
 * tail loop polls until x1 reads back zero so the ramp is fully wound
 * down before the next measurement.
 */
static void wcd937x_mbhc_get_result_params(struct snd_soc_component *component,
					   s16 *d1_a, u16 noff,
					   int32_t *zdet)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	int i;
	int val, val1;
	s16 c1;
	s32 x1, d1;
	s32 denom;
	/* minimum valid x1 code per noff (range) setting */
	static const int minCode_param[] = {
		3277, 1639, 820, 410, 205, 103, 52, 26
	};
	regmap_update_bits(wcd937x->regmap, WCD937X_ANA_MBHC_ZDET, 0x20, 0x20);
	/* poll for ramp completion (RESULT_2 bit 7) */
	for (i = 0; i < WCD937X_ZDET_NUM_MEASUREMENTS; i++) {
		regmap_read(wcd937x->regmap, WCD937X_ANA_MBHC_RESULT_2, &val);
		if (val & 0x80)
			break;
	}
	val = val << 0x8;
	regmap_read(wcd937x->regmap, WCD937X_ANA_MBHC_RESULT_1, &val1);
	val |= val1;
	regmap_update_bits(wcd937x->regmap, WCD937X_ANA_MBHC_ZDET, 0x20, 0x00);
	x1 = WCD937X_MBHC_GET_X1(val);
	c1 = WCD937X_MBHC_GET_C1(val);
	/* If ramp is not complete, give additional 5ms */
	if (c1 < 2 && x1)
		usleep_range(5000, 5050);
	if (!c1 || !x1) {
		dev_err(component->dev, "Impedance detect ramp error, c1=%d, x1=0x%x\n",
			c1, x1);
		goto ramp_down;
	}
	d1 = d1_a[c1];
	denom = (x1 * d1) - (1 << (14 - noff));
	if (denom > 0)
		*zdet = (WCD937X_MBHC_ZDET_CONST * 1000) / denom;
	else if (x1 < minCode_param[noff])
		*zdet = WCD937X_ZDET_FLOATING_IMPEDANCE;
	dev_err(component->dev, "%s: d1=%d, c1=%d, x1=0x%x, z_val=%d (milliohm)\n",
		__func__, d1, c1, x1, *zdet);
ramp_down:
	/* wait for the ramp counter to wind down to zero */
	i = 0;
	while (x1) {
		regmap_read(wcd937x->regmap,
			    WCD937X_ANA_MBHC_RESULT_1, &val);
		regmap_read(wcd937x->regmap,
			    WCD937X_ANA_MBHC_RESULT_2, &val1);
		val = val << 0x08;
		val |= val1;
		x1 = WCD937X_MBHC_GET_X1(val);
		i++;
		if (i == WCD937X_ZDET_NUM_MEASUREMENTS)
			break;
	}
}
/*
 * Run one Z-det ramp with the supplied parameter set and collect the
 * result(s).  zl/zr select which channel(s) to measure: pass NULL to
 * skip a channel.  ZDET bit 7 starts the left measurement, bit 6 the
 * right; wcd937x_mbhc_get_result_params() does the actual readout.
 */
static void wcd937x_mbhc_zdet_ramp(struct snd_soc_component *component,
				   struct wcd937x_mbhc_zdet_param *zdet_param,
				   s32 *zl, s32 *zr, s16 *d1_a)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	s32 zdet = 0;
	snd_soc_component_write_field(component, WCD937X_MBHC_NEW_ZDET_ANA_CTL,
				      WCD937X_ZDET_MAXV_CTL_MASK, zdet_param->ldo_ctl);
	snd_soc_component_update_bits(component, WCD937X_ANA_MBHC_BTN5,
				      WCD937X_VTH_MASK, zdet_param->btn5);
	snd_soc_component_update_bits(component, WCD937X_ANA_MBHC_BTN6,
				      WCD937X_VTH_MASK, zdet_param->btn6);
	snd_soc_component_update_bits(component, WCD937X_ANA_MBHC_BTN7,
				      WCD937X_VTH_MASK, zdet_param->btn7);
	snd_soc_component_write_field(component, WCD937X_MBHC_NEW_ZDET_ANA_CTL,
				      WCD937X_ZDET_RANGE_CTL_MASK, zdet_param->noff);
	snd_soc_component_update_bits(component, WCD937X_MBHC_NEW_ZDET_RAMP_CTL,
				      0x0F, zdet_param->nshift);
	if (!zl)
		goto z_right;
	/* Start impedance measurement for HPH_L */
	regmap_update_bits(wcd937x->regmap,
			   WCD937X_ANA_MBHC_ZDET, 0x80, 0x80);
	wcd937x_mbhc_get_result_params(component, d1_a, zdet_param->noff, &zdet);
	regmap_update_bits(wcd937x->regmap,
			   WCD937X_ANA_MBHC_ZDET, 0x80, 0x00);
	*zl = zdet;
z_right:
	if (!zr)
		return;
	/* Start impedance measurement for HPH_R */
	regmap_update_bits(wcd937x->regmap,
			   WCD937X_ANA_MBHC_ZDET, 0x40, 0x40);
	wcd937x_mbhc_get_result_params(component, d1_a, zdet_param->noff, &zdet);
	regmap_update_bits(wcd937x->regmap,
			   WCD937X_ANA_MBHC_ZDET, 0x40, 0x00);
	*zr = zdet;
}
/*
 * Apply the per-chip eFuse trim to a measured impedance value.
 * flag_l_r selects the channel (0 = left, 1 = right), which offsets into
 * the eFuse register pair; a low impedance uses REG_23, higher uses
 * REG_24.  Bit 7 of the fuse byte encodes the trim sign; magnitude is in
 * steps of 25 around a 10000 midpoint.
 */
static void wcd937x_wcd_mbhc_qfuse_cal(struct snd_soc_component *component,
				       s32 *z_val, int flag_l_r)
{
	s16 q1;
	int q1_cal;
	if (*z_val < (WCD937X_ZDET_VAL_400 / 1000))
		q1 = snd_soc_component_read(component,
					    WCD937X_DIGITAL_EFUSE_REG_23 + (2 * flag_l_r));
	else
		q1 = snd_soc_component_read(component,
					    WCD937X_DIGITAL_EFUSE_REG_24 + (2 * flag_l_r));
	if (q1 & 0x80)
		q1_cal = (10000 - ((q1 & 0x7F) * 25));
	else
		q1_cal = (10000 + (q1 * 25));
	if (q1_cal > 0)
		*z_val = ((*z_val) * 10000) / q1_cal;
}
/*
 * Full MBHC impedance measurement for both headphone channels.
 *
 * Saves the button-threshold/clock/Z-det registers, quiesces the MBHC FSM
 * and surge protection, then runs up to two Z-det ramps per channel: a
 * first ramp with the mid-range parameter set, and a second with a
 * range-matched set when the first result demands it.  Results are
 * eFuse-calibrated and written to *zl / *zr in ohms (or
 * WCD937X_ZDET_FLOATING_IMPEDANCE).  A final grounded-right ramp
 * classifies the plug as mono or stereo.  All saved register state is
 * restored before returning.
 */
static void wcd937x_wcd_mbhc_calc_impedance(struct snd_soc_component *component,
					    u32 *zl, u32 *zr)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	s16 reg0, reg1, reg2, reg3, reg4;
	s32 z1l, z1r, z1ls;
	int zMono, z_diff1, z_diff2;
	bool is_fsm_disable = false;
	/* parameter sets per impedance range: {ldo_ctl, noff, nshift, btn5/6/7} */
	struct wcd937x_mbhc_zdet_param zdet_param[] = {
		{4, 0, 4, 0x08, 0x14, 0x18}, /* < 32ohm */
		{2, 0, 3, 0x18, 0x7C, 0x90}, /* 32ohm < Z < 400ohm */
		{1, 4, 5, 0x18, 0x7C, 0x90}, /* 400ohm < Z < 1200ohm */
		{1, 6, 7, 0x18, 0x7C, 0x90}, /* >1200ohm */
	};
	struct wcd937x_mbhc_zdet_param *zdet_param_ptr = NULL;
	/* d1 conversion factors indexed by the c1 range code, per param set */
	s16 d1_a[][4] = {
		{0, 30, 90, 30},
		{0, 30, 30, 5},
		{0, 30, 30, 5},
		{0, 30, 30, 5},
	};
	s16 *d1 = NULL;
	/* snapshot registers that the measurement reprograms */
	reg0 = snd_soc_component_read(component, WCD937X_ANA_MBHC_BTN5);
	reg1 = snd_soc_component_read(component, WCD937X_ANA_MBHC_BTN6);
	reg2 = snd_soc_component_read(component, WCD937X_ANA_MBHC_BTN7);
	reg3 = snd_soc_component_read(component, WCD937X_MBHC_CTL_CLK);
	reg4 = snd_soc_component_read(component, WCD937X_MBHC_NEW_ZDET_ANA_CTL);
	if (snd_soc_component_read(component, WCD937X_ANA_MBHC_ELECT) & 0x80) {
		is_fsm_disable = true;
		regmap_update_bits(wcd937x->regmap,
				   WCD937X_ANA_MBHC_ELECT, 0x80, 0x00);
	}
	/* For NO-jack, disable L_DET_EN before Z-det measurements */
	if (wcd937x->mbhc_cfg.hphl_swh)
		regmap_update_bits(wcd937x->regmap,
				   WCD937X_ANA_MBHC_MECH, 0x80, 0x00);
	/* Turn off 100k pull down on HPHL */
	regmap_update_bits(wcd937x->regmap,
			   WCD937X_ANA_MBHC_MECH, 0x01, 0x00);
	/* Disable surge protection before impedance detection.
	 * This is done to give correct value for high impedance.
	 */
	regmap_update_bits(wcd937x->regmap,
			   WCD937X_HPH_SURGE_HPHLR_SURGE_EN, 0xC0, 0x00);
	/* 1ms delay needed after disable surge protection */
	usleep_range(1000, 1010);
	/* First get impedance on Left */
	d1 = d1_a[1];
	zdet_param_ptr = &zdet_param[1];
	wcd937x_mbhc_zdet_ramp(component, zdet_param_ptr, &z1l, NULL, d1);
	if (!WCD937X_MBHC_IS_SECOND_RAMP_REQUIRED(z1l))
		goto left_ch_impedance;
	/* Second ramp for left ch */
	if (z1l < WCD937X_ZDET_VAL_32) {
		zdet_param_ptr = &zdet_param[0];
		d1 = d1_a[0];
	} else if ((z1l > WCD937X_ZDET_VAL_400) &&
		   (z1l <= WCD937X_ZDET_VAL_1200)) {
		zdet_param_ptr = &zdet_param[2];
		d1 = d1_a[2];
	} else if (z1l > WCD937X_ZDET_VAL_1200) {
		zdet_param_ptr = &zdet_param[3];
		d1 = d1_a[3];
	}
	wcd937x_mbhc_zdet_ramp(component, zdet_param_ptr, &z1l, NULL, d1);
left_ch_impedance:
	if (z1l == WCD937X_ZDET_FLOATING_IMPEDANCE ||
	    z1l > WCD937X_ZDET_VAL_100K) {
		*zl = WCD937X_ZDET_FLOATING_IMPEDANCE;
		zdet_param_ptr = &zdet_param[1];
		d1 = d1_a[1];
	} else {
		*zl = z1l / 1000;
		wcd937x_wcd_mbhc_qfuse_cal(component, zl, 0);
	}
	/* Start of right impedance ramp and calculation */
	wcd937x_mbhc_zdet_ramp(component, zdet_param_ptr, NULL, &z1r, d1);
	if (WCD937X_MBHC_IS_SECOND_RAMP_REQUIRED(z1r)) {
		if ((z1r > WCD937X_ZDET_VAL_1200 &&
		     zdet_param_ptr->noff == 0x6) ||
		    ((*zl) != WCD937X_ZDET_FLOATING_IMPEDANCE))
			goto right_ch_impedance;
		/* Second ramp for right ch */
		if (z1r < WCD937X_ZDET_VAL_32) {
			zdet_param_ptr = &zdet_param[0];
			d1 = d1_a[0];
		} else if ((z1r > WCD937X_ZDET_VAL_400) &&
			   (z1r <= WCD937X_ZDET_VAL_1200)) {
			zdet_param_ptr = &zdet_param[2];
			d1 = d1_a[2];
		} else if (z1r > WCD937X_ZDET_VAL_1200) {
			zdet_param_ptr = &zdet_param[3];
			d1 = d1_a[3];
		}
		wcd937x_mbhc_zdet_ramp(component, zdet_param_ptr, NULL, &z1r, d1);
	}
right_ch_impedance:
	if (z1r == WCD937X_ZDET_FLOATING_IMPEDANCE ||
	    z1r > WCD937X_ZDET_VAL_100K) {
		*zr = WCD937X_ZDET_FLOATING_IMPEDANCE;
	} else {
		*zr = z1r / 1000;
		wcd937x_wcd_mbhc_qfuse_cal(component, zr, 1);
	}
	/* Mono/stereo detection */
	if ((*zl == WCD937X_ZDET_FLOATING_IMPEDANCE) &&
	    (*zr == WCD937X_ZDET_FLOATING_IMPEDANCE)) {
		dev_err(component->dev,
			"%s: plug type is invalid or extension cable\n",
			__func__);
		goto zdet_complete;
	}
	if ((*zl == WCD937X_ZDET_FLOATING_IMPEDANCE) ||
	    (*zr == WCD937X_ZDET_FLOATING_IMPEDANCE) ||
	    ((*zl < WCD_MONO_HS_MIN_THR) && (*zr > WCD_MONO_HS_MIN_THR)) ||
	    ((*zl > WCD_MONO_HS_MIN_THR) && (*zr < WCD_MONO_HS_MIN_THR))) {
		wcd_mbhc_set_hph_type(wcd937x->wcd_mbhc, WCD_MBHC_HPH_MONO);
		goto zdet_complete;
	}
	/* ground the right channel and re-measure left to tell mono apart */
	snd_soc_component_write_field(component, WCD937X_HPH_R_ATEST,
				      WCD937X_HPHPA_GND_OVR_MASK, 1);
	snd_soc_component_write_field(component, WCD937X_HPH_PA_CTL2,
				      WCD937X_HPHPA_GND_R_MASK, 1);
	if (*zl < (WCD937X_ZDET_VAL_32 / 1000))
		wcd937x_mbhc_zdet_ramp(component, &zdet_param[0], &z1ls, NULL, d1);
	else
		wcd937x_mbhc_zdet_ramp(component, &zdet_param[1], &z1ls, NULL, d1);
	snd_soc_component_write_field(component, WCD937X_HPH_PA_CTL2,
				      WCD937X_HPHPA_GND_R_MASK, 0);
	snd_soc_component_write_field(component, WCD937X_HPH_R_ATEST,
				      WCD937X_HPHPA_GND_OVR_MASK, 0);
	z1ls /= 1000;
	wcd937x_wcd_mbhc_qfuse_cal(component, &z1ls, 0);
	/* Parallel of left Z and 9 ohm pull down resistor */
	zMono = ((*zl) * 9) / ((*zl) + 9);
	z_diff1 = (z1ls > zMono) ? (z1ls - zMono) : (zMono - z1ls);
	z_diff2 = ((*zl) > z1ls) ? ((*zl) - z1ls) : (z1ls - (*zl));
	if ((z_diff1 * (*zl + z1ls)) > (z_diff2 * (z1ls + zMono)))
		wcd_mbhc_set_hph_type(wcd937x->wcd_mbhc, WCD_MBHC_HPH_STEREO);
	else
		wcd_mbhc_set_hph_type(wcd937x->wcd_mbhc, WCD_MBHC_HPH_MONO);
	/* Enable surge protection again after impedance detection */
	regmap_update_bits(wcd937x->regmap,
			   WCD937X_HPH_SURGE_HPHLR_SURGE_EN, 0xC0, 0xC0);
zdet_complete:
	/* restore everything the measurement touched */
	snd_soc_component_write(component, WCD937X_ANA_MBHC_BTN5, reg0);
	snd_soc_component_write(component, WCD937X_ANA_MBHC_BTN6, reg1);
	snd_soc_component_write(component, WCD937X_ANA_MBHC_BTN7, reg2);
	/* Turn on 100k pull down on HPHL */
	regmap_update_bits(wcd937x->regmap,
			   WCD937X_ANA_MBHC_MECH, 0x01, 0x01);
	/* For NO-jack, re-enable L_DET_EN after Z-det measurements */
	if (wcd937x->mbhc_cfg.hphl_swh)
		regmap_update_bits(wcd937x->regmap,
				   WCD937X_ANA_MBHC_MECH, 0x80, 0x80);
	snd_soc_component_write(component, WCD937X_MBHC_NEW_ZDET_ANA_CTL, reg4);
	snd_soc_component_write(component, WCD937X_MBHC_CTL_CLK, reg3);
	if (is_fsm_disable)
		regmap_update_bits(wcd937x->regmap,
				   WCD937X_ANA_MBHC_ELECT, 0x80, 0x80);
}
/*
 * Enable/disable MBHC ground detection.
 *
 * On enable: headset-ground pull-up compensation first, then ground
 * detection.  On disable: same two bits cleared in the reverse order.
 */
static void wcd937x_mbhc_gnd_det_ctrl(struct snd_soc_component *component,
				      bool enable)
{
	if (!enable) {
		snd_soc_component_write_field(component, WCD937X_ANA_MBHC_MECH,
					      WCD937X_MBHC_GND_DET_EN_MASK, 0);
		snd_soc_component_write_field(component, WCD937X_ANA_MBHC_MECH,
					      WCD937X_MBHC_HSG_PULLUP_COMP_EN, 0);
		return;
	}

	snd_soc_component_write_field(component, WCD937X_ANA_MBHC_MECH,
				      WCD937X_MBHC_HSG_PULLUP_COMP_EN, 1);
	snd_soc_component_write_field(component, WCD937X_ANA_MBHC_MECH,
				      WCD937X_MBHC_GND_DET_EN_MASK, 1);
}
/* Toggle the HPH PA ground switches for both channels (R then L). */
static void wcd937x_mbhc_hph_pull_down_ctrl(struct snd_soc_component *component,
					    bool enable)
{
	unsigned int val = enable ? 1 : 0;

	snd_soc_component_write_field(component, WCD937X_HPH_PA_CTL2,
				      WCD937X_HPHPA_GND_R_MASK, val);
	snd_soc_component_write_field(component, WCD937X_HPH_PA_CTL2,
				      WCD937X_HPHPA_GND_L_MASK, val);
}
/*
 * Program the moisture-detection reference resistor from the MBHC config.
 * Detection is forced off (R_OFF) when unconfigured or when the jack type
 * is normally-closed (NC), where moisture detection is not applicable.
 */
static void wcd937x_mbhc_moisture_config(struct snd_soc_component *component)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);

	if (wcd937x->mbhc_cfg.moist_rref == R_OFF) {
		snd_soc_component_write_field(component, WCD937X_MBHC_NEW_CTL_2,
					      WCD937X_M_RTH_CTL_MASK, R_OFF);
		return;
	}

	/* Do not enable moisture detection if jack type is NC */
	if (!wcd937x->mbhc_cfg.hphl_swh) {
		/* Expected configuration, not an error: log at debug level */
		dev_dbg(component->dev, "%s: disable moisture detection for NC\n",
			__func__);
		snd_soc_component_write_field(component, WCD937X_MBHC_NEW_CTL_2,
					      WCD937X_M_RTH_CTL_MASK, R_OFF);
		return;
	}

	snd_soc_component_write_field(component, WCD937X_MBHC_NEW_CTL_2,
				      WCD937X_M_RTH_CTL_MASK, wcd937x->mbhc_cfg.moist_rref);
}
/*
 * Turn moisture detection on (program the configured reference resistor)
 * or off (R_OFF) via the M_RTH control field.
 */
static void wcd937x_mbhc_moisture_detect_en(struct snd_soc_component *component, bool enable)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	unsigned int rref = enable ? wcd937x->mbhc_cfg.moist_rref : R_OFF;

	snd_soc_component_write_field(component, WCD937X_MBHC_NEW_CTL_2,
				      WCD937X_M_RTH_CTL_MASK, rref);
}
/*
 * Query the moisture comparator.
 *
 * Returns false (no moisture / detection unavailable) when detection is
 * unconfigured, the jack type is NC, or detection was already enabled;
 * otherwise enables detection and returns the inverse of FSM status bit 5.
 */
static bool wcd937x_mbhc_get_moisture_status(struct snd_soc_component *component)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	bool ret = false;

	if (wcd937x->mbhc_cfg.moist_rref == R_OFF) {
		snd_soc_component_write_field(component, WCD937X_MBHC_NEW_CTL_2,
					      WCD937X_M_RTH_CTL_MASK, R_OFF);
		goto done;
	}

	/* Do not enable moisture detection if jack type is NC */
	if (!wcd937x->mbhc_cfg.hphl_swh) {
		/* Expected configuration, not an error: log at debug level */
		dev_dbg(component->dev, "%s: disable moisture detection for NC\n",
			__func__);
		snd_soc_component_write_field(component, WCD937X_MBHC_NEW_CTL_2,
					      WCD937X_M_RTH_CTL_MASK, R_OFF);
		goto done;
	}

	/*
	 * If moisture_en is already enabled, then skip to plug type
	 * detection.
	 */
	if (snd_soc_component_read_field(component, WCD937X_MBHC_NEW_CTL_2, WCD937X_M_RTH_CTL_MASK))
		goto done;

	wcd937x_mbhc_moisture_detect_en(component, true);

	/* Read moisture comparator status (result is inverse of bit 5) */
	ret = ((snd_soc_component_read(component, WCD937X_MBHC_NEW_FSM_STATUS)
				& 0x20) ? 0 : 1);

done:
	return ret;
}
/* Enable/disable periodic (polled) moisture detection. */
static void wcd937x_mbhc_moisture_polling_ctrl(struct snd_soc_component *component,
					       bool enable)
{
	snd_soc_component_write_field(component,
				      WCD937X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL,
				      WCD937X_MOISTURE_EN_POLLING_MASK, enable);
}
/*
 * Callbacks handed to the shared Qualcomm MBHC (headset detection) core;
 * each entry maps a generic MBHC operation onto WCD937x register accesses.
 */
static const struct wcd_mbhc_cb mbhc_cb = {
	.clk_setup = wcd937x_mbhc_clk_setup,
	.mbhc_bias = wcd937x_mbhc_mbhc_bias_control,
	.set_btn_thr = wcd937x_mbhc_program_btn_thr,
	.micbias_enable_status = wcd937x_mbhc_micb_en_status,
	.hph_pull_up_control_v2 = wcd937x_mbhc_hph_l_pull_up_control,
	.mbhc_micbias_control = wcd937x_mbhc_request_micbias,
	.mbhc_micb_ramp_control = wcd937x_mbhc_micb_ramp_control,
	.mbhc_micb_ctrl_thr_mic = wcd937x_mbhc_micb_ctrl_threshold_mic,
	.compute_impedance = wcd937x_wcd_mbhc_calc_impedance,
	.mbhc_gnd_det_ctrl = wcd937x_mbhc_gnd_det_ctrl,
	.hph_pull_down_ctrl = wcd937x_mbhc_hph_pull_down_ctrl,
	.mbhc_moisture_config = wcd937x_mbhc_moisture_config,
	.mbhc_get_moisture_status = wcd937x_mbhc_get_moisture_status,
	.mbhc_moisture_polling_ctrl = wcd937x_mbhc_moisture_polling_ctrl,
	.mbhc_moisture_detect_en = wcd937x_mbhc_moisture_detect_en,
};
/* ALSA control get(): report the headphone type detected by the MBHC core. */
static int wcd937x_get_hph_type(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
	struct wcd937x_priv *priv = snd_soc_component_get_drvdata(comp);

	ucontrol->value.integer.value[0] = wcd_mbhc_get_hph_type(priv->wcd_mbhc);

	return 0;
}
/*
 * ALSA control get(): report the measured headphone impedance.
 * The control's mixer shift selects the channel (0 = left, non-zero = right).
 */
static int wcd937x_hph_impedance_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
	struct wcd937x_priv *priv = snd_soc_component_get_drvdata(comp);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	u32 zl, zr;

	wcd_mbhc_get_impedance(priv->wcd_mbhc, &zl, &zr);
	ucontrol->value.integer.value[0] = mc->shift ? zr : zl;

	return 0;
}
/* Read-only control exposing the detected headphone type (mono/stereo). */
static const struct snd_kcontrol_new hph_type_detect_controls[] = {
	SOC_SINGLE_EXT("HPH Type", 0, 0, WCD_MBHC_HPH_STEREO, 0,
		       wcd937x_get_hph_type, NULL),
};

/* Read-only controls exposing measured impedance; shift picks L(0)/R(1). */
static const struct snd_kcontrol_new impedance_detect_controls[] = {
	SOC_SINGLE_EXT("HPHL Impedance", 0, 0, INT_MAX, 0,
		       wcd937x_hph_impedance_get, NULL),
	SOC_SINGLE_EXT("HPHR Impedance", 0, 1, INT_MAX, 0,
		       wcd937x_hph_impedance_get, NULL),
};
/*
 * Set up headset detection: resolve the MBHC-related IRQs from the regmap
 * IRQ chip, hand them to the common MBHC core, and register the type /
 * impedance read-back controls.  Returns 0 or a negative errno.
 */
static int wcd937x_mbhc_init(struct snd_soc_component *component)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	struct wcd_mbhc_intr *intr_ids = &wcd937x->intr_ids;

	/* Translate regmap-irq hardware IRQ numbers into Linux virqs */
	intr_ids->mbhc_sw_intr = regmap_irq_get_virq(wcd937x->irq_chip,
						     WCD937X_IRQ_MBHC_SW_DET);
	intr_ids->mbhc_btn_press_intr = regmap_irq_get_virq(wcd937x->irq_chip,
							    WCD937X_IRQ_MBHC_BUTTON_PRESS_DET);
	intr_ids->mbhc_btn_release_intr = regmap_irq_get_virq(wcd937x->irq_chip,
							      WCD937X_IRQ_MBHC_BUTTON_RELEASE_DET);
	intr_ids->mbhc_hs_ins_intr = regmap_irq_get_virq(wcd937x->irq_chip,
							 WCD937X_IRQ_MBHC_ELECT_INS_REM_LEG_DET);
	intr_ids->mbhc_hs_rem_intr = regmap_irq_get_virq(wcd937x->irq_chip,
							 WCD937X_IRQ_MBHC_ELECT_INS_REM_DET);
	intr_ids->hph_left_ocp = regmap_irq_get_virq(wcd937x->irq_chip,
						     WCD937X_IRQ_HPHL_OCP_INT);
	intr_ids->hph_right_ocp = regmap_irq_get_virq(wcd937x->irq_chip,
						      WCD937X_IRQ_HPHR_OCP_INT);

	wcd937x->wcd_mbhc = wcd_mbhc_init(component, &mbhc_cb, intr_ids, wcd_mbhc_fields, true);
	if (IS_ERR(wcd937x->wcd_mbhc))
		return PTR_ERR(wcd937x->wcd_mbhc);

	/*
	 * NOTE(review): the return values of snd_soc_add_component_controls()
	 * are ignored here — confirm a registration failure is acceptable.
	 */
	snd_soc_add_component_controls(component, impedance_detect_controls,
				       ARRAY_SIZE(impedance_detect_controls));
	snd_soc_add_component_controls(component, hph_type_detect_controls,
				       ARRAY_SIZE(hph_type_detect_controls));

	return 0;
}
/* Tear down the MBHC core instance created by wcd937x_mbhc_init(). */
static void wcd937x_mbhc_deinit(struct snd_soc_component *component)
{
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);

	wcd_mbhc_deinit(wcd937x->wcd_mbhc);
}
/* END MBHC */
/*
 * Mixer controls common to all WCD937x variants: PA/line/ADC gains,
 * compander switches, and SoundWire port enable switches.
 */
static const struct snd_kcontrol_new wcd937x_snd_controls[] = {
	SOC_SINGLE_TLV("EAR_PA Volume", WCD937X_ANA_EAR_COMPANDER_CTL,
		       2, 0x10, 0, ear_pa_gain),
	SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum,
		     wcd937x_rx_hph_mode_get, wcd937x_rx_hph_mode_put),
	SOC_SINGLE_EXT("HPHL_COMP Switch", SND_SOC_NOPM, 0, 1, 0,
		       wcd937x_get_compander, wcd937x_set_compander),
	SOC_SINGLE_EXT("HPHR_COMP Switch", SND_SOC_NOPM, 1, 1, 0,
		       wcd937x_get_compander, wcd937x_set_compander),
	SOC_SINGLE_TLV("HPHL Volume", WCD937X_HPH_L_EN, 0, 20, 1, line_gain),
	SOC_SINGLE_TLV("HPHR Volume", WCD937X_HPH_R_EN, 0, 20, 1, line_gain),
	SOC_SINGLE_TLV("ADC1 Volume", WCD937X_ANA_TX_CH1, 0, 20, 0, analog_gain),
	SOC_SINGLE_TLV("ADC2 Volume", WCD937X_ANA_TX_CH2, 0, 20, 0, analog_gain),
	SOC_SINGLE_TLV("ADC3 Volume", WCD937X_ANA_TX_CH3, 0, 20, 0, analog_gain),
	/* SoundWire port switches: first field is the port index define */
	SOC_SINGLE_EXT("HPHL Switch", WCD937X_HPH_L, 0, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("HPHR Switch", WCD937X_HPH_R, 0, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("LO Switch", WCD937X_LO, 0, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("ADC1 Switch", WCD937X_ADC1, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("ADC2 Switch", WCD937X_ADC2, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("ADC3 Switch", WCD937X_ADC3, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("DMIC0 Switch", WCD937X_DMIC0, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("DMIC1 Switch", WCD937X_DMIC1, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("MBHC Switch", WCD937X_MBHC, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("DMIC2 Switch", WCD937X_DMIC2, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("DMIC3 Switch", WCD937X_DMIC3, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("DMIC4 Switch", WCD937X_DMIC4, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
	SOC_SINGLE_EXT("DMIC5 Switch", WCD937X_DMIC5, 1, 1, 0,
		       wcd937x_get_swr_port, wcd937x_set_swr_port),
};
/*
 * Per-path DAPM on/off switches (no hardware register: SND_SOC_NOPM).
 * Each is referenced by a mixer widget below to gate its audio path.
 */
static const struct snd_kcontrol_new adc1_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new adc2_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new adc3_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new dmic1_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new dmic2_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new dmic3_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new dmic4_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new dmic5_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new dmic6_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new ear_rdac_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new aux_rdac_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new hphl_rdac_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};

static const struct snd_kcontrol_new hphr_rdac_switch[] = {
	SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0)
};
/* ADC2 input selection: analog input pin 2 or 3 */
static const char * const adc2_mux_text[] = {
	"INP2", "INP3"
};

/* RDAC3 (earpiece DAC) source: RX1 or RX3 path */
static const char * const rdac3_mux_text[] = {
	"RX1", "RX3"
};

static const struct soc_enum adc2_enum =
	SOC_ENUM_SINGLE(WCD937X_TX_NEW_TX_CH2_SEL, 7,
			ARRAY_SIZE(adc2_mux_text), adc2_mux_text);

static const struct soc_enum rdac3_enum =
	SOC_ENUM_SINGLE(WCD937X_DIGITAL_CDC_EAR_PATH_CTL, 0,
			ARRAY_SIZE(rdac3_mux_text), rdac3_mux_text);

static const struct snd_kcontrol_new tx_adc2_mux = SOC_DAPM_ENUM("ADC2 MUX Mux", adc2_enum);

static const struct snd_kcontrol_new rx_rdac3_mux = SOC_DAPM_ENUM("RDAC3_MUX Mux", rdac3_enum);
/*
 * DAPM widgets common to WCD9370 and WCD9375.  Variant-specific widgets
 * (ADC3 and the digital mics) live in wcd9375_dapm_widgets.
 */
static const struct snd_soc_dapm_widget wcd937x_dapm_widgets[] = {
	/* Input widgets */
	SND_SOC_DAPM_INPUT("AMIC1"),
	SND_SOC_DAPM_INPUT("AMIC2"),
	SND_SOC_DAPM_INPUT("AMIC3"),
	SND_SOC_DAPM_INPUT("IN1_HPHL"),
	SND_SOC_DAPM_INPUT("IN2_HPHR"),
	SND_SOC_DAPM_INPUT("IN3_AUX"),

	/* TX widgets */
	SND_SOC_DAPM_ADC_E("ADC1", NULL, SND_SOC_NOPM, 0, 0,
			   wcd937x_codec_enable_adc,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("ADC2", NULL, SND_SOC_NOPM, 1, 0,
			   wcd937x_codec_enable_adc,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("ADC1 REQ", SND_SOC_NOPM, 0, 0,
			     NULL, 0, wcd937x_enable_req,
			     SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("ADC2 REQ", SND_SOC_NOPM, 0, 0,
			     NULL, 0, wcd937x_enable_req,
			     SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0, &tx_adc2_mux),

	/* TX mixers */
	SND_SOC_DAPM_MIXER_E("ADC1_MIXER", SND_SOC_NOPM, 0, 0,
			     adc1_switch, ARRAY_SIZE(adc1_switch),
			     wcd937x_tx_swr_ctrl, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("ADC2_MIXER", SND_SOC_NOPM, 1, 0,
			     adc2_switch, ARRAY_SIZE(adc2_switch),
			     wcd937x_tx_swr_ctrl, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),

	/* MIC_BIAS widgets */
	SND_SOC_DAPM_SUPPLY("MIC BIAS1", SND_SOC_NOPM, MIC_BIAS_1, 0,
			    wcd937x_codec_enable_micbias,
			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			    SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("MIC BIAS2", SND_SOC_NOPM, MIC_BIAS_2, 0,
			    wcd937x_codec_enable_micbias,
			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			    SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("MIC BIAS3", SND_SOC_NOPM, MIC_BIAS_3, 0,
			    wcd937x_codec_enable_micbias,
			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			    SND_SOC_DAPM_POST_PMD),

	SND_SOC_DAPM_SUPPLY("VDD_BUCK", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_SUPPLY_S("CLS_H_PORT", 1, SND_SOC_NOPM, 0, 0, NULL, 0),

	/* RX widgets */
	SND_SOC_DAPM_PGA_E("EAR PGA", WCD937X_ANA_EAR, 7, 0, NULL, 0,
			   wcd937x_codec_enable_ear_pa,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("AUX PGA", WCD937X_AUX_AUXPA, 7, 0, NULL, 0,
			   wcd937x_codec_enable_aux_pa,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("HPHL PGA", WCD937X_ANA_HPH, 7, 0, NULL, 0,
			   wcd937x_codec_enable_hphl_pa,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("HPHR PGA", WCD937X_ANA_HPH, 6, 0, NULL, 0,
			   wcd937x_codec_enable_hphr_pa,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_DAC_E("RDAC1", NULL, SND_SOC_NOPM, 0, 0,
			   wcd937x_codec_hphl_dac_event,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_DAC_E("RDAC2", NULL, SND_SOC_NOPM, 0, 0,
			   wcd937x_codec_hphr_dac_event,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_DAC_E("RDAC3", NULL, SND_SOC_NOPM, 0, 0,
			   wcd937x_codec_ear_dac_event,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_DAC_E("RDAC4", NULL, SND_SOC_NOPM, 0, 0,
			   wcd937x_codec_aux_dac_event,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MUX("RDAC3_MUX", SND_SOC_NOPM, 0, 0, &rx_rdac3_mux),

	SND_SOC_DAPM_MIXER_E("RX1", SND_SOC_NOPM, 0, 0, NULL, 0,
			     wcd937x_enable_rx1, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("RX2", SND_SOC_NOPM, 0, 0, NULL, 0,
			     wcd937x_enable_rx2, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("RX3", SND_SOC_NOPM, 0, 0, NULL, 0,
			     wcd937x_enable_rx3, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),

	/* RX mixer widgets*/
	SND_SOC_DAPM_MIXER("EAR_RDAC", SND_SOC_NOPM, 0, 0,
			   ear_rdac_switch, ARRAY_SIZE(ear_rdac_switch)),
	SND_SOC_DAPM_MIXER("AUX_RDAC", SND_SOC_NOPM, 0, 0,
			   aux_rdac_switch, ARRAY_SIZE(aux_rdac_switch)),
	SND_SOC_DAPM_MIXER("HPHL_RDAC", SND_SOC_NOPM, 0, 0,
			   hphl_rdac_switch, ARRAY_SIZE(hphl_rdac_switch)),
	SND_SOC_DAPM_MIXER("HPHR_RDAC", SND_SOC_NOPM, 0, 0,
			   hphr_rdac_switch, ARRAY_SIZE(hphr_rdac_switch)),

	/* TX output widgets */
	SND_SOC_DAPM_OUTPUT("ADC1_OUTPUT"),
	SND_SOC_DAPM_OUTPUT("ADC2_OUTPUT"),
	SND_SOC_DAPM_OUTPUT("ADC3_OUTPUT"),
	SND_SOC_DAPM_OUTPUT("WCD_TX_OUTPUT"),

	/* RX output widgets */
	SND_SOC_DAPM_OUTPUT("EAR"),
	SND_SOC_DAPM_OUTPUT("AUX"),
	SND_SOC_DAPM_OUTPUT("HPHL"),
	SND_SOC_DAPM_OUTPUT("HPHR"),

	/* MIC_BIAS pull up widgets */
	SND_SOC_DAPM_SUPPLY("VA MIC BIAS1", SND_SOC_NOPM, MIC_BIAS_1, 0,
			    wcd937x_codec_enable_micbias_pullup,
			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			    SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("VA MIC BIAS2", SND_SOC_NOPM, MIC_BIAS_2, 0,
			    wcd937x_codec_enable_micbias_pullup,
			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			    SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("VA MIC BIAS3", SND_SOC_NOPM, MIC_BIAS_3, 0,
			    wcd937x_codec_enable_micbias_pullup,
			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			    SND_SOC_DAPM_POST_PMD),
};
/*
 * DAPM widgets that exist only on the WCD9375 variant (AMIC4/ADC3 and the
 * six digital microphones); registered at probe time when the chip id
 * identifies a WCD9375.
 */
static const struct snd_soc_dapm_widget wcd9375_dapm_widgets[] = {
	/* Input widgets */
	SND_SOC_DAPM_INPUT("AMIC4"),

	/* TX widgets */
	SND_SOC_DAPM_ADC_E("ADC3", NULL, SND_SOC_NOPM, 2, 0,
			   wcd937x_codec_enable_adc,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("ADC3 REQ", SND_SOC_NOPM, 0, 0,
			     NULL, 0, wcd937x_enable_req,
			     SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
			   wcd937x_codec_enable_dmic,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 1, 0,
			   wcd937x_codec_enable_dmic,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 2, 0,
			   wcd937x_codec_enable_dmic,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 3, 0,
			   wcd937x_codec_enable_dmic,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC5", NULL, SND_SOC_NOPM, 4, 0,
			   wcd937x_codec_enable_dmic,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_ADC_E("DMIC6", NULL, SND_SOC_NOPM, 5, 0,
			   wcd937x_codec_enable_dmic,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),

	/* TX mixer widgets */
	SND_SOC_DAPM_MIXER_E("DMIC1_MIXER", SND_SOC_NOPM, 0,
			     0, dmic1_switch, ARRAY_SIZE(dmic1_switch),
			     wcd937x_tx_swr_ctrl, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("DMIC2_MIXER", SND_SOC_NOPM, 1,
			     0, dmic2_switch, ARRAY_SIZE(dmic2_switch),
			     wcd937x_tx_swr_ctrl, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("DMIC3_MIXER", SND_SOC_NOPM, 2,
			     0, dmic3_switch, ARRAY_SIZE(dmic3_switch),
			     wcd937x_tx_swr_ctrl, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("DMIC4_MIXER", SND_SOC_NOPM, 3,
			     0, dmic4_switch, ARRAY_SIZE(dmic4_switch),
			     wcd937x_tx_swr_ctrl, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("DMIC5_MIXER", SND_SOC_NOPM, 4,
			     0, dmic5_switch, ARRAY_SIZE(dmic5_switch),
			     wcd937x_tx_swr_ctrl, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("DMIC6_MIXER", SND_SOC_NOPM, 5,
			     0, dmic6_switch, ARRAY_SIZE(dmic6_switch),
			     wcd937x_tx_swr_ctrl, SND_SOC_DAPM_PRE_PMU |
			     SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MIXER_E("ADC3_MIXER", SND_SOC_NOPM, 2, 0, adc3_switch,
			     ARRAY_SIZE(adc3_switch), wcd937x_tx_swr_ctrl,
			     SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),

	/* Output widgets */
	SND_SOC_DAPM_OUTPUT("DMIC1_OUTPUT"),
	SND_SOC_DAPM_OUTPUT("DMIC2_OUTPUT"),
	SND_SOC_DAPM_OUTPUT("DMIC3_OUTPUT"),
	SND_SOC_DAPM_OUTPUT("DMIC4_OUTPUT"),
	SND_SOC_DAPM_OUTPUT("DMIC5_OUTPUT"),
	SND_SOC_DAPM_OUTPUT("DMIC6_OUTPUT"),
};
/* DAPM routes common to all variants: ADC capture paths and RX playback paths. */
static const struct snd_soc_dapm_route wcd937x_audio_map[] = {
	/* ADC1 capture path */
	{ "ADC1_OUTPUT", NULL, "ADC1_MIXER" },
	{ "ADC1_MIXER", "Switch", "ADC1 REQ" },
	{ "ADC1 REQ", NULL, "ADC1" },
	{ "ADC1", NULL, "AMIC1" },

	/* ADC2 capture path (input selected by ADC2 MUX) */
	{ "ADC2_OUTPUT", NULL, "ADC2_MIXER" },
	{ "ADC2_MIXER", "Switch", "ADC2 REQ" },
	{ "ADC2 REQ", NULL, "ADC2" },
	{ "ADC2", NULL, "ADC2 MUX" },
	{ "ADC2 MUX", "INP3", "AMIC3" },
	{ "ADC2 MUX", "INP2", "AMIC2" },

	/* Headphone left playback path */
	{ "IN1_HPHL", NULL, "VDD_BUCK" },
	{ "IN1_HPHL", NULL, "CLS_H_PORT" },
	{ "RX1", NULL, "IN1_HPHL" },
	{ "RDAC1", NULL, "RX1" },
	{ "HPHL_RDAC", "Switch", "RDAC1" },
	{ "HPHL PGA", NULL, "HPHL_RDAC" },
	{ "HPHL", NULL, "HPHL PGA" },

	/* Headphone right playback path */
	{ "IN2_HPHR", NULL, "VDD_BUCK" },
	{ "IN2_HPHR", NULL, "CLS_H_PORT" },
	{ "RX2", NULL, "IN2_HPHR" },
	{ "RDAC2", NULL, "RX2" },
	{ "HPHR_RDAC", "Switch", "RDAC2" },
	{ "HPHR PGA", NULL, "HPHR_RDAC" },
	{ "HPHR", NULL, "HPHR PGA" },

	/* AUX playback path */
	{ "IN3_AUX", NULL, "VDD_BUCK" },
	{ "IN3_AUX", NULL, "CLS_H_PORT" },
	{ "RX3", NULL, "IN3_AUX" },
	{ "RDAC4", NULL, "RX3" },
	{ "AUX_RDAC", "Switch", "RDAC4" },
	{ "AUX PGA", NULL, "AUX_RDAC" },
	{ "AUX", NULL, "AUX PGA" },

	/* Earpiece playback path (source selected by RDAC3_MUX) */
	{ "RDAC3_MUX", "RX3", "RX3" },
	{ "RDAC3_MUX", "RX1", "RX1" },
	{ "RDAC3", NULL, "RDAC3_MUX" },
	{ "EAR_RDAC", "Switch", "RDAC3" },
	{ "EAR PGA", NULL, "EAR_RDAC" },
	{ "EAR", NULL, "EAR PGA" },
};
/*
 * DAPM routes specific to the WCD9375 variant (ADC3 and digital mics).
 * Note: the original table listed the "ADC3_OUTPUT" -> "ADC3_MIXER" route
 * twice; the duplicate entry has been removed.
 */
static const struct snd_soc_dapm_route wcd9375_audio_map[] = {
	/* ADC3 capture path */
	{ "ADC3_OUTPUT", NULL, "ADC3_MIXER" },
	{ "ADC3_MIXER", "Switch", "ADC3 REQ" },
	{ "ADC3 REQ", NULL, "ADC3" },
	{ "ADC3", NULL, "AMIC4" },

	/* Digital mic capture paths */
	{ "DMIC1_OUTPUT", NULL, "DMIC1_MIXER" },
	{ "DMIC1_MIXER", "Switch", "DMIC1" },

	{ "DMIC2_OUTPUT", NULL, "DMIC2_MIXER" },
	{ "DMIC2_MIXER", "Switch", "DMIC2" },

	{ "DMIC3_OUTPUT", NULL, "DMIC3_MIXER" },
	{ "DMIC3_MIXER", "Switch", "DMIC3" },

	{ "DMIC4_OUTPUT", NULL, "DMIC4_MIXER" },
	{ "DMIC4_MIXER", "Switch", "DMIC4" },

	{ "DMIC5_OUTPUT", NULL, "DMIC5_MIXER" },
	{ "DMIC5_MIXER", "Switch", "DMIC5" },

	{ "DMIC6_OUTPUT", NULL, "DMIC6_MIXER" },
	{ "DMIC6_MIXER", "Switch", "DMIC6" },
};
/*
 * Program the three micbias VOUT_CTL fields from the voltages (in mV)
 * parsed out of the device tree.  Returns 0, or -EINVAL if any voltage
 * does not map to a valid register value.
 */
static int wcd937x_set_micbias_data(struct wcd937x_priv *wcd937x)
{
	int vout1, vout2, vout3;

	/* Translate each micbias voltage into its VOUT_CTL field value */
	vout1 = wcd937x_get_micb_vout_ctl_val(wcd937x->micb1_mv);
	vout2 = wcd937x_get_micb_vout_ctl_val(wcd937x->micb2_mv);
	vout3 = wcd937x_get_micb_vout_ctl_val(wcd937x->micb3_mv);
	if (vout1 < 0 || vout2 < 0 || vout3 < 0)
		return -EINVAL;

	regmap_update_bits(wcd937x->regmap, WCD937X_ANA_MICB1,
			   WCD937X_ANA_MICB_VOUT, vout1);
	regmap_update_bits(wcd937x->regmap, WCD937X_ANA_MICB2,
			   WCD937X_ANA_MICB_VOUT, vout2);
	regmap_update_bits(wcd937x->regmap, WCD937X_ANA_MICB3,
			   WCD937X_ANA_MICB_VOUT, vout3);

	return 0;
}
/*
 * Threaded handler for the PDM watchdog interrupts.  The events need no
 * action; the handler only acknowledges them so the line is not reported
 * as spurious.
 */
static irqreturn_t wcd937x_wd_handle_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}
/* Minimal irq_chip for the codec's internal (SoundWire-signalled) IRQ domain. */
static const struct irq_chip wcd_irq_chip = {
	.name = "WCD937x",
};
/*
 * irq_domain map callback: wire each virq to the minimal chip with the
 * simple-IRQ flow, mark it nested (handled from the SoundWire slave's
 * threaded context) and exclude it from autoprobing.
 */
static int wcd_irq_chip_map(struct irq_domain *irqd, unsigned int virq,
			    irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &wcd_irq_chip, handle_simple_irq);
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);

	return 0;
}
/* Domain ops for the codec IRQ domain; only mapping is needed. */
static const struct irq_domain_ops wcd_domain_ops = {
	.map = wcd_irq_chip_map,
};
/*
 * Create a one-line IRQ domain for the codec and hook the regmap IRQ chip
 * onto its single mapping.  Returns 0 or a negative errno.
 * NOTE(review): the domain is created with a NULL of_node — presumably
 * intentional since lookups go through wcd->virq directly; confirm.
 */
static int wcd937x_irq_init(struct wcd937x_priv *wcd, struct device *dev)
{
	wcd->virq = irq_domain_add_linear(NULL, 1, &wcd_domain_ops, NULL);
	if (!(wcd->virq)) {
		dev_err(dev, "%s: Failed to add IRQ domain\n", __func__);
		return -EINVAL;
	}

	return devm_regmap_add_irq_chip(dev, wcd->regmap,
					irq_create_mapping(wcd->virq, 0),
					IRQF_ONESHOT, 0, &wcd937x_regmap_irq_chip,
					&wcd->irq_chip);
}
/*
 * Component probe: wait for the TX SoundWire device (register interface),
 * validate the chip id, allocate the class-H controller, configure IRQ
 * trigger levels, request the PDM watchdog IRQs, register WCD9375-only
 * widgets/routes when applicable, and initialize headset detection.
 *
 * Fix vs. original: failures after wcd_clsh_ctrl_alloc() succeeded (DAPM
 * control/route registration, MBHC init) returned without freeing
 * clsh_info, leaking it; they now go through err_free_clsh_ctrl.
 */
static int wcd937x_soc_codec_probe(struct snd_soc_component *component)
{
	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
	struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
	struct sdw_slave *tx_sdw_dev = wcd937x->tx_sdw_dev;
	struct device *dev = component->dev;
	unsigned long time_left;
	int i, ret;
	u32 chipid;

	/* The TX device carries the register interface; wait for enumeration */
	time_left = wait_for_completion_timeout(&tx_sdw_dev->initialization_complete,
						msecs_to_jiffies(5000));
	if (!time_left) {
		dev_err(dev, "soundwire device init timeout\n");
		return -ETIMEDOUT;
	}

	snd_soc_component_init_regmap(component, wcd937x->regmap);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	chipid = (snd_soc_component_read(component,
					 WCD937X_DIGITAL_EFUSE_REG_0) & 0x1e) >> 1;
	if (chipid != CHIPID_WCD9370 && chipid != CHIPID_WCD9375) {
		dev_err(dev, "Got unknown chip id: 0x%x\n", chipid);
		pm_runtime_put(dev);
		return -EINVAL;
	}

	wcd937x->clsh_info = wcd_clsh_ctrl_alloc(component, WCD937X);
	if (IS_ERR(wcd937x->clsh_info)) {
		pm_runtime_put(dev);
		return PTR_ERR(wcd937x->clsh_info);
	}

	wcd937x_io_init(wcd937x->regmap);
	/* Set all interrupts as edge triggered */
	for (i = 0; i < wcd937x_regmap_irq_chip.num_regs; i++)
		regmap_write(wcd937x->regmap, (WCD937X_DIGITAL_INTR_LEVEL_0 + i), 0);

	pm_runtime_put(dev);

	wcd937x->hphr_pdm_wd_int = regmap_irq_get_virq(wcd937x->irq_chip,
						       WCD937X_IRQ_HPHR_PDM_WD_INT);
	wcd937x->hphl_pdm_wd_int = regmap_irq_get_virq(wcd937x->irq_chip,
						       WCD937X_IRQ_HPHL_PDM_WD_INT);
	wcd937x->aux_pdm_wd_int = regmap_irq_get_virq(wcd937x->irq_chip,
						      WCD937X_IRQ_AUX_PDM_WD_INT);

	/* Request for watchdog interrupt (failures are logged, not fatal) */
	ret = devm_request_threaded_irq(dev, wcd937x->hphr_pdm_wd_int, NULL, wcd937x_wd_handle_irq,
					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
					"HPHR PDM WDOG INT", wcd937x);
	if (ret)
		dev_err(dev, "Failed to request HPHR watchdog interrupt (%d)\n", ret);

	ret = devm_request_threaded_irq(dev, wcd937x->hphl_pdm_wd_int, NULL, wcd937x_wd_handle_irq,
					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
					"HPHL PDM WDOG INT", wcd937x);
	if (ret)
		dev_err(dev, "Failed to request HPHL watchdog interrupt (%d)\n", ret);

	ret = devm_request_threaded_irq(dev, wcd937x->aux_pdm_wd_int, NULL, wcd937x_wd_handle_irq,
					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
					"AUX PDM WDOG INT", wcd937x);
	if (ret)
		dev_err(dev, "Failed to request Aux watchdog interrupt (%d)\n", ret);

	/* Disable watchdog interrupt for HPH and AUX */
	disable_irq_nosync(wcd937x->hphr_pdm_wd_int);
	disable_irq_nosync(wcd937x->hphl_pdm_wd_int);
	disable_irq_nosync(wcd937x->aux_pdm_wd_int);

	if (chipid == CHIPID_WCD9375) {
		ret = snd_soc_dapm_new_controls(dapm, wcd9375_dapm_widgets,
						ARRAY_SIZE(wcd9375_dapm_widgets));
		if (ret < 0) {
			dev_err(component->dev, "Failed to add snd_ctls\n");
			goto err_free_clsh_ctrl;
		}

		ret = snd_soc_dapm_add_routes(dapm, wcd9375_audio_map,
					      ARRAY_SIZE(wcd9375_audio_map));
		if (ret < 0) {
			dev_err(component->dev, "Failed to add routes\n");
			goto err_free_clsh_ctrl;
		}
	}

	ret = wcd937x_mbhc_init(component);
	if (ret) {
		dev_err(component->dev, "mbhc initialization failed\n");
		goto err_free_clsh_ctrl;
	}

	return 0;

err_free_clsh_ctrl:
	wcd_clsh_ctrl_free(wcd937x->clsh_info);
	return ret;
}
static void wcd937x_soc_codec_remove(struct snd_soc_component *component)
{
struct wcd937x_priv *wcd937x = snd_soc_component_get_drvdata(component);
wcd937x_mbhc_deinit(component);
free_irq(wcd937x->aux_pdm_wd_int, wcd937x);
free_irq(wcd937x->hphl_pdm_wd_int, wcd937x);
free_irq(wcd937x->hphr_pdm_wd_int, wcd937x);
wcd_clsh_ctrl_free(wcd937x->clsh_info);
}
/*
 * set_jack component callback: a non-NULL jack starts MBHC detection on
 * it, NULL stops detection.  Returns 0 or a negative errno from start.
 */
static int wcd937x_codec_set_jack(struct snd_soc_component *comp,
				  struct snd_soc_jack *jack, void *data)
{
	struct wcd937x_priv *wcd = dev_get_drvdata(comp->dev);

	if (!jack) {
		wcd_mbhc_stop(wcd->wcd_mbhc);
		return 0;
	}

	return wcd_mbhc_start(wcd->wcd_mbhc, &wcd->mbhc_cfg, jack);
}
/* ASoC component driver: controls, widgets and routes common to all variants. */
static const struct snd_soc_component_driver soc_codec_dev_wcd937x = {
	.name = "wcd937x_codec",
	.probe = wcd937x_soc_codec_probe,
	.remove = wcd937x_soc_codec_remove,
	.controls = wcd937x_snd_controls,
	.num_controls = ARRAY_SIZE(wcd937x_snd_controls),
	.dapm_widgets = wcd937x_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(wcd937x_dapm_widgets),
	.dapm_routes = wcd937x_audio_map,
	.num_dapm_routes = ARRAY_SIZE(wcd937x_audio_map),
	.set_jack = wcd937x_codec_set_jack,
	.endianness = 1,
};
/*
 * Read the three "qcom,micbiasN-microvolt" DT properties and store them in
 * millivolts.  A missing property is warned about but non-fatal; the
 * corresponding field is left untouched.
 */
static void wcd937x_dt_parse_micbias_info(struct device *dev, struct wcd937x_priv *wcd)
{
	struct device_node *np = dev->of_node;
	u32 val = 0;

	if (!of_property_read_u32(np, "qcom,micbias1-microvolt", &val))
		wcd->micb1_mv = val / 1000;
	else
		dev_warn(dev, "Micbias1 DT property not found\n");

	if (!of_property_read_u32(np, "qcom,micbias2-microvolt", &val))
		wcd->micb2_mv = val / 1000;
	else
		dev_warn(dev, "Micbias2 DT property not found\n");

	if (!of_property_read_u32(np, "qcom,micbias3-microvolt", &val))
		wcd->micb3_mv = val / 1000;
	else
		dev_warn(dev, "Micbias3 DT property not found\n");
}
/*
 * MBHC callback: toggle the US/Euro headset ground-mic swap GPIO.
 * Returns true when a swap was performed.
 *
 * Fix vs. original: the GPIO is optional (devm_gpiod_get_optional); when
 * absent, the original toggled nothing yet still returned true, telling
 * the MBHC core a swap had happened.  Report false in that case.
 */
static bool wcd937x_swap_gnd_mic(struct snd_soc_component *component, bool active)
{
	int value;
	struct wcd937x_priv *wcd937x;

	wcd937x = snd_soc_component_get_drvdata(component);

	if (!wcd937x->us_euro_gpio)
		return false;

	value = gpiod_get_value(wcd937x->us_euro_gpio);
	gpiod_set_value(wcd937x->us_euro_gpio, !value);

	return true;
}
/* DAI hw_params: delegate to the SoundWire helper for this DAI's port. */
static int wcd937x_codec_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params,
				   struct snd_soc_dai *dai)
{
	struct wcd937x_priv *wcd937x = dev_get_drvdata(dai->dev);
	struct wcd937x_sdw_priv *wcd = wcd937x->sdw_priv[dai->id];

	return wcd937x_sdw_hw_params(wcd, substream, params, dai);
}
/* DAI hw_free: detach this DAI's SoundWire slave from its stream runtime. */
static int wcd937x_codec_free(struct snd_pcm_substream *substream,
			      struct snd_soc_dai *dai)
{
	struct wcd937x_priv *wcd937x = dev_get_drvdata(dai->dev);
	struct wcd937x_sdw_priv *wcd = wcd937x->sdw_priv[dai->id];

	return sdw_stream_remove_slave(wcd->sdev, wcd->sruntime);
}
/* set_stream DAI op: remember the SoundWire stream runtime for this DAI. */
static int wcd937x_codec_set_sdw_stream(struct snd_soc_dai *dai,
					void *stream, int direction)
{
	struct wcd937x_priv *wcd937x = dev_get_drvdata(dai->dev);
	struct wcd937x_sdw_priv *wcd = wcd937x->sdw_priv[dai->id];

	/* direction is unused: each DAI is playback-only or capture-only */
	wcd->sruntime = stream;

	return 0;
}
/* DAI ops shared by the SoundWire RX and TX DAIs. */
static const struct snd_soc_dai_ops wcd937x_sdw_dai_ops = {
	.hw_params = wcd937x_codec_hw_params,
	.hw_free = wcd937x_codec_free,
	.set_stream = wcd937x_codec_set_sdw_stream,
};
/* Two DAIs: SoundWire RX (playback) and SoundWire TX (capture). */
static struct snd_soc_dai_driver wcd937x_dais[] = {
	[0] = {
		.name = "wcd937x-sdw-rx",
		.playback = {
			.stream_name = "WCD AIF Playback",
			.rates = WCD937X_RATES | WCD937X_FRAC_RATES,
			.formats = WCD937X_FORMATS,
			.rate_min = 8000,
			.rate_max = 384000,
			.channels_min = 1,
			.channels_max = 4,
		},
		.ops = &wcd937x_sdw_dai_ops,
	},
	[1] = {
		.name = "wcd937x-sdw-tx",
		.capture = {
			.stream_name = "WCD AIF Capture",
			.rates = WCD937X_RATES,
			.formats = WCD937X_FORMATS,
			.rate_min = 8000,
			.rate_max = 192000,
			.channels_min = 1,
			.channels_max = 4,
		},
		.ops = &wcd937x_sdw_dai_ops,
	},
};
/*
 * Aggregate-device bind: collect the RX/TX SoundWire sub-devices, link
 * their runtime-PM dependencies, take the register map from the TX device,
 * set up the IRQ chip and micbias, then register the ASoC component.
 *
 * NOTE(review): the error paths below return without releasing the device
 * references obtained via wcd937x_sdw_device_get() and without undoing
 * component_bind_all()/device_link_add() — confirm whether cleanup is
 * handled by the caller or these leak on failure.
 */
static int wcd937x_bind(struct device *dev)
{
	struct wcd937x_priv *wcd937x = dev_get_drvdata(dev);
	int ret;

	/* Give the SDW subdevices some more time to settle */
	usleep_range(5000, 5010);

	ret = component_bind_all(dev, wcd937x);
	if (ret) {
		dev_err(dev, "Slave bind failed, ret = %d\n", ret);
		return ret;
	}

	wcd937x->rxdev = wcd937x_sdw_device_get(wcd937x->rxnode);
	if (!wcd937x->rxdev) {
		dev_err(dev, "could not find slave with matching of node\n");
		return -EINVAL;
	}

	wcd937x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd937x->rxdev);
	wcd937x->sdw_priv[AIF1_PB]->wcd937x = wcd937x;

	wcd937x->txdev = wcd937x_sdw_device_get(wcd937x->txnode);
	if (!wcd937x->txdev) {
		dev_err(dev, "could not find txslave with matching of node\n");
		return -EINVAL;
	}

	wcd937x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd937x->txdev);
	wcd937x->sdw_priv[AIF1_CAP]->wcd937x = wcd937x;

	wcd937x->tx_sdw_dev = dev_to_sdw_dev(wcd937x->txdev);
	if (!wcd937x->tx_sdw_dev) {
		dev_err(dev, "could not get txslave with matching of dev\n");
		return -EINVAL;
	}

	/*
	 * As TX is the main CSR reg interface, which should not be suspended first.
	 * Explicitly add the dependency link.
	 */
	if (!device_link_add(wcd937x->rxdev, wcd937x->txdev,
			     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) {
		dev_err(dev, "Could not devlink TX and RX\n");
		return -EINVAL;
	}

	if (!device_link_add(dev, wcd937x->txdev,
			     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) {
		dev_err(dev, "Could not devlink WCD and TX\n");
		return -EINVAL;
	}

	if (!device_link_add(dev, wcd937x->rxdev,
			     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) {
		dev_err(dev, "Could not devlink WCD and RX\n");
		return -EINVAL;
	}

	/* All register access goes through the TX device's regmap */
	wcd937x->regmap = dev_get_regmap(&wcd937x->tx_sdw_dev->dev, NULL);
	if (!wcd937x->regmap) {
		dev_err(dev, "could not get TX device regmap\n");
		return -EINVAL;
	}

	ret = wcd937x_irq_init(wcd937x, dev);
	if (ret) {
		dev_err(dev, "IRQ init failed: %d\n", ret);
		return ret;
	}

	wcd937x->sdw_priv[AIF1_PB]->slave_irq = wcd937x->virq;
	wcd937x->sdw_priv[AIF1_CAP]->slave_irq = wcd937x->virq;

	ret = wcd937x_set_micbias_data(wcd937x);
	if (ret < 0) {
		dev_err(dev, "Bad micbias pdata\n");
		return ret;
	}

	ret = snd_soc_register_component(dev, &soc_codec_dev_wcd937x,
					 wcd937x_dais, ARRAY_SIZE(wcd937x_dais));
	if (ret)
		dev_err(dev, "Codec registration failed\n");

	return ret;
}
/*
 * Aggregate-device unbind: unregister the component, drop the runtime-PM
 * device links created in wcd937x_bind(), and unbind the sub-components.
 */
static void wcd937x_unbind(struct device *dev)
{
	struct wcd937x_priv *wcd937x = dev_get_drvdata(dev);

	snd_soc_unregister_component(dev);
	device_link_remove(dev, wcd937x->txdev);
	device_link_remove(dev, wcd937x->rxdev);
	device_link_remove(wcd937x->rxdev, wcd937x->txdev);
	component_unbind_all(dev, wcd937x);
	mutex_destroy(&wcd937x->micb_lock);
}
/* component framework master ops for the RX/TX SoundWire aggregate device */
static const struct component_master_ops wcd937x_comp_ops = {
	.bind = wcd937x_bind,
	.unbind = wcd937x_unbind,
};
/*
 * Resolve the "qcom,rx-device" / "qcom,tx-device" phandles and register
 * both nodes with the component framework match list.
 *
 * The extra of_node_get() balances the of_node_put() done by
 * component_release_of() when the match entry is released; the reference
 * from of_parse_phandle() stays with the node cached in wcd937x.
 */
static int wcd937x_add_slave_components(struct wcd937x_priv *wcd937x,
					struct device *dev,
					struct component_match **matchptr)
{
	struct device_node *np = dev->of_node;

	wcd937x->rxnode = of_parse_phandle(np, "qcom,rx-device", 0);
	if (!wcd937x->rxnode) {
		dev_err(dev, "Couldn't parse phandle to qcom,rx-device!\n");
		return -ENODEV;
	}
	of_node_get(wcd937x->rxnode);
	component_match_add_release(dev, matchptr, component_release_of,
				    component_compare_of, wcd937x->rxnode);

	wcd937x->txnode = of_parse_phandle(np, "qcom,tx-device", 0);
	if (!wcd937x->txnode) {
		dev_err(dev, "Couldn't parse phandle to qcom,tx-device\n");
		return -ENODEV;
	}
	of_node_get(wcd937x->txnode);
	component_match_add_release(dev, matchptr, component_release_of,
				    component_compare_of, wcd937x->txnode);

	return 0;
}
/*
 * wcd937x_probe() - platform probe for the WCD937x audio codec.
 *
 * Parses reset/us-euro GPIOs, regulators and MBHC properties from the
 * device tree, enables the supplies, resets the codec and registers an
 * aggregate (component master) device that binds once the SoundWire
 * RX/TX sub-devices have probed.  Runtime PM is enabled with a 1 s
 * autosuspend delay.
 *
 * Fix vs. original: the supplies are acquired with
 * devm_regulator_bulk_get(), so they are devm-managed; the explicit
 * regulator_bulk_free() calls in the error paths caused a double
 * regulator_put() when devm teardown ran afterwards.  They are removed;
 * only regulator_bulk_disable() balances the enable.
 *
 * Return: 0 on success or a negative errno.
 */
static int wcd937x_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device *dev = &pdev->dev;
	struct wcd937x_priv *wcd937x;
	struct wcd_mbhc_config *cfg;
	int ret;

	wcd937x = devm_kzalloc(dev, sizeof(*wcd937x), GFP_KERNEL);
	if (!wcd937x)
		return -ENOMEM;

	dev_set_drvdata(dev, wcd937x);
	mutex_init(&wcd937x->micb_lock);

	wcd937x->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(wcd937x->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(wcd937x->reset_gpio),
				     "failed to reset wcd gpio\n");

	wcd937x->us_euro_gpio = devm_gpiod_get_optional(dev, "us-euro", GPIOD_OUT_LOW);
	if (IS_ERR(wcd937x->us_euro_gpio))
		return dev_err_probe(dev, PTR_ERR(wcd937x->us_euro_gpio),
				     "us-euro swap Control GPIO not found\n");

	cfg = &wcd937x->mbhc_cfg;
	cfg->swap_gnd_mic = wcd937x_swap_gnd_mic;

	wcd937x->supplies[0].supply = "vdd-rxtx";
	wcd937x->supplies[1].supply = "vdd-px";
	wcd937x->supplies[2].supply = "vdd-mic-bias";
	wcd937x->supplies[3].supply = "vdd-buck";

	/* devm-managed: never regulator_bulk_free() these explicitly */
	ret = devm_regulator_bulk_get(dev, WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get supplies\n");

	ret = regulator_bulk_enable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to enable supplies\n");

	wcd937x_dt_parse_micbias_info(dev, wcd937x);

	cfg->mbhc_micbias = MIC_BIAS_2;
	cfg->anc_micbias = MIC_BIAS_2;
	cfg->v_hs_max = WCD_MBHC_HS_V_MAX;
	cfg->num_btn = WCD937X_MBHC_MAX_BUTTONS;
	cfg->micb_mv = wcd937x->micb2_mv;
	cfg->linein_th = 5000;
	cfg->hs_thr = 1700;
	cfg->hph_thr = 50;

	wcd_dt_parse_mbhc_data(dev, &wcd937x->mbhc_cfg);

	ret = wcd937x_add_slave_components(wcd937x, dev, &match);
	if (ret)
		goto err_disable_regulators;

	wcd937x_reset(wcd937x);

	ret = component_master_add_with_match(dev, &wcd937x_comp_ops, match);
	if (ret)
		goto err_disable_regulators;

	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_idle(dev);

	return 0;

err_disable_regulators:
	regulator_bulk_disable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);

	return ret;
}
/*
 * wcd937x_remove() - platform remove callback.
 *
 * Unregisters the component master (which triggers wcd937x_unbind()),
 * disables runtime PM and powers down the supplies.
 *
 * Fix vs. original: the supplies were acquired with
 * devm_regulator_bulk_get(), so the explicit regulator_bulk_free()
 * here caused a second regulator_put() when devm teardown ran after
 * this callback.  Only the disable remains, balancing the enable done
 * in probe.
 */
static void wcd937x_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct wcd937x_priv *wcd937x = dev_get_drvdata(dev);

	component_master_del(&pdev->dev, &wcd937x_comp_ops);

	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_dont_use_autosuspend(dev);

	regulator_bulk_disable(WCD937X_MAX_BULK_SUPPLY, wcd937x->supplies);
}
#if defined(CONFIG_OF)
/* Device-tree match table; both WCD9370 and WCD9375 variants are handled. */
static const struct of_device_id wcd937x_of_match[] = {
	{ .compatible = "qcom,wcd9370-codec" },
	{ .compatible = "qcom,wcd9375-codec" },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(of, wcd937x_of_match);
#endif
/*
 * Platform driver for the codec.  suppress_bind_attrs prevents manual
 * bind/unbind via sysfs, which the component/aggregate setup does not
 * support safely.
 */
static struct platform_driver wcd937x_codec_driver = {
	.probe = wcd937x_probe,
	.remove = wcd937x_remove,
	.driver = {
		.name = "wcd937x_codec",
		.of_match_table = of_match_ptr(wcd937x_of_match),
		.suppress_bind_attrs = true,
	},
};

module_platform_driver(wcd937x_codec_driver);
MODULE_DESCRIPTION("WCD937X Codec driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0
/*
* PS3 OHCI Host Controller driver
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <asm/firmware.h>
#include <asm/ps3.h>
/*
 * ps3_ohci_hc_reset() - hc_driver .reset callback.
 *
 * The PS3's OHCI registers are big-endian, so set OHCI_QUIRK_BE_MMIO
 * before the generic init/reset runs.
 */
static int ps3_ohci_hc_reset(struct usb_hcd *hcd)
{
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);

	ohci->flags |= OHCI_QUIRK_BE_MMIO;
	ohci_hcd_init(ohci);
	return ohci_init(ohci);
}
/*
 * ps3_ohci_hc_start() - hc_driver .start callback.
 *
 * Programs the root-hub descriptor registers before starting the
 * controller, to work around a root hub init quirk in the spider
 * south bridge, then hands off to the generic ohci_run().  On failure
 * the controller is stopped again before returning the error.
 */
static int ps3_ohci_hc_start(struct usb_hcd *hcd)
{
	int result;
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);

	/* Handle root hub init quirk in spider south bridge. */
	/* Also set PwrOn2PwrGood to 0x7f (254ms). */

	ohci_writel(ohci, 0x7f000000 | RH_A_PSM | RH_A_OCPM,
		    &ohci->regs->roothub.a);
	ohci_writel(ohci, 0x00060000, &ohci->regs->roothub.b);

	result = ohci_run(ohci);

	if (result < 0) {
		dev_err(hcd->self.controller, "can't start %s\n",
			hcd->self.bus_name);
		ohci_stop(hcd);
	}

	return result;
}
/*
 * hc_driver ops: everything is the generic OHCI implementation except
 * .reset/.start, which apply the PS3-specific quirks above.
 */
static const struct hc_driver ps3_ohci_hc_driver = {
	.description		= hcd_name,
	.product_desc		= "PS3 OHCI Host Controller",
	.hcd_priv_size		= sizeof(struct ohci_hcd),
	.irq			= ohci_irq,
	.flags			= HCD_MEMORY | HCD_DMA | HCD_USB11,
	.reset			= ps3_ohci_hc_reset,
	.start			= ps3_ohci_hc_start,
	.stop			= ohci_stop,
	.shutdown		= ohci_shutdown,
	.urb_enqueue		= ohci_urb_enqueue,
	.urb_dequeue		= ohci_urb_dequeue,
	.endpoint_disable	= ohci_endpoint_disable,
	.get_frame_number	= ohci_get_frame,
	.hub_status_data	= ohci_hub_status_data,
	.hub_control		= ohci_hub_control,
	.start_port_reset	= ohci_start_port_reset,
#if defined(CONFIG_PM)
	.bus_suspend		= ohci_bus_suspend,
	.bus_resume		= ohci_bus_resume,
#endif
};
/*
 * ps3_ohci_probe() - system-bus probe for the PS3 OHCI controller.
 *
 * Sets up, in order: hypervisor device, DMA region, MMIO region, the
 * I/O interrupt, a dummy 32-bit DMA mask, the HCD, the MMIO mapping,
 * and finally registers the HCD.  Each failure unwinds exactly the
 * steps that succeeded via the goto ladder at the bottom (labels are
 * in reverse order of setup).
 *
 * Return: 0 on success or a negative errno.
 */
static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
{
	int result;
	struct usb_hcd *hcd;
	unsigned int virq;
	static u64 dummy_mask;

	if (usb_disabled()) {
		result = -ENODEV;
		goto fail_start;
	}

	result = ps3_open_hv_device(dev);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		result = -EPERM;
		goto fail_open;
	}

	result = ps3_dma_region_create(dev->d_region);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: "
			"(%d)\n", __func__, __LINE__, result);
		/* NOTE(review): a string literal is always non-NULL, so this
		 * BUG()s unconditionally when the region create fails —
		 * presumably intentional as a "fix your region type" trap. */
		BUG_ON("check region type");
		goto fail_dma_region;
	}

	result = ps3_mmio_region_create(dev->m_region);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
			__func__, __LINE__);
		result = -EPERM;
		goto fail_mmio_region;
	}

	dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
		__LINE__, dev->m_region->lpar_addr);

	result = ps3_io_irq_setup(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
			__func__, __LINE__, virq);
		result = -EPERM;
		goto fail_irq;
	}

	/* The PS3 bus has no real DMA mask; fake a 32-bit one for the core. */
	dummy_mask = DMA_BIT_MASK(32);
	dev->core.dma_mask = &dummy_mask;
	dma_set_coherent_mask(&dev->core, dummy_mask);

	hcd = usb_create_hcd(&ps3_ohci_hc_driver, &dev->core, dev_name(&dev->core));

	if (!hcd) {
		dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
			__LINE__);
		result = -ENOMEM;
		goto fail_create_hcd;
	}

	hcd->rsrc_start = dev->m_region->lpar_addr;
	hcd->rsrc_len = dev->m_region->len;

	/* Failure to claim the region is non-fatal; only logged. */
	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name))
		dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n",
			__func__, __LINE__);

	hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);

	if (!hcd->regs) {
		dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
			__LINE__);
		result = -EPERM;
		goto fail_ioremap;
	}

	dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
		(unsigned long)hcd->rsrc_start);
	dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__,
		(unsigned long)hcd->rsrc_len);
	dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__,
		(unsigned long)hcd->regs);
	dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
		(unsigned long)virq);

	ps3_system_bus_set_drvdata(dev, hcd);

	result = usb_add_hcd(hcd, virq, 0);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
			__func__, __LINE__, result);
		goto fail_add_hcd;
	}

	device_wakeup_enable(hcd->self.controller);
	return result;

fail_add_hcd:
	iounmap(hcd->regs);
fail_ioremap:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
fail_create_hcd:
	ps3_io_irq_destroy(virq);
fail_irq:
	ps3_free_mmio_region(dev->m_region);
fail_mmio_region:
	ps3_dma_region_free(dev->d_region);
fail_dma_region:
	ps3_close_hv_device(dev);
fail_open:
fail_start:
	return result;
}
/*
 * ps3_ohci_remove() - tear down everything ps3_ohci_probe() set up,
 * in reverse order.  Also used as the .shutdown callback.
 *
 * The IRQ number is saved before ohci_shutdown()/usb_remove_hcd()
 * because hcd->irq is no longer valid afterwards.
 */
static void ps3_ohci_remove(struct ps3_system_bus_device *dev)
{
	unsigned int tmp;
	struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);

	BUG_ON(!hcd);

	dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs);
	dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq);

	tmp = hcd->irq;	/* keep a copy; teardown below invalidates hcd->irq */

	ohci_shutdown(hcd);
	usb_remove_hcd(hcd);

	ps3_system_bus_set_drvdata(dev, NULL);

	BUG_ON(!hcd->regs);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);

	ps3_io_irq_destroy(tmp);
	ps3_free_mmio_region(dev->m_region);
	ps3_dma_region_free(dev->d_region);
	ps3_close_hv_device(dev);
}
/*
 * ps3_ohci_driver_register() - register the bus driver, but only on
 * machines actually running under the PS3 LV1 hypervisor; elsewhere
 * registration is silently skipped and treated as success.
 */
static int __init ps3_ohci_driver_register(struct ps3_system_bus_driver *drv)
{
	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return 0;

	return ps3_system_bus_driver_register(drv);
}
/*
 * ps3_ohci_driver_unregister() - mirror of the register helper: only
 * unregister when running on PS3 LV1 firmware (i.e. when the driver
 * was registered in the first place).
 */
static void ps3_ohci_driver_unregister(struct ps3_system_bus_driver *drv)
{
	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return;

	ps3_system_bus_driver_unregister(drv);
}
MODULE_ALIAS(PS3_MODULE_ALIAS_OHCI);

/*
 * System-bus driver: .shutdown reuses the remove path so the controller
 * is quiesced on reboot/poweroff too.
 */
static struct ps3_system_bus_driver ps3_ohci_driver = {
	.core.name = "ps3-ohci-driver",
	.core.owner = THIS_MODULE,
	.match_id = PS3_MATCH_ID_OHCI,
	.probe = ps3_ohci_probe,
	.remove = ps3_ohci_remove,
	.shutdown = ps3_ohci_remove,
};
|
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2021 Christian Hewitt <[email protected]>
#include <media/rc-map.h>
#include <linux/module.h>
//
// Keytable for the Mecool Kiii Pro remote control
//
/*
 * NEC scancode -> Linux keycode table for the Mecool Kiii Pro remote.
 * Comments on individual entries give the label printed on the button
 * where it differs from the mapped key.
 */
static struct rc_map_table mecool_kiii_pro[] = {

	{ 0x59, KEY_POWER },

	{ 0x52, KEY_1 },
	{ 0x50, KEY_2 },
	{ 0x10, KEY_3 },
	{ 0x56, KEY_4 },
	{ 0x54, KEY_5 },
	{ 0x14, KEY_6 },
	{ 0x4e, KEY_7 },
	{ 0x4c, KEY_8 },
	{ 0x0c, KEY_9 },
	{ 0x02, KEY_INFO },
	{ 0x0f, KEY_0 },
	{ 0x51, KEY_DELETE },
	{ 0x1f, KEY_FAVORITES},
	{ 0x09, KEY_SUBTITLE },
	{ 0x01, KEY_LANGUAGE }, // AUDIO

	{ 0x42, KEY_RED },
	{ 0x40, KEY_GREEN },
	{ 0x00, KEY_YELLOW},
	{ 0x03, KEY_BLUE }, // RADIO

	{ 0x0d, KEY_HOME },
	{ 0x4d, KEY_EPG },
	{ 0x45, KEY_MENU },
	{ 0x05, KEY_EXIT },

	{ 0x5a, KEY_LEFT },
	{ 0x1b, KEY_RIGHT },
	{ 0x06, KEY_UP },
	{ 0x16, KEY_DOWN },
	{ 0x1a, KEY_OK },

	{ 0x13, KEY_VOLUMEUP },
	{ 0x17, KEY_VOLUMEDOWN },
	{ 0x19, KEY_MUTE },
	{ 0x12, KEY_CONTEXT_MENU }, // MOUSE
	{ 0x55, KEY_CHANNELUP }, // PAGE_UP
	{ 0x15, KEY_CHANNELDOWN }, // PAGE_DOWN

	{ 0x4a, KEY_REWIND },
	{ 0x48, KEY_FORWARD },
	{ 0x46, KEY_PLAYPAUSE },
	{ 0x44, KEY_STOP },

	{ 0x08, KEY_PREVIOUSSONG},
	{ 0x0b, KEY_NEXTSONG},
	{ 0x04, KEY_PVR },
	{ 0x64, KEY_RECORD },
};
/* rc-core registration record wrapping the table above (NEC protocol). */
static struct rc_map_list mecool_kiii_pro_map = {
	.map = {
		.scan     = mecool_kiii_pro,
		.size     = ARRAY_SIZE(mecool_kiii_pro),
		.rc_proto = RC_PROTO_NEC,
		.name     = RC_MAP_MECOOL_KIII_PRO,
	}
};
/* Module init: register the keymap with rc-core. */
static int __init init_rc_map_mecool_kiii_pro(void)
{
	return rc_map_register(&mecool_kiii_pro_map);
}
/* Module exit: remove the keymap from rc-core. */
static void __exit exit_rc_map_mecool_kiii_pro(void)
{
	rc_map_unregister(&mecool_kiii_pro_map);
}
module_init(init_rc_map_mecool_kiii_pro)
module_exit(exit_rc_map_mecool_kiii_pro)

MODULE_LICENSE("GPL");
/* Fix: the author string was missing the closing '>' on the address. */
MODULE_AUTHOR("Christian Hewitt <[email protected]>");
MODULE_DESCRIPTION("Mecool Kiii Pro remote controller keytable");
|
/*
* Copyright (c) 2016 Hisilicon Limited.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
/*
 * hns_roce_buf_free() - free a buffer allocated by hns_roce_buf_alloc().
 *
 * Frees every DMA-coherent trunk, then the trunk array, then the buf
 * struct itself.  NULL @buf is tolerated; trunk_list is cleared before
 * freeing so a stale pointer is never left behind.
 */
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
	struct hns_roce_buf_list *trunks;
	u32 i;

	if (!buf)
		return;

	trunks = buf->trunk_list;
	if (trunks) {
		buf->trunk_list = NULL;
		for (i = 0; i < buf->ntrunks; i++)
			dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
					  trunks[i].buf, trunks[i].map);

		kfree(trunks);
	}

	kfree(buf);
}
/*
 * Allocate the dma buffer for storing ROCEE table entries
 *
 * @size: required size
 * @page_shift: the unit size in a continuous dma address range
 * @flags: HNS_ROCE_BUF_ flags to control the allocation flow.
 *
 * The buffer is split into power-of-two "trunks".  With
 * HNS_ROCE_BUF_DIRECT a single physically-contiguous trunk covers the
 * whole size; otherwise one trunk per (PAGE_SIZE-aligned) page is used.
 * With HNS_ROCE_BUF_NOFAIL a partial allocation succeeds as long as at
 * least one trunk was obtained.  Returns ERR_PTR() on failure.
 */
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags)
{
	u32 trunk_size, page_size, alloced_size;
	struct hns_roce_buf_list *trunks;
	struct hns_roce_buf *buf;
	gfp_t gfp_flags;
	u32 ntrunk, i;

	/* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
	if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
		return ERR_PTR(-EINVAL);

	gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
	buf = kzalloc(sizeof(*buf), gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->page_shift = page_shift;
	page_size = 1 << buf->page_shift;

	/* Calc the trunk size and num by required size and page_shift */
	if (flags & HNS_ROCE_BUF_DIRECT) {
		buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
		ntrunk = 1;
	} else {
		buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
		ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
	}

	trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
	if (!trunks) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	trunk_size = 1 << buf->trunk_shift;
	alloced_size = 0;
	for (i = 0; i < ntrunk; i++) {
		trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
						   &trunks[i].map, gfp_flags);
		if (!trunks[i].buf)
			break;

		alloced_size += trunk_size;
	}

	buf->ntrunks = i;

	/* In nofail mode, it's only failed when the alloced size is 0 */
	if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
		for (i = 0; i < buf->ntrunks; i++)
			dma_free_coherent(hr_dev->dev, trunk_size,
					  trunks[i].buf, trunks[i].map);

		kfree(trunks);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->npages = DIV_ROUND_UP(alloced_size, page_size);
	buf->trunk_list = trunks;

	return buf;
}
/*
 * hns_roce_get_kmem_bufs() - collect DMA addresses of a kernel buffer.
 *
 * Walks @buf in (1 << @page_shift)-byte steps and writes up to @buf_cnt
 * hardware-page DMA addresses into @bufs.  @page_shift must not exceed
 * the trunk shift, since a hw page may not span trunks.
 *
 * Return: number of addresses written, or -EINVAL on bad page_shift.
 */
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift)
{
	unsigned int offset, max_size;
	int total = 0;
	int i;

	if (page_shift > buf->trunk_shift) {
		dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",
			page_shift, buf->trunk_shift);
		return -EINVAL;
	}

	offset = 0;
	max_size = buf->ntrunks << buf->trunk_shift;
	for (i = 0; i < buf_cnt && offset < max_size; i++) {
		bufs[total++] = hns_roce_buf_dma_addr(buf, offset);
		offset += (1 << page_shift);
	}

	return total;
}
/*
 * hns_roce_get_umem_bufs() - collect DMA addresses of a user memory
 * region, one per hardware page of (1 << page_shift) bytes, writing at
 * most @buf_cnt entries into @bufs.
 *
 * Return: number of addresses written.
 */
int hns_roce_get_umem_bufs(dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift)
{
	struct ib_block_iter iter;
	int cnt = 0;

	/* convert system page cnt to hw page cnt */
	rdma_umem_for_each_dma_block(umem, &iter, 1 << page_shift) {
		bufs[cnt++] = rdma_block_iter_dma_address(&iter);
		if (cnt >= buf_cnt)
			break;
	}

	return cnt;
}
/*
 * hns_roce_cleanup_bitmap() - destroy all resource-index allocators.
 *
 * XRCD and SRQ IDAs exist only when the corresponding capability flags
 * are set; QP/CQ tables have their own cleanup helpers.
 */
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ida_destroy(&hr_dev->xrcd_ida.ida);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		ida_destroy(&hr_dev->srq_table.srq_ida.ida);
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
	ida_destroy(&hr_dev->pd_ida.ida);
	ida_destroy(&hr_dev->uar_ida.ida);
}
|
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
#include "hal.h"
#include "hal_tx.h"
#include "hal_rx.h"
#include "hal_desc.h"
#include "hif.h"
/*
 * ath11k_hal_reo_set_desc_hdr() - fill a HAL descriptor header with
 * owner, buffer type, and a magic pattern in the reserved (debug) bits.
 */
static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
					u8 owner, u8 buffer_type, u32 magic)
{
	u32 info;

	info = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
	       FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type) |
	       /* Magic pattern in reserved bits for debugging */
	       FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);

	hdr->info0 = info;
}
/*
 * ath11k_hal_reo_cmd_queue_stats() - build a GET_QUEUE_STATS REO command
 * into the ring entry @tlv from @cmd.
 *
 * Return: the hardware-assigned command number from the descriptor header.
 */
static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
					  struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_get_queue_stats *desc;

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
		  FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_get_queue_stats *)tlv->value;
	/* clear the body but keep the cmd header (it carries the cmd number) */
	memset_startat(desc, 0, queue_addr_lo);

	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	desc->queue_addr_lo = cmd->addr_lo;
	desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
				 cmd->addr_hi);
	if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
		desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
/*
 * ath11k_hal_reo_cmd_flush_cache() - build a FLUSH_CACHE REO command.
 *
 * When BLOCK_LATER is requested, a free blocking-resource slot is
 * claimed from hal->avail_blk_resource (ffz = first zero bit) and
 * recorded in hal->current_blk_index; -ENOSPC if all slots are taken.
 *
 * Return: the hardware-assigned command number, or -ENOSPC.
 */
static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
					  struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_flush_cache *desc;
	u8 avail_slot = ffz(hal->avail_blk_resource);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
			return -ENOSPC;

		hal->current_blk_index = avail_slot;
	}

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
		  FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_flush_cache *)tlv->value;
	/* clear the body but keep the cmd header (it carries the cmd number) */
	memset_startat(desc, 0, cache_addr_lo);

	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	desc->cache_addr_lo = cmd->addr_lo;
	desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
				 cmd->addr_hi);

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
		desc->info0 |=
			FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
				   avail_slot);
	}

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;

	if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
		desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
/*
 * ath11k_hal_reo_cmd_update_rx_queue() - build an UPDATE_RX_REO_QUEUE
 * command.
 *
 * info0 carries the "which fields to update" mask (derived from
 * cmd->upd0), info1/info2 carry the new field values (from
 * cmd->upd1/upd2).  cmd->pn_size and cmd->ba_window_size are
 * normalized in place before being packed into info2.
 *
 * Return: the hardware-assigned command number.
 */
static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
					      struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_reo_update_rx_queue *desc;

	tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
		  FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));

	desc = (struct hal_reo_update_rx_queue *)tlv->value;
	/* clear the body but keep the cmd header (it carries the cmd number) */
	memset_startat(desc, 0, queue_addr_lo);

	desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
	if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
		desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;

	desc->queue_addr_lo = cmd->addr_lo;

	/* info0: one update-enable bit per field, taken from upd0 */
	desc->info0 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
			   cmd->addr_hi) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
			   !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));

	/* info1: new values for the selected fields, taken from upd1 */
	desc->info1 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
			   cmd->rx_queue_num) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
			   FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
			   FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
			   !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));

	/* translate PN size in bits to the hardware enum */
	if (cmd->pn_size == 24)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
	else if (cmd->pn_size == 48)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
	else if (cmd->pn_size == 128)
		cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;

	/* hardware encodes the BA window as (size - 1); minimum of 2 */
	if (cmd->ba_window_size < 1)
		cmd->ba_window_size = 1;

	if (cmd->ba_window_size == 1)
		cmd->ba_window_size++;

	desc->info2 =
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
			   cmd->ba_window_size - 1) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
			   !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
			   FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
			   !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
		FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
			   !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));

	return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
}
/*
 * ath11k_hal_reo_cmd_send() - enqueue a REO command on the SRNG.
 *
 * Claims the next source-ring entry under srng->lock, dispatches to the
 * per-command builder, and kicks the shadow-register timer.
 *
 * NOTE(review): the shadow timer is started even when the builder
 * returned an error — confirm this is intended (entry was consumed
 * either way).
 *
 * Return: command number (>= 0), -ENOBUFS if the ring is full,
 * -EOPNOTSUPP/-EINVAL for unsupported/unknown command types.
 */
int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
			    enum hal_reo_cmd_type type,
			    struct ath11k_hal_reo_cmd *cmd)
{
	struct hal_tlv_hdr *reo_desc;
	int ret;

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);
	reo_desc = (struct hal_tlv_hdr *)ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_desc) {
		ret = -ENOBUFS;
		goto out;
	}

	switch (type) {
	case HAL_REO_CMD_GET_QUEUE_STATS:
		ret = ath11k_hal_reo_cmd_queue_stats(reo_desc, cmd);
		break;
	case HAL_REO_CMD_FLUSH_CACHE:
		ret = ath11k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
		break;
	case HAL_REO_CMD_UPDATE_RX_QUEUE:
		ret = ath11k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
		break;
	case HAL_REO_CMD_FLUSH_QUEUE:
	case HAL_REO_CMD_UNBLOCK_CACHE:
	case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
		ath11k_warn(ab, "Unsupported reo command %d\n", type);
		ret = -EOPNOTSUPP;
		break;
	default:
		ath11k_warn(ab, "Unknown reo command %d\n", type);
		ret = -EINVAL;
		break;
	}

	ath11k_dp_shadow_start_timer(ab, srng, &ab->dp.reo_cmd_timer);

out:
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return ret;
}
/*
 * ath11k_hal_rx_buf_addr_info_set() - pack a DMA address, SW cookie and
 * return-buffer-manager id into a buffer-address descriptor.
 */
void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
				     u32 cookie, u8 manager)
{
	struct ath11k_buffer_addr *binfo = desc;

	binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				  lower_32_bits(paddr));
	binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
				  upper_32_bits(paddr)) |
		       FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
		       FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
}
/*
 * ath11k_hal_rx_buf_addr_info_get() - inverse of the _set helper:
 * extract DMA address, SW cookie and return-buffer-manager id from a
 * buffer-address descriptor.
 */
void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
				     u32 *cookie, u8 *rbm)
{
	struct ath11k_buffer_addr *binfo = desc;

	*paddr =
		(((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) |
		FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
	*cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
}
/*
 * ath11k_hal_rx_msdu_link_info_get() - walk an MSDU link descriptor.
 *
 * Fills @msdu_cookies with the SW cookie of each valid MSDU entry and
 * sets *num_msdus to the count (an entry with a zero address field
 * terminates the list early).  *rbm is taken from the first entry.
 */
void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
				      u32 *msdu_cookies,
				      enum hal_rx_buf_return_buf_manager *rbm)
{
	struct hal_rx_msdu_link *link = link_desc;
	struct hal_rx_msdu_details *msdu;
	int i;

	*num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	msdu = &link->msdu_link[0];
	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
			 msdu->buf_addr_info.info1);

	for (i = 0; i < *num_msdus; i++) {
		msdu = &link->msdu_link[i];

		if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
			       msdu->buf_addr_info.info0)) {
			*num_msdus = i;
			break;
		}
		*msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
					  msdu->buf_addr_info.info1);
		msdu_cookies++;
	}
}
/*
 * ath11k_hal_desc_reo_parse_err() - validate a REO destination-ring
 * error descriptor and extract the link-descriptor address/cookie.
 *
 * Bumps the per-error-code soc statistic, then rejects descriptors
 * whose push reason is not an error/routing indication or whose buffer
 * type is not a link descriptor.
 *
 * Fix vs. original: the "expected buffer type link_desc" warning was
 * missing its trailing newline, unlike every sibling log message.
 *
 * Return: 0 on success, -EINVAL on an unexpected descriptor.
 */
int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
				  dma_addr_t *paddr, u32 *desc_bank)
{
	struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
	enum hal_reo_dest_ring_push_reason push_reason;
	enum hal_reo_dest_ring_error_code err_code;

	push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
				desc->info0);
	err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
			     desc->info0);
	ab->soc_stats.reo_error[err_code]++;

	if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
	    push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
		ath11k_warn(ab, "expected error push reason code, received %d\n",
			    push_reason);
		return -EINVAL;
	}

	if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
	    HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
		ath11k_warn(ab, "expected buffer type link_desc\n");
		return -EINVAL;
	}

	ath11k_hal_rx_reo_ent_paddr_get(ab, rx_desc, paddr, desc_bank);

	return 0;
}
/*
 * ath11k_hal_wbm_desc_parse_err() - parse a WBM release-ring error
 * descriptor into @rel_info.
 *
 * Accepts only released-MSDU descriptors coming from RXDMA or REO with
 * the expected return-buffer manager; the push reason / error code
 * fields differ between the REO and RXDMA layouts and are decoded
 * accordingly.
 *
 * Return: 0 on success, -EINVAL for unexpected type/source/RBM.
 */
int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
				  struct hal_rx_wbm_rel_info *rel_info)
{
	struct hal_wbm_release_ring *wbm_desc = desc;
	enum hal_wbm_rel_desc_type type;
	enum hal_wbm_rel_src_module rel_src;
	enum hal_rx_buf_return_buf_manager ret_buf_mgr;

	type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
			 wbm_desc->info0);
	/* We expect only WBM_REL buffer type */
	if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
		WARN_ON(1);
		return -EINVAL;
	}

	rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
			    wbm_desc->info0);
	if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
	    rel_src != HAL_WBM_REL_SRC_MODULE_REO)
		return -EINVAL;

	ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
				wbm_desc->buf_addr_info.info1);
	if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
		ab->soc_stats.invalid_rbm++;
		return -EINVAL;
	}

	rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				     wbm_desc->buf_addr_info.info1);
	rel_info->err_rel_src = rel_src;

	/* REO and RXDMA use different bitfields for reason/error code */
	if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
		rel_info->push_reason =
			FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON,
				  wbm_desc->info0);
		rel_info->err_code =
			FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE,
				  wbm_desc->info0);
	} else {
		rel_info->push_reason =
			FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON,
				  wbm_desc->info0);
		rel_info->err_code =
			FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE,
				  wbm_desc->info0);
	}

	rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
					 wbm_desc->info2);
	rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
					wbm_desc->info2);

	return 0;
}
/*
 * ath11k_hal_rx_reo_ent_paddr_get() - extract the 64-bit DMA address
 * and descriptor-bank cookie from a buffer-address descriptor.
 */
void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
				     dma_addr_t *paddr, u32 *desc_bank)
{
	struct ath11k_buffer_addr *buff_addr = desc;
	u64 addr_hi;
	u64 addr_lo;

	addr_hi = FIELD_GET(BUFFER_ADDR_INFO1_ADDR, buff_addr->info1);
	addr_lo = FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);

	*paddr = (addr_hi << 32) | addr_lo;
	*desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
}
/*
 * ath11k_hal_rx_msdu_link_desc_set() - prepare a WBM release entry that
 * returns an MSDU link descriptor to the buffer manager.
 *
 * Copies the source link descriptor's buffer-address info and marks the
 * entry as SW-sourced with the requested buffer-manager @action.
 * Note: info0 is OR-ed, not assigned — the entry is assumed cleared.
 */
void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
				      void *link_desc,
				      enum hal_wbm_rel_bm_act action)
{
	struct hal_wbm_release_ring *dst_desc = desc;
	struct hal_wbm_release_ring *src_desc = link_desc;

	dst_desc->buf_addr_info = src_desc->buf_addr_info;
	dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
				      HAL_WBM_REL_SRC_MODULE_SW) |
			   FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
			   FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
				      HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
}
/*
 * ath11k_hal_reo_status_queue_stats() - decode a GET_QUEUE_STATS status
 * descriptor into @status (command number + execution status) and dump
 * all counters via the HAL debug log.
 */
void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
				       struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_get_queue_stats_status *desc =
		(struct hal_reo_get_queue_stats_status *)tlv->value;

	status->uniform_hdr.cmd_num =
				FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
					  desc->hdr.info0);
	status->uniform_hdr.cmd_status =
				FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
					  desc->hdr.info0);

	ath11k_dbg(ab, ATH11K_DBG_HAL, "Queue stats status:\n");
	ath11k_dbg(ab, ATH11K_DBG_HAL, "header: cmd_num %d status %d\n",
		   status->uniform_hdr.cmd_num,
		   status->uniform_hdr.cmd_status);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "ssn %ld cur_idx %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
			     desc->info0),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
			     desc->info0));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
		   desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
		   desc->last_rx_enqueue_timestamp,
		   desc->last_rx_dequeue_timestamp);
	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
		   desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
		   desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
		   desc->rx_bitmap[6], desc->rx_bitmap[7]);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
			     desc->info1),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
			     desc->info1));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
			     desc->info2),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
			     desc->info2),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
			     desc->info2));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
			     desc->info3),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
			     desc->info3));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
		   desc->num_mpdu_frames, desc->num_msdu_frames,
		   desc->total_bytes);
	ath11k_dbg(ab, ATH11K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
			     desc->info4),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
			     desc->info4),
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
			     desc->info4));
	ath11k_dbg(ab, ATH11K_DBG_HAL, "looping count %ld\n",
		   FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
			     desc->info5));
}
/* Decode the uniform REO status header at the start of a REO status ring
 * descriptor.  Writes the command execution status through @status and
 * returns the command number this status entry refers to.
 */
int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_status_hdr *hdr =
		(struct hal_reo_status_hdr *)tlv->value;
	u32 info0 = hdr->info0;

	*status = FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, info0);

	return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, info0);
}
/* Parse a REO "flush queue" status descriptor into @status. */
void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
				       struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_flush_queue_status *desc =
		(struct hal_reo_flush_queue_status *)tlv->value;
	u32 hdr_info0 = desc->hdr.info0;

	/* Common status header: command number + execution status */
	status->uniform_hdr.cmd_num =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr_info0);
	status->uniform_hdr.cmd_status =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr_info0);

	status->u.flush_queue.err_detected =
		FIELD_GET(HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED,
			  desc->info0);
}
/* Parse a REO "flush cache" status descriptor into @status and update the
 * HAL's view of available blocking resources.
 */
void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
				       struct hal_reo_status *status)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_flush_cache_status *desc =
		(struct hal_reo_flush_cache_status *)tlv->value;
	u32 hdr_info0 = desc->hdr.info0;
	u32 info0 = desc->info0;

	status->uniform_hdr.cmd_num =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr_info0);
	status->uniform_hdr.cmd_status =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr_info0);

	status->u.flush_cache.err_detected =
		FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR, info0);
	status->u.flush_cache.err_code =
		FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE,
			  info0);

	/* Error code 0: mark the currently used blocking resource as
	 * available again (cleared by the unblock-cache path).
	 */
	if (!status->u.flush_cache.err_code)
		hal->avail_blk_resource |= BIT(hal->current_blk_index);

	status->u.flush_cache.cache_controller_flush_status_hit =
		FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT,
			  info0);
	status->u.flush_cache.cache_controller_flush_status_desc_type =
		FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
			  info0);
	status->u.flush_cache.cache_controller_flush_status_client_id =
		FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
			  info0);
	status->u.flush_cache.cache_controller_flush_status_err =
		FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR, info0);
	status->u.flush_cache.cache_controller_flush_status_cnt =
		FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT, info0);
}
/* Parse a REO "unblock cache" status descriptor into @status; on a
 * successful unblock of a blocking resource, mark that resource busy again
 * in the HAL bookkeeping (inverse of the flush-cache path).
 */
void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
				       struct hal_reo_status *status)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_unblock_cache_status *desc =
		(struct hal_reo_unblock_cache_status *)tlv->value;
	u32 hdr_info0 = desc->hdr.info0;
	u32 info0 = desc->info0;

	status->uniform_hdr.cmd_num =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr_info0);
	status->uniform_hdr.cmd_status =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr_info0);

	status->u.unblock_cache.err_detected =
		FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR, info0);
	status->u.unblock_cache.unblock_type =
		FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE, info0);

	if (!status->u.unblock_cache.err_detected &&
	    status->u.unblock_cache.unblock_type ==
	    HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
		hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
}
/* Parse a REO "flush timeout list" status descriptor into @status. */
void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
					      u32 *reo_desc,
					      struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_flush_timeout_list_status *desc =
		(struct hal_reo_flush_timeout_list_status *)tlv->value;
	u32 hdr_info0 = desc->hdr.info0;
	u32 info0 = desc->info0;
	u32 info1 = desc->info1;

	status->uniform_hdr.cmd_num =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr_info0);
	status->uniform_hdr.cmd_status =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr_info0);

	status->u.timeout_list.err_detected =
		FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR, info0);
	status->u.timeout_list.list_empty =
		FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY, info0);

	status->u.timeout_list.release_desc_cnt =
		FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT,
			  info1);
	status->u.timeout_list.fwd_buf_cnt =
		FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT,
			  info1);
}
/* Parse a REO "descriptor threshold reached" status descriptor, extracting
 * the triggering threshold index and the per-counter link descriptor counts.
 */
void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
					       u32 *reo_desc,
					       struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_desc_thresh_reached_status *desc =
		(struct hal_reo_desc_thresh_reached_status *)tlv->value;
	u32 hdr_info0 = desc->hdr.info0;

	status->uniform_hdr.cmd_num =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr_info0);
	status->uniform_hdr.cmd_status =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr_info0);

	status->u.desc_thresh_reached.threshold_idx =
		FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX,
			  desc->info0);
	status->u.desc_thresh_reached.link_desc_counter0 =
		FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0,
			  desc->info1);
	status->u.desc_thresh_reached.link_desc_counter1 =
		FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1,
			  desc->info2);
	status->u.desc_thresh_reached.link_desc_counter2 =
		FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2,
			  desc->info3);
	status->u.desc_thresh_reached.link_desc_counter_sum =
		FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
			  desc->info4);
}
/* Parse an "update RX REO queue" status descriptor; this status carries
 * only the uniform header (command number + execution status).
 */
void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
					       u32 *reo_desc,
					       struct hal_reo_status *status)
{
	struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
	struct hal_reo_status_hdr *desc =
		(struct hal_reo_status_hdr *)tlv->value;
	u32 info0 = desc->info0;

	status->uniform_hdr.cmd_num =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, info0);
	status->uniform_hdr.cmd_status =
		FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, info0);
}
/* Return the byte size of a REO HW queue descriptor for the given BA window.
 *
 * The base descriptor is followed by 0-3 extension descriptors depending on
 * the window size; a window of <= 1 still needs one extension for QoS TIDs.
 */
u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
{
	u32 num_ext_desc;

	if (ba_window_size <= 1)
		num_ext_desc = (tid != HAL_DESC_REO_NON_QOS_TID) ? 1 : 0;
	else if (ba_window_size <= 105)
		num_ext_desc = 1;
	else if (ba_window_size <= 210)
		num_ext_desc = 2;
	else
		num_ext_desc = 3;

	return sizeof(struct hal_rx_reo_queue) +
	       num_ext_desc * sizeof(struct hal_rx_reo_queue_ext);
}
/* Initialize a REO HW RX queue descriptor in place.
 *
 * @vaddr:          CPU address of the (DMA-able) queue descriptor memory;
 *                  cleared here before being filled in
 * @tid:            traffic identifier the queue serves
 * @ba_window_size: block-ack window size; values < 1 are clamped to 1, and
 *                  a window of 1 on a QoS TID is bumped to 2 below
 * @start_seq:      starting sequence number; only applied when <= 0xfff
 * @type:           PN check type to program for this queue
 *
 * NOTE(review): the window-size fixups and the info0/info1 writes below are
 * order-dependent; keep the sequence intact.
 */
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
				u32 start_seq, enum hal_pn_type type)
{
	struct hal_rx_reo_queue *qdesc = vaddr;
	struct hal_rx_reo_queue_ext *ext_desc;

	memset(qdesc, 0, sizeof(*qdesc));

	/* Descriptor header: owned by REO, tagged with a debug magic */
	ath11k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
				    HAL_DESC_REO_QUEUE_DESC,
				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);

	/* The TID doubles as the RX queue number */
	qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);

	qdesc->info0 =
		FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
		FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
		FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, ath11k_tid_to_ac(tid));

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* QoS TIDs always get a window of at least 2; only the non-QoS TID
	 * may keep a window of 1 (and then gets the RETRY bit set).
	 */
	if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
		ba_window_size++;

	if (ba_window_size == 1)
		qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);

	/* Hardware encodes the window as (size - 1) */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
				   ba_window_size - 1);

	/* Enable PN checking only for WPA; WAPI types are left unchecked */
	switch (type) {
	case HAL_PN_TYPE_NONE:
	case HAL_PN_TYPE_WAPI_EVEN:
	case HAL_PN_TYPE_WAPI_UNEVEN:
		break;
	case HAL_PN_TYPE_WPA:
		qdesc->info0 |=
			FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
			FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
				   HAL_RX_REO_QUEUE_PN_SIZE_48);
		break;
	}

	/* TODO: Set Ignore ampdu flags based on BA window size and/or
	 * AMPDU capabilities
	 */
	qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);

	/* SVLD intentionally 0: no stored sequence number is valid yet */
	qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);

	if (start_seq <= 0xfff)
		qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
					  start_seq);

	/* Non-QoS TID uses no extension descriptors */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		return;

	ext_desc = qdesc->ext_desc;

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	memset(ext_desc, 0, sizeof(*ext_desc));
	ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
				    HAL_DESC_REO_QUEUE_EXT_DESC,
				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
				    HAL_DESC_REO_QUEUE_EXT_DESC,
				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
	ext_desc++;
	memset(ext_desc, 0, sizeof(*ext_desc));
	ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
				    HAL_DESC_REO_QUEUE_EXT_DESC,
				    REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
}
/* Pre-tag every entry of the REO command ring with a unique, monotonically
 * increasing command number so statuses can later be matched to commands.
 */
void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
				  struct hal_srng *srng)
{
	struct hal_srng_params params;
	struct hal_reo_get_queue_stats *desc;
	struct hal_tlv_hdr *tlv;
	int entry_size;
	int cmd_num = 1;
	u8 *entry;
	int i;

	memset(&params, 0, sizeof(params));

	entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
	ath11k_hal_srng_get_params(ab, srng, &params);
	entry = (u8 *)params.ring_base_vaddr;

	for (i = 0; i < params.num_entries; i++, entry += entry_size) {
		tlv = (struct hal_tlv_hdr *)entry;
		desc = (struct hal_reo_get_queue_stats *)tlv->value;
		desc->cmd.info0 =
			FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num++);
	}
}
#define HAL_MAX_UL_MU_USERS 37
/* Copy the raw UL OFDMA words out of a PPDU-end user-stats TLV into the
 * per-user status (info6/info10, converted from little endian).
 */
static inline void
ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
				struct hal_rx_user_status *rx_user_status)
{
	struct hal_rx_ppdu_end_user_stats *eu_stats = rx_tlv;

	rx_user_status->ul_ofdma_user_v0_word0 =
		__le32_to_cpu(eu_stats->info6);
	rx_user_status->ul_ofdma_user_v0_word1 =
		__le32_to_cpu(eu_stats->info10);
}
static inline void
ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
struct hal_rx_user_status *rx_user_status)
{
struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv;
rx_user_status->mpdu_ok_byte_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_OK_BYTE_COUNT,
__le32_to_cpu(ppdu_end_user->info8));
rx_user_status->mpdu_err_byte_count =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO9_MPDU_ERR_BYTE_COUNT,
__le32_to_cpu(ppdu_end_user->info9));
}
/* Snapshot the per-PPDU fields already accumulated in @ppdu_info into the
 * per-user status record, then pull in the byte counters from the TLV.
 */
static inline void
ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
				    struct hal_rx_user_status *rx_user_status)
{
	rx_user_status->ast_index = ppdu_info->ast_index;
	rx_user_status->tid = ppdu_info->tid;
	rx_user_status->tcp_msdu_count = ppdu_info->tcp_msdu_count;
	rx_user_status->udp_msdu_count = ppdu_info->udp_msdu_count;
	rx_user_status->other_msdu_count = ppdu_info->other_msdu_count;
	rx_user_status->frame_control = ppdu_info->frame_control;
	rx_user_status->frame_control_info_valid =
		ppdu_info->frame_control_info_valid;
	rx_user_status->data_sequence_control_info_valid =
		ppdu_info->data_sequence_control_info_valid;
	rx_user_status->first_data_seq_ctrl = ppdu_info->first_data_seq_ctrl;
	rx_user_status->preamble_type = ppdu_info->preamble_type;
	rx_user_status->ht_flags = ppdu_info->ht_flags;
	rx_user_status->vht_flags = ppdu_info->vht_flags;
	rx_user_status->he_flags = ppdu_info->he_flags;
	rx_user_status->rs_flags = ppdu_info->rs_flags;
	rx_user_status->mpdu_cnt_fcs_ok = ppdu_info->num_mpdu_fcs_ok;
	rx_user_status->mpdu_cnt_fcs_err = ppdu_info->num_mpdu_fcs_err;

	ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
}
/* Extract the peer id from an MPDU-start info block.  The field layout
 * differs between hardware generations, so delegate to the per-hw ops.
 */
static u16 ath11k_hal_rx_mpduinfo_get_peerid(struct ath11k_base *ab,
					     struct hal_rx_mpdu_info *mpdu_info)
{
	return ab->hw_params.hw_ops->mpdu_info_get_peerid(mpdu_info);
}
/* Decode a single monitor-status TLV and accumulate its fields into
 * @ppdu_info (PPDU id, rate info, per-user stats, radiotap HE fields, ...).
 *
 * @tlv_tag:  TLV type (HAL_RX_* / HAL_PHYRX_*)
 * @tlv_data: payload following the TLV header
 * @userid:   user index from the TLV header; per-user stats are only
 *            captured when it is below HAL_MAX_UL_MU_USERS
 *
 * Returns HAL_RX_MON_STATUS_BUF_DONE for a dummy TLV,
 * HAL_RX_MON_STATUS_PPDU_DONE when the PPDU's TLV stream has ended,
 * HAL_RX_MON_STATUS_PPDU_NOT_DONE otherwise.
 */
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
				   struct hal_rx_mon_ppdu_info *ppdu_info,
				   u32 tlv_tag, u8 *tlv_data, u32 userid)
{
	u32 info0, info1, value;
	u8 he_dcm = 0, he_stbc = 0;
	u16 he_gi = 0, he_ltf = 0;

	switch (tlv_tag) {
	case HAL_RX_PPDU_START: {
		struct hal_rx_ppdu_start *ppdu_start =
			(struct hal_rx_ppdu_start *)tlv_data;

		ppdu_info->ppdu_id =
			FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
				  __le32_to_cpu(ppdu_start->info0));
		ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
		ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
		break;
	}
	case HAL_RX_PPDU_END_USER_STATS: {
		struct hal_rx_ppdu_end_user_stats *eu_stats =
			(struct hal_rx_ppdu_end_user_stats *)tlv_data;

		info0 = __le32_to_cpu(eu_stats->info0);
		info1 = __le32_to_cpu(eu_stats->info1);

		ppdu_info->ast_index =
			FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
				  __le32_to_cpu(eu_stats->info2));
		/* ffs() - 1: index of the lowest TID set in the bitmap,
		 * or -1 when the bitmap is empty
		 */
		ppdu_info->tid =
			ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO7_TID_BITMAP,
				      __le32_to_cpu(eu_stats->info7))) - 1;
		ppdu_info->tcp_msdu_count =
			FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT,
				  __le32_to_cpu(eu_stats->info4));
		ppdu_info->udp_msdu_count =
			FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT,
				  __le32_to_cpu(eu_stats->info4));
		ppdu_info->other_msdu_count =
			FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT,
				  __le32_to_cpu(eu_stats->info5));
		ppdu_info->tcp_ack_msdu_count =
			FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT,
				  __le32_to_cpu(eu_stats->info5));
		ppdu_info->preamble_type =
			FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE, info1);
		ppdu_info->num_mpdu_fcs_ok =
			FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK,
				  info1);
		ppdu_info->num_mpdu_fcs_err =
			FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
				  info0);

		/* Flag which PHY flavor the radiotap header should carry */
		switch (ppdu_info->preamble_type) {
		case HAL_RX_PREAMBLE_11N:
			ppdu_info->ht_flags = 1;
			break;
		case HAL_RX_PREAMBLE_11AC:
			ppdu_info->vht_flags = 1;
			break;
		case HAL_RX_PREAMBLE_11AX:
			ppdu_info->he_flags = 1;
			break;
		default:
			break;
		}

		if (userid < HAL_MAX_UL_MU_USERS) {
			struct hal_rx_user_status *rxuser_stats =
				&ppdu_info->userstats;

			ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
			ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
							    rxuser_stats);
		}
		ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
			__le32_to_cpu(eu_stats->rsvd1[0]);
		ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
			__le32_to_cpu(eu_stats->rsvd1[1]);
		break;
	}
	case HAL_RX_PPDU_END_USER_STATS_EXT: {
		/* Upper words of the FCS-ok bitmap (bitmap[2..7]) */
		struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
			(struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;

		ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
		ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
		ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
		ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
		ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
		ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
		break;
	}
	case HAL_PHYRX_HT_SIG: {
		struct hal_rx_ht_sig_info *ht_sig =
			(struct hal_rx_ht_sig_info *)tlv_data;

		info0 = __le32_to_cpu(ht_sig->info0);
		info1 = __le32_to_cpu(ht_sig->info1);

		ppdu_info->mcs = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_MCS, info0);
		ppdu_info->bw = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_BW, info0);
		ppdu_info->is_stbc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_STBC,
					       info1);
		ppdu_info->ldpc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING, info1);
		ppdu_info->gi = info1 & HAL_RX_HT_SIG_INFO_INFO1_GI;

		/* HT MCS 0-31 encodes NSS in groups of 8 */
		switch (ppdu_info->mcs) {
		case 0 ... 7:
			ppdu_info->nss = 1;
			break;
		case 8 ... 15:
			ppdu_info->nss = 2;
			break;
		case 16 ... 23:
			ppdu_info->nss = 3;
			break;
		case 24 ... 31:
			ppdu_info->nss = 4;
			break;
		}

		/* Reduce to the per-stream MCS index */
		if (ppdu_info->nss > 1)
			ppdu_info->mcs = ppdu_info->mcs % 8;

		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
		break;
	}
	case HAL_PHYRX_L_SIG_B: {
		struct hal_rx_lsig_b_info *lsigb =
			(struct hal_rx_lsig_b_info *)tlv_data;

		ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_B_INFO_INFO0_RATE,
					    __le32_to_cpu(lsigb->info0));
		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
		break;
	}
	case HAL_PHYRX_L_SIG_A: {
		struct hal_rx_lsig_a_info *lsiga =
			(struct hal_rx_lsig_a_info *)tlv_data;

		ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_A_INFO_INFO0_RATE,
					    __le32_to_cpu(lsiga->info0));
		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
		break;
	}
	case HAL_PHYRX_VHT_SIG_A: {
		struct hal_rx_vht_sig_a_info *vht_sig =
			(struct hal_rx_vht_sig_a_info *)tlv_data;
		u32 nsts;
		u32 group_id;
		u8 gi_setting;

		info0 = __le32_to_cpu(vht_sig->info0);
		info1 = __le32_to_cpu(vht_sig->info1);

		ppdu_info->ldpc = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING,
					    info1);
		ppdu_info->mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS,
					   info1);
		gi_setting = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING,
				       info1);
		switch (gi_setting) {
		case HAL_RX_VHT_SIG_A_NORMAL_GI:
			ppdu_info->gi = HAL_RX_GI_0_8_US;
			break;
		case HAL_RX_VHT_SIG_A_SHORT_GI:
		case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
			ppdu_info->gi = HAL_RX_GI_0_4_US;
			break;
		}

		ppdu_info->is_stbc = info0 & HAL_RX_VHT_SIG_A_INFO_INFO0_STBC;
		nsts = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS, info0);
		/* With STBC, space-time streams = 2 * spatial streams */
		if (ppdu_info->is_stbc && nsts > 0)
			nsts = ((nsts + 1) >> 1) - 1;

		ppdu_info->nss = (nsts & VHT_SIG_SU_NSS_MASK) + 1;
		ppdu_info->bw = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_BW,
					  info0);
		ppdu_info->beamformed = info1 &
					HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED;
		group_id = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID,
				     info0);
		/* Group id 0/63 identify a single-user transmission */
		if (group_id == 0 || group_id == 63)
			ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
		else
			ppdu_info->reception_type =
				HAL_RX_RECEPTION_TYPE_MU_MIMO;
		ppdu_info->vht_flag_values5 = group_id;
		ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
						  ppdu_info->nss);
		ppdu_info->vht_flag_values2 = ppdu_info->bw;
		ppdu_info->vht_flag_values4 =
			FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
		break;
	}
	case HAL_PHYRX_HE_SIG_A_SU: {
		/* HE-SIG-A for SU PPDUs: fill radiotap HE data1..data6 */
		struct hal_rx_he_sig_a_su_info *he_sig_a =
			(struct hal_rx_he_sig_a_su_info *)tlv_data;

		ppdu_info->he_flags = 1;

		info0 = __le32_to_cpu(he_sig_a->info0);
		info1 = __le32_to_cpu(he_sig_a->info1);

		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
		if (value == 0)
			ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
		else
			ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;

		ppdu_info->he_data1 |=
			IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;

		ppdu_info->he_data2 |=
			IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;

		/* data3 */
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
		ppdu_info->he_data3 =
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);

		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
		ppdu_info->mcs = value;
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);

		he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
		ppdu_info->dcm = he_dcm;
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
		ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
		he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
		ppdu_info->is_stbc = he_stbc;
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);

		/* data4 */
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
		ppdu_info->he_data4 =
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);

		/* data5 */
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
		ppdu_info->bw = value;
		ppdu_info->he_data5 =
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
		/* CP_LTF_SIZE jointly encodes guard interval and LTF size */
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
		switch (value) {
		case 0:
			he_gi = HE_GI_0_8;
			he_ltf = HE_LTF_1_X;
			break;
		case 1:
			he_gi = HE_GI_0_8;
			he_ltf = HE_LTF_2_X;
			break;
		case 2:
			he_gi = HE_GI_1_6;
			he_ltf = HE_LTF_2_X;
			break;
		case 3:
			if (he_dcm && he_stbc) {
				he_gi = HE_GI_0_8;
				he_ltf = HE_LTF_4_X;
			} else {
				he_gi = HE_GI_3_2;
				he_ltf = HE_LTF_4_X;
			}
			break;
		}
		ppdu_info->gi = he_gi;
		he_gi = (he_gi != 0) ? he_gi - 1 : 0;
		ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
		ppdu_info->ltf_size = he_ltf;
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
				   (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
		ppdu_info->beamformed = value;
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);

		/* data6 */
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
		value++;
		ppdu_info->nss = value;
		ppdu_info->he_data6 =
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
		ppdu_info->he_data6 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
		ppdu_info->he_data6 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);

		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
		break;
	}
	case HAL_PHYRX_HE_SIG_A_MU_DL: {
		/* HE-SIG-A for downlink MU PPDUs: HE data + HE-MU flags */
		struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
			(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;

		info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
		info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);

		ppdu_info->he_mu_flags = 1;

		ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
		ppdu_info->he_data1 |=
			IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;

		ppdu_info->he_data2 =
			IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;

		/*data3*/
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
		ppdu_info->he_data3 =
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
		he_stbc = value;
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);

		/*data4*/
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
		ppdu_info->he_data4 =
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);

		/*data5*/
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
		ppdu_info->bw = value;
		ppdu_info->he_data5 =
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
		switch (value) {
		case 0:
			he_gi = HE_GI_0_8;
			he_ltf = HE_LTF_4_X;
			break;
		case 1:
			he_gi = HE_GI_0_8;
			he_ltf = HE_LTF_2_X;
			break;
		case 2:
			he_gi = HE_GI_1_6;
			he_ltf = HE_LTF_2_X;
			break;
		case 3:
			he_gi = HE_GI_3_2;
			he_ltf = HE_LTF_4_X;
			break;
		}
		ppdu_info->gi = he_gi;
		he_gi = (he_gi != 0) ? he_gi - 1 : 0;
		ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
		ppdu_info->ltf_size = he_ltf;
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
				   (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
				  info1);
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
				  info1);
		ppdu_info->he_data5 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);

		/*data6*/
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
				  info0);
		ppdu_info->he_data6 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
		ppdu_info->he_data6 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);

		/* HE-MU Flags */
		/* HE-MU-flags1 */
		ppdu_info->he_flags1 =
			IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
			IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
			IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
			IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
			IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;

		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
		ppdu_info->he_flags1 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
				   value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
		ppdu_info->he_flags1 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
				   value);

		/* HE-MU-flags2 */
		ppdu_info->he_flags2 =
			IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;

		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
		ppdu_info->he_flags2 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
				   value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
		ppdu_info->he_flags2 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
		value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
		value = value - 1;
		ppdu_info->he_flags2 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
				   value);

		ppdu_info->is_stbc = info1 &
				     HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
		break;
	}
	case HAL_PHYRX_HE_SIG_B1_MU: {
		struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
			(struct hal_rx_he_sig_b1_mu_info *)tlv_data;
		u16 ru_tones;

		info0 = __le32_to_cpu(he_sig_b1_mu->info0);

		ru_tones = FIELD_GET(HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION,
				     info0);
		ppdu_info->ru_alloc =
			ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
		ppdu_info->he_RU[0] = ru_tones;
		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
		break;
	}
	case HAL_PHYRX_HE_SIG_B2_MU: {
		/* Per-user HE-SIG-B content for MU-MIMO users */
		struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
			(struct hal_rx_he_sig_b2_mu_info *)tlv_data;

		info0 = __le32_to_cpu(he_sig_b2_mu->info0);

		ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				       IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;

		ppdu_info->mcs =
			FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);

		value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
		ppdu_info->ldpc = value;
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);

		value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
		ppdu_info->he_data4 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);

		ppdu_info->nss =
			FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
		break;
	}
	case HAL_PHYRX_HE_SIG_B2_OFDMA: {
		/* Per-user HE-SIG-B content for OFDMA users */
		struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
			(struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;

		info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);

		ppdu_info->he_data1 |=
			IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
			IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;

		/* HE-data2 */
		ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;

		ppdu_info->mcs =
			FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
				  info0);
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);

		value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
		he_dcm = value;
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);

		value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
		ppdu_info->ldpc = value;
		ppdu_info->he_data3 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);

		/* HE-data4 */
		value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
		ppdu_info->he_data4 |=
			FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);

		ppdu_info->nss =
			FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
				  info0) + 1;
		ppdu_info->beamformed =
			info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
		break;
	}
	case HAL_PHYRX_RSSI_LEGACY: {
		int i;
		bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
				       ab->wmi_ab.svc_map);
		struct hal_rx_phyrx_rssi_legacy_info *rssi =
			(struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;

		/* TODO: Please note that the combined rssi will not be accurate
		 * in MU case. Rssi in MU needs to be retrieved from
		 * PHYRX_OTHER_RECEIVE_INFO TLV.
		 */
		ppdu_info->rssi_comb =
			FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB,
				  __le32_to_cpu(rssi->info0));

		if (db2dbm) {
			/* Per-chain primary-20MHz RSSI, already in dBm */
			for (i = 0; i < ARRAY_SIZE(rssi->preamble); i++) {
				ppdu_info->rssi_chain_pri20[i] =
					le32_get_bits(rssi->preamble[i].rssi_2040,
						      HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20);
			}
		}
		break;
	}
	case HAL_RX_MPDU_START: {
		struct hal_rx_mpdu_info *mpdu_info =
			(struct hal_rx_mpdu_info *)tlv_data;
		u16 peer_id;

		/* Keep the last non-zero peer id seen in this PPDU */
		peer_id = ath11k_hal_rx_mpduinfo_get_peerid(ab, mpdu_info);
		if (peer_id)
			ppdu_info->peer_id = peer_id;
		break;
	}
	case HAL_RXPCU_PPDU_END_INFO: {
		struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
			(struct hal_rx_ppdu_end_duration *)tlv_data;

		ppdu_info->rx_duration =
			FIELD_GET(HAL_RX_PPDU_END_DURATION,
				  __le32_to_cpu(ppdu_rx_duration->info0));
		/* 64-bit TSF assembled from two 32-bit words (hi in rsvd0[1]) */
		ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
		ppdu_info->tsft = (ppdu_info->tsft << 32) |
				  __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
		break;
	}
	case HAL_DUMMY:
		return HAL_RX_MON_STATUS_BUF_DONE;
	case HAL_RX_PPDU_END_STATUS_DONE:
	case 0:
		return HAL_RX_MON_STATUS_PPDU_DONE;
	default:
		break;
	}

	return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
/* Walk the chain of monitor-status TLVs in @skb, feeding each one to
 * ath11k_hal_rx_parse_mon_status_tlv() until the parser reports something
 * other than PPDU_NOT_DONE or the walk would run past DP_RX_BUFFER_SIZE.
 *
 * Returns the last status reported by the TLV parser.
 */
enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
			       struct hal_rx_mon_ppdu_info *ppdu_info,
			       struct sk_buff *skb)
{
	struct hal_tlv_hdr *tlv;
	enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
	u16 tlv_tag;
	u16 tlv_len;
	u32 tlv_userid = 0;
	u8 *ptr = skb->data;

	do {
		tlv = (struct hal_tlv_hdr *)ptr;
		tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
		tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
		tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
		ptr += sizeof(*tlv);

		/* The actual length of PPDU_END is the combined length of many PHY
		 * TLVs that follow. Skip the TLV header and
		 * rx_rxpcu_classification_overview that follows the header to get to
		 * next TLV.
		 */
		if (tlv_tag == HAL_RX_PPDU_END)
			tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);

		hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
								tlv_tag, ptr, tlv_userid);
		ptr += tlv_len;
		ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);	/* TLVs start aligned */

		/* Never walk past the end of the status buffer */
		if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
			break;
	} while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);

	return hal_status;
}
/* Unpack a REO entrance ring descriptor: DMA address, SW cookie, return
 * buffer manager and MSDU count, plus a pointer to the raw buffer-address
 * words for the caller's further use.
 */
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
					 u32 *sw_cookie, void **pp_buf_addr,
					 u8 *rbm, u32 *msdu_cnt)
{
	struct hal_reo_entrance_ring *ent_ring = rx_desc;
	struct rx_mpdu_desc *mpdu_desc =
		(struct rx_mpdu_desc *)&ent_ring->rx_mpdu_info;
	struct ath11k_buffer_addr *binfo =
		(struct ath11k_buffer_addr *)&ent_ring->buf_addr_info;
	u64 paddr_hi, paddr_lo;

	*msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
			      mpdu_desc->info0);

	/* 64-bit buffer address is split across the two info words */
	paddr_lo = FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
	paddr_hi = FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1);
	*paddr = (paddr_hi << 32) | paddr_lo;

	*sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
	*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);

	*pp_buf_addr = (void *)binfo;
}
/*
 * Unpack one SW monitor ring descriptor into @sw_mon_entries:
 * destination and status buffer physical addresses and SW cookies,
 * status buffer count, MSDU count and PPDU id, plus pointers back to
 * the two embedded buffer address info structures.
 */
void
ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
struct hal_sw_mon_ring_entries *sw_mon_entries)
{
struct hal_sw_monitor_ring *sw_mon_ring = rx_desc;
struct ath11k_buffer_addr *buf_addr_info;
struct ath11k_buffer_addr *status_buf_addr_info;
struct rx_mpdu_desc *rx_mpdu_desc_info_details;
rx_mpdu_desc_info_details = &sw_mon_ring->rx_mpdu_info;
sw_mon_entries->msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
rx_mpdu_desc_info_details->info0);
buf_addr_info = &sw_mon_ring->buf_addr_info;
status_buf_addr_info = &sw_mon_ring->status_buf_addr_info;
/* Both physical addresses are split across two info words:
 * high bits in info1, low bits in info0.
 */
sw_mon_entries->mon_dst_paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
buf_addr_info->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
buf_addr_info->info0);
sw_mon_entries->mon_status_paddr =
(((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
status_buf_addr_info->info1)) << 32) |
FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
status_buf_addr_info->info0);
sw_mon_entries->mon_dst_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
buf_addr_info->info1);
sw_mon_entries->mon_status_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
status_buf_addr_info->info1);
sw_mon_entries->status_buf_count = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT,
sw_mon_ring->info0);
/* Keep raw pointers so the caller can recycle the buffer entries. */
sw_mon_entries->dst_buf_addr_info = buf_addr_info;
sw_mon_entries->status_buf_addr_info = status_buf_addr_info;
sw_mon_entries->ppdu_id =
FIELD_GET(HAL_SW_MON_RING_INFO1_PHY_PPDU_ID, sw_mon_ring->info1);
}
|
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018, 2020-2023 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#ifndef __iwl_eeprom_parse_h__
#define __iwl_eeprom_parse_h__
#include <linux/types.h>
#include <linux/if_ether.h>
#include <net/cfg80211.h>
#include "iwl-trans.h"
/*
 * Parsed NVM/EEPROM contents for an iwlwifi device: MAC address,
 * calibration values, SKU capability flags, radio configuration and
 * per-band channel/capability data.
 */
struct iwl_nvm_data {
int n_hw_addrs;
u8 hw_addr[ETH_ALEN];
/* Calibration data as read from NVM. */
u8 calib_version;
__le16 calib_voltage;
__le16 raw_temperature;
__le16 kelvin_temperature;
__le16 kelvin_voltage;
__le16 xtal_calib[2];
/* SKU capability bits: which bands/features this part supports. */
bool sku_cap_band_24ghz_enable;
bool sku_cap_band_52ghz_enable;
bool sku_cap_11n_enable;
bool sku_cap_11ac_enable;
bool sku_cap_11ax_enable;
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
bool sku_cap_mimo_disabled;
bool sku_cap_11be_enable;
/* Radio configuration (type/step/dash/pnum) and antenna masks. */
u16 radio_cfg_type;
u8 radio_cfg_step;
u8 radio_cfg_dash;
u8 radio_cfg_pnum;
u8 valid_tx_ant, valid_rx_ant;
u32 nvm_version;
s8 max_tx_pwr_half_dbm;
bool lar_enabled;
bool vht160_supported;
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
/*
 * iftype data for low (2.4 GHz) high (5 GHz) and uhb (6 GHz) bands
 */
struct {
struct ieee80211_sband_iftype_data low[2];
struct ieee80211_sband_iftype_data high[2];
struct ieee80211_sband_iftype_data uhb[2];
} iftd;
/* Flexible array member: must remain last. */
struct ieee80211_channel channels[];
};
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
int n_channels, enum nl80211_band band);
void iwl_init_ht_hw_capab(struct iwl_trans *trans,
struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
enum nl80211_band band,
u8 tx_chains, u8 rx_chains);
#endif /* __iwl_eeprom_parse_h__ */
|
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MEMAC_H
#define __MEMAC_H
#include "fman_mac.h"
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
struct mac_device;
int memac_initialization(struct mac_device *mac_dev,
struct device_node *mac_node,
struct fman_mac_params *params);
#endif /* __MEMAC_H */
|
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef CIK_STRUCTS_H_
#define CIK_STRUCTS_H_
/*
 * Memory Queue Descriptor (MQD) for a CIK (Sea Islands) compute queue.
 * Field order and count mirror the hardware/firmware layout exactly —
 * do not reorder, insert or remove members.
 */
struct cik_mqd {
uint32_t header;
/* COMPUTE_* dispatch state. */
uint32_t compute_dispatch_initiator;
uint32_t compute_dim_x;
uint32_t compute_dim_y;
uint32_t compute_dim_z;
uint32_t compute_start_x;
uint32_t compute_start_y;
uint32_t compute_start_z;
uint32_t compute_num_thread_x;
uint32_t compute_num_thread_y;
uint32_t compute_num_thread_z;
uint32_t compute_pipelinestat_enable;
uint32_t compute_perfcount_enable;
uint32_t compute_pgm_lo;
uint32_t compute_pgm_hi;
uint32_t compute_tba_lo;
uint32_t compute_tba_hi;
uint32_t compute_tma_lo;
uint32_t compute_tma_hi;
uint32_t compute_pgm_rsrc1;
uint32_t compute_pgm_rsrc2;
uint32_t compute_vmid;
uint32_t compute_resource_limits;
uint32_t compute_static_thread_mgmt_se0;
uint32_t compute_static_thread_mgmt_se1;
uint32_t compute_tmpring_size;
uint32_t compute_static_thread_mgmt_se2;
uint32_t compute_static_thread_mgmt_se3;
uint32_t compute_restart_x;
uint32_t compute_restart_y;
uint32_t compute_restart_z;
uint32_t compute_thread_trace_enable;
uint32_t compute_misc_reserved;
/* COMPUTE_USER_DATA_0..15 (shader user SGPR initial values). */
uint32_t compute_user_data_0;
uint32_t compute_user_data_1;
uint32_t compute_user_data_2;
uint32_t compute_user_data_3;
uint32_t compute_user_data_4;
uint32_t compute_user_data_5;
uint32_t compute_user_data_6;
uint32_t compute_user_data_7;
uint32_t compute_user_data_8;
uint32_t compute_user_data_9;
uint32_t compute_user_data_10;
uint32_t compute_user_data_11;
uint32_t compute_user_data_12;
uint32_t compute_user_data_13;
uint32_t compute_user_data_14;
uint32_t compute_user_data_15;
/* CP / hardware queue descriptor (CP_HQD_*) state. */
uint32_t cp_compute_csinvoc_count_lo;
uint32_t cp_compute_csinvoc_count_hi;
uint32_t cp_mqd_base_addr_lo;
uint32_t cp_mqd_base_addr_hi;
uint32_t cp_hqd_active;
uint32_t cp_hqd_vmid;
uint32_t cp_hqd_persistent_state;
uint32_t cp_hqd_pipe_priority;
uint32_t cp_hqd_queue_priority;
uint32_t cp_hqd_quantum;
uint32_t cp_hqd_pq_base_lo;
uint32_t cp_hqd_pq_base_hi;
uint32_t cp_hqd_pq_rptr;
uint32_t cp_hqd_pq_rptr_report_addr_lo;
uint32_t cp_hqd_pq_rptr_report_addr_hi;
uint32_t cp_hqd_pq_wptr_poll_addr_lo;
uint32_t cp_hqd_pq_wptr_poll_addr_hi;
uint32_t cp_hqd_pq_doorbell_control;
uint32_t cp_hqd_pq_wptr;
uint32_t cp_hqd_pq_control;
uint32_t cp_hqd_ib_base_addr_lo;
uint32_t cp_hqd_ib_base_addr_hi;
uint32_t cp_hqd_ib_rptr;
uint32_t cp_hqd_ib_control;
uint32_t cp_hqd_iq_timer;
uint32_t cp_hqd_iq_rptr;
uint32_t cp_hqd_dequeue_request;
uint32_t cp_hqd_dma_offload;
uint32_t cp_hqd_sema_cmd;
uint32_t cp_hqd_msg_type;
uint32_t cp_hqd_atomic0_preop_lo;
uint32_t cp_hqd_atomic0_preop_hi;
uint32_t cp_hqd_atomic1_preop_lo;
uint32_t cp_hqd_atomic1_preop_hi;
uint32_t cp_hqd_hq_status0;
uint32_t cp_hqd_hq_control0;
uint32_t cp_mqd_control;
uint32_t cp_mqd_query_time_lo;
uint32_t cp_mqd_query_time_hi;
uint32_t cp_mqd_connect_start_time_lo;
uint32_t cp_mqd_connect_start_time_hi;
uint32_t cp_mqd_connect_end_time_lo;
uint32_t cp_mqd_connect_end_time_hi;
uint32_t cp_mqd_connect_end_wf_count;
uint32_t cp_mqd_connect_end_pq_rptr;
uint32_t cp_mqd_connect_end_pq_wptr;
uint32_t cp_mqd_connect_end_ib_rptr;
uint32_t reserved_96;
uint32_t reserved_97;
uint32_t reserved_98;
uint32_t reserved_99;
/* Queue preemption (IQ timer) packet image. */
uint32_t iqtimer_pkt_header;
uint32_t iqtimer_pkt_dw0;
uint32_t iqtimer_pkt_dw1;
uint32_t iqtimer_pkt_dw2;
uint32_t iqtimer_pkt_dw3;
uint32_t iqtimer_pkt_dw4;
uint32_t iqtimer_pkt_dw5;
uint32_t iqtimer_pkt_dw6;
uint32_t reserved_108;
uint32_t reserved_109;
uint32_t reserved_110;
uint32_t reserved_111;
/* Doorbell ids for up to 16 queues. */
uint32_t queue_doorbell_id0;
uint32_t queue_doorbell_id1;
uint32_t queue_doorbell_id2;
uint32_t queue_doorbell_id3;
uint32_t queue_doorbell_id4;
uint32_t queue_doorbell_id5;
uint32_t queue_doorbell_id6;
uint32_t queue_doorbell_id7;
uint32_t queue_doorbell_id8;
uint32_t queue_doorbell_id9;
uint32_t queue_doorbell_id10;
uint32_t queue_doorbell_id11;
uint32_t queue_doorbell_id12;
uint32_t queue_doorbell_id13;
uint32_t queue_doorbell_id14;
uint32_t queue_doorbell_id15;
};
/*
 * CIK SDMA RLC (per-queue) register image. Layout mirrors the
 * hardware register block exactly — do not reorder, insert or remove
 * members; the reserved_* slots pad to the hardware size.
 */
struct cik_sdma_rlc_registers {
uint32_t sdma_rlc_rb_cntl;
uint32_t sdma_rlc_rb_base;
uint32_t sdma_rlc_rb_base_hi;
uint32_t sdma_rlc_rb_rptr;
uint32_t sdma_rlc_rb_wptr;
uint32_t sdma_rlc_rb_wptr_poll_cntl;
uint32_t sdma_rlc_rb_wptr_poll_addr_hi;
uint32_t sdma_rlc_rb_wptr_poll_addr_lo;
uint32_t sdma_rlc_rb_rptr_addr_hi;
uint32_t sdma_rlc_rb_rptr_addr_lo;
uint32_t sdma_rlc_ib_cntl;
uint32_t sdma_rlc_ib_rptr;
uint32_t sdma_rlc_ib_offset;
uint32_t sdma_rlc_ib_base_lo;
uint32_t sdma_rlc_ib_base_hi;
uint32_t sdma_rlc_ib_size;
uint32_t sdma_rlc_skip_cntl;
uint32_t sdma_rlc_context_status;
uint32_t sdma_rlc_doorbell;
uint32_t sdma_rlc_virtual_addr;
uint32_t sdma_rlc_ape1_cntl;
uint32_t sdma_rlc_doorbell_log;
/* Padding up to the hardware block size. */
uint32_t reserved_22;
uint32_t reserved_23;
uint32_t reserved_24;
uint32_t reserved_25;
uint32_t reserved_26;
uint32_t reserved_27;
uint32_t reserved_28;
uint32_t reserved_29;
uint32_t reserved_30;
uint32_t reserved_31;
uint32_t reserved_32;
uint32_t reserved_33;
uint32_t reserved_34;
uint32_t reserved_35;
uint32_t reserved_36;
uint32_t reserved_37;
uint32_t reserved_38;
uint32_t reserved_39;
uint32_t reserved_40;
uint32_t reserved_41;
uint32_t reserved_42;
uint32_t reserved_43;
uint32_t reserved_44;
uint32_t reserved_45;
uint32_t reserved_46;
uint32_t reserved_47;
uint32_t reserved_48;
uint32_t reserved_49;
uint32_t reserved_50;
uint32_t reserved_51;
uint32_t reserved_52;
uint32_t reserved_53;
uint32_t reserved_54;
uint32_t reserved_55;
uint32_t reserved_56;
uint32_t reserved_57;
uint32_t reserved_58;
uint32_t reserved_59;
uint32_t reserved_60;
uint32_t reserved_61;
uint32_t reserved_62;
uint32_t reserved_63;
uint32_t reserved_64;
uint32_t reserved_65;
uint32_t reserved_66;
uint32_t reserved_67;
uint32_t reserved_68;
uint32_t reserved_69;
uint32_t reserved_70;
uint32_t reserved_71;
uint32_t reserved_72;
uint32_t reserved_73;
uint32_t reserved_74;
uint32_t reserved_75;
uint32_t reserved_76;
uint32_t reserved_77;
uint32_t reserved_78;
uint32_t reserved_79;
uint32_t reserved_80;
uint32_t reserved_81;
uint32_t reserved_82;
uint32_t reserved_83;
uint32_t reserved_84;
uint32_t reserved_85;
uint32_t reserved_86;
uint32_t reserved_87;
uint32_t reserved_88;
uint32_t reserved_89;
uint32_t reserved_90;
uint32_t reserved_91;
uint32_t reserved_92;
uint32_t reserved_93;
uint32_t reserved_94;
uint32_t reserved_95;
uint32_t reserved_96;
uint32_t reserved_97;
uint32_t reserved_98;
uint32_t reserved_99;
uint32_t reserved_100;
uint32_t reserved_101;
uint32_t reserved_102;
uint32_t reserved_103;
uint32_t reserved_104;
uint32_t reserved_105;
uint32_t reserved_106;
uint32_t reserved_107;
uint32_t reserved_108;
uint32_t reserved_109;
uint32_t reserved_110;
uint32_t reserved_111;
uint32_t reserved_112;
uint32_t reserved_113;
uint32_t reserved_114;
uint32_t reserved_115;
uint32_t reserved_116;
uint32_t reserved_117;
uint32_t reserved_118;
uint32_t reserved_119;
uint32_t reserved_120;
uint32_t reserved_121;
uint32_t reserved_122;
uint32_t reserved_123;
uint32_t reserved_124;
uint32_t reserved_125;
/* reserved_126,127: repurposed for driver-internal use */
uint32_t sdma_engine_id;
uint32_t sdma_queue_id;
};
#endif /* CIK_STRUCTS_H_ */
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LP8755 High Performance Power Management Unit Driver: System Interface Driver
*
* Copyright (C) 2012 Texas Instruments
*
* Author: Daniel(Geon Si) Jeong <[email protected]>
* G.Shark Jeong <[email protected]>
*/
#ifndef _LP8755_H
#define _LP8755_H
#include <linux/regulator/consumer.h>
#define LP8755_NAME "lp8755-regulator"
/*
*PWR FAULT : power fault detected
*OCP : over current protect activated
*OVP : over voltage protect activated
*TEMP_WARN : thermal warning
*TEMP_SHDN : thermal shutdown detected
*I_LOAD : current measured
*/
#define LP8755_EVENT_PWR_FAULT REGULATOR_EVENT_FAIL
#define LP8755_EVENT_OCP REGULATOR_EVENT_OVER_CURRENT
#define LP8755_EVENT_OVP 0x10000
#define LP8755_EVENT_TEMP_WARN 0x2000
#define LP8755_EVENT_TEMP_SHDN REGULATOR_EVENT_OVER_TEMP
#define LP8755_EVENT_I_LOAD 0x40000
/* Buck converter indices; LP8755_BUCK_MAX is the converter count. */
enum lp8755_bucks {
LP8755_BUCK0 = 0,
LP8755_BUCK1,
LP8755_BUCK2,
LP8755_BUCK3,
LP8755_BUCK4,
LP8755_BUCK5,
LP8755_BUCK_MAX,
};
/**
* multiphase configuration options
*/
enum lp8755_mphase_config {
MPHASE_CONF0,
MPHASE_CONF1,
MPHASE_CONF2,
MPHASE_CONF3,
MPHASE_CONF4,
MPHASE_CONF5,
MPHASE_CONF6,
MPHASE_CONF7,
MPHASE_CONF8,
/* Sentinel: number of valid configurations. */
MPHASE_CONF_MAX
};
/**
 * struct lp8755_platform_data
 * @mphase : Multiphase Switcher Configurations.
 * @buck_data : buck0~5 init voltage in uV
 */
struct lp8755_platform_data {
/* One of enum lp8755_mphase_config. */
int mphase;
/* Per-buck regulator init data; NULL entries leave the buck unused. */
struct regulator_init_data *buck_data[LP8755_BUCK_MAX];
};
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
//
// sma1303.c -- sma1303 ALSA SoC Audio driver
//
// Copyright 2023 Iron Device Corporation
//
// Author: Gyuhwa Park <[email protected]>
// Kiseok Jo <[email protected]>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "sma1303.h"
#define CHECK_PERIOD_TIME 1 /* sec per HZ */
#define MAX_CONTROL_NAME 48
#define PLL_MATCH(_input_clk_name, _output_clk_name, _input_clk,\
_post_n, _n, _vco, _p_cp)\
{\
.input_clk_name = _input_clk_name,\
.output_clk_name = _output_clk_name,\
.input_clk = _input_clk,\
.post_n = _post_n,\
.n = _n,\
.vco = _vco,\
.p_cp = _p_cp,\
}
/* Supported device variants (currently only the SMA1303 itself). */
enum sma1303_type {
SMA1303,
};
/* One row of the PLL configuration table: input clock rate and the
 * divider/VCO/charge-pump register values that produce the output clock.
 */
struct sma1303_pll_match {
char *input_clk_name;
char *output_clk_name;
unsigned int input_clk;
unsigned int post_n;
unsigned int n;
unsigned int vco;
unsigned int p_cp;
};
/* Driver-private state, attached to the component via drvdata. */
struct sma1303_priv {
enum sma1303_type devtype;
struct attribute_group *attr_grp;
/* Periodic fault polling, scheduled from sma1303_startup(). */
struct delayed_work check_fault_work;
struct device *dev;
struct kobject *kobj;
struct regmap *regmap;
struct sma1303_pll_match *pll_matches;
bool amp_power_status;
bool force_mute_status;
int num_of_pll_matches;
/* Retry budget for each regmap read/write/update helper. */
int retry_cnt;
unsigned int amp_mode;
unsigned int cur_vol;
unsigned int format;
unsigned int frame_size;
unsigned int init_vol;
unsigned int last_bclk;
unsigned int last_ocp_val;
unsigned int last_over_temp;
unsigned int rev_num;
unsigned int sys_clk_id;
unsigned int tdm_slot_rx;
unsigned int tdm_slot_tx;
unsigned int tsdw_cnt;
/* Fault polling period (seconds) and enable flag. */
long check_fault_period;
long check_fault_status;
};
/* PLL settings for the supported BCLK/MCLK input rates. */
static struct sma1303_pll_match sma1303_pll_matches[] = {
PLL_MATCH("1.411MHz", "24.595MHz", 1411200, 0x07, 0xF4, 0x8B, 0x03),
PLL_MATCH("1.536MHz", "24.576MHz", 1536000, 0x07, 0xE0, 0x8B, 0x03),
PLL_MATCH("3.072MHz", "24.576MHz", 3072000, 0x07, 0x70, 0x8B, 0x03),
PLL_MATCH("6.144MHz", "24.576MHz", 6144000, 0x07, 0x70, 0x8B, 0x07),
PLL_MATCH("12.288MHz", "24.576MHz", 12288000, 0x07, 0x70, 0x8B, 0x0B),
PLL_MATCH("19.2MHz", "24.343MHz", 19200000, 0x07, 0x47, 0x8B, 0x0A),
PLL_MATCH("24.576MHz", "24.576MHz", 24576000, 0x07, 0x70, 0x8B, 0x0F),
};
static int sma1303_startup(struct snd_soc_component *);
static int sma1303_shutdown(struct snd_soc_component *);
/* Hardware reset defaults for the regmap cache. */
static const struct reg_default sma1303_reg_def[] = {
{ 0x00, 0x80 },
{ 0x01, 0x00 },
{ 0x02, 0x00 },
{ 0x03, 0x11 },
{ 0x04, 0x17 },
{ 0x09, 0x00 },
{ 0x0A, 0x31 },
{ 0x0B, 0x98 },
{ 0x0C, 0x84 },
{ 0x0D, 0x07 },
{ 0x0E, 0x3F },
{ 0x10, 0x00 },
{ 0x11, 0x00 },
{ 0x12, 0x00 },
{ 0x14, 0x5C },
{ 0x15, 0x01 },
{ 0x16, 0x0F },
{ 0x17, 0x0F },
{ 0x18, 0x0F },
{ 0x19, 0x00 },
{ 0x1A, 0x00 },
{ 0x1B, 0x00 },
{ 0x23, 0x19 },
{ 0x24, 0x00 },
{ 0x25, 0x00 },
{ 0x26, 0x04 },
{ 0x33, 0x00 },
{ 0x36, 0x92 },
{ 0x37, 0x27 },
{ 0x3B, 0x5A },
{ 0x3C, 0x20 },
{ 0x3D, 0x00 },
{ 0x3E, 0x03 },
{ 0x3F, 0x0C },
{ 0x8B, 0x07 },
{ 0x8C, 0x70 },
{ 0x8D, 0x8B },
{ 0x8E, 0x6F },
{ 0x8F, 0x03 },
{ 0x90, 0x26 },
{ 0x91, 0x42 },
{ 0x92, 0xE0 },
{ 0x94, 0x35 },
{ 0x95, 0x0C },
{ 0x96, 0x42 },
{ 0x97, 0x95 },
{ 0xA0, 0x00 },
{ 0xA1, 0x3B },
{ 0xA2, 0xC8 },
{ 0xA3, 0x28 },
{ 0xA4, 0x40 },
{ 0xA5, 0x01 },
{ 0xA6, 0x41 },
{ 0xA7, 0x00 },
};
/*
 * Regmap 'readable' predicate: true for every implemented SMA1303
 * register, false for the holes in the map and anything beyond the
 * device index register.
 */
static bool sma1303_readable_register(struct device *dev, unsigned int reg)
{
	if (reg > SMA1303_FF_DEVICE_INDEX)
		return false;

	switch (reg) {
	case SMA1303_00_SYSTEM_CTRL ... SMA1303_04_INPUT1_CTRL4:
	case SMA1303_09_OUTPUT_CTRL ... SMA1303_0E_MUTE_VOL_CTRL:
	case SMA1303_10_SYSTEM_CTRL1 ... SMA1303_12_SYSTEM_CTRL3:
	case SMA1303_14_MODULATOR ... SMA1303_1B_BASS_SPK7:
	case SMA1303_23_COMP_LIM1 ... SMA1303_26_COMP_LIM4:
	case SMA1303_33_SDM_CTRL ... SMA1303_34_OTP_DATA1:
	case SMA1303_36_PROTECTION ... SMA1303_38_OTP_TRM0:
	case SMA1303_3B_TEST1 ... SMA1303_3F_ATEST2:
	case SMA1303_8B_PLL_POST_N ... SMA1303_92_FDPEC_CTRL:
	case SMA1303_94_BOOST_CTRL1 ... SMA1303_97_BOOST_CTRL4:
	case SMA1303_A0_PAD_CTRL0 ... SMA1303_A7_CLK_MON:
	case SMA1303_FA_STATUS1 ... SMA1303_FB_STATUS2:
	case SMA1303_FF_DEVICE_INDEX:
		return true;
	default:
		return false;
	}
}
/*
 * Regmap 'writeable' predicate: the readable set minus the read-only
 * OTP/status/device-index registers.
 */
static bool sma1303_writeable_register(struct device *dev, unsigned int reg)
{
	if (reg > SMA1303_FF_DEVICE_INDEX)
		return false;

	switch (reg) {
	case SMA1303_00_SYSTEM_CTRL ... SMA1303_04_INPUT1_CTRL4:
	case SMA1303_09_OUTPUT_CTRL ... SMA1303_0E_MUTE_VOL_CTRL:
	case SMA1303_10_SYSTEM_CTRL1 ... SMA1303_12_SYSTEM_CTRL3:
	case SMA1303_14_MODULATOR ... SMA1303_1B_BASS_SPK7:
	case SMA1303_23_COMP_LIM1 ... SMA1303_26_COMP_LIM4:
	case SMA1303_33_SDM_CTRL:
	case SMA1303_36_PROTECTION ... SMA1303_37_SLOPE_CTRL:
	case SMA1303_3B_TEST1 ... SMA1303_3F_ATEST2:
	case SMA1303_8B_PLL_POST_N ... SMA1303_92_FDPEC_CTRL:
	case SMA1303_94_BOOST_CTRL1 ... SMA1303_97_BOOST_CTRL4:
	case SMA1303_A0_PAD_CTRL0 ... SMA1303_A7_CLK_MON:
		return true;
	default:
		return false;
	}
}
/*
 * Regmap 'volatile' predicate: status and device-index registers must
 * always be read from hardware, never from the cache.
 */
static bool sma1303_volatile_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case SMA1303_FA_STATUS1 ... SMA1303_FB_STATUS2:
	case SMA1303_FF_DEVICE_INDEX:
		return true;
	default:
		return false;
	}
}
static const DECLARE_TLV_DB_SCALE(sma1303_spk_tlv, -6000, 50, 0);
/*
 * regmap_write() wrapper that retries up to sma1303->retry_cnt times,
 * logging each failed attempt. Returns the last regmap status.
 */
static int sma1303_regmap_write(struct sma1303_priv *sma1303,
				unsigned int reg, unsigned int val)
{
	int attempts = sma1303->retry_cnt;
	int ret = 0;

	while (attempts-- > 0) {
		ret = regmap_write(sma1303->regmap, reg, val);
		if (ret >= 0)
			break;
		dev_err(sma1303->dev,
			"Failed to write [0x%02X]\n", reg);
	}
	return ret;
}
/*
 * regmap_update_bits_check() wrapper with the same retry policy as
 * sma1303_regmap_write(); *change reports whether the register value
 * actually changed.
 */
static int sma1303_regmap_update_bits(struct sma1303_priv *sma1303,
	unsigned int reg, unsigned int mask, unsigned int val, bool *change)
{
	int attempts = sma1303->retry_cnt;
	int ret = 0;

	while (attempts-- > 0) {
		ret = regmap_update_bits_check(sma1303->regmap, reg,
					       mask, val, change);
		if (ret >= 0)
			break;
		dev_err(sma1303->dev,
			"Failed to update [0x%02X]\n", reg);
	}
	return ret;
}
/*
 * regmap_read() wrapper with the same retry policy as
 * sma1303_regmap_write().
 */
static int sma1303_regmap_read(struct sma1303_priv *sma1303,
			       unsigned int reg, unsigned int *val)
{
	int attempts = sma1303->retry_cnt;
	int ret = 0;

	while (attempts-- > 0) {
		ret = regmap_read(sma1303->regmap, reg, val);
		if (ret >= 0)
			break;
		dev_err(sma1303->dev,
			"Failed to read [0x%02X]\n", reg);
	}
	return ret;
}
/* Mux option labels and the soc_enum descriptors built from them. */
static const char * const sma1303_aif_in_source_text[] = {
"Mono", "Left", "Right"};
static const char * const sma1303_aif_out_source_text[] = {
"Disable", "After_FmtC", "After_Mixer", "After_DSP", "After_Post",
"Clk_PLL", "Clk_OSC"};
static const char * const sma1303_tdm_slot_text[] = {
"Slot0", "Slot1", "Slot2", "Slot3",
"Slot4", "Slot5", "Slot6", "Slot7"};
static const struct soc_enum sma1303_aif_in_source_enum =
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1303_aif_in_source_text),
sma1303_aif_in_source_text);
static const struct soc_enum sma1303_aif_out_source_enum =
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1303_aif_out_source_text),
sma1303_aif_out_source_text);
static const struct soc_enum sma1303_tdm_slot_enum =
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1303_tdm_slot_text),
sma1303_tdm_slot_text);
static int sma1303_force_mute_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component =
snd_soc_kcontrol_component(kcontrol);
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = (int)sma1303->force_mute_status;
dev_dbg(sma1303->dev, "%s : Force Mute %s\n", __func__,
sma1303->force_mute_status ? "ON" : "OFF");
return 0;
}
static int sma1303_force_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component =
snd_soc_kcontrol_component(kcontrol);
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
bool change = false, val = (bool)ucontrol->value.integer.value[0];
if (sma1303->force_mute_status == val)
change = false;
else {
change = true;
sma1303->force_mute_status = val;
}
dev_dbg(sma1303->dev, "%s : Force Mute %s\n", __func__,
sma1303->force_mute_status ? "ON" : "OFF");
return change;
}
/* Read the post-scaler gain field (bits [6:1] of reg 0x90). */
static int sma1303_postscaler_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
	int raw, ret;

	ret = sma1303_regmap_read(sma1303, SMA1303_90_POSTSCALER, &raw);
	if (ret < 0)
		return -EINVAL;

	ucontrol->value.integer.value[0] = (raw & 0x7E) >> 1;

	return 0;
}
/* Write the post-scaler gain field (bits [6:1] of reg 0x90). */
static int sma1303_postscaler_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
	int gain = (int)ucontrol->value.integer.value[0];
	bool changed;
	int ret;

	ret = sma1303_regmap_update_bits(sma1303, SMA1303_90_POSTSCALER,
					 0x7E, (gain << 1), &changed);
	if (ret < 0)
		return -EINVAL;

	return changed;
}
/* Read the RX TDM slot selection (bits [5:3] of TDM1) and cache it. */
static int sma1303_tdm_slot_rx_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
	int raw, ret;

	ret = sma1303_regmap_read(sma1303, SMA1303_A5_TDM1, &raw);
	if (ret < 0)
		return -EINVAL;

	sma1303->tdm_slot_rx = (raw & 0x38) >> 3;
	ucontrol->value.integer.value[0] = sma1303->tdm_slot_rx;

	return 0;
}
/* Write the RX TDM slot selection (bits [5:3] of TDM1). */
static int sma1303_tdm_slot_rx_put(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
	int slot = (int)ucontrol->value.integer.value[0];
	bool changed;
	int ret;

	ret = sma1303_regmap_update_bits(sma1303, SMA1303_A5_TDM1,
					 0x38, (slot << 3), &changed);
	if (ret < 0)
		return -EINVAL;

	return changed;
}
/* Read the TX TDM slot selection (bits [5:3] of TDM2) and cache it. */
static int sma1303_tdm_slot_tx_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
	int raw, ret;

	ret = sma1303_regmap_read(sma1303, SMA1303_A6_TDM2, &raw);
	if (ret < 0)
		return -EINVAL;

	sma1303->tdm_slot_tx = (raw & 0x38) >> 3;
	ucontrol->value.integer.value[0] = sma1303->tdm_slot_tx;

	return 0;
}
/* Write the TX TDM slot selection (bits [5:3] of TDM2). */
static int sma1303_tdm_slot_tx_put(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
	int slot = (int)ucontrol->value.integer.value[0];
	bool changed;
	int ret;

	ret = sma1303_regmap_update_bits(sma1303, SMA1303_A6_TDM2,
					 0x38, (slot << 3), &changed);
	if (ret < 0)
		return -EINVAL;

	return changed;
}
/*
 * Power-up sequence: enable the PLL, power the chip on, select
 * mono/stereo speaker mode, then (optionally) start periodic fault
 * polling. Returns 1 if any register actually changed, 0 otherwise.
 */
static int sma1303_startup(struct snd_soc_component *component)
{
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
bool change = false, temp = false;
/* Bring the PLL out of power-down first. */
sma1303_regmap_update_bits(sma1303, SMA1303_8E_PLL_CTRL,
SMA1303_PLL_PD2_MASK, SMA1303_PLL_OPERATION2, &temp);
if (temp == true)
change = true;
sma1303_regmap_update_bits(sma1303, SMA1303_00_SYSTEM_CTRL,
SMA1303_POWER_MASK, SMA1303_POWER_ON, &temp);
if (temp == true)
change = true;
/* Speaker mode follows the mux selection made in aif_in_event. */
if (sma1303->amp_mode == SMA1303_MONO) {
sma1303_regmap_update_bits(sma1303,
SMA1303_10_SYSTEM_CTRL1,
SMA1303_SPK_MODE_MASK,
SMA1303_SPK_MONO,
&temp);
if (temp == true)
change = true;
} else {
sma1303_regmap_update_bits(sma1303,
SMA1303_10_SYSTEM_CTRL1,
SMA1303_SPK_MODE_MASK,
SMA1303_SPK_STEREO,
&temp);
if (temp == true)
change = true;
}
/* Kick off fault polling; fall back to the default period when the
 * configured one is not positive.
 */
if (sma1303->check_fault_status) {
if (sma1303->check_fault_period > 0)
queue_delayed_work(system_freezable_wq,
&sma1303->check_fault_work,
sma1303->check_fault_period * HZ);
else
queue_delayed_work(system_freezable_wq,
&sma1303->check_fault_work,
CHECK_PERIOD_TIME * HZ);
}
sma1303->amp_power_status = true;
return change;
}
/*
 * Power-down sequence (reverse of sma1303_startup): stop fault
 * polling, turn the speaker path off, power the chip down, then put
 * the PLL into power-down. Returns 1 if any register changed.
 */
static int sma1303_shutdown(struct snd_soc_component *component)
{
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
bool change = false, temp = false;
cancel_delayed_work_sync(&sma1303->check_fault_work);
sma1303_regmap_update_bits(sma1303, SMA1303_10_SYSTEM_CTRL1,
SMA1303_SPK_MODE_MASK, SMA1303_SPK_OFF, &temp);
if (temp == true)
change = true;
sma1303_regmap_update_bits(sma1303, SMA1303_00_SYSTEM_CTRL,
SMA1303_POWER_MASK, SMA1303_POWER_OFF, &temp);
if (temp == true)
change = true;
sma1303_regmap_update_bits(sma1303, SMA1303_8E_PLL_CTRL,
SMA1303_PLL_PD2_MASK, SMA1303_PLL_PD2, &temp);
if (temp == true)
change = true;
sma1303->amp_power_status = false;
return change;
}
/*
 * DAPM event for the AIF IN mux. On PRE_PMU, program the mono-mix and
 * L/R swap bits according to the selected source (0=Mono, 1=Left,
 * 2=Right) and record the resulting amp mode for sma1303_startup().
 * Returns 1 if any register changed, 0 if not, -EINVAL on error.
 */
static int sma1303_aif_in_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
int ret = 0;
bool change = false, temp = false;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
switch (mux) {
case 0:
/* Mono: mix L+R. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_11_SYSTEM_CTRL2,
SMA1303_MONOMIX_MASK,
SMA1303_MONOMIX_ON,
&change);
sma1303->amp_mode = SMA1303_MONO;
break;
case 1:
/* Left: stereo path, normal L/R order. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_11_SYSTEM_CTRL2,
SMA1303_MONOMIX_MASK,
SMA1303_MONOMIX_OFF,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_11_SYSTEM_CTRL2,
SMA1303_LR_DATA_SW_MASK,
SMA1303_LR_DATA_SW_NORMAL,
&temp);
if (temp == true)
change = true;
sma1303->amp_mode = SMA1303_STEREO;
break;
case 2:
/* Right: stereo path with L/R swapped. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_11_SYSTEM_CTRL2,
SMA1303_MONOMIX_MASK,
SMA1303_MONOMIX_OFF,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_11_SYSTEM_CTRL2,
SMA1303_LR_DATA_SW_MASK,
SMA1303_LR_DATA_SW_SWAP,
&temp);
if (temp == true)
change = true;
sma1303->amp_mode = SMA1303_STEREO;
break;
default:
dev_err(sma1303->dev, "%s : Invalid value (%d)\n",
__func__, mux);
return -EINVAL;
}
dev_dbg(sma1303->dev, "%s : Source : %s\n", __func__,
sma1303_aif_in_source_text[mux]);
break;
}
if (ret < 0)
return -EINVAL;
return change;
}
/*
 * DAPM event for the AIF OUT (SDO) mux. On PRE_PMU, route the SDO pin
 * per the selected source (see sma1303_aif_out_source_text): cases
 * 0-4 select a data tap with the test-clock output disabled; cases
 * 5-6 instead drive the PLL or OSC clock out on SDO.
 * Returns 1 if any register changed, 0 if not, -EINVAL on error.
 */
static int sma1303_aif_out_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
unsigned int mux = dapm_kcontrol_get_value(w->kcontrols[0]);
int ret = 0;
bool change = false, temp = false;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
switch (mux) {
case 0:
/* Disable: normal SDO, output select off. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_TEST_CLKO_EN_MASK,
SMA1303_NORMAL_SDO,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_09_OUTPUT_CTRL,
SMA1303_PORT_OUT_SEL_MASK,
SMA1303_OUT_SEL_DISABLE,
&temp);
if (temp == true)
change = true;
break;
case 1:
/* Tap after the format converter. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_TEST_CLKO_EN_MASK,
SMA1303_NORMAL_SDO,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_09_OUTPUT_CTRL,
SMA1303_PORT_OUT_SEL_MASK,
SMA1303_FORMAT_CONVERTER,
&temp);
if (temp == true)
change = true;
break;
case 2:
/* Tap after the mixer. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_TEST_CLKO_EN_MASK,
SMA1303_NORMAL_SDO,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_09_OUTPUT_CTRL,
SMA1303_PORT_OUT_SEL_MASK,
SMA1303_MIXER_OUTPUT,
&temp);
if (temp == true)
change = true;
break;
case 3:
/* Tap after the DSP / speaker path. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_TEST_CLKO_EN_MASK,
SMA1303_NORMAL_SDO,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_09_OUTPUT_CTRL,
SMA1303_PORT_OUT_SEL_MASK,
SMA1303_SPEAKER_PATH,
&temp);
if (temp == true)
change = true;
break;
case 4:
/* Tap after the post-scaler. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_TEST_CLKO_EN_MASK,
SMA1303_NORMAL_SDO,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_09_OUTPUT_CTRL,
SMA1303_PORT_OUT_SEL_MASK,
SMA1303_POSTSCALER_OUTPUT,
&temp);
if (temp == true)
change = true;
break;
case 5:
/* Clock monitor: PLL clock out on SDO. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_TEST_CLKO_EN_MASK,
SMA1303_CLK_OUT_SDO,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_MON_OSC_PLL_MASK,
SMA1303_PLL_SDO,
&temp);
if (temp == true)
change = true;
break;
case 6:
/* Clock monitor: oscillator clock out on SDO. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_TEST_CLKO_EN_MASK,
SMA1303_CLK_OUT_SDO,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_MON_OSC_PLL_MASK,
SMA1303_OSC_SDO,
&temp);
if (temp == true)
change = true;
break;
default:
dev_err(sma1303->dev, "%s : Invalid value (%d)\n",
__func__, mux);
return -EINVAL;
}
dev_dbg(sma1303->dev, "%s : Source : %s\n", __func__,
sma1303_aif_out_source_text[mux]);
break;
}
if (ret < 0)
return -EINVAL;
return change;
}
/*
 * DAPM event for the SDO output switch: on PRE_PMU enable the output
 * port and drive SDO normally; on POST_PMD revert to input-only and
 * put SDO in high-Z. Returns 1 if any register changed, 0 if not,
 * -EINVAL on error.
 */
static int sma1303_sdo_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
int ret = 0;
bool change = false, temp = false;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
dev_dbg(sma1303->dev,
"%s : SND_SOC_DAPM_PRE_PMU\n", __func__);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_09_OUTPUT_CTRL,
SMA1303_PORT_CONFIG_MASK,
SMA1303_OUTPUT_PORT_ENABLE,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_SDO_OUTPUT_MASK,
SMA1303_NORMAL_OUT,
&temp);
if (temp == true)
change = true;
break;
case SND_SOC_DAPM_POST_PMD:
dev_dbg(sma1303->dev,
"%s : SND_SOC_DAPM_POST_PMD\n", __func__);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_09_OUTPUT_CTRL,
SMA1303_PORT_CONFIG_MASK,
SMA1303_INPUT_PORT_ONLY,
&temp);
if (temp == true)
change = true;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A3_TOP_MAN2,
SMA1303_SDO_OUTPUT_MASK,
SMA1303_HIGH_Z_OUT,
&temp);
if (temp == true)
change = true;
break;
}
if (ret < 0)
return -EINVAL;
return change;
}
static int sma1303_post_scaler_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
int ret = 0;
bool change = false;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
dev_dbg(sma1303->dev,
"%s : SND_SOC_DAPM_PRE_PMU\n", __func__);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_90_POSTSCALER,
SMA1303_BYP_POST_MASK,
SMA1303_EN_POST_SCALER,
&change);
break;
case SND_SOC_DAPM_POST_PMD:
dev_dbg(sma1303->dev,
"%s : SND_SOC_DAPM_POST_PMD\n", __func__);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_90_POSTSCALER,
SMA1303_BYP_POST_MASK,
SMA1303_BYP_POST_SCALER,
&change);
break;
}
if (ret < 0)
return -EINVAL;
return change;
}
/*
 * DAPM event callback for the "AMP Power" widget: run the amplifier
 * start-up sequence after power-up and the shutdown sequence before
 * power-down.  Other events are ignored (return 0).
 */
static int sma1303_power_event(struct snd_soc_dapm_widget *w,
		struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_component *component =
			snd_soc_dapm_to_component(w->dapm);
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		dev_dbg(sma1303->dev,
				"%s : SND_SOC_DAPM_POST_PMU\n", __func__);
		return sma1303_startup(component);
	case SND_SOC_DAPM_PRE_PMD:
		dev_dbg(sma1303->dev,
				"%s : SND_SOC_DAPM_PRE_PMD\n", __func__);
		return sma1303_shutdown(component);
	default:
		return 0;
	}
}
/* Mux control selecting the amplifier input channel (mono/left/right). */
static const struct snd_kcontrol_new sma1303_aif_in_source_control =
SOC_DAPM_ENUM("AIF IN Source", sma1303_aif_in_source_enum);
/* Mux control selecting what is driven out on the SDO pin. */
static const struct snd_kcontrol_new sma1303_aif_out_source_control =
SOC_DAPM_ENUM("AIF OUT Source", sma1303_aif_out_source_enum);
/* Virtual on/off switches (no hardware register behind them). */
static const struct snd_kcontrol_new sma1303_sdo_control =
SOC_DAPM_SINGLE_VIRT("Switch", 1);
static const struct snd_kcontrol_new sma1303_post_scaler_control =
SOC_DAPM_SINGLE_VIRT("Switch", 1);
static const struct snd_kcontrol_new sma1303_enable_control =
SOC_DAPM_SINGLE_VIRT("Switch", 1);
/* Mixer controls exported to userspace. */
static const struct snd_kcontrol_new sma1303_snd_controls[] = {
/* Volume register is inverted: 0 is maximum, 167 is minimum. */
SOC_SINGLE_TLV("Speaker Volume", SMA1303_0A_SPK_VOL,
0, 167, 1, sma1303_spk_tlv),
SOC_SINGLE_BOOL_EXT("Force Mute Switch", 0,
sma1303_force_mute_get, sma1303_force_mute_put),
SOC_SINGLE_EXT("Postscaler Gain", SMA1303_90_POSTSCALER, 1, 0x30, 0,
sma1303_postscaler_get, sma1303_postscaler_put),
/* TDM slot positions used by sma1303_dai_set_tdm_slot(). */
SOC_ENUM_EXT("TDM RX Slot Position", sma1303_tdm_slot_enum,
sma1303_tdm_slot_rx_get, sma1303_tdm_slot_rx_put),
SOC_ENUM_EXT("TDM TX Slot Position", sma1303_tdm_slot_enum,
sma1303_tdm_slot_tx_get, sma1303_tdm_slot_tx_put),
};
/*
 * DAPM widgets: input-source mux, optional SDO monitor output,
 * optional post scaler, and the amplifier power/enable chain.
 * All widgets are virtual (SND_SOC_NOPM); hardware is driven from
 * the event callbacks.
 */
static const struct snd_soc_dapm_widget sma1303_dapm_widgets[] = {
/* platform domain */
SND_SOC_DAPM_OUTPUT("SPK"),
SND_SOC_DAPM_INPUT("SDO"),
/* path domain */
SND_SOC_DAPM_MUX_E("AIF IN Source", SND_SOC_NOPM, 0, 0,
&sma1303_aif_in_source_control,
sma1303_aif_in_event,
SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_MUX_E("AIF OUT Source", SND_SOC_NOPM, 0, 0,
&sma1303_aif_out_source_control,
sma1303_aif_out_event,
SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_SWITCH_E("SDO Enable", SND_SOC_NOPM, 0, 0,
&sma1303_sdo_control,
sma1303_sdo_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MIXER("Entry", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_SWITCH_E("Post Scaler", SND_SOC_NOPM, 0, 1,
&sma1303_post_scaler_control,
sma1303_post_scaler_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_OUT_DRV_E("AMP Power", SND_SOC_NOPM, 0, 0, NULL, 0,
sma1303_power_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SWITCH("AMP Enable", SND_SOC_NOPM, 0, 1,
&sma1303_enable_control),
/* stream domain */
SND_SOC_DAPM_AIF_IN("AIF IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("AIF OUT", "Capture", 0, SND_SOC_NOPM, 0, 0),
};
/*
 * DAPM routes.  Playback feeds "Entry" from the input mux (and the
 * optional SDO monitor path), then runs through the optional post
 * scaler into the amplifier power/enable chain; capture taps the
 * signal via "AIF OUT".
 */
static const struct snd_soc_dapm_route sma1303_audio_map[] = {
	/* Playback */
	{"AIF IN Source", "Mono", "AIF IN"},
	{"AIF IN Source", "Left", "AIF IN"},
	{"AIF IN Source", "Right", "AIF IN"},
	{"SDO Enable", "Switch", "AIF IN"},
	{"AIF OUT Source", "Disable", "SDO Enable"},
	{"AIF OUT Source", "After_FmtC", "SDO Enable"},
	{"AIF OUT Source", "After_Mixer", "SDO Enable"},
	{"AIF OUT Source", "After_DSP", "SDO Enable"},
	{"AIF OUT Source", "After_Post", "SDO Enable"},
	{"AIF OUT Source", "Clk_PLL", "SDO Enable"},
	{"AIF OUT Source", "Clk_OSC", "SDO Enable"},
	{"Entry", NULL, "AIF OUT Source"},
	{"Entry", NULL, "AIF IN Source"},
	{"Post Scaler", "Switch", "Entry"},
	{"AMP Power", NULL, "Entry"},
	/*
	 * Fix: this entry was a duplicate of the route above, which
	 * left the "Post Scaler" switch without a sink and thus
	 * nonfunctional; route the scaled path into the amp as well.
	 */
	{"AMP Power", NULL, "Post Scaler"},
	{"AMP Enable", "Switch", "AMP Power"},
	{"SPK", NULL, "AMP Enable"},
	/* Capture */
	{"AIF OUT", NULL, "AMP Enable"},
};
/*
 * Program the PLL dividers for the given BCLK rate.
 *
 * Looks the rate up in the driver's PLL parameter table and writes the
 * post-N/N/VCO/P-CP registers.  When the clock source is BCLK, the PLL
 * is also powered up with SCK (BCLK) selected as its reference.
 * Returns 0 on success, -EINVAL when the rate is not in the table
 * (BCLK case) or any register write failed.
 *
 * NOTE(review): when sys_clk_id is neither MCLK nor BCLK, the table
 * index stays 0 and the first table entry is written anyway -
 * presumably an intentional fallback; confirm against the datasheet.
 */
static int sma1303_setup_pll(struct snd_soc_component *component,
unsigned int bclk)
{
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
int i = 0, ret = 0;
dev_dbg(component->dev, "%s : BCLK = %dHz\n",
__func__, bclk);
if (sma1303->sys_clk_id == SMA1303_PLL_CLKIN_MCLK) {
dev_dbg(component->dev, "%s : MCLK is not supported\n",
__func__);
} else if (sma1303->sys_clk_id == SMA1303_PLL_CLKIN_BCLK) {
/* Find the table entry matching the incoming BCLK rate. */
for (i = 0; i < sma1303->num_of_pll_matches; i++) {
if (sma1303->pll_matches[i].input_clk == bclk)
break;
}
if (i == sma1303->num_of_pll_matches) {
dev_dbg(component->dev, "%s : No matching value between pll table and SCK\n",
__func__);
return -EINVAL;
}
/* Power the PLL and select SCK (BCLK) as its reference. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A2_TOP_MAN1,
SMA1303_PLL_PD_MASK|SMA1303_PLL_REF_CLK_MASK,
SMA1303_PLL_OPERATION|SMA1303_PLL_SCK,
NULL);
}
/* Write the divider set selected above. */
ret += sma1303_regmap_write(sma1303,
SMA1303_8B_PLL_POST_N,
sma1303->pll_matches[i].post_n);
ret += sma1303_regmap_write(sma1303,
SMA1303_8C_PLL_N,
sma1303->pll_matches[i].n);
ret += sma1303_regmap_write(sma1303,
SMA1303_8D_PLL_A_SETTING,
sma1303->pll_matches[i].vco);
ret += sma1303_regmap_write(sma1303,
SMA1303_8F_PLL_P_CP,
sma1303->pll_matches[i].p_cp);
if (ret < 0)
return -EINVAL;
return 0;
}
/*
 * DAI hw_params: derive BCLK, reconfigure the PLL if needed, and
 * program sample-rate, SCK-rate and interface-format registers.
 *
 * For playback the sample rate selects DAC down-conversion and channel
 * polarity (192 kHz is handled specially); for capture the sample
 * width selects the SCK rate (32fs/64fs).  Finally the I2S/LJ/RJ/TDM
 * input and output formats recorded by set_fmt are applied.
 */
static int sma1303_dai_hw_params_amp(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
unsigned int bclk = 0;
int ret = 0;
/* In DSP/TDM mode the frame size comes from set_tdm_slot(). */
if (sma1303->format == SND_SOC_DAIFMT_DSP_A)
bclk = params_rate(params) * sma1303->frame_size;
else
bclk = params_rate(params) * params_physical_width(params)
* params_channels(params);
dev_dbg(component->dev,
"%s : rate = %d : bit size = %d : channel = %d\n",
__func__, params_rate(params), params_width(params),
params_channels(params));
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* Re-run PLL setup only when BCLK actually changed. */
if (sma1303->sys_clk_id == SMA1303_PLL_CLKIN_BCLK) {
if (sma1303->last_bclk != bclk) {
sma1303_setup_pll(component, bclk);
sma1303->last_bclk = bclk;
}
}
switch (params_rate(params)) {
case 8000:
case 12000:
case 16000:
case 24000:
case 32000:
case 44100:
case 48000:
case 96000:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A2_TOP_MAN1,
SMA1303_DAC_DN_CONV_MASK,
SMA1303_DAC_DN_CONV_DISABLE,
NULL);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_LEFTPOL_MASK,
SMA1303_LOW_FIRST_CH,
NULL);
break;
/* 192 kHz: enable DAC down-conversion and flip channel polarity. */
case 192000:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A2_TOP_MAN1,
SMA1303_DAC_DN_CONV_MASK,
SMA1303_DAC_DN_CONV_ENABLE,
NULL);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_LEFTPOL_MASK,
SMA1303_HIGH_FIRST_CH,
NULL);
break;
default:
dev_err(component->dev, "%s not support rate : %d\n",
__func__, params_rate(params));
return -EINVAL;
}
} else {
/* Capture: pick SCK rate (32fs/64fs) from the sample format. */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
dev_dbg(component->dev,
"%s set format SNDRV_PCM_FORMAT_S16_LE\n",
__func__);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A4_TOP_MAN3,
SMA1303_SCK_RATE_MASK,
SMA1303_SCK_32FS,
NULL);
break;
case SNDRV_PCM_FORMAT_S24_LE:
dev_dbg(component->dev,
"%s set format SNDRV_PCM_FORMAT_S24_LE\n",
__func__);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A4_TOP_MAN3,
SMA1303_SCK_RATE_MASK,
SMA1303_SCK_64FS,
NULL);
break;
case SNDRV_PCM_FORMAT_S32_LE:
dev_dbg(component->dev,
"%s set format SNDRV_PCM_FORMAT_S32_LE\n",
__func__);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A4_TOP_MAN3,
SMA1303_SCK_RATE_MASK,
SMA1303_SCK_64FS,
NULL);
break;
default:
dev_err(component->dev,
"%s not support data bit : %d\n", __func__,
params_format(params));
return -EINVAL;
}
}
/* Apply the interface format recorded in set_fmt(). */
switch (sma1303->format) {
case SND_SOC_DAIFMT_I2S:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_I2S_MODE_MASK,
SMA1303_STANDARD_I2S,
NULL);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A4_TOP_MAN3,
SMA1303_O_FORMAT_MASK,
SMA1303_O_FMT_I2S,
NULL);
break;
case SND_SOC_DAIFMT_LEFT_J:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_I2S_MODE_MASK,
SMA1303_LJ,
NULL);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A4_TOP_MAN3,
SMA1303_O_FORMAT_MASK,
SMA1303_O_FMT_LJ,
NULL);
break;
case SND_SOC_DAIFMT_RIGHT_J:
/* Right-justified mode depends on the sample width. */
switch (params_width(params)) {
case 16:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_I2S_MODE_MASK,
SMA1303_RJ_16BIT,
NULL);
break;
case 24:
case 32:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_I2S_MODE_MASK,
SMA1303_RJ_24BIT,
NULL);
break;
}
break;
case SND_SOC_DAIFMT_DSP_A:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_I2S_MODE_MASK,
SMA1303_STANDARD_I2S,
NULL);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A4_TOP_MAN3,
SMA1303_O_FORMAT_MASK,
SMA1303_O_FMT_TDM,
NULL);
break;
}
/* Only 16/24/32-bit samples are supported. */
switch (params_width(params)) {
case 16:
case 24:
case 32:
break;
default:
dev_err(component->dev,
"%s not support data bit : %d\n", __func__,
params_format(params));
return -EINVAL;
}
if (ret < 0)
return -EINVAL;
return 0;
}
/*
 * DAI set_sysclk: record the selected system-clock source.  Only the
 * IDs known to this driver are accepted; the frequency and direction
 * arguments are unused.
 */
static int sma1303_dai_set_sysclk_amp(struct snd_soc_dai *dai,
		int clk_id, unsigned int freq, int dir)
{
	struct snd_soc_component *component = dai->component;
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);

	switch (clk_id) {
	case SMA1303_EXTERNAL_CLOCK_19_2:
	case SMA1303_EXTERNAL_CLOCK_24_576:
	case SMA1303_PLL_CLKIN_MCLK:
	case SMA1303_PLL_CLKIN_BCLK:
		sma1303->sys_clk_id = clk_id;
		return 0;
	default:
		dev_err(component->dev, "Invalid clk id: %d\n", clk_id);
		return -EINVAL;
	}
}
/*
 * DAI mute_stream: mute or unmute the speaker path.  Capture streams
 * are ignored.  An unmute request is skipped while userspace has
 * force-mute engaged.
 */
static int sma1303_dai_mute(struct snd_soc_dai *dai, int mute, int stream)
{
	struct snd_soc_component *component = dai->component;
	struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
	int err = 0;

	if (stream == SNDRV_PCM_STREAM_CAPTURE)
		return 0;

	if (mute) {
		dev_dbg(component->dev, "%s : %s\n", __func__, "MUTE");
		err = sma1303_regmap_update_bits(sma1303,
				SMA1303_0E_MUTE_VOL_CTRL,
				SMA1303_SPK_MUTE_MASK,
				SMA1303_SPK_MUTE,
				NULL);
		/* Need to wait time for mute slope */
		msleep(55);
	} else if (sma1303->force_mute_status) {
		dev_dbg(sma1303->dev,
				"%s : FORCE MUTE!!!\n", __func__);
	} else {
		dev_dbg(component->dev, "%s : %s\n",
				__func__, "UNMUTE");
		err = sma1303_regmap_update_bits(sma1303,
				SMA1303_0E_MUTE_VOL_CTRL,
				SMA1303_SPK_MUTE_MASK,
				SMA1303_SPK_UNMUTE,
				NULL);
	}

	if (err < 0)
		return -EINVAL;

	return 0;
}
/*
 * DAI set_fmt: configure the controller/device role, record the audio
 * interface format for later use in hw_params(), and program the
 * BCLK/frame-clock inversion bits.
 */
static int sma1303_dai_set_fmt_amp(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct snd_soc_component *component = dai->component;
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
int ret = 0;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBC_CFC:
dev_dbg(component->dev,
"%s : %s\n", __func__, "I2S/TDM Device mode");
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_CONTROLLER_DEVICE_MASK,
SMA1303_DEVICE_MODE,
NULL);
break;
case SND_SOC_DAIFMT_CBP_CFP:
dev_dbg(component->dev,
"%s : %s\n", __func__, "I2S/TDM Controller mode");
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_CONTROLLER_DEVICE_MASK,
SMA1303_CONTROLLER_MODE,
NULL);
break;
default:
dev_err(component->dev,
"Unsupported Controller/Device : 0x%x\n", fmt);
return -EINVAL;
}
/* The format itself is only recorded here; hw_params() applies it. */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
case SND_SOC_DAIFMT_RIGHT_J:
case SND_SOC_DAIFMT_LEFT_J:
case SND_SOC_DAIFMT_DSP_A:
case SND_SOC_DAIFMT_DSP_B:
sma1303->format = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
break;
default:
dev_err(component->dev,
"Unsupported Audio Interface Format : 0x%x\n", fmt);
return -EINVAL;
}
/* Bit-clock / frame-clock inversion handling. */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_IB_NF:
dev_dbg(component->dev, "%s : %s\n",
__func__, "Invert BCLK + Normal Frame");
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_SCK_RISING_MASK,
SMA1303_SCK_RISING_EDGE,
NULL);
break;
case SND_SOC_DAIFMT_IB_IF:
dev_dbg(component->dev, "%s : %s\n",
__func__, "Invert BCLK + Invert Frame");
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_LEFTPOL_MASK|SMA1303_SCK_RISING_MASK,
SMA1303_HIGH_FIRST_CH|SMA1303_SCK_RISING_EDGE,
NULL);
break;
case SND_SOC_DAIFMT_NB_IF:
dev_dbg(component->dev, "%s : %s\n",
__func__, "Normal BCLK + Invert Frame");
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_01_INPUT1_CTRL1,
SMA1303_LEFTPOL_MASK,
SMA1303_HIGH_FIRST_CH,
NULL);
break;
case SND_SOC_DAIFMT_NB_NF:
/* Hardware default - nothing to program. */
dev_dbg(component->dev, "%s : %s\n",
__func__, "Normal BCLK + Normal Frame");
break;
default:
dev_err(component->dev,
"Unsupported Bit & Frameclock : 0x%x\n", fmt);
return -EINVAL;
}
if (ret < 0)
return -EINVAL;
return 0;
}
/*
 * DAI set_tdm_slot: configure TDM slot width (16/32 bits), slot count
 * (4/8) and the RX/TX slot positions previously chosen through the
 * "TDM RX/TX Slot Position" controls.  Also records the frame size
 * used for BCLK calculation in hw_params().
 */
static int sma1303_dai_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask,
int slots, int slot_width)
{
struct snd_soc_component *component = dai->component;
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
int ret = 0;
dev_dbg(component->dev, "%s : slots = %d, slot_width - %d\n",
__func__, slots, slot_width);
/* Used by hw_params() to derive BCLK in DSP_A mode. */
sma1303->frame_size = slot_width * slots;
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A4_TOP_MAN3,
SMA1303_O_FORMAT_MASK,
SMA1303_O_FMT_TDM,
NULL);
switch (slot_width) {
case 16:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A6_TDM2,
SMA1303_TDM_DL_MASK,
SMA1303_TDM_DL_16,
NULL);
break;
case 32:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A6_TDM2,
SMA1303_TDM_DL_MASK,
SMA1303_TDM_DL_32,
NULL);
break;
default:
/* Unsupported widths are logged but not treated as fatal. */
dev_err(component->dev, "%s not support TDM %d slot_width\n",
__func__, slot_width);
break;
}
switch (slots) {
case 4:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A6_TDM2,
SMA1303_TDM_N_SLOT_MASK,
SMA1303_TDM_N_SLOT_4,
NULL);
break;
case 8:
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A6_TDM2,
SMA1303_TDM_N_SLOT_MASK,
SMA1303_TDM_N_SLOT_8,
NULL);
break;
default:
dev_err(component->dev, "%s not support TDM %d slots\n",
__func__, slots);
break;
}
/* Slot positions must lie inside the configured slot count. */
if (sma1303->tdm_slot_rx < slots)
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A5_TDM1,
SMA1303_TDM_SLOT1_RX_POS_MASK,
(sma1303->tdm_slot_rx) << 3,
NULL);
else
dev_err(component->dev, "%s Incorrect tdm-slot-rx %d set\n",
__func__, sma1303->tdm_slot_rx);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A5_TDM1,
SMA1303_TDM_CLK_POL_MASK,
SMA1303_TDM_CLK_POL_RISE,
NULL);
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A5_TDM1,
SMA1303_TDM_TX_MODE_MASK,
SMA1303_TDM_TX_MONO,
NULL);
if (sma1303->tdm_slot_tx < slots)
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_A6_TDM2,
SMA1303_TDM_SLOT1_TX_POS_MASK,
(sma1303->tdm_slot_tx) << 3,
NULL);
else
dev_err(component->dev, "%s Incorrect tdm-slot-tx %d set\n",
__func__, sma1303->tdm_slot_tx);
if (ret < 0)
return -EINVAL;
return 0;
}
/* DAI callbacks for the amplifier interface. */
static const struct snd_soc_dai_ops sma1303_dai_ops_amp = {
.set_sysclk = sma1303_dai_set_sysclk_amp,
.set_fmt = sma1303_dai_set_fmt_amp,
.hw_params = sma1303_dai_hw_params_amp,
.mute_stream = sma1303_dai_mute,
.set_tdm_slot = sma1303_dai_set_tdm_slot,
};
/* Supported sample rates: 8 kHz .. 192 kHz. */
#define SMA1303_RATES SNDRV_PCM_RATE_8000_192000
/* Supported sample formats: 16/24/32-bit little-endian. */
#define SMA1303_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
/* Single DAI exposing both playback and SDO-based capture. */
static struct snd_soc_dai_driver sma1303_dai[] = {
{
.name = "sma1303-amplifier",
.id = 0,
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
.rates = SMA1303_RATES,
.formats = SMA1303_FORMATS,
},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = SMA1303_RATES,
.formats = SMA1303_FORMATS,
},
.ops = &sma1303_dai_ops_amp,
},
};
/*
 * Delayed-work handler that polls the fault/status registers.
 *
 * Logs over-temperature (OT1/OT2), speaker/boost over-current, clock
 * loss and boost under-voltage conditions.  While OT1 is active the
 * speaker volume register is stepped by +6 (more attenuation) each
 * pass; once OT1 clears, the pre-fault volume is restored.  The work
 * re-queues itself while check_fault_status is enabled.
 */
static void sma1303_check_fault_worker(struct work_struct *work)
{
	struct sma1303_priv *sma1303 =
		container_of(work, struct sma1303_priv, check_fault_work.work);
	int ret = 0;
	unsigned int over_temp, ocp_val, uvlo_val;

	/*
	 * Before any OT1 event, remember the user-set volume in
	 * init_vol; during recovery track the stepped value in cur_vol.
	 */
	if (sma1303->tsdw_cnt)
		ret = sma1303_regmap_read(sma1303,
			SMA1303_0A_SPK_VOL, &sma1303->cur_vol);
	else
		ret = sma1303_regmap_read(sma1303,
			SMA1303_0A_SPK_VOL, &sma1303->init_vol);
	if (ret != 0) {
		dev_err(sma1303->dev,
			"failed to read SMA1303_0A_SPK_VOL : %d\n", ret);
		return;
	}

	ret = sma1303_regmap_read(sma1303, SMA1303_FA_STATUS1, &over_temp);
	if (ret != 0) {
		dev_err(sma1303->dev,
			"failed to read SMA1303_FA_STATUS1 : %d\n", ret);
		return;
	}

	ret = sma1303_regmap_read(sma1303, SMA1303_FB_STATUS2, &ocp_val);
	if (ret != 0) {
		dev_err(sma1303->dev,
			"failed to read SMA1303_FB_STATUS2 : %d\n", ret);
		return;
	}

	ret = sma1303_regmap_read(sma1303, SMA1303_FF_DEVICE_INDEX, &uvlo_val);
	if (ret != 0) {
		dev_err(sma1303->dev,
			"failed to read SMA1303_FF_DEVICE_INDEX : %d\n", ret);
		return;
	}

	/* The OT "OK" bits are active-high OK: a clear bit means fault. */
	if (~over_temp & SMA1303_OT1_OK_STATUS) {
		dev_crit(sma1303->dev,
			"%s : OT1(Over Temperature Level 1)\n", __func__);
		/* Step the volume register up (attenuate) while hot. */
		if ((sma1303->cur_vol + 6) <= 0xFF)
			sma1303_regmap_write(sma1303,
				SMA1303_0A_SPK_VOL, sma1303->cur_vol + 6);
		sma1303->tsdw_cnt++;
	} else if (sma1303->tsdw_cnt) {
		/* OT1 cleared: restore the pre-fault volume. */
		sma1303_regmap_write(sma1303,
			SMA1303_0A_SPK_VOL, sma1303->init_vol);
		sma1303->tsdw_cnt = 0;
		sma1303->cur_vol = sma1303->init_vol;
	}

	if (~over_temp & SMA1303_OT2_OK_STATUS) {
		dev_crit(sma1303->dev,
			"%s : OT2(Over Temperature Level 2)\n", __func__);
	}
	if (ocp_val & SMA1303_OCP_SPK_STATUS) {
		dev_crit(sma1303->dev,
			"%s : OCP_SPK(Over Current Protect SPK)\n", __func__);
	}
	if (ocp_val & SMA1303_OCP_BST_STATUS) {
		dev_crit(sma1303->dev,
			"%s : OCP_BST(Over Current Protect Boost)\n", __func__);
	}
	/* A clock fault only matters while the amplifier is powered. */
	if ((ocp_val & SMA1303_CLK_MON_STATUS) && (sma1303->amp_power_status)) {
		dev_crit(sma1303->dev,
			"%s : CLK_FAULT(No clock input)\n", __func__);
	}
	if (uvlo_val & SMA1303_UVLO_BST_STATUS) {
		dev_crit(sma1303->dev,
			"%s : UVLO(Under Voltage Lock Out)\n", __func__);
	}

	/* Only nag when the status actually changed since the last pass. */
	if ((over_temp != sma1303->last_over_temp) ||
	    (ocp_val != sma1303->last_ocp_val)) {
		dev_crit(sma1303->dev, "Please check AMP status");
		dev_dbg(sma1303->dev, "STATUS1=0x%02X : STATUS2=0x%02X\n",
			over_temp, ocp_val);
		sma1303->last_over_temp = over_temp;
		sma1303->last_ocp_val = ocp_val;
	}

	if (sma1303->check_fault_status) {
		if (sma1303->check_fault_period > 0)
			queue_delayed_work(system_freezable_wq,
				&sma1303->check_fault_work,
				sma1303->check_fault_period * HZ);
		else
			queue_delayed_work(system_freezable_wq,
				&sma1303->check_fault_work,
				CHECK_PERIOD_TIME * HZ);
	}

	/*
	 * Fix: the function previously ended with an empty
	 * "all faults clear" if-block; it had no effect and was
	 * removed as dead code.
	 */
}
/* Component probe: synchronise DAPM state once at registration time. */
static int sma1303_probe(struct snd_soc_component *component)
{
	snd_soc_dapm_sync(snd_soc_component_get_dapm(component));

	return 0;
}
/* Component remove: stop the periodic fault-check work before teardown. */
static void sma1303_remove(struct snd_soc_component *component)
{
struct sma1303_priv *sma1303 = snd_soc_component_get_drvdata(component);
cancel_delayed_work_sync(&sma1303->check_fault_work);
}
/* ASoC component description: controls, widgets and routes above. */
static const struct snd_soc_component_driver sma1303_component = {
.probe = sma1303_probe,
.remove = sma1303_remove,
.controls = sma1303_snd_controls,
.num_controls = ARRAY_SIZE(sma1303_snd_controls),
.dapm_widgets = sma1303_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(sma1303_dapm_widgets),
.dapm_routes = sma1303_audio_map,
.num_dapm_routes = ARRAY_SIZE(sma1303_audio_map),
};
/* I2C regmap: 8-bit registers, 8-bit values, no register cache. */
static const struct regmap_config sma_i2c_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = SMA1303_FF_DEVICE_INDEX,
.readable_reg = sma1303_readable_register,
.writeable_reg = sma1303_writeable_register,
.volatile_reg = sma1303_volatile_register,
/*
 * NOTE(review): reg_defaults are provided although cache_type is
 * REGCACHE_NONE; the defaults are instead written out explicitly
 * in sma1303_i2c_probe() - confirm whether a cache was intended.
 */
.cache_type = REGCACHE_NONE,
.reg_defaults = sma1303_reg_def,
.num_reg_defaults = ARRAY_SIZE(sma1303_reg_def),
};
/* sysfs attribute: period (seconds) between fault-check work runs. */
static ssize_t check_fault_period_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sma1303_priv *sma1303 = dev_get_drvdata(dev);
return sysfs_emit(buf, "%ld\n", sma1303->check_fault_period);
}
static ssize_t check_fault_period_store(struct device *dev,
struct device_attribute *devattr, const char *buf, size_t count)
{
struct sma1303_priv *sma1303 = dev_get_drvdata(dev);
int ret;
/* Parse a base-10 value directly into the driver state. */
ret = kstrtol(buf, 10, &sma1303->check_fault_period);
if (ret)
return -EINVAL;
return (ssize_t)count;
}
static DEVICE_ATTR_RW(check_fault_period);
/* sysfs attribute: enable/disable fault polling; enabling kicks the work. */
static ssize_t check_fault_status_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sma1303_priv *sma1303 = dev_get_drvdata(dev);
return sysfs_emit(buf, "%ld\n", sma1303->check_fault_status);
}
static ssize_t check_fault_status_store(struct device *dev,
struct device_attribute *devattr, const char *buf, size_t count)
{
struct sma1303_priv *sma1303 = dev_get_drvdata(dev);
int ret;
ret = kstrtol(buf, 10, &sma1303->check_fault_status);
if (ret)
return -EINVAL;
/* Restart polling immediately when it was (re-)enabled. */
if (sma1303->check_fault_status) {
if (sma1303->check_fault_period > 0)
queue_delayed_work(system_freezable_wq,
&sma1303->check_fault_work,
sma1303->check_fault_period * HZ);
else
queue_delayed_work(system_freezable_wq,
&sma1303->check_fault_work,
CHECK_PERIOD_TIME * HZ);
}
return (ssize_t)count;
}
static DEVICE_ATTR_RW(check_fault_status);
/* Attribute group registered on the I2C device in probe(). */
static struct attribute *sma1303_attr[] = {
&dev_attr_check_fault_period.attr,
&dev_attr_check_fault_status.attr,
NULL,
};
static struct attribute_group sma1303_attr_group = {
.attrs = sma1303_attr,
};
/*
 * I2C probe: map registers, verify the device ID, soft-reset the chip,
 * write the register defaults, initialise driver state, then register
 * the ASoC component and the fault-check sysfs attributes.
 */
static int sma1303_i2c_probe(struct i2c_client *client)
{
struct sma1303_priv *sma1303;
int ret, i = 0;
unsigned int device_info, status, otp_stat;
sma1303 = devm_kzalloc(&client->dev,
sizeof(struct sma1303_priv), GFP_KERNEL);
if (!sma1303)
return -ENOMEM;
sma1303->dev = &client->dev;
sma1303->regmap = devm_regmap_init_i2c(client, &sma_i2c_regmap);
if (IS_ERR(sma1303->regmap)) {
ret = PTR_ERR(sma1303->regmap);
dev_err(&client->dev,
"Failed to allocate register map: %d\n", ret);
return ret;
}
/* Identify the chip; probe continues even on an ID mismatch. */
ret = sma1303_regmap_read(sma1303,
SMA1303_FF_DEVICE_INDEX, &device_info);
if ((ret != 0) || ((device_info & 0xF8) != SMA1303_DEVICE_ID)) {
dev_err(&client->dev, "device initialization error (%d 0x%02X)",
ret, device_info);
}
dev_dbg(&client->dev, "chip version 0x%02X\n", device_info);
/* Soft-reset the chip over I2C before programming it. */
ret += sma1303_regmap_update_bits(sma1303,
SMA1303_00_SYSTEM_CTRL,
SMA1303_RESETBYI2C_MASK, SMA1303_RESETBYI2C_RESET,
NULL);
ret += sma1303_regmap_read(sma1303, SMA1303_FF_DEVICE_INDEX, &status);
sma1303->rev_num = status & SMA1303_REV_NUM_STATUS;
if (sma1303->rev_num == SMA1303_REV_NUM_TV0)
dev_dbg(&client->dev, "SMA1303 Trimming Version 0\n");
else if (sma1303->rev_num == SMA1303_REV_NUM_TV1)
dev_dbg(&client->dev, "SMA1303 Trimming Version 1\n");
/* OTP validity check; the mask depends on the trimming version. */
ret += sma1303_regmap_read(sma1303, SMA1303_FB_STATUS2, &otp_stat);
if (ret < 0)
dev_err(&client->dev,
"failed to read, register: %02X, ret: %d\n",
SMA1303_FF_DEVICE_INDEX, ret);
if (((sma1303->rev_num == SMA1303_REV_NUM_TV0) &&
((otp_stat & 0x0E) == SMA1303_OTP_STAT_OK_0)) ||
((sma1303->rev_num != SMA1303_REV_NUM_TV0) &&
((otp_stat & 0x0C) == SMA1303_OTP_STAT_OK_1)))
dev_dbg(&client->dev, "SMA1303 OTP Status Successful\n");
else
dev_dbg(&client->dev, "SMA1303 OTP Status Fail\n");
/* Write the defaults explicitly (no regmap cache is used). */
for (i = 0; i < (unsigned int)ARRAY_SIZE(sma1303_reg_def); i++)
ret += sma1303_regmap_write(sma1303,
sma1303_reg_def[i].reg,
sma1303_reg_def[i].def);
/* Default driver state. */
sma1303->amp_mode = SMA1303_MONO;
sma1303->amp_power_status = false;
sma1303->check_fault_period = CHECK_PERIOD_TIME;
sma1303->check_fault_status = true;
sma1303->force_mute_status = false;
sma1303->init_vol = 0x31;
sma1303->cur_vol = sma1303->init_vol;
sma1303->last_bclk = 0;
sma1303->last_ocp_val = 0x08;
sma1303->last_over_temp = 0xC0;
sma1303->tsdw_cnt = 0;
sma1303->retry_cnt = SMA1303_I2C_RETRY_COUNT;
sma1303->tdm_slot_rx = 0;
sma1303->tdm_slot_tx = 0;
sma1303->sys_clk_id = SMA1303_PLL_CLKIN_BCLK;
sma1303->dev = &client->dev;
sma1303->kobj = &client->dev.kobj;
INIT_DELAYED_WORK(&sma1303->check_fault_work,
sma1303_check_fault_worker);
i2c_set_clientdata(client, sma1303);
sma1303->pll_matches = sma1303_pll_matches;
sma1303->num_of_pll_matches =
ARRAY_SIZE(sma1303_pll_matches);
ret = devm_snd_soc_register_component(&client->dev,
&sma1303_component, sma1303_dai, 1);
if (ret) {
dev_err(&client->dev, "Failed to register component");
return ret;
}
/* On sysfs failure the group pointer is cleared and the error returned. */
sma1303->attr_grp = &sma1303_attr_group;
ret = sysfs_create_group(sma1303->kobj, sma1303->attr_grp);
if (ret) {
dev_err(&client->dev,
"failed to create attribute group [%d]\n", ret);
sma1303->attr_grp = NULL;
}
return ret;
}
/* I2C remove: make sure the fault-check work is no longer running. */
static void sma1303_i2c_remove(struct i2c_client *client)
{
struct sma1303_priv *sma1303 =
(struct sma1303_priv *) i2c_get_clientdata(client);
cancel_delayed_work_sync(&sma1303->check_fault_work);
}
/* Device-id tables and driver registration boilerplate. */
static const struct i2c_device_id sma1303_i2c_id[] = {
{"sma1303"},
{}
};
MODULE_DEVICE_TABLE(i2c, sma1303_i2c_id);
static const struct of_device_id sma1303_of_match[] = {
{ .compatible = "irondevice,sma1303", },
{ }
};
MODULE_DEVICE_TABLE(of, sma1303_of_match);
static struct i2c_driver sma1303_i2c_driver = {
.driver = {
.name = "sma1303",
.of_match_table = sma1303_of_match,
},
.probe = sma1303_i2c_probe,
.remove = sma1303_i2c_remove,
.id_table = sma1303_i2c_id,
};
module_i2c_driver(sma1303_i2c_driver);
MODULE_DESCRIPTION("ALSA SoC SMA1303 driver");
MODULE_AUTHOR("Gyuhwa Park, <[email protected]>");
MODULE_AUTHOR("Kiseok Jo, <[email protected]>");
MODULE_LICENSE("GPL v2");
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019-2022 Bootlin
* Author: Paul Kocialkowski <[email protected]>
*/
#ifndef _LOGICVC_OF_H_
#define _LOGICVC_OF_H_
/* Indices into the driver's table of supported device-tree properties. */
enum logicvc_of_property_index {
LOGICVC_OF_PROPERTY_DISPLAY_INTERFACE = 0,
LOGICVC_OF_PROPERTY_DISPLAY_COLORSPACE,
LOGICVC_OF_PROPERTY_DISPLAY_DEPTH,
LOGICVC_OF_PROPERTY_ROW_STRIDE,
LOGICVC_OF_PROPERTY_DITHERING,
LOGICVC_OF_PROPERTY_BACKGROUND_LAYER,
LOGICVC_OF_PROPERTY_LAYERS_CONFIGURABLE,
LOGICVC_OF_PROPERTY_LAYERS_COUNT,
LOGICVC_OF_PROPERTY_LAYER_DEPTH,
LOGICVC_OF_PROPERTY_LAYER_COLORSPACE,
LOGICVC_OF_PROPERTY_LAYER_ALPHA_MODE,
LOGICVC_OF_PROPERTY_LAYER_BASE_OFFSET,
LOGICVC_OF_PROPERTY_LAYER_BUFFER_OFFSET,
LOGICVC_OF_PROPERTY_LAYER_PRIMARY,
/* Number of entries; must stay last. */
LOGICVC_OF_PROPERTY_MAXIMUM,
};
/* A string value and its numeric equivalent. */
struct logicvc_of_property_sv {
const char *string;
u32 value;
};
/* Description of one supported device-tree property. */
struct logicvc_of_property {
char *name;
bool optional;
/* String/value translation entries - presumably NULL-terminated
 * when the property takes string values; confirm in the parser. */
struct logicvc_of_property_sv *sv;
/* Allowed numeric range - presumably [min, max]; confirm in parser. */
u32 range[2];
};
/* Parse the indexed property as a u32 into *target. */
int logicvc_of_property_parse_u32(struct device_node *of_node,
unsigned int index, u32 *target);
/* Parse the indexed property as a boolean into *target. */
void logicvc_of_property_parse_bool(struct device_node *of_node,
unsigned int index, bool *target);
/* True when the given node describes a logicvc layer. */
bool logicvc_of_node_is_layer(struct device_node *of_node);
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Samsung S3C64xx DTS pinctrl constants
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Copyright (c) 2022 Linaro Ltd
* Author: Krzysztof Kozlowski <[email protected]>
*/
#ifndef __DTS_ARM_SAMSUNG_S3C64XX_PINCTRL_H__
#define __DTS_ARM_SAMSUNG_S3C64XX_PINCTRL_H__
/* Pad pull-resistor configuration values. */
#define S3C64XX_PIN_PULL_NONE 0
#define S3C64XX_PIN_PULL_DOWN 1
#define S3C64XX_PIN_PULL_UP 2
/* Pad function selector values (GPIO in/out, alternate functions, EINT). */
#define S3C64XX_PIN_FUNC_INPUT 0
#define S3C64XX_PIN_FUNC_OUTPUT 1
#define S3C64XX_PIN_FUNC_2 2
#define S3C64XX_PIN_FUNC_3 3
#define S3C64XX_PIN_FUNC_4 4
#define S3C64XX_PIN_FUNC_5 5
#define S3C64XX_PIN_FUNC_6 6
#define S3C64XX_PIN_FUNC_EINT 7
#endif /* __DTS_ARM_SAMSUNG_S3C64XX_PINCTRL_H__ */
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019-2021, Intel Corporation. */
#ifndef _ICE_REPR_H_
#define _ICE_REPR_H_
#include <net/dst_metadata.h>
/* Per-CPU traffic counters for a port representor netdev. */
struct ice_repr_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
u64 rx_bytes;
u64 tx_packets;
u64 tx_bytes;
u64 tx_drops;
};
/* Kind of entity the representor stands for: VF or subfunction. */
enum ice_repr_type {
ICE_REPR_TYPE_VF,
ICE_REPR_TYPE_SF,
};
/* State of one switchdev port representor. */
struct ice_repr {
struct ice_vsi *src_vsi;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
struct ice_repr_pcpu_stats __percpu *stats;
u32 id;
u8 parent_mac[ETH_ALEN];
enum ice_repr_type type;
/* Backing object; which member is valid depends on 'type'. */
union {
struct ice_vf *vf;
struct ice_dynamic_port *sf;
};
/* Type-specific lifecycle callbacks. */
struct {
int (*add)(struct ice_repr *repr);
void (*rem)(struct ice_repr *repr);
int (*ready)(struct ice_repr *repr);
} ops;
};
struct ice_repr *ice_repr_create_vf(struct ice_vf *vf);
struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf);
void ice_repr_destroy(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
int xmit_status);
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id);
#endif
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
*
* Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "common.h"
#include "fimc-core.h"
#include "fimc-reg.h"
#include "media-dev.h"
/*
 * Map a V4L2 buffer type to the matching mem-to-mem format flags:
 * OUTPUT (source) buffers use the M2M input flags, everything else
 * the M2M output flags.
 */
static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
{
	return stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
				FMT_FLAGS_M2M_IN : FMT_FLAGS_M2M_OUT;
}
/*
 * Return the current source/destination buffers to vb2 with the given
 * state and, when both were present, signal job completion to the m2m
 * framework.
 */
void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
{
	struct vb2_v4l2_buffer *src, *dst;

	if (!ctx || !ctx->fh.m2m_ctx)
		return;

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (src)
		v4l2_m2m_buf_done(src, vb_state);
	if (dst)
		v4l2_m2m_buf_done(dst, vb_state);

	if (src && dst)
		v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
				    ctx->fh.m2m_ctx);
}
/* Complete the transaction which has been scheduled for execution. */
static void fimc_m2m_shutdown(struct fimc_ctx *ctx)
{
struct fimc_dev *fimc = ctx->fimc_dev;
if (!fimc_m2m_pending(fimc))
return;
fimc_ctx_state_set(FIMC_CTX_SHUT, ctx);
/* Wait (bounded) for the SHUT flag to be cleared - presumably by
 * the interrupt handler waking irq_queue; confirm in the ISR. */
wait_event_timeout(fimc->irq_queue,
!fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
FIMC_SHUTDOWN_TIMEOUT);
}
/* vb2 start_streaming: take a runtime-PM reference on the FIMC device. */
static int start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct fimc_ctx *ctx = q->drv_priv;
	struct device *dev = &ctx->fimc_dev->pdev->dev;

	return pm_runtime_resume_and_get(dev);
}
/*
 * vb2 stop_streaming: shut down any in-flight job, return all queued
 * buffers with an error state and drop the runtime-PM reference taken
 * in start_streaming().
 */
static void stop_streaming(struct vb2_queue *q)
{
struct fimc_ctx *ctx = q->drv_priv;
fimc_m2m_shutdown(ctx);
fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
pm_runtime_put(&ctx->fimc_dev->pdev->dev);
}
/*
 * m2m device_run: program the hardware for the next queued job and
 * kick the DMA.  The whole register-programming sequence runs with
 * the device spinlock held and interrupts disabled.
 */
static void fimc_device_run(void *priv)
{
struct vb2_v4l2_buffer *src_vb, *dst_vb;
struct fimc_ctx *ctx = priv;
struct fimc_frame *sf, *df;
struct fimc_dev *fimc;
unsigned long flags;
int ret;
if (WARN(!ctx, "Null context\n"))
return;
fimc = ctx->fimc_dev;
spin_lock_irqsave(&fimc->slock, flags);
set_bit(ST_M2M_PEND, &fimc->state);
sf = &ctx->s_frame;
df = &ctx->d_frame;
if (ctx->state & FIMC_PARAMS) {
/* Prepare the DMA offsets for scaler */
fimc_prepare_dma_offset(ctx, sf);
fimc_prepare_dma_offset(ctx, df);
}
/* Resolve DMA addresses for the next source/destination buffers. */
src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
ret = fimc_prepare_addr(ctx, &src_vb->vb2_buf, sf, &sf->addr);
if (ret)
goto dma_unlock;
dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
ret = fimc_prepare_addr(ctx, &dst_vb->vb2_buf, df, &df->addr);
if (ret)
goto dma_unlock;
/* Propagate the source timestamp and its origin flags. */
dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
dst_vb->flags |=
src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
/* Reconfigure hardware if the context has changed. */
if (fimc->m2m.ctx != ctx) {
ctx->state |= FIMC_PARAMS;
fimc->m2m.ctx = ctx;
}
if (ctx->state & FIMC_PARAMS) {
fimc_set_yuv_order(ctx);
fimc_hw_set_input_path(ctx);
fimc_hw_set_in_dma(ctx);
ret = fimc_set_scaler_info(ctx);
if (ret)
goto dma_unlock;
fimc_hw_set_prescaler(ctx);
fimc_hw_set_mainscaler(ctx);
fimc_hw_set_target_format(ctx);
fimc_hw_set_rotation(ctx);
fimc_hw_set_effect(ctx);
fimc_hw_set_out_dma(ctx);
if (fimc->drv_data->alpha_color)
fimc_hw_set_rgb_alpha(ctx);
fimc_hw_set_output_path(ctx);
}
fimc_hw_set_input_addr(fimc, &sf->addr);
fimc_hw_set_output_addr(fimc, &df->addr, -1);
fimc_activate_capture(ctx);
/* Keep only the context-type bits (clears FIMC_PARAMS et al.). */
ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP);
fimc_hw_activate_input_dma(fimc, true);
dma_unlock:
spin_unlock_irqrestore(&fimc->slock, flags);
}
/* v4l2-m2m job_abort callback: stop the current transaction immediately. */
static void fimc_job_abort(void *priv)
{
	fimc_m2m_shutdown(priv);
}
static int fimc_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
const struct fimc_frame *f;
int i;
f = ctx_get_frame(ctx, vq->type);
if (IS_ERR(f))
return PTR_ERR(f);
/*
* Return number of non-contiguous planes (plane buffers)
* depending on the configured color format.
*/
if (!f->fmt)
return -EINVAL;
*num_planes = f->fmt->memplanes;
for (i = 0; i < f->fmt->memplanes; i++)
sizes[i] = f->payload[i];
return 0;
}
/*
 * vb2 buf_prepare handler: set the payload of each plane to the size
 * required by the currently configured format.
 */
static int fimc_buf_prepare(struct vb2_buffer *vb)
{
	struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	const struct fimc_frame *frame = ctx_get_frame(ctx, vb->vb2_queue->type);
	int plane;

	if (IS_ERR(frame))
		return PTR_ERR(frame);

	for (plane = 0; plane < frame->fmt->memplanes; plane++)
		vb2_set_plane_payload(vb, plane, frame->payload[plane]);

	return 0;
}
/* vb2 buf_queue handler: hand the buffer over to the m2m framework. */
static void fimc_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
/* vb2 queue operations shared by the source and destination queues. */
static const struct vb2_ops fimc_qops = {
	.queue_setup	 = fimc_queue_setup,
	.buf_prepare	 = fimc_buf_prepare,
	.buf_queue	 = fimc_buf_queue,
	.stop_streaming	 = stop_streaming,
	.start_streaming = start_streaming,
};
/*
* V4L2 ioctl handlers
*/
/* VIDIOC_QUERYCAP handler: fill in the driver/card identification. */
static int fimc_m2m_querycap(struct file *file, void *fh,
				     struct v4l2_capability *cap)
{
	struct fimc_dev *fimc = video_drvdata(file);

	__fimc_vidioc_querycap(&fimc->pdev->dev, cap);
	return 0;
}
/*
 * VIDIOC_ENUM_FMT handler (both capture and output directions): report the
 * f->index'th format supported in mem-to-mem mode for the given queue type.
 */
static int fimc_m2m_enum_fmt(struct file *file, void *priv,
			     struct v4l2_fmtdesc *f)
{
	const struct fimc_fmt *fmt = fimc_find_format(NULL, NULL,
					get_m2m_fmt_flags(f->type), f->index);

	if (fmt == NULL)
		return -EINVAL;

	f->pixelformat = fmt->fourcc;
	return 0;
}
/* VIDIOC_G_FMT handler: return the currently set multi-planar format. */
static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct fimc_ctx *ctx = fh_to_ctx(fh);
	const struct fimc_frame *frame = ctx_get_frame(ctx, f->type);

	if (IS_ERR(frame))
		return PTR_ERR(frame);

	__fimc_get_format(frame, f);
	return 0;
}
/*
 * Validate and adjust a requested multi-planar format against the hardware
 * variant limits: clamp width/height and align them to the format/variant
 * specific pixel and line alignment constraints.
 *
 * Shared by the TRY_FMT and S_FMT paths; does not modify driver state.
 */
static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
{
	struct fimc_dev *fimc = ctx->fimc_dev;
	const struct fimc_variant *variant = fimc->variant;
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	const struct fimc_fmt *fmt;
	u32 max_w, mod_x, mod_y;

	if (!IS_M2M(f->type))
		return -EINVAL;

	fmt = fimc_find_format(&pix->pixelformat, NULL,
			       get_m2m_fmt_flags(f->type), 0);
	if (WARN(fmt == NULL, "Pixel format lookup failed"))
		return -EINVAL;

	/* Only progressive (non-interlaced) frames are supported. */
	if (pix->field == V4L2_FIELD_ANY)
		pix->field = V4L2_FIELD_NONE;
	else if (pix->field != V4L2_FIELD_NONE)
		return -EINVAL;

	/* Width limit and pixel alignment differ for input vs. output. */
	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		max_w = variant->pix_limit->scaler_dis_w;
		mod_x = ffs(variant->min_inp_pixsize) - 1;
	} else {
		max_w = variant->pix_limit->out_rot_dis_w;
		mod_x = ffs(variant->min_out_pixsize) - 1;
	}

	if (tiled_fmt(fmt)) {
		mod_x = 6; /* 64 x 32 pixels tile */
		mod_y = 5;
	} else {
		if (variant->min_vsize_align == 1)
			mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
		else
			mod_y = ffs(variant->min_vsize_align) - 1;
	}

	/*
	 * NOTE(review): the height upper bound reuses scaler_dis_w (a width
	 * limit) - presumably the scaler limits are symmetric; confirm against
	 * the variant data.
	 */
	v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
		&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);

	fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
	return 0;
}
/* VIDIOC_TRY_FMT handler: thin wrapper around fimc_try_fmt_mplane(). */
static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
				   struct v4l2_format *f)
{
	struct fimc_ctx *ctx = fh_to_ctx(fh);

	return fimc_try_fmt_mplane(ctx, f);
}
/*
 * Copy a validated multi-planar pixel format into the driver frame
 * descriptor.  The crop/compose rectangle is reset to cover the full frame
 * (offsets zeroed, width/height equal to the frame size).
 */
static void __set_frame_format(struct fimc_frame *frame,
			       const struct fimc_fmt *fmt,
			       const struct v4l2_pix_format_mplane *pixm)
{
	int i;

	for (i = 0; i < fmt->memplanes; i++) {
		frame->bytesperline[i] = pixm->plane_fmt[i].bytesperline;
		frame->payload[i] = pixm->plane_fmt[i].sizeimage;
	}

	frame->f_width = pixm->width;
	frame->f_height = pixm->height;
	frame->o_width = pixm->width;
	frame->o_height = pixm->height;
	frame->width = pixm->width;
	frame->height = pixm->height;
	frame->offs_h = 0;
	frame->offs_v = 0;
	frame->fmt = fmt;
}
/*
 * VIDIOC_S_FMT handler: validate the requested format, then apply it to the
 * source or destination frame.  Fails with -EBUSY if the corresponding vb2
 * queue already has buffers allocated.
 */
static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct fimc_ctx *ctx = fh_to_ctx(fh);
	struct fimc_dev *fimc = ctx->fimc_dev;
	const struct fimc_fmt *fmt;
	struct vb2_queue *vq;
	struct fimc_frame *frame;
	int ret;

	ret = fimc_try_fmt_mplane(ctx, f);
	if (ret)
		return ret;

	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);

	if (vb2_is_busy(vq)) {
		v4l2_err(&fimc->m2m.vfd, "queue (%d) busy\n", f->type);
		return -EBUSY;
	}

	/* OUTPUT queue configures the source frame, CAPTURE the destination. */
	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		frame = &ctx->s_frame;
	else
		frame = &ctx->d_frame;

	fmt = fimc_find_format(&f->fmt.pix_mp.pixelformat, NULL,
			       get_m2m_fmt_flags(f->type), 0);
	if (!fmt)
		return -EINVAL;

	__set_frame_format(frame, fmt, &f->fmt.pix_mp);

	/* Update RGB Alpha control state and value range */
	fimc_alpha_ctrl_update(ctx);
	return 0;
}
/*
 * VIDIOC_G_SELECTION handler: report the crop (OUTPUT) or compose (CAPTURE)
 * rectangle, or the default/bounds rectangle covering the whole frame.
 * CROP targets are only valid on the OUTPUT queue, COMPOSE targets only on
 * the CAPTURE queue.
 */
static int fimc_m2m_g_selection(struct file *file, void *fh,
				struct v4l2_selection *s)
{
	struct fimc_ctx *ctx = fh_to_ctx(fh);
	const struct fimc_frame *frame;

	frame = ctx_get_frame(ctx, s->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	/* First validate the target/queue-type pairing. */
	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Then fill in the requested rectangle. */
	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_COMPOSE:
		/* The active crop/compose rectangle. */
		s->r.left = frame->offs_h;
		s->r.top = frame->offs_v;
		s->r.width = frame->width;
		s->r.height = frame->height;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		/* Default/bounds: the full frame. */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = frame->o_width;
		s->r.height = frame->o_height;
		break;
	default:
		/* Unreachable: already filtered by the switch above. */
		return -EINVAL;
	}
	return 0;
}
/*
 * Validate and adjust a requested crop (OUTPUT) or compose (CAPTURE)
 * rectangle: clamp and align width/height to variant constraints and pull
 * left/top back inside the frame.  Does not modify the context.
 */
static int fimc_m2m_try_selection(struct fimc_ctx *ctx,
				  struct v4l2_selection *s)
{
	struct fimc_dev *fimc = ctx->fimc_dev;
	const struct fimc_frame *f;
	u32 min_size, halign, depth = 0;
	int i;

	if (s->r.top < 0 || s->r.left < 0) {
		v4l2_err(&fimc->m2m.vfd,
			 "doesn't support negative values for top & left\n");
		return -EINVAL;
	}

	/* Map queue type to the frame and check the selection target. */
	if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		f = &ctx->d_frame;
		if (s->target != V4L2_SEL_TGT_COMPOSE)
			return -EINVAL;
	} else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		f = &ctx->s_frame;
		if (s->target != V4L2_SEL_TGT_CROP)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	min_size = (f == &ctx->s_frame) ?
		fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;

	/* Get pixel alignment constraints. */
	if (fimc->variant->min_vsize_align == 1)
		halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
	else
		halign = ffs(fimc->variant->min_vsize_align) - 1;

	/* Total bits per pixel across all memory planes. */
	for (i = 0; i < f->fmt->memplanes; i++)
		depth += f->fmt->depth[i];

	v4l_bound_align_image(&s->r.width, min_size, f->o_width,
			      ffs(min_size) - 1,
			      &s->r.height, min_size, f->o_height,
			      halign, 64/(ALIGN(depth, 8)));

	/* adjust left/top if cropping rectangle is out of bounds */
	if (s->r.left + s->r.width > f->o_width)
		s->r.left = f->o_width - s->r.width;
	if (s->r.top + s->r.height > f->o_height)
		s->r.top = f->o_height - s->r.height;

	/*
	 * NOTE(review): left is rounded to min_size while top is rounded to
	 * hor_offs_align (a *horizontal* alignment) - looks possibly swapped;
	 * confirm against the hardware spec before changing.
	 */
	s->r.left = round_down(s->r.left, min_size);
	s->r.top  = round_down(s->r.top, fimc->variant->hor_offs_align);

	dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
	    s->r.left, s->r.top, s->r.width, s->r.height,
	    f->f_width, f->f_height);

	return 0;
}
/*
 * VIDIOC_S_SELECTION handler: validate the rectangle, verify the resulting
 * scaling ratio against the hardware limits, then apply the crop/compose
 * rectangle and flag the context for hardware reprogramming.
 */
static int fimc_m2m_s_selection(struct file *file, void *fh,
				struct v4l2_selection *s)
{
	struct fimc_ctx *ctx = fh_to_ctx(fh);
	struct fimc_dev *fimc = ctx->fimc_dev;
	struct fimc_frame *f;
	int ret;

	ret = fimc_m2m_try_selection(ctx, s);
	if (ret)
		return ret;

	f = (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
		&ctx->s_frame : &ctx->d_frame;

	/* Check to see if scaling ratio is within supported range */
	if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* New source rectangle vs. current destination size. */
		ret = fimc_check_scaler_ratio(ctx, s->r.width,
				s->r.height, ctx->d_frame.width,
				ctx->d_frame.height, ctx->rotation);
	} else {
		/* Current source size vs. new destination rectangle. */
		ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
				ctx->s_frame.height, s->r.width,
				s->r.height, ctx->rotation);
	}
	if (ret) {
		v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
		return -EINVAL;
	}

	f->offs_h = s->r.left;
	f->offs_v = s->r.top;
	f->width  = s->r.width;
	f->height = s->r.height;

	/* Force full hardware reconfiguration on the next job. */
	fimc_ctx_state_set(FIMC_PARAMS, ctx);

	return 0;
}
/*
 * ioctl table for the mem-to-mem video node.  Buffer handling ioctls are
 * delegated to the generic v4l2-mem2mem helpers.
 */
static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
	.vidioc_querycap		= fimc_m2m_querycap,
	.vidioc_enum_fmt_vid_cap	= fimc_m2m_enum_fmt,
	.vidioc_enum_fmt_vid_out	= fimc_m2m_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= fimc_m2m_g_fmt_mplane,
	.vidioc_g_fmt_vid_out_mplane	= fimc_m2m_g_fmt_mplane,
	.vidioc_try_fmt_vid_cap_mplane	= fimc_m2m_try_fmt_mplane,
	.vidioc_try_fmt_vid_out_mplane	= fimc_m2m_try_fmt_mplane,
	.vidioc_s_fmt_vid_cap_mplane	= fimc_m2m_s_fmt_mplane,
	.vidioc_s_fmt_vid_out_mplane	= fimc_m2m_s_fmt_mplane,
	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
	.vidioc_g_selection		= fimc_m2m_g_selection,
	.vidioc_s_selection		= fimc_m2m_s_selection,
};
/* Apply the queue configuration shared by the source/destination queues. */
static void __fimc_m2m_vq_init(struct vb2_queue *vq, struct fimc_ctx *ctx)
{
	vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	vq->drv_priv = ctx;
	vq->ops = &fimc_qops;
	vq->mem_ops = &vb2_dma_contig_memops;
	vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	vq->lock = &ctx->fimc_dev->lock;
	vq->dev = &ctx->fimc_dev->pdev->dev;
}

/*
 * v4l2-m2m queue initialization callback: configure and register the
 * OUTPUT (source) and CAPTURE (destination) vb2 queues of one context.
 */
static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct fimc_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	__fimc_m2m_vq_init(src_vq, ctx);
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	__fimc_m2m_vq_init(dst_vq, ctx);
	return vb2_queue_init(dst_vq);
}
/*
 * Give a newly opened context a sane initial state: 800x600 RGB32 for both
 * the source and destination frames.
 */
static int fimc_m2m_set_default_format(struct fimc_ctx *ctx)
{
	struct v4l2_pix_format_mplane pixm = {
		.pixelformat	= V4L2_PIX_FMT_RGB32,
		.width		= 800,
		.height		= 600,
		.plane_fmt[0]	= {
			/* 4 bytes per RGB32 pixel. */
			.bytesperline	= 800 * 4,
			.sizeimage	= 800 * 4 * 600,
		},
	};
	const struct fimc_fmt *fmt;

	fmt = fimc_find_format(&pixm.pixelformat, NULL, FMT_FLAGS_M2M, 0);
	if (!fmt)
		return -EINVAL;

	__set_frame_format(&ctx->s_frame, fmt, &pixm);
	__set_frame_format(&ctx->d_frame, fmt, &pixm);

	return 0;
}
/*
 * open() handler for the mem-to-mem video node: allocate a context,
 * create its controls, register the file handle and set up the m2m context
 * with a default format.  Fails with -EBUSY while the capture node of the
 * same FIMC instance is in use.  On error, previously acquired resources
 * are released in reverse order via the error labels.
 */
static int fimc_m2m_open(struct file *file)
{
	struct fimc_dev *fimc = video_drvdata(file);
	struct fimc_ctx *ctx;
	int ret = -EBUSY;

	pr_debug("pid: %d, state: %#lx\n", task_pid_nr(current), fimc->state);

	if (mutex_lock_interruptible(&fimc->lock))
		return -ERESTARTSYS;
	/*
	 * Don't allow simultaneous open() of the mem-to-mem and the
	 * capture video node that belong to same FIMC IP instance.
	 */
	if (test_bit(ST_CAPT_BUSY, &fimc->state))
		goto unlock;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ret = -ENOMEM;
		goto unlock;
	}
	v4l2_fh_init(&ctx->fh, &fimc->m2m.vfd);
	ctx->fimc_dev = fimc;

	/* Default color format */
	ctx->s_frame.fmt = fimc_get_format(0);
	ctx->d_frame.fmt = fimc_get_format(0);

	ret = fimc_ctrls_create(ctx);
	if (ret)
		goto error_fh;

	/* Use separate control handler per file handle */
	ctx->fh.ctrl_handler = &ctx->ctrls.handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	/* Setup the device context for memory-to-memory mode */
	ctx->state = FIMC_CTX_M2M;
	ctx->flags = 0;
	ctx->in_path = FIMC_IO_DMA;
	ctx->out_path = FIMC_IO_DMA;
	ctx->scaler.enabled = 1;

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto error_c;
	}

	/* First opener turns the m2m part of the device on. */
	if (fimc->m2m.refcnt++ == 0)
		set_bit(ST_M2M_RUN, &fimc->state);

	ret = fimc_m2m_set_default_format(ctx);
	if (ret < 0)
		goto error_m2m_ctx;

	mutex_unlock(&fimc->lock);
	return 0;

error_m2m_ctx:
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error_c:
	fimc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
error_fh:
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
unlock:
	mutex_unlock(&fimc->lock);
	return ret;
}
/*
 * release() handler: tear down the m2m context, controls and file handle,
 * free the context, and clear the running state when the last user closes.
 */
static int fimc_m2m_release(struct file *file)
{
	struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
	struct fimc_dev *fimc = ctx->fimc_dev;

	dbg("pid: %d, state: 0x%lx, refcnt= %d",
	    task_pid_nr(current), fimc->state, fimc->m2m.refcnt);

	mutex_lock(&fimc->lock);

	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	fimc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--fimc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_RUN, &fimc->state);
	kfree(ctx);

	mutex_unlock(&fimc->lock);
	return 0;
}
/* File operations for the mem-to-mem video node. */
static const struct v4l2_file_operations fimc_m2m_fops = {
	.owner		= THIS_MODULE,
	.open		= fimc_m2m_open,
	.release	= fimc_m2m_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};
/* Callbacks invoked by the v4l2-mem2mem core to run/abort jobs. */
static const struct v4l2_m2m_ops m2m_ops = {
	.device_run	= fimc_device_run,
	.job_abort	= fimc_job_abort,
};
/*
 * Create and register the mem-to-mem video device of one FIMC instance:
 * initialize the video_device structure, create the v4l2-m2m device, set up
 * the (pad-less) media entity and register the /dev/videoX node.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released.
 */
int fimc_register_m2m_device(struct fimc_dev *fimc,
			     struct v4l2_device *v4l2_dev)
{
	struct video_device *vfd = &fimc->m2m.vfd;
	int ret;

	fimc->v4l2_dev = v4l2_dev;

	memset(vfd, 0, sizeof(*vfd));
	vfd->fops = &fimc_m2m_fops;
	vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
	vfd->v4l2_dev = v4l2_dev;
	vfd->minor = -1;
	vfd->release = video_device_release_empty;
	vfd->lock = &fimc->lock;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
	/* Legacy crop semantics kept for userspace compatibility. */
	set_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags);

	snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
	video_set_drvdata(vfd, fimc);

	fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
	if (IS_ERR(fimc->m2m.m2m_dev)) {
		v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
		return PTR_ERR(fimc->m2m.m2m_dev);
	}

	/* The m2m node has no media pads, but still needs an entity. */
	ret = media_entity_pads_init(&vfd->entity, 0, NULL);
	if (ret)
		goto err_me;

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret)
		goto err_vd;

	v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
		  vfd->name, video_device_node_name(vfd));
	return 0;

err_vd:
	media_entity_cleanup(&vfd->entity);
err_me:
	v4l2_m2m_release(fimc->m2m.m2m_dev);
	return ret;
}
void fimc_unregister_m2m_device(struct fimc_dev *fimc)
{
if (!fimc)
return;
if (fimc->m2m.m2m_dev)
v4l2_m2m_release(fimc->m2m.m2m_dev);
if (video_is_registered(&fimc->m2m.vfd)) {
video_unregister_device(&fimc->m2m.vfd);
media_entity_cleanup(&fimc->m2m.vfd.entity);
}
}
|
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel module to match FRAG parameters. */
/* (C) 2001-2002 Andras Kis-Szabo <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/checksum.h>
#include <net/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_frag.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: IPv6 fragment match");
MODULE_AUTHOR("Andras Kis-Szabo <[email protected]>");
/* Returns 1 if the id is matched by the range, 0 otherwise */
/* Returns true when @id lies in [@min, @max], XOR-ed with @invert. */
static inline bool
id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{
	const bool in_range = (id >= min) && (id <= max);
	const bool matched = in_range ^ invert;

	pr_debug("id_match:%c 0x%x <= 0x%x <= 0x%x\n", invert ? '!' : ' ',
		 min, id, max);
	pr_debug(" result %s\n", matched ? "PASS" : "FAILED");
	return matched;
}
/*
 * Match callback: returns true when the packet carries an IPv6 fragment
 * header satisfying all configured criteria (ID range, reserved bits,
 * first/more/last fragment flags).  Sets par->hotdrop on malformed packets
 * or header-chain lookup failures other than "not present".
 */
static bool
frag_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct frag_hdr _frag;
	const struct frag_hdr *fh;
	const struct ip6t_frag *fraginfo = par->matchinfo;
	unsigned int ptr = 0;
	int err;

	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL);
	if (err < 0) {
		/* -ENOENT just means "no fragment header": plain no-match. */
		if (err != -ENOENT)
			par->hotdrop = true;
		return false;
	}

	fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
	if (fh == NULL) {
		/* Truncated header - drop the packet. */
		par->hotdrop = true;
		return false;
	}

	/* Debug dump of the raw header fields. */
	pr_debug("INFO %04X ", fh->frag_off);
	pr_debug("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
	pr_debug("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
	pr_debug("MF %04X ", fh->frag_off & htons(IP6_MF));
	pr_debug("ID %u %08X\n", ntohl(fh->identification),
		 ntohl(fh->identification));

	/* Debug dump of each individual sub-match result. */
	pr_debug("IPv6 FRAG id %02X ",
		 id_match(fraginfo->ids[0], fraginfo->ids[1],
			  ntohl(fh->identification),
			  !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)));
	pr_debug("res %02X %02X%04X %02X ",
		 fraginfo->flags & IP6T_FRAG_RES, fh->reserved,
		 ntohs(fh->frag_off) & 0x6,
		 !((fraginfo->flags & IP6T_FRAG_RES) &&
		   (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
	pr_debug("first %02X %02X %02X ",
		 fraginfo->flags & IP6T_FRAG_FST,
		 ntohs(fh->frag_off) & ~0x7,
		 !((fraginfo->flags & IP6T_FRAG_FST) &&
		   (ntohs(fh->frag_off) & ~0x7)));
	pr_debug("mf %02X %02X %02X ",
		 fraginfo->flags & IP6T_FRAG_MF,
		 ntohs(fh->frag_off) & IP6_MF,
		 !((fraginfo->flags & IP6T_FRAG_MF) &&
		   !((ntohs(fh->frag_off) & IP6_MF))));
	pr_debug("last %02X %02X %02X\n",
		 fraginfo->flags & IP6T_FRAG_NMF,
		 ntohs(fh->frag_off) & IP6_MF,
		 !((fraginfo->flags & IP6T_FRAG_NMF) &&
		   (ntohs(fh->frag_off) & IP6_MF)));

	/*
	 * Final verdict - AND of all requested sub-matches:
	 * ID in range, reserved bits clear (RES), offset zero (FST = first
	 * fragment), MF bit set (MF), MF bit clear (NMF = last fragment).
	 */
	return id_match(fraginfo->ids[0], fraginfo->ids[1],
			ntohl(fh->identification),
			!!(fraginfo->invflags & IP6T_FRAG_INV_IDS)) &&
		!((fraginfo->flags & IP6T_FRAG_RES) &&
		  (fh->reserved || (ntohs(fh->frag_off) & 0x6))) &&
		!((fraginfo->flags & IP6T_FRAG_FST) &&
		  (ntohs(fh->frag_off) & ~0x7)) &&
		!((fraginfo->flags & IP6T_FRAG_MF) &&
		  !(ntohs(fh->frag_off) & IP6_MF)) &&
		!((fraginfo->flags & IP6T_FRAG_NMF) &&
		  (ntohs(fh->frag_off) & IP6_MF));
}
/* checkentry callback: reject rules that set unknown inversion flags. */
static int frag_mt6_check(const struct xt_mtchk_param *par)
{
	const struct ip6t_frag *fraginfo = par->matchinfo;

	if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
		pr_debug("unknown flags %X\n", fraginfo->invflags);
		return -EINVAL;
	}
	return 0;
}
/* xtables match registration descriptor for "-m frag". */
static struct xt_match frag_mt6_reg __read_mostly = {
	.name		= "frag",
	.family		= NFPROTO_IPV6,
	.match		= frag_mt6,
	.matchsize	= sizeof(struct ip6t_frag),
	.checkentry	= frag_mt6_check,
	.me		= THIS_MODULE,
};
/* Module init: register the match with the xtables core. */
static int __init frag_mt6_init(void)
{
	return xt_register_match(&frag_mt6_reg);
}
/* Module exit: unregister the match. */
static void __exit frag_mt6_exit(void)
{
	xt_unregister_match(&frag_mt6_reg);
}

module_init(frag_mt6_init);
module_exit(frag_mt6_exit);
|
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014-2018 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
#include "display/intel_fbc_regs.h"
/**
* DOC: Hardware workarounds
*
* Hardware workarounds are register programming documented to be executed in
* the driver that fall outside of the normal programming sequences for a
* platform. There are some basic categories of workarounds, depending on
* how/when they are applied:
*
* - Context workarounds: workarounds that touch registers that are
* saved/restored to/from the HW context image. The list is emitted (via Load
* Register Immediate commands) once when initializing the device and saved in
* the default context. That default context is then used on every context
* creation to have a "primed golden context", i.e. a context image that
* already contains the changes needed to all the registers.
*
* Context workarounds should be implemented in the \*_ctx_workarounds_init()
* variants respective to the targeted platforms.
*
* - Engine workarounds: the list of these WAs is applied whenever the specific
* engine is reset. It's also possible that a set of engine classes share a
* common power domain and they are reset together. This happens on some
 * platforms with render and compute engines. In this case (at least) one of
 * them needs to keep the workaround programming: the approach taken in the
* driver is to tie those workarounds to the first compute/render engine that
* is registered. When executing with GuC submission, engine resets are
* outside of kernel driver control, hence the list of registers involved in
* written once, on engine initialization, and then passed to GuC, that
* saves/restores their values before/after the reset takes place. See
* ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
*
* Workarounds for registers specific to RCS and CCS should be implemented in
* rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
* registers belonging to BCS, VCS or VECS should be implemented in
* xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
 * engine's MMIO range but that are part of the common RCS/CCS reset domain
* should be implemented in general_render_compute_wa_init(). The settings
* about the CCS load balancing should be added in ccs_engine_wa_mode().
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
*
* GT workarounds should be implemented in the \*_gt_workarounds_init()
* variants respective to the targeted platforms.
*
* - Register whitelist: some workarounds need to be implemented in userspace,
* but need to touch privileged registers. The whitelist in the kernel
* instructs the hardware to allow the access to happen. From the kernel side,
* this is just a special case of a MMIO workaround (as we write the list of
* these to/be-whitelisted registers to some special HW registers).
*
* Register whitelisting should be done in the \*_whitelist_build() variants
* respective to the targeted platforms.
*
* - Workaround batchbuffers: buffers that get executed automatically by the
* hardware on every HW context restore. These buffers are created and
* programmed in the default context so the hardware always go through those
* programming sequences when switching contexts. The support for workaround
 * batchbuffers is enabled by these hardware mechanisms:
*
* #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
* context, pointing the hardware to jump to that location when that offset
* is reached in the context restore. Workaround batchbuffer in the driver
* currently uses this mechanism for all platforms.
*
* #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
* pointing the hardware to a buffer to continue executing after the
* engine registers are restored in a context restore sequence. This is
* currently not used in the driver.
*
* - Other: There are WAs that, due to their nature, cannot be applied from a
* central place. Those are peppered around the rest of the code, as needed.
* Workarounds related to the display IP are the main example.
*
* .. [1] Technically, some registers are powercontext saved & restored, so they
* survive a suspend/resume. In practice, writing them again is not too
* costly and simplifies things, so it's the approach taken in the driver.
*/
/* Begin building a workaround list; names are used only for logging. */
static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
			  const char *name, const char *engine_name)
{
	wal->gt = gt;
	wal->name = name;
	wal->engine_name = engine_name;
}
/* Growth granularity of the workaround array (entries). */
#define WA_LIST_CHUNK (1 << 4)

/*
 * Finalize a workaround list: shrink the backing array to the exact entry
 * count (the shrink is best-effort; on allocation failure the oversized
 * array is simply kept) and log a summary.
 */
static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup_array(wal->list, wal->count,
						     sizeof(*list), GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
	       wal->wa_count, wal->name, wal->engine_name);
}
/*
 * Compute the union of forcewake domains needed to read-modify-write every
 * register in the list, so forcewake can be taken once for the whole pass.
 */
static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}
/*
 * Insert a workaround into the list, keeping it sorted by register offset.
 *
 * The array grows in WA_LIST_CHUNK steps.  If an entry for the same register
 * already exists (found by binary search), the new clr/set/read masks are
 * merged into it - flagging (and discarding) any previously set bits that the
 * new entry would clear.  Otherwise the entry is appended and bubbled into
 * its sorted position.
 */
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	struct drm_i915_private *i915 = wal->gt->i915;
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			drm_err(&i915->drm, "No space for workaround init!\n");
			return;
		}

		if (wal->list) {
			memcpy(list, wal->list, sizeof(*wa) * wal->count);
			kfree(wal->list);
		}

		wal->list = list;
	}

	/* Binary search for an existing entry with the same offset. */
	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			/* Complain if the new clr mask wipes earlier set bits. */
			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				drm_err(&i915->drm,
					"Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					i915_mmio_reg_offset(wa_->reg),
					wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	/* Not found: append, then insertion-sort into place. */
	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}
/* Add a workaround for a regular (non-MCR) register. */
static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.reg  = reg,
		.clr  = clear,
		.set  = set,
		.read = read_mask,
		.masked_reg = masked_reg,
	};

	_wa_add(wal, &wa);
}
/* Add a workaround for a multicast/replicated (MCR) register. */
static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
		       u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.mcr_reg = reg,
		.clr  = clear,
		.set  = set,
		.read = read_mask,
		.masked_reg = masked_reg,
		.is_mcr = 1,
	};

	_wa_add(wal, &wa);
}
/* RMW write: clear @clear then set @set; both are readback-verified. */
static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
	wa_add(wal, reg, clear, set, clear | set, false);
}
/* MCR variant of wa_write_clr_set(). */
static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
{
	wa_mcr_add(wal, reg, clear, set, clear | set, false);
}
/* Overwrite the whole register with @set (clear mask = all bits). */
static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, ~0, set);
}
/* OR @set into the register (clear and set masks identical). */
static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, set, set);
}
/* MCR variant of wa_write_or(). */
static void
wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
	wa_mcr_write_clr_set(wal, reg, set, set);
}
/* Clear the @clr bits of the register (no bits set). */
static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
	wa_write_clr_set(wal, reg, clr, 0);
}
/* MCR variant of wa_write_clr(). */
static void
wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
{
	wa_mcr_write_clr_set(wal, reg, clr, 0);
}
/*
* WA operations on "masked register". A masked register has the upper 16 bits
* documented as "masked" in b-spec. Its purpose is to allow writing to just a
* portion of the register without a rmw: you simply write in the upper 16 bits
* the mask of bits you are going to modify.
*
* The wa_masked_* family of functions already does the necessary operations to
* calculate the mask based on the parameters passed, so user only has to
* provide the lower 16 bits of that register.
*/
/* Set @val bits in a masked register (mask encoded in the upper 16 bits). */
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}
/* MCR variant of wa_masked_en(). */
static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}
/* Clear @val bits in a masked register. */
static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}
/* MCR variant of wa_masked_dis(). */
static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}
/* Program a multi-bit field @mask of a masked register to @val. */
static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
		    u32 mask, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}
/* MCR variant of wa_masked_field_set(). */
static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
			u32 mask, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}
/* Gen6 context workarounds. */
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
/* Gen7 context workarounds. */
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
/* Gen8 context workarounds, shared by BDW and CHV. */
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
		     HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}
/* Broadwell context workarounds, on top of the common gen8 set. */
static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
			 DOP_CLOCK_GATING_DISABLE);

	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
			 GEN8_SAMPLER_POWER_BYPASS_DIS);

	wa_masked_en(wal, HDC_CHICKEN0,
		     /* WaForceContextSaveRestoreNonCoherent:bdw */
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
		     (INTEL_INFO(i915)->gt == 3 ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
/* Cherryview context workarounds, on top of the common gen8 set. */
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
/*
 * Common context workarounds for all gen9 platforms (skl, bxt, kbl, glk,
 * cfl/cml); platform-specific *_ctx_workarounds_init() functions call this
 * first and then add their own entries.
 */
static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
				 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 FLOW_CONTROL_ENABLE |
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			 GEN9_ENABLE_YV12_BUGFIX |
			 GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	wa_masked_en(wal, CACHE_MODE_1,
		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
				 GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to safe value. Userspace is
	 * still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */
	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct intel_gt *gt = engine->gt;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
for (i = 0; i < 3; i++) {
u8 ss;
/*
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
continue;
/*
* subslice_7eu[i] != 0 (because of the check above) and
* ss_max == 4 (maximum number of subslices possible per slice)
*
* -> 0 <= ss <= 3;
*/
ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
wa_masked_field_set(wal, GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
}
/* Context workarounds for Skylake: common gen9 list plus IZ hash tuning. */
static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}
/* Context workarounds for Broxton; extends the common gen9 list. */
static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
/* Context workarounds for Kabylake; extends the common gen9 list. */
static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl — only from stepping C0 on */
	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
/* Context workarounds for Geminilake; extends the common gen9 list. */
static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
/* Context workarounds for Coffeelake/Cometlake; extends the gen9 list. */
static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
/* Context workarounds for Icelake (and, per the Wa tags, ehl/jsl). */
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
	wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* WaEnableFloatBlendOptimization:icl */
	wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
		   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
		   0 /* write-only, so skip validation */,
		   true);

	/* WaDisableGPGPUMidThreadPreemption:icl */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
			 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

	/* Wa_1604278689:icl,ehl */
	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
			 0,
			 0xFFFFFFFF);

	/* Wa_1406306137:icl,ehl */
	wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
/*
* These settings aren't actually workarounds, but general tuning settings that
* need to be programmed on dg2 platform.
*/
/*
 * DG2 context tuning settings (not workarounds); also reused as the base
 * for xelpg_ctx_gt_tuning_init().
 */
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
	wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
			     REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
	wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
			     FF_MODE2_TDS_TIMER_128);
}
/*
 * Common context workarounds for gen12 platforms (tgl, rkl, dg1, adl-s,
 * adl-p); dg1_ctx_workarounds_init() builds on top of this.
 */
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/*
	 * Wa_1409142259:tgl,dg1,adl-p
	 * Wa_1409347922:tgl,dg1,adl-p
	 * Wa_1409252684:tgl,dg1,adl-p
	 * Wa_1409217633:tgl,dg1,adl-p
	 * Wa_1409207793:tgl,dg1,adl-p
	 * Wa_1409178076:tgl,dg1,adl-p
	 * Wa_1408979724:tgl,dg1,adl-p
	 * Wa_14010443199:tgl,rkl,dg1,adl-p
	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
	 */
	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* WaDisableGPGPUMidThreadPreemption:gen12 */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/*
	 * Wa_16011163337 - GS_TIMER
	 *
	 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
	 * need to program it even on those that don't explicitly list that
	 * workaround.
	 *
	 * Note that the programming of GEN12_FF_MODE2 is further modified
	 * according to the FF_MODE2 guidance given by Wa_1608008084.
	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
	 * value when read from the CPU.
	 *
	 * The default value for this register is zero for all fields.
	 * So instead of doing a RMW we should just write the desired values
	 * for TDS and GS timers. Note that since the readback can't be trusted,
	 * the clear mask is just set to ~0 to make sure other bits are not
	 * inadvertently set. For the same reason read verification is ignored.
	 */
	wa_add(wal,
	       GEN12_FF_MODE2,
	       ~0,
	       FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
	       0, false);

	if (!IS_DG1(i915)) {
		/* Wa_1806527549 */
		wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);

		/* Wa_1606376872 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
	}
}
/* Context workarounds for DG1; extends the common gen12 list. */
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	/* Wa_1409044764 */
	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

	/* Wa_22010493298 */
	wa_masked_en(wal, HIZ_CHICKEN,
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}
/* Context workarounds for DG2; applied on top of the DG2 tuning settings. */
static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	dg2_ctx_gt_tuning_init(engine, wal);

	/* Wa_16013271637:dg2 */
	wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
			 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

	/* Wa_14014947963:dg2 */
	wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);

	/* Wa_18018764978:dg2 */
	wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);

	/* Wa_18019271663:dg2 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);

	/* Wa_14019877138:dg2 */
	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}
/* Xe_LPG context tuning: DG2 tuning plus the DRAW_WATERMARK setting. */
static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;

	dg2_ctx_gt_tuning_init(engine, wal);

	/*
	 * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
	 * gen12_emit_indirect_ctx_rcs() rather than here on some early
	 * steppings.
	 */
	if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	      IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
		wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}
/*
 * Context workarounds for Xe_LPG (GFX IP 12.70/12.71); some entries only
 * apply to the A0 stepping.
 */
static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;

	xelpg_ctx_gt_tuning_init(engine, wal);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_14014947963 */
		wa_masked_field_set(wal, VF_PREEMPTION,
				    PREEMPTION_VERTEX_COUNT, 0x4000);

		/* Wa_16013271637 */
		wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
				 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

		/* Wa_18019627453 */
		wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);

		/* Wa_18018764978 */
		wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
	}

	/* Wa_18019271663 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);

	/* Wa_14019877138 */
	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
					 struct i915_wa_list *wal)
{
	/*
	 * This is a "fake" workaround defined by software to ensure we
	 * maintain reliable, backward-compatible behavior for userspace with
	 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
	 *
	 * The per-context setting of MI_MODE[12] determines whether the bits
	 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
	 * in the traditional manner or whether they should instead use a new
	 * tgl+ meaning that breaks backward compatibility, but allows nesting
	 * into 3rd-level batchbuffers.  When this new capability was first
	 * added in TGL, it remained off by default unless a context
	 * intentionally opted in to the new behavior.  However Xe_HPG now
	 * flips this on by default and requires that we explicitly opt out if
	 * we don't want the new behavior.
	 *
	 * From a SW perspective, we want to maintain the backward-compatible
	 * behavior for userspace, so we'll apply a fake workaround to set it
	 * back to the legacy behavior on platforms where the hardware default
	 * is to break compatibility.  At the moment there is no Linux
	 * userspace that utilizes third-level batchbuffers, so this will avoid
	 * userspace from needing to make any changes.  Using the legacy
	 * meaning is the correct thing to do.  If/when we have userspace
	 * consumers that want to utilize third-level batch nesting, we can
	 * provide a context parameter to allow them to opt-in.
	 */
	wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
}
/*
 * gen12_ctx_gt_mocs_init - program BLIT_CCTL with the uncached MOCS index.
 *
 * Some blitter commands do not have a field for MOCS; those commands use
 * the MOCS index pointed to by BLIT_CCTL, so on copy engines BLIT_CCTL
 * needs to be programmed to un-cached.
 */
static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	u8 uc_mocs;

	if (engine->class != COPY_ENGINE_CLASS)
		return;

	uc_mocs = engine->gt->mocs.uc_index;
	wa_write_clr_set(wal,
			 BLIT_CCTL(engine->mmio_base),
			 BLIT_CCTL_MASK,
			 BLIT_CCTL_MOCS(uc_mocs, uc_mocs));
}
/*
 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
 * defined by the hardware team, but programs general context registers.
 * Adding this context register programming to the context workaround
 * list allows us to use the wa framework for proper application and
 * validation.
 */
/* Fake (software-defined) context workarounds common to all gen12+. */
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
			  struct i915_wa_list *wal)
{
	/* Xe_HPG (12.55+) defaults to the new nested-BB mode; opt back out */
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		fakewa_disable_nestedbb_mode(engine, wal);

	gen12_ctx_gt_mocs_init(engine, wal);
}
/*
 * Build the context workaround list for @engine into @wal.
 *
 * The fake gen12+ workarounds apply to every engine class; the platform
 * dispatch below only applies to the render engine.  The dispatch chain is
 * ordered newest platform first, so each engine matches exactly one entry.
 */
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	wa_init_start(wal, engine->gt, name, engine->name);

	/* Applies to all engines */
	/*
	 * Fake workarounds are not the actual workaround but
	 * programming of context registers using workaround framework.
	 */
	if (GRAPHICS_VER(i915) >= 12)
		gen12_ctx_gt_fake_wa_init(engine, wal);

	if (engine->class != RENDER_CLASS)
		goto done;

	if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
		xelpg_ctx_workarounds_init(engine, wal);
	else if (IS_DG2(i915))
		dg2_ctx_workarounds_init(engine, wal);
	else if (IS_DG1(i915))
		dg1_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 6)
		gen6_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) < 8)
		;	/* no context workarounds before gen6 */
	else
		MISSING_CASE(GRAPHICS_VER(i915));

done:
	wa_init_finish(wal);
}
/* Initialize @engine's context workaround list (engine->ctx_wa_list). */
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}
/*
 * Emit the engine's context workarounds into @rq as a single
 * MI_LOAD_REGISTER_IMM, bracketed by EMIT_BARRIER flushes.
 *
 * For non-masked workarounds the current register value is read (under
 * forcewake, with the MCR lock taken before the uncore lock) so the
 * clr/set masks can be applied as a read-modify-write.  On DG2 and
 * GFX IP 12.70-12.74 render engines, 4 extra dwords are reserved for the
 * Wa_14019789679 3DSTATE_MESH_CONTROL emission.
 *
 * Returns 0 on success or a negative errno from the flush/ring calls.
 */
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct intel_uncore *uncore = rq->engine->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	/* Reserve extra space for the Wa_14019789679 dwords emitted below */
	if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
	     IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS)
		cs = intel_ring_begin(rq, (wal->count * 2 + 6));
	else
		cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	fw = wal_get_fw_for_rmw(uncore, wal);

	/* Lock order: MCR lock first, then the uncore lock */
	intel_gt_mcr_lock(wal->gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val;

		/* Skip reading the register if it's not really needed */
		if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
			val = wa->set;
		} else {
			val = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);
			val &= ~wa->clr;
			val |= wa->set;
		}

		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = val;
	}
	*cs++ = MI_NOOP;

	/* Wa_14019789679 */
	if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
	     IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS) {
		*cs++ = CMD_3DSTATE_MESH_CONTROL;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(wal->gt, flags);

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
/* GT workarounds common to gen4 and Ironlake. */
static void
gen4_gt_workarounds_init(struct intel_gt *gt,
			 struct i915_wa_list *wal)
{
	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}
/* GT workarounds for G4x; extends the gen4 list. */
static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen4_gt_workarounds_init(gt, wal);

	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}
/* GT workarounds for Ironlake; extends the G4x list. */
static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	g4x_gt_workarounds_init(gt, wal);

	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}
/* No GT workarounds are currently needed on Sandybridge. */
static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}
/* GT workarounds for Ivybridge. */
static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	wa_masked_dis(wal,
		      GEN7_COMMON_SLICE_CHICKEN1,
		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization:ivb */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}
/* GT workarounds for Valleyview. */
static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* WaForceL3Serialization:vlv */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}
/* GT workarounds for Haswell. */
static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

	/* Readback verification is skipped for this register */
	wa_add(wal,
	       HSW_ROW_CHICKEN3, 0,
	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	       0 /* XXX does this reg exist? */, true);

	/* WaVSRefCountFullforceMissDisable:hsw */
	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}
/*
 * Program GEN8_MCR_SELECTOR to point at the first enabled slice/subslice
 * pair so that slice/subslice-specific MMIO reads return valid data.
 */
static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
	unsigned int slice, subslice;
	u32 mcr, mcr_mask;

	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);

	/*
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to an
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. In the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 */
	slice = ffs(sseu->slice_mask) - 1;
	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
	subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
	GEM_BUG_ON(!subslice);
	subslice--;

	/*
	 * We use GEN8_MCR..() macros to calculate the |mcr| value for
	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
	 */
	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;

	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);

	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
/* GT workarounds common to all gen9 platforms. */
static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
	gen9_wa_init_mcr(i915, wal);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}
/* GT workarounds for Skylake; extends the common gen9 list. */
static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
/* GT workarounds for Kabylake; extends the common gen9 list. */
static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableDynamicCreditSharing:kbl — pre-C0 steppings only */
	if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
/* GT workarounds for Geminilake: only the common gen9 entries apply. */
static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);
}
/* GT workarounds for Coffeelake; extends the common gen9 list. */
static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
/*
 * Program @steering_reg with the given slice/subslice pair, encoded in the
 * gen11 MCR format.
 */
static void __set_mcr_steering(struct i915_wa_list *wal,
			       i915_reg_t steering_reg,
			       unsigned int slice, unsigned int subslice)
{
	wa_write_clr_set(wal, steering_reg,
			 GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK,
			 GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice));
}
static void debug_dump_steering(struct intel_gt *gt)
{
struct drm_printer p = drm_dbg_printer(>->i915->drm, DRM_UT_DRIVER,
"MCR Steering:");
if (drm_debug_enabled(DRM_UT_DRIVER))
intel_gt_mcr_report_steering(&p, gt, false);
}
/*
 * Add the default GEN8_MCR_SELECTOR steering workaround and record the
 * chosen slice/subslice as the GT's default steering.
 */
static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
			 unsigned int slice, unsigned int subslice)
{
	__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);

	gt->default_steering.groupid = slice;
	gt->default_steering.instanceid = subslice;

	debug_dump_steering(gt);
}
/* Set up default MCR steering for gen11/gen12 single-slice parts. */
static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned int subslice;

	GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
	GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);

	/*
	 * Although a platform may have subslices, we need to always steer
	 * reads to the lowest instance that isn't fused off.  When Render
	 * Power Gating is enabled, grabbing forcewake will only power up a
	 * single subslice (the "minconfig") if there isn't a real workload
	 * that needs to be run; this means that if we steer register reads to
	 * one of the higher subslices, we run the risk of reading back 0's or
	 * random garbage.
	 */
	subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));

	/*
	 * If the subslice we picked above also steers us to a valid L3 bank,
	 * then we can just rely on the default steering and won't need to
	 * worry about explicitly re-steering L3BANK reads later.
	 */
	if (gt->info.l3bank_mask & BIT(subslice))
		gt->steering_table[L3BANK] = NULL;

	__add_mcr_wa(gt, wal, 0, subslice);
}
/*
 * Set up default and explicit MCR steering for Xe_HP-class platforms,
 * disabling the runtime steering tables for any unit type the chosen
 * default already covers.
 */
static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned long slice, subslice = 0, slice_mask = 0;
	u32 lncf_mask = 0;
	int i;

	/*
	 * On Xe_HP the steering increases in complexity. There are now several
	 * more units that require steering and we're not guaranteed to be able
	 * to find a common setting for all of them. These are:
	 * - GSLICE (fusable)
	 * - DSS (sub-unit within gslice; fusable)
	 * - L3 Bank (fusable)
	 * - MSLICE (fusable)
	 * - LNCF (sub-unit within mslice; always present if mslice is present)
	 *
	 * We'll do our default/implicit steering based on GSLICE (in the
	 * sliceid field) and DSS (in the subsliceid field). If we can
	 * find overlap between the valid MSLICE and/or LNCF values with
	 * a suitable GSLICE, then we can just re-use the default value and
	 * skip and explicit steering at runtime.
	 *
	 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
	 * a valid sliceid value. DSS steering is the only type of steering
	 * that utilizes the 'subsliceid' bits.
	 *
	 * Also note that, even though the steering domain is called "GSlice"
	 * and it is encoded in the register using the gslice format, the spec
	 * says that the combined (geometry | compute) fuse should be used to
	 * select the steering.
	 */

	/* Find the potential gslice candidates */
	slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
						       GEN_DSS_PER_GSLICE);

	/*
	 * Find the potential LNCF candidates.  Either LNCF within a valid
	 * mslice is fine.
	 */
	for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
		lncf_mask |= (0x3 << (i * 2));

	/*
	 * Are there any sliceid values that work for both GSLICE and LNCF
	 * steering?
	 */
	if (slice_mask & lncf_mask) {
		slice_mask &= lncf_mask;
		gt->steering_table[LNCF] = NULL;
	}

	/* How about sliceid values that also work for MSLICE steering? */
	if (slice_mask & gt->info.mslice_mask) {
		slice_mask &= gt->info.mslice_mask;
		gt->steering_table[MSLICE] = NULL;
	}

	slice = __ffs(slice_mask);
	subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
		GEN_DSS_PER_GSLICE;

	__add_mcr_wa(gt, wal, slice, subslice);

	/*
	 * SQIDI ranges are special because they use different steering
	 * registers than everything else we work with.  On XeHP SDV and
	 * DG2-G10, any value in the steering registers will work fine since
	 * all instances are present, but DG2-G11 only has SQIDI instances at
	 * ID's 2 and 3, so we need to steer to one of those.  For simplicity
	 * we'll just steer to a hardcoded "2" since that value will work
	 * everywhere.
	 */
	__set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
	__set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);

	/*
	 * On DG2, GAM registers have a dedicated steering control register
	 * and must always be programmed to a hardcoded groupid of "1."
	 */
	if (IS_DG2(gt->i915))
		__set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
}
/* GT workarounds for Icelake (and, per the Wa tags, ehl/jsl). */
static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	icl_wa_init_mcr(gt, wal);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_clr_set(wal,
			 GEN11_GACB_PERF_CTRL,
			 GEN11_HASH_CTRL_MASK,
			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/*
	 * Wa_1408615072:icl,ehl  (vsunit)
	 * Wa_1407596294:icl,ehl  (hsunit)
	 */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
		    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

	/* Wa_1407352427:icl,ehl */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
		    PSDUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl,ehl */
	wa_mcr_write_or(wal,
			GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
			GWUNIT_CLKGATE_DIS);

	/* Wa_1607087056:icl,ehl,jsl */
	if (IS_ICELAKE(i915) ||
	    ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
	     IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
		wa_write_or(wal,
			    GEN11_SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/*
	 * This is not a documented workaround, but rather an optimization
	 * to reduce sampler power.
	 */
	wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}
/*
* Though there are per-engine instances of these registers,
* they retain their value through engine resets and should
* only be provided on the GT workaround list rather than
* the engine-specific workaround list.
*/
/*
 * Wa_14011060649: disable IECPUNIT clock gating on even-numbered video
 * decode engine instances.
 */
static void
wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct intel_engine_cs *engine;
	int id;

	for_each_engine(engine, gt, id) {
		if (engine->class == VIDEO_DECODE_CLASS &&
		    !(engine->instance % 2))
			wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
				    IECPUNIT_CLKGATE_DIS);
	}
}
/* GT workarounds common to gen12 platforms; DG1 builds on top of this. */
static void
gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	icl_wa_init_mcr(gt, wal);

	/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
	wa_14011060649(gt, wal);

	/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
	wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);

	/*
	 * Wa_14015795083
	 *
	 * Firmware on some gen12 platforms locks the MISCCPCTL register,
	 * preventing i915 from modifying it for this workaround.  Skip the
	 * readback verification for this workaround on debug builds; if the
	 * workaround doesn't stick due to firmware behavior, it's not an error
	 * that we want CI to flag.
	 */
	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
	       0, 0, false);
}
/* GT workarounds for DG1; extends the common gen12 list. */
static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen12_gt_workarounds_init(gt, wal);

	/* Wa_1409420604:dg1 */
	wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
			CPSSUNIT_CLKGATE_DIS);

	/* Wa_1408615072:dg1 */
	/* Empirical testing shows this register is unaffected by engine reset. */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}
/* GT workarounds for DG2. */
static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	xehp_init_mcr(gt, wal);

	/* Wa_14011060649:dg2 */
	wa_14011060649(gt, wal);

	if (IS_DG2_G10(gt->i915)) {
		/* Wa_22010523718:dg2 */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    CG3DDISCFEG_CLKGATE_DIS);

		/* Wa_14011006942:dg2 */
		wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
				DSS_ROUTER_CLKGATE_DIS);
	}

	/* Wa_14014830051:dg2 */
	wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

	/*
	 * Wa_14015795083
	 * Skip verification for possibly locked register.
	 */
	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
	       0, 0, false);

	/* Wa_18018781329 */
	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_1509235366:dg2 */
	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);

	/* Wa_14010648519:dg2 */
	wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
/*
 * GT-level workarounds for Xe_LPG (graphics IP versions 12.70 - 12.74).
 * The stepping-gated entries apply only to A-step hardware of 12.70/12.71.
 */
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Wa_14018575942 / Wa_18018781329 */
	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_22016670082 */
	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_14014830051 */
		wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

		/* Wa_14015795083 */
		wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
	}

	/*
	 * Unlike older platforms, we no longer setup implicit steering here;
	 * all MCR accesses are explicitly steered.
	 */
	debug_dump_steering(gt);
}
/*
 * Wa_16021867713: disable MFX pipe clock gating on every video decode
 * (VDBOX) engine present on this GT.  The register is per-engine, hence
 * the iteration rather than a single write.
 */
static void
wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct intel_engine_cs *engine;
	int id;

	for_each_engine(engine, gt, id)
		if (engine->class == VIDEO_DECODE_CLASS)
			wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
				    MFXPIPE_CLKGATE_DIS);
}
/*
 * GT-level workarounds for the Xe_LPM+ standalone media GT.
 */
static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Wa_16021867713 */
	wa_16021867713(gt, wal);

	/*
	 * Wa_14018778641
	 * Wa_18018781329
	 *
	 * Note that although these registers are MCR on the primary
	 * GT, the media GT's versions are regular singleton registers.
	 */
	wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);

	/*
	 * Wa_14018575942
	 *
	 * Issue is seen on media KPI test running on VDBOX engine
	 * especially VP9 encoding WLs
	 */
	wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_22016670082 */
	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

	debug_dump_steering(gt);
}
/*
* The bspec performance guide has recommended MMIO tuning settings. These
* aren't truly "workarounds" but we want to program them through the
* workaround infrastructure to make sure they're (re)applied at the proper
* times.
*
* The programming in this function is for settings that persist through
* engine resets and also are not part of any engine's register state context.
* I.e., settings that only need to be re-applied in the event of a full GT
* reset.
*/
/*
 * Recommended MMIO tuning (not workarounds) that survives engine resets
 * and is not part of any engine's context image, so it only needs to be
 * re-applied after a full GT reset.
 *
 * The Xe_LPG (12.70..12.74) range and DG2 want identical programming and
 * the two platform checks are mutually exclusive, so a single combined
 * condition programs each register exactly once, as before.
 */
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) ||
	    IS_DG2(i915)) {
		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
	}
}
/*
 * Dispatch to the platform-specific GT workaround initializer.
 *
 * The cascade is ordered most-specific first: e.g. DG2 and DG1 must be
 * matched before the generic GRAPHICS_VER(i915) == 12 case, and the
 * named gen9 derivatives before the plain gen9 fallbacks.  Reordering
 * these checks would silently select the wrong workaround set.
 */
static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	/* Recommended tuning settings are applied on every platform. */
	gt_tuning_settings(gt, wal);

	/* Standalone media GTs have their own, much shorter, list. */
	if (gt->type == GT_MEDIA) {
		if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
			xelpmp_gt_workarounds_init(gt, wal);
		else
			MISSING_CASE(MEDIA_VER_FULL(i915));

		return;
	}

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
		xelpg_gt_workarounds_init(gt, wal);
	else if (IS_DG2(i915))
		dg2_gt_workarounds_init(gt, wal);
	else if (IS_DG1(i915))
		dg1_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_gt_workarounds_init(gt, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_gt_workarounds_init(gt, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(gt, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(gt, wal);
	else if (IS_BROXTON(i915))
		gen9_gt_workarounds_init(gt, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(gt, wal);
	else if (IS_HASWELL(i915))
		hsw_gt_workarounds_init(gt, wal);
	else if (IS_VALLEYVIEW(i915))
		vlv_gt_workarounds_init(gt, wal);
	else if (IS_IVYBRIDGE(i915))
		ivb_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 6)
		snb_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 5)
		ilk_gt_workarounds_init(gt, wal);
	else if (IS_G4X(i915))
		g4x_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 4)
		gen4_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) <= 8)
		; /* remaining gen5..gen8 platforms: no GT-level workarounds */
	else
		MISSING_CASE(GRAPHICS_VER(i915));
}
void intel_gt_init_workarounds(struct intel_gt *gt)
{
struct i915_wa_list *wal = >->wa_list;
wa_init_start(wal, gt, "GT", "global");
gt_init_workarounds(gt, wal);
wa_init_finish(wal);
}
/*
 * Check that a single workaround is still in effect.
 *
 * Only the bits selected by wa->read are compared; any mismatch against
 * the expected wa->set value is reported via gt_err() and the function
 * returns false.  @name and @from identify the list and the call site
 * for the error message.
 */
static bool
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
	  const char *name, const char *from)
{
	/* Fast path: all readable bits match the expected value. */
	if (((cur ^ wa->set) & wa->read) == 0)
		return true;

	gt_err(gt,
	       "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
	       name, from, i915_mmio_reg_offset(wa->reg),
	       cur, cur & wa->read, wa->set & wa->read);

	return false;
}
/*
 * Program every workaround in @wal to the hardware.
 *
 * Locking order is MCR lock first, then the uncore spinlock; forcewake
 * for all domains touched by the list is grabbed once up front so the
 * per-register writes can use the cheaper _fw accessors.  Read-modify-
 * write is open-coded because MCR registers need explicitly steered
 * reads paired with multicast writes.
 */
static void wa_list_apply(const struct i915_wa_list *wal)
{
	struct intel_gt *gt = wal->gt;
	struct intel_uncore *uncore = gt->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val, old = 0;

		/* open-coded rmw due to steering */
		if (wa->clr)
			old = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);
		val = (old & ~wa->clr) | wa->set;
		/* Skip the write when the rmw result is a no-op. */
		if (val != old || !wa->clr) {
			if (wa->is_mcr)
				intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
			else
				intel_uncore_write_fw(uncore, wa->reg, val);
		}

		/* On debug builds, read back and confirm the value stuck. */
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
			u32 val = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);

			wa_verify(gt, wa, val, wal->name, "application");
		}
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);
}
void intel_gt_apply_workarounds(struct intel_gt *gt)
{
wa_list_apply(>->wa_list);
}
/*
 * Read back every register in @wal and verify the workaround bits are
 * still set, reporting any that were lost (attributed to @from).
 *
 * Uses the same lock ordering and forcewake strategy as wa_list_apply():
 * MCR lock, then uncore lock, with forcewake held across the whole scan.
 * Returns true only if every entry verified clean.
 */
static bool wa_list_verify(struct intel_gt *gt,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct intel_uncore *uncore = gt->uncore;
	struct i915_wa *wa;
	enum forcewake_domains fw;
	unsigned long flags;
	unsigned int i;
	bool ok = true;

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg),
				wal->name, from);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);

	return ok;
}
bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
return wa_list_verify(gt, >->wa_list, from);
}
/*
 * Validate the flag bits for a RING_FORCE_TO_NONPRIV whitelist entry:
 * no bits outside the defined mask, and the access field must not use
 * the one invalid encoding of its enum.
 */
__maybe_unused
static bool is_nonpriv_flags_valid(u32 flags)
{
	/* Any bit outside the defined flag mask invalidates the entry. */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: only 3 of the 4 access-field encodings are legal. */
	return (flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) !=
	       RING_FORCE_TO_NONPRIV_ACCESS_INVALID;
}
/*
 * Append a register to an engine's user-accessible (non-priv) whitelist,
 * encoding @flags into the low bits of the register offset.  Silently
 * drops the entry (with a debug-only warning) if the whitelist is full
 * or the flags are malformed.
 */
static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS) ||
	    GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg = reg;
	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}
/*
 * MCR (multicast) variant of whitelist_reg_ext(): append a steered
 * register to the non-priv whitelist with @flags folded into the offset.
 * Drops the entry with a debug-only warning when the whitelist is full
 * or the flags are malformed.
 */
static void
whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.is_mcr = 1,
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS) ||
	    GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.mcr_reg = reg;
	wa.mcr_reg.reg |= flags;
	_wa_add(wal, &wa);
}
/* Whitelist @reg with the default read/write access flags. */
static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
/* Whitelist MCR register @reg with the default read/write access flags. */
static void
whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
{
	whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
/*
 * Common gen9 whitelist entries, shared by the skl/bxt/kbl/glk/cfl
 * builders.  Entry order matters: each call consumes the next
 * RING_FORCE_TO_NONPRIV slot.
 */
static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}
/*
 * Skylake whitelist: the common gen9 entries plus one SKL-specific
 * register.  Only the render engine gets a whitelist here.
 */
static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_mcr_reg(&engine->whitelist, GEN8_L3SQCREG4);
}
/*
 * Broxton whitelist: just the common gen9 entries, render engine only.
 */
static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);
}
/*
 * Kabylake whitelist: the common gen9 entries plus one KBL-specific
 * register.  Only the render engine gets a whitelist here.
 */
static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_mcr_reg(&engine->whitelist, GEN8_L3SQCREG4);
}
/*
 * Geminilake whitelist: common gen9 entries plus the slice-common ECO
 * chicken register, render engine only.
 */
static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(wal);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(wal, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
/*
 * Coffeelake whitelist: common gen9 entries plus read-only access to
 * the four contiguous PS invocation/depth counters, render engine only.
 */
static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(wal);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 register which are next to one another :
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(wal, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}
/*
 * Grant userspace read access to the per-engine context timestamp
 * register on every engine class except render.
 */
static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
{
	if (engine->class == RENDER_CLASS)
		return;

	whitelist_reg_ext(&engine->whitelist,
			  RING_CTX_TIMESTAMP(engine->mmio_base),
			  RING_FORCE_TO_NONPRIV_ACCESS_RD);
}
/* Cometlake whitelist: CFL entries plus readable context timestamps. */
static void cml_whitelist_build(struct intel_engine_cs *engine)
{
	allow_read_ctx_timestamp(engine);

	cfl_whitelist_build(engine);
}
/*
 * Icelake (gen11) whitelist.  Render gets the UMD-tunable chicken
 * registers and the PS counters; video decode engines get read access
 * to three HuC status registers at fixed offsets from the engine base.
 */
static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 register which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		break;
	}
}
/*
 * Tigerlake-era (gen12) whitelist; only the render engine gets extra
 * user-accessible registers beyond the context timestamp.
 */
static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
		 * Wa_1408556865:tgl
		 *
		 * This covers 4 registers which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);

		/*
		 * Wa_1808121037:tgl
		 * Wa_14012131227:dg1
		 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
		 */
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);

		/* Wa_1806527549:tgl */
		whitelist_reg(w, HIZ_CHICKEN);

		/* Required by recommended tuning setting (not a workaround) */
		whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);

		break;
	default:
		break;
	}
}
/*
 * DG2 whitelist: two tuning-related chicken registers, render engine
 * only.  A guard clause replaces the original single-case switch.
 */
static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	/* Required by recommended tuning setting (not a workaround) */
	whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
	whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
}
/*
 * Xe_LPG whitelist: identical entries to DG2's, render engine only.
 * A guard clause replaces the original single-case switch.
 */
static void xelpg_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	/* Required by recommended tuning setting (not a workaround) */
	whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
	whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
}
/*
 * Build the per-engine non-priv register whitelist.
 *
 * Like gt_init_workarounds(), the platform cascade is ordered
 * most-specific first (CML before CFL, DG2 before generic gen12, etc.);
 * reordering the checks would pick the wrong builder.
 */
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, engine->gt, "whitelist", engine->name);

	if (engine->gt->type == GT_MEDIA)
		; /* none yet */
	else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
		xelpg_whitelist_build(engine);
	else if (IS_DG2(i915))
		dg2_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 12)
		tgl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 11)
		icl_whitelist_build(engine);
	else if (IS_COMETLAKE(i915))
		cml_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) <= 8)
		; /* gen8 and earlier expose no non-priv whitelist */
	else
		MISSING_CASE(GRAPHICS_VER(i915));

	wa_init_finish(w);
}
/*
 * Program the engine's whitelist into the RING_FORCE_TO_NONPRIV slots.
 *
 * Entry i of the list goes into slot i, so list order determines slot
 * assignment.  Unused slots are pointed at the harmless RING_NOPID
 * register so stale values cannot whitelist anything unintended.
 */
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}
/*
 * engine_fake_wa_init() is a placeholder for programming registers
 * that are not part of an official workaround defined by the
 * hardware team.
 * Programming those registers through the workaround framework lets
 * us reuse its machinery for proper application and verification.
 */
/*
 * Program default command-streamer MOCS selection via the workaround
 * framework so it is re-applied and verified like a real workaround.
 */
static void
engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	u8 mocs_w, mocs_r;

	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be used
	 * by the command streamer when executing commands that don't have
	 * a way to explicitly specify a MOCS setting. The default should
	 * usually reference whichever MOCS entry corresponds to uncached
	 * behavior, although use of a WB cached entry is recommended by the
	 * spec in certain circumstances on specific platforms.
	 */
	if (GRAPHICS_VER(engine->i915) >= 12) {
		mocs_r = engine->gt->mocs.uc_index;
		mocs_w = engine->gt->mocs.uc_index;

		if (HAS_L3_CCS_READ(engine->i915) &&
		    engine->class == COMPUTE_CLASS) {
			mocs_r = engine->gt->mocs.wb_index;

			/*
			 * Even on the few platforms where MOCS 0 is a
			 * legitimate table entry, it's never the correct
			 * setting to use here; we can assume the MOCS init
			 * just forgot to initialize wb_index.
			 */
			drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
		}

		wa_masked_field_set(wal,
				    RING_CMD_CCTL(engine->mmio_base),
				    CMD_CCTL_MOCS_MASK,
				    CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
	}
}
/*
 * Render engine workarounds, newest platforms first.
 *
 * The body is one long, order-significant table of register programming
 * keyed on platform/stepping checks; entries must not be reordered or
 * merged, since several registers are touched by more than one platform
 * group and the list machinery records them in sequence.
 */
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_gt *gt = engine->gt;

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_22014600077 */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
				 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_1509727124 */
		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
				 SC_DISABLE_POWER_OPTIMIZATION_EBB);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22012856258 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
				 GEN12_DISABLE_READ_SUPPRESSION);
	}

	if (IS_DG2(i915)) {
		/*
		 * Wa_22010960976:dg2
		 * Wa_14013347512:dg2
		 */
		wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
				  LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
	}

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
	    IS_DG2(i915)) {
		/* Wa_14015150844 */
		wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
			   _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
			   0, true);
	}

	if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/*
		 * Wa_1606700617:tgl,dg1,adl-p
		 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
		 * Wa_14010826681:tgl,dg1,rkl,adl-p
		 * Wa_18019627453:dg2
		 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);

		/*
		 * Wa_1407928979:tgl A*
		 * Wa_18011464164:tgl[B0+],dg1[B0+]
		 * Wa_22010931296:tgl[B0+],dg1[B0+]
		 * Wa_14010919138:rkl,dg1,adl-s,adl-p
		 */
		wa_write_or(wal, GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
		wa_mcr_masked_en(wal,
				 GEN10_SAMPLER_MODE,
				 ENABLE_SMALLPL);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1409804808 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
				 GEN12_PUSH_CONST_DEREF_HOLD_DIS);

		/* Wa_14010229206 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
	}

	if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
		/*
		 * Wa_1607297627
		 *
		 * On TGL and RKL there are multiple entries for this WA in the
		 * BSpec; some indicate this is an A0-only WA, others indicate
		 * it applies to all steppings so we trust the "all steppings."
		 */
		wa_masked_en(wal,
			     RING_PSMI_CTL(RENDER_RING_BASE),
			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
	}

	if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
		/*
		 * "Disable Repacking for Compression (masked R/W access)
		 *  before rendering compressed surfaces for display."
		 */
		wa_masked_en(wal, CACHE_MODE_0_GEN7,
			     DISABLE_REPACKING_FOR_COMPRESSION);
	}

	if (GRAPHICS_VER(i915) == 11) {
		/* This is not an Wa. Enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_clr_set(wal,
				 GEN8_GARBCNTL,
				 GEN11_HASH_CTRL_EXCL_MASK,
				 GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_clr_set(wal,
				 GEN11_GLBLINVL,
				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		wa_mcr_write_or(wal,
				GEN8_L3SQCREG4,
				GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_mcr_write_clr_set(wal,
				     GEN11_SCRATCH2,
				     GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				     0);

		/* WaEnable32PlaneMode:icl */
		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
			     GEN11_ENABLE_32_PLANE_MODE);

		/*
		 * Wa_1408767742:icl[a2..forever],ehl[all]
		 * Wa_1605460711:icl[a0..c0]
		 */
		wa_write_or(wal,
			    GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_22010271021 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	/*
	 * Intel platforms that support fine-grained preemption (i.e., gen9 and
	 * beyond) allow the kernel-mode driver to choose between two different
	 * options for controlling preemption granularity and behavior.
	 *
	 * Option 1 (hardware default):
	 *   Preemption settings are controlled in a global manner via
	 *   kernel-only register CS_DEBUG_MODE1 (0x20EC).  Any granularity
	 *   and settings chosen by the kernel-mode driver will apply to all
	 *   userspace clients.
	 *
	 * Option 2:
	 *   Preemption settings are controlled on a per-context basis via
	 *   register CS_CHICKEN1 (0x2580).  CS_CHICKEN1 is saved/restored on
	 *   context switch and is writable by userspace (e.g., via
	 *   MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
	 *   which allows different userspace drivers/clients to select
	 *   different settings, or to change those settings on the fly in
	 *   response to runtime needs.  This option was known by name
	 *   "FtrPerCtxtPreemptionGranularityControl" at one time, although
	 *   that name is somewhat misleading as other non-granularity
	 *   preemption settings are also impacted by this decision.
	 *
	 * On Linux, our policy has always been to let userspace drivers
	 * control preemption granularity/settings (Option 2).  This was
	 * originally mandatory on gen9 to prevent ABI breakage (old gen9
	 * userspace developed before object-level preemption was enabled would
	 * not behave well if i915 were to go with Option 1 and enable that
	 * preemption in a global manner).  On gen9 each context would have
	 * object-level preemption disabled by default (see
	 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
	 * userspace drivers could opt-in to object-level preemption as they
	 * saw fit.  For post-gen9 platforms, we continue to utilize Option 2;
	 * even though it is no longer necessary for ABI compatibility when
	 * enabling a new platform, it does ensure that userspace will be able
	 * to implement any workarounds that show up requiring temporary
	 * adjustments to preemption behavior at runtime.
	 *
	 * Notes/Workarounds:
	 *  - Wa_14015141709:  On DG2 and early steppings of MTL,
	 *      CS_CHICKEN1[0] does not disable object-level preemption as
	 *      it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
	 *      using Option 1).  Effectively this means userspace is unable
	 *      to disable object-level preemption on these platforms/steppings
	 *      despite the setting here.
	 *
	 *  - Wa_16013994831:  May require that userspace program
	 *      CS_CHICKEN1[10] when certain runtime conditions are true.
	 *      Userspace requires Option 2 to be in effect for their update of
	 *      CS_CHICKEN1[10] to be effective.
	 *
	 * Other workarounds may appear in the future that will also require
	 * Option 2 behavior to allow proper userspace implementation.
	 */
	if (GRAPHICS_VER(i915) >= 9)
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);

	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (GRAPHICS_VER(i915) == 9) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_mcr_write_or(wal,
				BDW_SCRATCH1,
				GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_mcr_write_clr_set(wal,
					     GEN8_L3SQCREG1,
					     L3_PRIO_CREDITS_MASK,
					     L3_GENERAL_PRIO_CREDITS(62) |
					     L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_mcr_write_or(wal,
				GEN8_L3SQCREG4,
				GEN8_LQSC_FLUSH_COHERENT_LINES);

		/* Disable atomics in L3 to prevent unrecoverable hangs */
		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
				     GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
				     EVICTION_PERF_FIX_ENABLE, 0);
	}

	if (IS_HASWELL(i915)) {
		/* WaSampleCChickenBitEnable:hsw */
		wa_masked_en(wal,
			     HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);

		wa_masked_dis(wal,
			      CACHE_MODE_0_GEN7,
			      /* enable HiZ Raw Stall Optimization */
			      HIZ_RAW_STALL_OPT_DISABLE);
	}

	if (IS_VALLEYVIEW(i915)) {
		/* WaDisableEarlyCull:vlv */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaPsdDispatchEnable:vlv */
		/* WaDisablePSDDualDispatchEnable:vlv */
		wa_masked_en(wal,
			     GEN7_HALF_SLICE_CHICKEN1,
			     GEN7_MAX_PS_THREAD_DEP |
			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (IS_IVYBRIDGE(i915)) {
		/* WaDisableEarlyCull:ivb */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		if (0) { /* causes HiZ corruption on ivb:gt1 */
			/* enable HiZ Raw Stall Optimization */
			wa_masked_dis(wal,
				      CACHE_MODE_0_GEN7,
				      HIZ_RAW_STALL_OPT_DISABLE);
		}

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaDisablePSDDualDispatchEnable:ivb */
		if (INTEL_INFO(i915)->gt == 1)
			wa_masked_en(wal,
				     GEN7_HALF_SLICE_CHICKEN1,
				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (GRAPHICS_VER(i915) == 7) {
		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
		wa_masked_en(wal,
			     RING_MODE_GEN7(RENDER_RING_BASE),
			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);

		/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
		wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);

		/*
		 * BSpec says this must be set, even though
		 * WaDisable4x2SubspanOptimization:ivb,hsw
		 * WaDisable4x2SubspanOptimization isn't listed for VLV.
		 */
		wa_masked_en(wal,
			     CACHE_MODE_1,
			     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN7_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);
	}

	if (IS_GRAPHICS_VER(i915, 6, 7))
		/*
		 * We need to disable the AsyncFlip performance optimisations in
		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
		 * already be programmed to '1' on all products.
		 *
		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
		 */
		wa_masked_en(wal,
			     RING_MI_MODE(RENDER_RING_BASE),
			     ASYNC_FLIP_PERF_DISABLE);

	if (GRAPHICS_VER(i915) == 6) {
		/*
		 * Required for the hardware to program scanline values for
		 * waiting
		 * WaEnableFlushTlbInvalidationMode:snb
		 */
		wa_masked_en(wal,
			     GFX_MODE,
			     GFX_TLB_INVALIDATE_EXPLICIT);

		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
		wa_masked_en(wal,
			     _3D_CHICKEN,
			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);

		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
			     /*
			      * Bspec says:
			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
			      * to normal and 3DSTATE_SF number of SF output attributes
			      * is more than 16."
			      */
			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN6_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);

		/* WaDisable_RenderCache_OperationalFlush:snb */
		wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);

		/*
		 * From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		wa_masked_dis(wal,
			      CACHE_MODE_0,
			      CM0_STC_EVICT_DISABLE_LRA_SNB);
	}

	if (IS_GRAPHICS_VER(i915, 4, 6))
		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
		wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
		       /* XXX bit doesn't stick on Broadwater */
		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);

	if (GRAPHICS_VER(i915) == 4)
		/*
		 * Disable CONSTANT_BUFFER before it is loaded from the context
		 * image. For as it is loaded, it is executed and the stored
		 * address may no longer be valid, leading to a GPU hang.
		 *
		 * This imposes the requirement that userspace reload their
		 * CONSTANT_BUFFER on every batch, fortunately a requirement
		 * they are already accustomed to from before contexts were
		 * enabled.
		 */
		wa_add(wal, ECOSKPD(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
		       0 /* XXX bit doesn't stick on Broadwater */,
		       true);
}
/*
 * Workarounds for the non-render (video/blitter/copy) engines.
 */
static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}

	/* Wa_16018031267, Wa_16018063123 */
	if (NEEDS_FASTCOLOR_BLT_WABB(engine))
		wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
				    XEHP_BLITTER_SCHEDULING_MODE_MASK,
				    XEHP_BLITTER_ROUND_ROBIN_MODE);
}
/*
 * Per-engine workarounds for compute (CCS) engines.  Currently empty;
 * shared render/compute-domain workarounds live in
 * general_render_compute_wa_init() instead.
 */
static void
ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	/* boilerplate for any CCS engine workaround */
}
/*
* The bspec performance guide has recommended MMIO tuning settings. These
* aren't truly "workarounds" but we want to program them with the same
* workaround infrastructure to ensure that they're automatically added to
* the GuC save/restore lists, re-applied at the right times, and checked for
* any conflicting programming requested by real workarounds.
*
* Programming settings should be added here only if their registers are not
* part of an engine's register state context. If a register is part of a
* context, then any tuning settings should be programmed in an appropriate
* function invoked by __intel_engine_init_ctx_wa().
*/
/*
 * Recommended (non-workaround) tuning for the shared render/compute
 * domain.  Routed through the workaround lists so the settings land on
 * the GuC save/restore lists and get re-applied/verified automatically.
 */
static void
add_render_compute_tuning_settings(struct intel_gt *gt,
				   struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
		wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);

	/*
	 * This tuning setting proves beneficial only on ATS-M designs; the
	 * default "age based" setting is optimal on regular DG2 and other
	 * platforms.
	 */
	if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
		wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
					THREAD_EX_ARB_MODE_RR_AFTER_DEP);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
		wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
/*
 * DG2-only CCS mode programming: disable automatic CCS load balancing
 * and pin all slices to a single compute engine.
 */
static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u32 mode;

	if (!IS_DG2(gt->i915))
		return;

	/*
	 * Wa_14019159160: This workaround, along with others, leads to
	 * significant challenges in utilizing load balancing among the
	 * CCS slices. Consequently, an architectural decision has been
	 * made to completely disable automatic CCS load balancing.
	 */
	wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);

	/*
	 * After having disabled automatic load balancing we need to
	 * assign all slices to a single CCS. We will call it CCS mode 1
	 */
	mode = intel_gt_apply_ccs_mode(gt);
	wa_masked_en(wal, XEHP_CCS_MODE, mode);
}
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
* specific engine. Since all render+compute engines get reset
* together, and the contents of these registers are lost during
* the shared render domain reset, we'll define such workarounds
* here and then add them to just a single RCS or CCS engine's
* workaround list (whichever engine has the XXXX flag).
*/
static void
general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_gt *gt = engine->gt;

	/* Non-workaround tuning settings live in a separate helper. */
	add_render_compute_tuning_settings(gt, wal);

	if (GRAPHICS_VER(i915) >= 11) {
		/* This is not a Wa (although referred to as
		 * WaSetInidrectStateOverride in places), this allows
		 * applications that reference sampler states through
		 * the BindlessSamplerStateBaseAddress to have their
		 * border color relative to DynamicStateBaseAddress
		 * rather than BindlessSamplerStateBaseAddress.
		 *
		 * Otherwise SAMPLER_STATE border colors have to be
		 * copied in multiple heaps (DynamicStateBaseAddress &
		 * BindlessSamplerStateBaseAddress)
		 *
		 * BSpec: 46052
		 */
		wa_mcr_masked_en(wal,
				 GEN10_SAMPLER_MODE,
				 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
	}

	/* 12.70 B0+, 12.71 B0+, and all of 12.74 */
	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
	    IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) {
		/* Wa_14017856879 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);

		/* Wa_14020495402 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING);
	}

	/* Pre-B0 steppings of 12.70 / 12.71 only. */
	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
		/*
		 * Wa_14017066071
		 * Wa_14017654203
		 */
		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
				 MTL_DISABLE_SAMPLER_SC_OOO);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
		/* Wa_22015279794 */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
				 DISABLE_PREFETCH_INTO_IC);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22013037850 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
				DISABLE_128B_EVICTION_COMMAND_UDW);

		/* Wa_18017747507 */
		wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22014226127 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
	}

	if (IS_DG2(i915)) {
		/* Wa_14015227452:dg2,pvc */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);

		/*
		 * Wa_16011620976:dg2_g11
		 * Wa_22015475538:dg2
		 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);

		/* Wa_18028616096 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
	}

	if (IS_DG2_G11(i915)) {
		/*
		 * Wa_22012826095:dg2
		 * Wa_22013059131:dg2
		 */
		wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
				     MAXREQS_PER_BANK,
				     REG_FIELD_PREP(MAXREQS_PER_BANK, 2));

		/* Wa_22013059131:dg2 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
				FORCE_1_SUB_MESSAGE_PER_FRAGMENT);

		/*
		 * Wa_22012654132
		 *
		 * Note that register 0xE420 is write-only and cannot be read
		 * back for verification on DG2 (due to Wa_14012342262), so
		 * we need to explicitly skip the readback.
		 */
		wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
			   _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
			   0 /* write-only, so skip validation */,
			   true);
	}
}
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	/* Nothing to apply on pre-gen4 hardware. */
	if (GRAPHICS_VER(engine->i915) < 4)
		return;

	engine_fake_wa_init(engine, wal);

	/*
	 * These are common workarounds that just need to applied
	 * to a single RCS/CCS engine's workaround list since
	 * they're reset as part of the general render domain reset.
	 */
	if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
		general_render_compute_wa_init(engine, wal);
		ccs_engine_wa_mode(engine, wal);
	}

	/* Per-class workarounds. */
	switch (engine->class) {
	case COMPUTE_CLASS:
		ccs_engine_wa_init(engine, wal);
		break;
	case RENDER_CLASS:
		rcs_engine_wa_init(engine, wal);
		break;
	default:
		xcs_engine_wa_init(engine, wal);
		break;
	}
}
/* Build (but do not apply) the per-engine workaround list. */
void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	wa_init_start(&engine->wa_list, engine->gt, "engine", engine->name);
	engine_init_workarounds(engine, &engine->wa_list);
	wa_init_finish(&engine->wa_list);
}
/* Program the previously built per-engine workaround list to hardware. */
void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(&engine->wa_list);
}
/*
 * Gen8+ register ranges steered by the MCR selector (see mcr_range()).
 * Zero-initialized entry terminates the table.
 */
static const struct i915_range mcr_ranges_gen8[] = {
	{ .start = 0x5500, .end = 0x55ff },
	{ .start = 0x7000, .end = 0x7fff },
	{ .start = 0x9400, .end = 0x97ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xe000, .end = 0xe7ff },
	{},
};
/* Gen12 MCR-steered register ranges; zero entry terminates the table. */
static const struct i915_range mcr_ranges_gen12[] = {
	{ .start = 0x8150, .end = 0x815f },
	{ .start = 0x9520, .end = 0x955f },
	{ .start = 0xb100, .end = 0xb3ff },
	{ .start = 0xde80, .end = 0xe8ff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};
/* Xe_HP (12.55+) MCR-steered register ranges; zero entry terminates. */
static const struct i915_range mcr_ranges_xehp[] = {
	{ .start = 0x4000, .end = 0x4aff },
	{ .start = 0x5200, .end = 0x52ff },
	{ .start = 0x5400, .end = 0x7fff },
	{ .start = 0x8140, .end = 0x815f },
	{ .start = 0x8c80, .end = 0x8dff },
	{ .start = 0x94d0, .end = 0x955f },
	{ .start = 0x9680, .end = 0x96ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xc800, .end = 0xcfff },
	{ .start = 0xd800, .end = 0xd8ff },
	{ .start = 0xdc00, .end = 0xffff },
	{ .start = 0x17000, .end = 0x17fff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};
/*
 * Return true if @offset falls in an MCR-steered range for this platform.
 * Such registers can't be verified via command-streamer reads.
 */
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	const struct i915_range *r;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
		r = mcr_ranges_xehp;
	else if (GRAPHICS_VER(i915) >= 12)
		r = mcr_ranges_gen12;
	else if (GRAPHICS_VER(i915) >= 8)
		r = mcr_ranges_gen8;
	else
		return false;

	/*
	 * Registers in these ranges are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	for (; r->start; r++) {
		if (offset >= r->start && offset <= r->end)
			return true;
	}

	return false;
}
/*
 * Emit MI_STORE_REGISTER_MEM commands into @rq that dump every
 * (non-MCR-steered) workaround register in @wal into @vma, one u32 slot
 * per list entry.  Slot index matches the entry's position in the list,
 * so skipped (MCR) entries leave their slot untouched.
 */
static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	/* gen8+ uses a one-dword-longer SRM encoding (wider address). */
	if (GRAPHICS_VER(i915) >= 8)
		srm++;

	/* First pass: count emittable registers to size the ring reservation. */
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);	/* 4 dwords per SRM */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		/* MCR-steered registers can't be read via CS; skip. */
		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;	/* upper 32 bits of the GGTT address */
	}
	intel_ring_advance(rq, cs);

	return 0;
}
/*
 * Read back every workaround register in @wal via the command streamer
 * and compare against the expected values.  Returns 0 on success,
 * -ENXIO if any register mismatches, or a negative errno on setup
 * failure.  @from names the caller for the mismatch diagnostics.
 */
static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	/* One u32 result slot per workaround entry. */
	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	/* ww locking: on -EDEADLK we back off and retry from here. */
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

	/* Keep a reference; the request must be added even on error. */
	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		/* MCR-steered entries were skipped by wa_list_srm() too. */
		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
}
/* Verify the engine's workaround list using its kernel context. */
int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* Common support for CompuLab CM-T3x30 CoMs
*/
#include "omap3-cm-t3x.dtsi"
/ {
	cpus {
		cpu@0 {
			/* CPU core rail; &vcc presumably comes from the PMIC dtsi — verify */
			cpu0-supply = <&vcc>;
		};
	};

	/* Audio card: OMAP McBSP2 wired to the TWL4030 codec */
	sound {
		compatible = "ti,omap-twl4030";
		ti,model = "cm-t35";
		ti,mcbsp = <&mcbsp2>;
	};
};
&omap3_pmx_core {
	/* SMSC LAN9221 Ethernet: GPMC chip-select and IRQ GPIO */
	smsc1_pins: smsc1-pins {
		pinctrl-single,pins = <
			OMAP3_CORE1_IOPAD(0x20b8, PIN_OUTPUT | MUX_MODE0)	/* gpmc_ncs5.gpmc_ncs5 */
			OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLUP | MUX_MODE4)	/* uart3_cts_rctx.gpio_163 */
		>;
	};

	/* HS USB OTG port (ULPI-style 12-pin interface) */
	hsusb0_pins: hsusb0-pins {
		pinctrl-single,pins = <
			OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0)		/* hsusb0_clk.hsusb0_clk */
			OMAP3_CORE1_IOPAD(0x21a4, PIN_OUTPUT | MUX_MODE0)		/* hsusb0_stp.hsusb0_stp */
			OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_dir.hsusb0_dir */
			OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_nxt.hsusb0_nxt */
			OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_data0.hsusb0_data0 */
			OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_data1.hsusb0_data1 */
			OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_data2.hsusb0_data2 */
			OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_data3.hsusb0_data3 */
			OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_data4.hsusb0_data4 */
			OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_data5.hsusb0_data5 */
			OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_data6.hsusb0_data6 */
			OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* hsusb0_data7.hsusb0_data7 */
		>;
	};
};
#include "omap-gpmc-smsc911x.dtsi"
&gpmc {
	ranges = <5 0 0x2c000000 0x01000000>,	/* CM-T3x30 SMSC9x Eth */
		 <0 0 0x00000000 0x01000000>;	/* CM-T3x NAND */

	/*
	 * NOTE(review): per the DT spec the unit-address should match the
	 * first reg cells (i.e. "ethernet@5,0"); confirm before renaming,
	 * since other dtsi files may reference this node by path.
	 */
	smsc1: ethernet@gpmc {
		compatible = "smsc,lan9221", "smsc,lan9115";
		pinctrl-names = "default";
		pinctrl-0 = <&smsc1_pins>;
		/* IRQ line on GPIO6_3, active low (gpio_163 pinmux above) */
		interrupt-parent = <&gpio6>;
		interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
		reg = <5 0 0xff>;	/* GPMC CS5 */
	};
};
&i2c1 {
	/* TWL4030 PMIC at I2C address 0x48 */
	twl: twl@48 {
		reg = <0x48>;
		interrupts = <7>; /* SYS_NIRQ cascaded to intc */
		interrupt-parent = <&intc>;

		twl_audio: audio {
			compatible = "ti,twl4030-audio";
			codec {
			};
		};
	};
};
#include "twl4030.dtsi"
#include "twl4030_omap3.dtsi"
#include <dt-bindings/input/input.h>
&venc {
	/* Video encoder analog supply from the PMIC VDAC regulator */
	vdda-supply = <&vdac>;
};
&mmc1 {
	/* Card power from the PMIC VMMC1 regulator */
	vmmc-supply = <&vmmc1>;
};
&twl_gpio {
	ti,use-leds;
	/* pullups: BIT(0) — enable internal pull-up on TWL GPIO 0 only */
	ti,pullups = <0x000001>;
};
&twl_keypad {
	/* 3x3 matrix: MATRIX_KEY(row, column, keycode) */
	linux,keymap = <
		MATRIX_KEY(0x00, 0x01, KEY_A)
		MATRIX_KEY(0x00, 0x02, KEY_B)
		MATRIX_KEY(0x00, 0x03, KEY_LEFT)

		MATRIX_KEY(0x01, 0x01, KEY_UP)
		MATRIX_KEY(0x01, 0x02, KEY_ENTER)
		MATRIX_KEY(0x01, 0x03, KEY_DOWN)

		MATRIX_KEY(0x02, 0x01, KEY_RIGHT)
		MATRIX_KEY(0x02, 0x02, KEY_C)
		MATRIX_KEY(0x02, 0x03, KEY_D)
	>;
};
&hsusb1_phy {
	/* PHY reset driven by TWL4030 GPIO 6, active low */
	reset-gpios = <&twl_gpio 6 GPIO_ACTIVE_LOW>;
};
&hsusb2_phy {
	/* PHY reset driven by TWL4030 GPIO 7, active low */
	reset-gpios = <&twl_gpio 7 GPIO_ACTIVE_LOW>;
};
&usb_otg_hs {
	pinctrl-names = "default";
	pinctrl-0 = <&hsusb0_pins>;
	/* interface-type 0 — presumably ULPI per the MUSB binding; verify */
	interface-type = <0>;
	usb-phy = <&usb2_phy>;
	phys = <&usb2_phy>;
	phy-names = "usb2-phy";
	/* mode 3 — presumably OTG (host+peripheral); verify against binding */
	mode = <3>;
	/* bus power budget, in units of 2 mA per the MUSB binding (= 100 mA) */
	power = <50>;
};
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.