code
stringlengths
0
23.9M
// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/clock/tegra114-car.h>
#include <dt-bindings/gpio/tegra-gpio.h>
#include <dt-bindings/memory/tegra114-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/tegra-pmc.h>

/ {
    compatible = "nvidia,tegra114";
    interrupt-parent = <&lic>;
    #address-cells = <1>;
    #size-cells = <1>;

    memory@80000000 {
        device_type = "memory";
        reg = <0x80000000 0x0>;
    };

    sram@40000000 {
        compatible = "mmio-sram";
        reg = <0x40000000 0x40000>;
        #address-cells = <1>;
        #size-cells = <1>;
        ranges = <0 0x40000000 0x40000>;

        vde_pool: sram@400 {
            reg = <0x400 0x3fc00>;
            pool;
        };
    };

    host1x@50000000 {
        compatible = "nvidia,tegra114-host1x";
        reg = <0x50000000 0x00028000>;
        interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>, /* syncpt */
                     <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>; /* general */
        interrupt-names = "syncpt", "host1x";
        clocks = <&tegra_car TEGRA114_CLK_HOST1X>;
        clock-names = "host1x";
        resets = <&tegra_car 28>, <&mc TEGRA114_MC_RESET_HC>;
        reset-names = "host1x", "mc";
        iommus = <&mc TEGRA_SWGROUP_HC>;
        #address-cells = <1>;
        #size-cells = <1>;
        ranges = <0x54000000 0x54000000 0x01000000>;

        gr2d@54140000 {
            compatible = "nvidia,tegra114-gr2d";
            reg = <0x54140000 0x00040000>;
            interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&tegra_car TEGRA114_CLK_GR2D>;
            resets = <&tegra_car 21>, <&mc TEGRA114_MC_RESET_2D>;
            reset-names = "2d", "mc";
            iommus = <&mc TEGRA_SWGROUP_G2>;
        };

        gr3d@54180000 {
            compatible = "nvidia,tegra114-gr3d";
            reg = <0x54180000 0x00040000>;
            clocks = <&tegra_car TEGRA114_CLK_GR3D>;
            resets = <&tegra_car 24>, <&mc TEGRA114_MC_RESET_3D>;
            reset-names = "3d", "mc";
            iommus = <&mc TEGRA_SWGROUP_NV>;
        };

        dc@54200000 {
            compatible = "nvidia,tegra114-dc";
            reg = <0x54200000 0x00040000>;
            interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&tegra_car TEGRA114_CLK_DISP1>,
                     <&tegra_car TEGRA114_CLK_PLL_P>;
            clock-names = "dc", "parent";
            resets = <&tegra_car 27>;
            reset-names = "dc";
            iommus = <&mc TEGRA_SWGROUP_DC>;
            nvidia,head = <0>;

            rgb {
                status = "disabled";
            };
        };

        dc@54240000 {
            compatible = "nvidia,tegra114-dc";
            reg = <0x54240000 0x00040000>;
            interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&tegra_car TEGRA114_CLK_DISP2>,
                     <&tegra_car TEGRA114_CLK_PLL_P>;
            clock-names = "dc", "parent";
            resets = <&tegra_car 26>;
            reset-names = "dc";
            iommus = <&mc TEGRA_SWGROUP_DCB>;
            nvidia,head = <1>;

            rgb {
                status = "disabled";
            };
        };

        hdmi@54280000 {
            compatible = "nvidia,tegra114-hdmi";
            reg = <0x54280000 0x00040000>;
            interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&tegra_car TEGRA114_CLK_HDMI>,
                     <&tegra_car TEGRA114_CLK_PLL_D2_OUT0>;
            clock-names = "hdmi", "parent";
            resets = <&tegra_car 51>;
            reset-names = "hdmi";
            status = "disabled";
        };

        dsia: dsi@54300000 {
            compatible = "nvidia,tegra114-dsi";
            reg = <0x54300000 0x00040000>;
            clocks = <&tegra_car TEGRA114_CLK_DSIA>,
                     <&tegra_car TEGRA114_CLK_DSIALP>,
                     <&tegra_car TEGRA114_CLK_PLL_D_OUT0>;
            clock-names = "dsi", "lp", "parent";
            resets = <&tegra_car 48>;
            reset-names = "dsi";
            nvidia,mipi-calibrate = <&mipi 0x060>; /* DSIA & DSIB pads */
            status = "disabled";
            #address-cells = <1>;
            #size-cells = <0>;
        };

        dsib: dsi@54400000 {
            compatible = "nvidia,tegra114-dsi";
            reg = <0x54400000 0x00040000>;
            clocks = <&tegra_car TEGRA114_CLK_DSIB>,
                     <&tegra_car TEGRA114_CLK_DSIBLP>,
                     <&tegra_car TEGRA114_CLK_PLL_D2_OUT0>;
            clock-names = "dsi", "lp", "parent";
            resets = <&tegra_car 82>;
            reset-names = "dsi";
            nvidia,mipi-calibrate = <&mipi 0x180>; /* DSIC & DSID pads */
            status = "disabled";
            #address-cells = <1>;
            #size-cells = <0>;
        };
    };

    gic: interrupt-controller@50041000 {
        compatible = "arm,cortex-a15-gic";
        #interrupt-cells = <3>;
        interrupt-controller;
        reg = <0x50041000 0x1000>,
              <0x50042000 0x1000>,
              <0x50044000 0x2000>,
              <0x50046000 0x2000>;
        interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
        interrupt-parent = <&gic>;
    };

    lic: interrupt-controller@60004000 {
        compatible = "nvidia,tegra114-ictlr", "nvidia,tegra30-ictlr";
        reg = <0x60004000 0x100>,
              <0x60004100 0x50>,
              <0x60004200 0x50>,
              <0x60004300 0x50>,
              <0x60004400 0x50>;
        interrupt-controller;
        #interrupt-cells = <3>;
        interrupt-parent = <&gic>;
    };

    timer@60005000 {
        compatible = "nvidia,tegra114-timer", "nvidia,tegra30-timer";
        reg = <0x60005000 0x400>;
        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_TIMER>;
    };

    tegra_car: clock@60006000 {
        compatible = "nvidia,tegra114-car";
        reg = <0x60006000 0x1000>;
        #clock-cells = <1>;
        #reset-cells = <1>;
    };

    flow-controller@60007000 {
        compatible = "nvidia,tegra114-flowctrl";
        reg = <0x60007000 0x1000>;
    };

    apbdma: dma@6000a000 {
        compatible = "nvidia,tegra114-apbdma";
        reg = <0x6000a000 0x1400>;
        interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_APBDMA>;
        resets = <&tegra_car 34>;
        reset-names = "dma";
        #dma-cells = <1>;
    };

    ahb: ahb@6000c000 {
        compatible = "nvidia,tegra114-ahb", "nvidia,tegra30-ahb";
        reg = <0x6000c000 0x150>;
    };

    gpio: gpio@6000d000 {
        compatible = "nvidia,tegra114-gpio", "nvidia,tegra30-gpio";
        reg = <0x6000d000 0x1000>;
        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
        #gpio-cells = <2>;
        gpio-controller;
        #interrupt-cells = <2>;
        interrupt-controller;
        gpio-ranges = <&pinmux 0 0 246>;
    };

    vde@6001a000 {
        compatible = "nvidia,tegra114-vde";
        reg = <0x6001a000 0x1000>, /* Syntax Engine */
              <0x6001b000 0x1000>, /* Video Bitstream Engine */
              <0x6001c000 0x100>,  /* Macroblock Engine */
              <0x6001c200 0x100>,  /* Post-processing Engine */
              <0x6001c400 0x100>,  /* Motion Compensation Engine */
              <0x6001c600 0x100>,  /* Transform Engine */
              <0x6001c800 0x100>,  /* Pixel prediction block */
              <0x6001ca00 0x100>,  /* Video DMA */
              <0x6001d800 0x400>;  /* Video frame controls */
        reg-names = "sxe", "bsev", "mbe", "ppe", "mce",
                    "tfe", "ppb", "vdma", "frameid";
        iram = <&vde_pool>; /* IRAM region */
        interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,  /* Sync token interrupt */
                     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, /* BSE-V interrupt */
                     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; /* SXE interrupt */
        interrupt-names = "sync-token", "bsev", "sxe";
        clocks = <&tegra_car TEGRA114_CLK_VDE>;
        reset-names = "vde", "mc";
        resets = <&tegra_car 61>, <&mc TEGRA114_MC_RESET_VDE>;
        iommus = <&mc TEGRA_SWGROUP_VDE>;
    };

    apbmisc@70000800 {
        compatible = "nvidia,tegra114-apbmisc", "nvidia,tegra20-apbmisc";
        reg = <0x70000800 0x64>, /* Chip revision */
              <0x70000008 0x04>; /* Strapping options */
    };

    pinmux: pinmux@70000868 {
        compatible = "nvidia,tegra114-pinmux";
        reg = <0x70000868 0x148>, /* Pad control registers */
              <0x70003000 0x40c>; /* Mux registers */
    };

    /*
     * There are two serial drivers, i.e. the 8250-based simple serial
     * driver and the APB DMA based serial driver for higher baudrate
     * and performance. To enable the 8250-based driver, the compatible
     * is "nvidia,tegra114-uart", "nvidia,tegra20-uart" and to enable
     * the APB DMA based serial driver, the compatible is
     * "nvidia,tegra114-hsuart", "nvidia,tegra30-hsuart".
     */
    uarta: serial@70006000 {
        compatible = "nvidia,tegra114-uart", "nvidia,tegra20-uart";
        reg = <0x70006000 0x40>;
        reg-shift = <2>;
        interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_UARTA>;
        resets = <&tegra_car 6>;
        dmas = <&apbdma 8>, <&apbdma 8>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    uartb: serial@70006040 {
        compatible = "nvidia,tegra114-uart", "nvidia,tegra20-uart";
        reg = <0x70006040 0x40>;
        reg-shift = <2>;
        interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_UARTB>;
        resets = <&tegra_car 7>;
        dmas = <&apbdma 9>, <&apbdma 9>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    uartc: serial@70006200 {
        compatible = "nvidia,tegra114-uart", "nvidia,tegra20-uart";
        reg = <0x70006200 0x100>;
        reg-shift = <2>;
        interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_UARTC>;
        resets = <&tegra_car 55>;
        dmas = <&apbdma 10>, <&apbdma 10>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    uartd: serial@70006300 {
        compatible = "nvidia,tegra114-uart", "nvidia,tegra20-uart";
        reg = <0x70006300 0x100>;
        reg-shift = <2>;
        interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_UARTD>;
        resets = <&tegra_car 65>;
        dmas = <&apbdma 19>, <&apbdma 19>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    pwm: pwm@7000a000 {
        compatible = "nvidia,tegra114-pwm", "nvidia,tegra20-pwm";
        reg = <0x7000a000 0x100>;
        #pwm-cells = <2>;
        clocks = <&tegra_car TEGRA114_CLK_PWM>;
        resets = <&tegra_car 17>;
        reset-names = "pwm";
        status = "disabled";
    };

    i2c@7000c000 {
        compatible = "nvidia,tegra114-i2c";
        reg = <0x7000c000 0x100>;
        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_I2C1>;
        clock-names = "div-clk";
        resets = <&tegra_car 12>;
        reset-names = "i2c";
        dmas = <&apbdma 21>, <&apbdma 21>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    i2c@7000c400 {
        compatible = "nvidia,tegra114-i2c";
        reg = <0x7000c400 0x100>;
        interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_I2C2>;
        clock-names = "div-clk";
        resets = <&tegra_car 54>;
        reset-names = "i2c";
        dmas = <&apbdma 22>, <&apbdma 22>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    i2c@7000c500 {
        compatible = "nvidia,tegra114-i2c";
        reg = <0x7000c500 0x100>;
        interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_I2C3>;
        clock-names = "div-clk";
        resets = <&tegra_car 67>;
        reset-names = "i2c";
        dmas = <&apbdma 23>, <&apbdma 23>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    i2c@7000c700 {
        compatible = "nvidia,tegra114-i2c";
        reg = <0x7000c700 0x100>;
        interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_I2C4>;
        clock-names = "div-clk";
        resets = <&tegra_car 103>;
        reset-names = "i2c";
        dmas = <&apbdma 26>, <&apbdma 26>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    i2c@7000d000 {
        compatible = "nvidia,tegra114-i2c";
        reg = <0x7000d000 0x100>;
        interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_I2C5>;
        clock-names = "div-clk";
        resets = <&tegra_car 47>;
        reset-names = "i2c";
        dmas = <&apbdma 24>, <&apbdma 24>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    spi@7000d400 {
        compatible = "nvidia,tegra114-spi";
        reg = <0x7000d400 0x200>;
        interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_SBC1>;
        clock-names = "spi";
        resets = <&tegra_car 41>;
        reset-names = "spi";
        dmas = <&apbdma 15>, <&apbdma 15>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    spi@7000d600 {
        compatible = "nvidia,tegra114-spi";
        reg = <0x7000d600 0x200>;
        interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_SBC2>;
        clock-names = "spi";
        resets = <&tegra_car 44>;
        reset-names = "spi";
        dmas = <&apbdma 16>, <&apbdma 16>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    spi@7000d800 {
        compatible = "nvidia,tegra114-spi";
        reg = <0x7000d800 0x200>;
        interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_SBC3>;
        clock-names = "spi";
        resets = <&tegra_car 46>;
        reset-names = "spi";
        dmas = <&apbdma 17>, <&apbdma 17>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    spi@7000da00 {
        compatible = "nvidia,tegra114-spi";
        reg = <0x7000da00 0x200>;
        interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_SBC4>;
        clock-names = "spi";
        resets = <&tegra_car 68>;
        reset-names = "spi";
        dmas = <&apbdma 18>, <&apbdma 18>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    spi@7000dc00 {
        compatible = "nvidia,tegra114-spi";
        reg = <0x7000dc00 0x200>;
        interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_SBC5>;
        clock-names = "spi";
        resets = <&tegra_car 104>;
        reset-names = "spi";
        dmas = <&apbdma 27>, <&apbdma 27>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    spi@7000de00 {
        compatible = "nvidia,tegra114-spi";
        reg = <0x7000de00 0x200>;
        interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
        #address-cells = <1>;
        #size-cells = <0>;
        clocks = <&tegra_car TEGRA114_CLK_SBC6>;
        clock-names = "spi";
        resets = <&tegra_car 105>;
        reset-names = "spi";
        dmas = <&apbdma 28>, <&apbdma 28>;
        dma-names = "rx", "tx";
        status = "disabled";
    };

    rtc@7000e000 {
        compatible = "nvidia,tegra114-rtc", "nvidia,tegra20-rtc";
        reg = <0x7000e000 0x100>;
        interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_RTC>;
    };

    kbc@7000e200 {
        compatible = "nvidia,tegra114-kbc";
        reg = <0x7000e200 0x100>;
        interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_KBC>;
        resets = <&tegra_car 36>;
        reset-names = "kbc";
        status = "disabled";
    };

    tegra_pmc: pmc@7000e400 {
        compatible = "nvidia,tegra114-pmc";
        reg = <0x7000e400 0x400>;
        clocks = <&tegra_car TEGRA114_CLK_PCLK>, <&clk32k_in>;
        clock-names = "pclk", "clk32k_in";
        #clock-cells = <1>;
    };

    fuse@7000f800 {
        compatible = "nvidia,tegra114-efuse";
        reg = <0x7000f800 0x400>;
        clocks = <&tegra_car TEGRA114_CLK_FUSE>;
        clock-names = "fuse";
        resets = <&tegra_car 39>;
        reset-names = "fuse";
    };

    mc: memory-controller@70019000 {
        compatible = "nvidia,tegra114-mc";
        reg = <0x70019000 0x1000>;
        clocks = <&tegra_car TEGRA114_CLK_MC>;
        clock-names = "mc";
        interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
        #reset-cells = <1>;
        #iommu-cells = <1>;
    };

    ahub@70080000 {
        compatible = "nvidia,tegra114-ahub";
        reg = <0x70080000 0x200>,
              <0x70080200 0x100>,
              <0x70081000 0x200>;
        interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_D_AUDIO>,
                 <&tegra_car TEGRA114_CLK_APBIF>;
        clock-names = "d_audio", "apbif";
        resets = <&tegra_car 106>, /* d_audio */
                 <&tegra_car 107>, /* apbif */
                 <&tegra_car 30>,  /* i2s0 */
                 <&tegra_car 11>,  /* i2s1 */
                 <&tegra_car 18>,  /* i2s2 */
                 <&tegra_car 101>, /* i2s3 */
                 <&tegra_car 102>, /* i2s4 */
                 <&tegra_car 108>, /* dam0 */
                 <&tegra_car 109>, /* dam1 */
                 <&tegra_car 110>, /* dam2 */
                 <&tegra_car 10>,  /* spdif */
                 <&tegra_car 153>, /* amx */
                 <&tegra_car 154>; /* adx */
        reset-names = "d_audio", "apbif", "i2s0", "i2s1", "i2s2",
                      "i2s3", "i2s4", "dam0", "dam1", "dam2",
                      "spdif", "amx", "adx";
        dmas = <&apbdma 1>, <&apbdma 1>,
               <&apbdma 2>, <&apbdma 2>,
               <&apbdma 3>, <&apbdma 3>,
               <&apbdma 4>, <&apbdma 4>,
               <&apbdma 6>, <&apbdma 6>,
               <&apbdma 7>, <&apbdma 7>,
               <&apbdma 12>, <&apbdma 12>,
               <&apbdma 13>, <&apbdma 13>,
               <&apbdma 14>, <&apbdma 14>,
               <&apbdma 29>, <&apbdma 29>;
        dma-names = "rx0", "tx0", "rx1", "tx1", "rx2", "tx2",
                    "rx3", "tx3", "rx4", "tx4", "rx5", "tx5",
                    "rx6", "tx6", "rx7", "tx7", "rx8", "tx8",
                    "rx9", "tx9";
        ranges;
        #address-cells = <1>;
        #size-cells = <1>;

        tegra_i2s0: i2s@70080300 {
            compatible = "nvidia,tegra114-i2s", "nvidia,tegra30-i2s";
            reg = <0x70080300 0x100>;
            nvidia,ahub-cif-ids = <4 4>;
            clocks = <&tegra_car TEGRA114_CLK_I2S0>;
            resets = <&tegra_car 30>;
            reset-names = "i2s";
            status = "disabled";
        };

        tegra_i2s1: i2s@70080400 {
            compatible = "nvidia,tegra114-i2s", "nvidia,tegra30-i2s";
            reg = <0x70080400 0x100>;
            nvidia,ahub-cif-ids = <5 5>;
            clocks = <&tegra_car TEGRA114_CLK_I2S1>;
            resets = <&tegra_car 11>;
            reset-names = "i2s";
            status = "disabled";
        };

        tegra_i2s2: i2s@70080500 {
            compatible = "nvidia,tegra114-i2s", "nvidia,tegra30-i2s";
            reg = <0x70080500 0x100>;
            nvidia,ahub-cif-ids = <6 6>;
            clocks = <&tegra_car TEGRA114_CLK_I2S2>;
            resets = <&tegra_car 18>;
            reset-names = "i2s";
            status = "disabled";
        };

        tegra_i2s3: i2s@70080600 {
            compatible = "nvidia,tegra114-i2s", "nvidia,tegra30-i2s";
            reg = <0x70080600 0x100>;
            nvidia,ahub-cif-ids = <7 7>;
            clocks = <&tegra_car TEGRA114_CLK_I2S3>;
            resets = <&tegra_car 101>;
            reset-names = "i2s";
            status = "disabled";
        };

        tegra_i2s4: i2s@70080700 {
            compatible = "nvidia,tegra114-i2s", "nvidia,tegra30-i2s";
            reg = <0x70080700 0x100>;
            nvidia,ahub-cif-ids = <8 8>;
            clocks = <&tegra_car TEGRA114_CLK_I2S4>;
            resets = <&tegra_car 102>;
            reset-names = "i2s";
            status = "disabled";
        };
    };

    mipi: mipi@700e3000 {
        compatible = "nvidia,tegra114-mipi";
        reg = <0x700e3000 0x100>;
        clocks = <&tegra_car TEGRA114_CLK_MIPI_CAL>;
        #nvidia,mipi-calibrate-cells = <1>;
    };

    mmc@78000000 {
        compatible = "nvidia,tegra114-sdhci";
        reg = <0x78000000 0x200>;
        interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_SDMMC1>;
        clock-names = "sdhci";
        resets = <&tegra_car 14>;
        reset-names = "sdhci";
        status = "disabled";
    };

    mmc@78000200 {
        compatible = "nvidia,tegra114-sdhci";
        reg = <0x78000200 0x200>;
        interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_SDMMC2>;
        clock-names = "sdhci";
        resets = <&tegra_car 9>;
        reset-names = "sdhci";
        status = "disabled";
    };

    mmc@78000400 {
        compatible = "nvidia,tegra114-sdhci";
        reg = <0x78000400 0x200>;
        interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_SDMMC3>;
        clock-names = "sdhci";
        resets = <&tegra_car 69>;
        reset-names = "sdhci";
        status = "disabled";
    };

    mmc@78000600 {
        compatible = "nvidia,tegra114-sdhci";
        reg = <0x78000600 0x200>;
        interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA114_CLK_SDMMC4>;
        clock-names = "sdhci";
        resets = <&tegra_car 15>;
        reset-names = "sdhci";
        status = "disabled";
    };

    usb@7d000000 {
        compatible = "nvidia,tegra114-ehci", "nvidia,tegra30-ehci";
        reg = <0x7d000000 0x4000>;
        interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
        phy_type = "utmi";
        clocks = <&tegra_car TEGRA114_CLK_USBD>;
        resets = <&tegra_car 22>;
        reset-names = "usb";
        nvidia,phy = <&phy1>;
        status = "disabled";
    };

    phy1: usb-phy@7d000000 {
        compatible = "nvidia,tegra114-usb-phy", "nvidia,tegra30-usb-phy";
        reg = <0x7d000000 0x4000>,
              <0x7d000000 0x4000>;
        interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
        phy_type = "utmi";
        clocks = <&tegra_car TEGRA114_CLK_USBD>,
                 <&tegra_car TEGRA114_CLK_PLL_U>,
                 <&tegra_car TEGRA114_CLK_USBD>;
        clock-names = "reg", "pll_u", "utmi-pads";
        resets = <&tegra_car 22>, <&tegra_car 22>;
        reset-names = "usb", "utmi-pads";
        #phy-cells = <0>;
        nvidia,hssync-start-delay = <0>;
        nvidia,idle-wait-delay = <17>;
        nvidia,elastic-limit = <16>;
        nvidia,term-range-adj = <6>;
        nvidia,xcvr-setup = <9>;
        nvidia,xcvr-lsfslew = <0>;
        nvidia,xcvr-lsrslew = <3>;
        nvidia,hssquelch-level = <2>;
        nvidia,hsdiscon-level = <5>;
        nvidia,xcvr-hsslew = <12>;
        nvidia,has-utmi-pad-registers;
        nvidia,pmc = <&tegra_pmc 0>;
        status = "disabled";
    };

    usb@7d008000 {
        compatible = "nvidia,tegra114-ehci", "nvidia,tegra30-ehci";
        reg = <0x7d008000 0x4000>;
        interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
        phy_type = "utmi";
        clocks = <&tegra_car TEGRA114_CLK_USB3>;
        resets = <&tegra_car 59>;
        reset-names = "usb";
        nvidia,phy = <&phy3>;
        status = "disabled";
    };

    phy3: usb-phy@7d008000 {
        compatible = "nvidia,tegra114-usb-phy", "nvidia,tegra30-usb-phy";
        reg = <0x7d008000 0x4000>,
              <0x7d000000 0x4000>;
        interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
        phy_type = "utmi";
        clocks = <&tegra_car TEGRA114_CLK_USB3>,
                 <&tegra_car TEGRA114_CLK_PLL_U>,
                 <&tegra_car TEGRA114_CLK_USBD>;
        clock-names = "reg", "pll_u", "utmi-pads";
        resets = <&tegra_car 59>, <&tegra_car 22>;
        reset-names = "usb", "utmi-pads";
        #phy-cells = <0>;
        nvidia,hssync-start-delay = <0>;
        nvidia,idle-wait-delay = <17>;
        nvidia,elastic-limit = <16>;
        nvidia,term-range-adj = <6>;
        nvidia,xcvr-setup = <9>;
        nvidia,xcvr-lsfslew = <0>;
        nvidia,xcvr-lsrslew = <3>;
        nvidia,hssquelch-level = <2>;
        nvidia,hsdiscon-level = <5>;
        nvidia,xcvr-hsslew = <12>;
        nvidia,pmc = <&tegra_pmc 2>;
        status = "disabled";
    };

    cpus {
        #address-cells = <1>;
        #size-cells = <0>;

        cpu@0 {
            device_type = "cpu";
            compatible = "arm,cortex-a15";
            reg = <0>;
        };

        cpu@1 {
            device_type = "cpu";
            compatible = "arm,cortex-a15";
            reg = <1>;
        };

        cpu@2 {
            device_type = "cpu";
            compatible = "arm,cortex-a15";
            reg = <2>;
        };

        cpu@3 {
            device_type = "cpu";
            compatible = "arm,cortex-a15";
            reg = <3>;
        };
    };

    timer {
        compatible = "arm,armv7-timer";
        interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
                     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
                     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
                     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
        interrupt-parent = <&gic>;
    };
};
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Device Tree Source for A20-Olimex-SOM-EVB-eMMC Board
 *
 * Copyright (C) 2018 Olimex Ltd.
 * Author: Stefan Mavrodiev <[email protected]>
 */

/dts-v1/;
#include "sun7i-a20-olimex-som-evb.dts"

/ {
    model = "Olimex A20-Olimex-SOM-EVB-eMMC";
    compatible = "olimex,a20-olimex-som-evb-emmc", "allwinner,sun7i-a20";

    /* eMMC power sequence: assert/deassert the eMMC reset line */
    mmc2_pwrseq: pwrseq {
        compatible = "mmc-pwrseq-emmc";
        reset-gpios = <&pio 2 18 GPIO_ACTIVE_LOW>;
    };
};

&mmc2 {
    vmmc-supply = <&reg_vcc3v3>;
    mmc-pwrseq = <&mmc2_pwrseq>;
    bus-width = <4>;
    non-removable;
    status = "okay";

    emmc: emmc@0 {
        reg = <0>;
        compatible = "mmc-card";
        broken-hpi;
    };
};
/* SPDX-License-Identifier: GPL-2.0 */ /* * OLPC HGPK (XO-1) touchpad PS/2 mouse driver */ #ifndef _HGPK_H #define _HGPK_H #define HGPK_GS 0xff /* The GlideSensor */ #define HGPK_PT 0xcf /* The PenTablet */ enum hgpk_model_t { HGPK_MODEL_PREA = 0x0a, /* pre-B1s */ HGPK_MODEL_A = 0x14, /* found on B1s, PT disabled in hardware */ HGPK_MODEL_B = 0x28, /* B2s, has capacitance issues */ HGPK_MODEL_C = 0x3c, HGPK_MODEL_D = 0x50, /* C1, mass production */ }; enum hgpk_spew_flag { NO_SPEW, MAYBE_SPEWING, SPEW_DETECTED, RECALIBRATING, }; #define SPEW_WATCH_COUNT 42 /* at 12ms/packet, this is 1/2 second */ enum hgpk_mode { HGPK_MODE_MOUSE, HGPK_MODE_GLIDESENSOR, HGPK_MODE_PENTABLET, HGPK_MODE_INVALID }; struct hgpk_data { struct psmouse *psmouse; enum hgpk_mode mode; bool powered; enum hgpk_spew_flag spew_flag; int spew_count, x_tally, y_tally; /* spew detection */ unsigned long recalib_window; struct delayed_work recalib_wq; int abs_x, abs_y; int dupe_count; int xbigj, ybigj, xlast, ylast; /* jumpiness detection */ int xsaw_secondary, ysaw_secondary; /* jumpiness detection */ }; int hgpk_detect(struct psmouse *psmouse, bool set_properties); int hgpk_init(struct psmouse *psmouse); #ifdef CONFIG_MOUSE_PS2_OLPC void hgpk_module_init(void); #else static inline void hgpk_module_init(void) { } #endif #endif
// SPDX-License-Identifier: GPL-2.0-or-later /* * omap-usb2.c - USB PHY, talking to USB controller on TI SoCs. * * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com * Author: Kishon Vijay Abraham I <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/phy/omap_control_phy.h> #include <linux/phy/omap_usb.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/property.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/sys_soc.h> #include <linux/usb/phy_companion.h> #define USB2PHY_ANA_CONFIG1 0x4c #define USB2PHY_DISCON_BYP_LATCH BIT(31) #define USB2PHY_CHRG_DET 0x14 #define USB2PHY_CHRG_DET_USE_CHG_DET_REG BIT(29) #define USB2PHY_CHRG_DET_DIS_CHG_DET BIT(28) /* SoC Specific USB2_OTG register definitions */ #define AM654_USB2_OTG_PD BIT(8) #define AM654_USB2_VBUS_DET_EN BIT(5) #define AM654_USB2_VBUSVALID_DET_EN BIT(4) #define OMAP_DEV_PHY_PD BIT(0) #define OMAP_USB2_PHY_PD BIT(28) #define AM437X_USB2_PHY_PD BIT(0) #define AM437X_USB2_OTG_PD BIT(1) #define AM437X_USB2_OTGVDET_EN BIT(19) #define AM437X_USB2_OTGSESSEND_EN BIT(20) /* Driver Flags */ #define OMAP_USB2_HAS_START_SRP BIT(0) #define OMAP_USB2_HAS_SET_VBUS BIT(1) #define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT BIT(2) #define OMAP_USB2_DISABLE_CHRG_DET BIT(3) struct omap_usb { struct usb_phy phy; struct phy_companion *comparator; void __iomem *pll_ctrl_base; void __iomem *phy_base; struct device *dev; struct device *control_dev; struct clk *wkupclk; struct clk *optclk; u8 flags; struct regmap *syscon_phy_power; /* ctrl. reg. acces */ unsigned int power_reg; /* power reg. 
index within syscon */ u32 mask; u32 power_on; u32 power_off; }; #define phy_to_omapusb(x) container_of((x), struct omap_usb, phy) struct usb_phy_data { const char *label; u8 flags; u32 mask; u32 power_on; u32 power_off; }; static inline u32 omap_usb_readl(void __iomem *addr, unsigned int offset) { return __raw_readl(addr + offset); } static inline void omap_usb_writel(void __iomem *addr, unsigned int offset, u32 data) { __raw_writel(data, addr + offset); } /** * omap_usb2_set_comparator() - links the comparator present in the system with this phy * * @comparator: the companion phy(comparator) for this phy * * The phy companion driver should call this API passing the phy_companion * filled with set_vbus and start_srp to be used by usb phy. * * For use by phy companion driver */ int omap_usb2_set_comparator(struct phy_companion *comparator) { struct omap_usb *phy; struct usb_phy *x = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR(x)) return -ENODEV; phy = phy_to_omapusb(x); phy->comparator = comparator; return 0; } EXPORT_SYMBOL_GPL(omap_usb2_set_comparator); static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled) { struct omap_usb *phy = phy_to_omapusb(otg->usb_phy); if (!phy->comparator || !phy->comparator->set_vbus) return -ENODEV; return phy->comparator->set_vbus(phy->comparator, enabled); } static int omap_usb_start_srp(struct usb_otg *otg) { struct omap_usb *phy = phy_to_omapusb(otg->usb_phy); if (!phy->comparator || !phy->comparator->start_srp) return -ENODEV; return phy->comparator->start_srp(phy->comparator); } static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host) { otg->host = host; if (!host) otg->state = OTG_STATE_UNDEFINED; return 0; } static int omap_usb_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) { otg->gadget = gadget; if (!gadget) otg->state = OTG_STATE_UNDEFINED; return 0; } static int omap_usb_phy_power(struct omap_usb *phy, int on) { u32 val; int ret; if (!phy->syscon_phy_power) { 
omap_control_phy_power(phy->control_dev, on); return 0; } if (on) val = phy->power_on; else val = phy->power_off; ret = regmap_update_bits(phy->syscon_phy_power, phy->power_reg, phy->mask, val); return ret; } static int omap_usb_power_off(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); return omap_usb_phy_power(phy, false); } static int omap_usb_power_on(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); return omap_usb_phy_power(phy, true); } static int omap_usb2_disable_clocks(struct omap_usb *phy) { clk_disable_unprepare(phy->wkupclk); if (!IS_ERR(phy->optclk)) clk_disable_unprepare(phy->optclk); return 0; } static int omap_usb2_enable_clocks(struct omap_usb *phy) { int ret; ret = clk_prepare_enable(phy->wkupclk); if (ret < 0) { dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); goto err0; } if (!IS_ERR(phy->optclk)) { ret = clk_prepare_enable(phy->optclk); if (ret < 0) { dev_err(phy->dev, "Failed to enable optclk %d\n", ret); goto err1; } } return 0; err1: clk_disable_unprepare(phy->wkupclk); err0: return ret; } static int omap_usb_init(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); u32 val; omap_usb2_enable_clocks(phy); if (phy->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) { /* * * Reduce the sensitivity of internal PHY by enabling the * DISCON_BYP_LATCH of the USB2PHY_ANA_CONFIG1 register. This * resolves issues with certain devices which can otherwise * be prone to false disconnects. 
* */ val = omap_usb_readl(phy->phy_base, USB2PHY_ANA_CONFIG1); val |= USB2PHY_DISCON_BYP_LATCH; omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val); } if (phy->flags & OMAP_USB2_DISABLE_CHRG_DET) { val = omap_usb_readl(phy->phy_base, USB2PHY_CHRG_DET); val |= USB2PHY_CHRG_DET_USE_CHG_DET_REG | USB2PHY_CHRG_DET_DIS_CHG_DET; omap_usb_writel(phy->phy_base, USB2PHY_CHRG_DET, val); } return 0; } static int omap_usb_exit(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); return omap_usb2_disable_clocks(phy); } static const struct phy_ops ops = { .init = omap_usb_init, .exit = omap_usb_exit, .power_on = omap_usb_power_on, .power_off = omap_usb_power_off, .owner = THIS_MODULE, }; static const struct usb_phy_data omap_usb2_data = { .label = "omap_usb2", .flags = OMAP_USB2_HAS_START_SRP | OMAP_USB2_HAS_SET_VBUS, .mask = OMAP_DEV_PHY_PD, .power_off = OMAP_DEV_PHY_PD, }; static const struct usb_phy_data omap5_usb2_data = { .label = "omap5_usb2", .flags = 0, .mask = OMAP_DEV_PHY_PD, .power_off = OMAP_DEV_PHY_PD, }; static const struct usb_phy_data dra7x_usb2_data = { .label = "dra7x_usb2", .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT, .mask = OMAP_DEV_PHY_PD, .power_off = OMAP_DEV_PHY_PD, }; static const struct usb_phy_data dra7x_usb2_phy2_data = { .label = "dra7x_usb2_phy2", .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT, .mask = OMAP_USB2_PHY_PD, .power_off = OMAP_USB2_PHY_PD, }; static const struct usb_phy_data am437x_usb2_data = { .label = "am437x_usb2", .flags = 0, .mask = AM437X_USB2_PHY_PD | AM437X_USB2_OTG_PD | AM437X_USB2_OTGVDET_EN | AM437X_USB2_OTGSESSEND_EN, .power_on = AM437X_USB2_OTGVDET_EN | AM437X_USB2_OTGSESSEND_EN, .power_off = AM437X_USB2_PHY_PD | AM437X_USB2_OTG_PD, }; static const struct usb_phy_data am654_usb2_data = { .label = "am654_usb2", .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT, .mask = AM654_USB2_OTG_PD | AM654_USB2_VBUS_DET_EN | AM654_USB2_VBUSVALID_DET_EN, .power_on = AM654_USB2_VBUS_DET_EN | AM654_USB2_VBUSVALID_DET_EN, 
.power_off = AM654_USB2_OTG_PD, }; static const struct of_device_id omap_usb2_id_table[] = { { .compatible = "ti,omap-usb2", .data = &omap_usb2_data, }, { .compatible = "ti,omap5-usb2", .data = &omap5_usb2_data, }, { .compatible = "ti,dra7x-usb2", .data = &dra7x_usb2_data, }, { .compatible = "ti,dra7x-usb2-phy2", .data = &dra7x_usb2_phy2_data, }, { .compatible = "ti,am437x-usb2", .data = &am437x_usb2_data, }, { .compatible = "ti,am654-usb2", .data = &am654_usb2_data, }, {}, }; MODULE_DEVICE_TABLE(of, omap_usb2_id_table); static void omap_usb2_init_errata(struct omap_usb *phy) { static const struct soc_device_attribute am65x_sr10_soc_devices[] = { { .family = "AM65X", .revision = "SR1.0" }, { /* sentinel */ } }; /* * Errata i2075: USB2PHY: USB2PHY Charger Detect is Enabled by * Default Without VBUS Presence. * * AM654x SR1.0 has a silicon bug due to which D+ is pulled high after * POR, which could cause enumeration failure with some USB hubs. * Disabling the USB2_PHY Charger Detect function will put D+ * into the normal state. 
 */
	if (soc_device_match(am65x_sr10_soc_devices))
		phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
}

/*
 * omap_usb2_probe - bind the OMAP USB2 PHY device
 * @pdev: platform device created from the DT node
 *
 * Allocates the PHY state, applies per-SoC match data and errata flags,
 * maps the register space, resolves the power-control interface (either a
 * "syscon-phy-power" regmap or the legacy "ctrl-module" device), acquires
 * the wakeup/reference clocks (with legacy DT clock-name fallbacks), and
 * registers both the generic PHY and the USB PHY.
 *
 * Returns 0 on success or a negative errno on failure.  All allocations
 * and the register mapping are devm-managed, so no explicit unwind is
 * needed for them.
 */
static int omap_usb2_probe(struct platform_device *pdev)
{
	struct omap_usb	*phy;
	struct phy *generic_phy;
	struct phy_provider *phy_provider;
	struct usb_otg *otg;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *control_node;
	struct platform_device *control_pdev;
	const struct usb_phy_data *phy_data;

	phy_data = device_get_match_data(&pdev->dev);
	if (!phy_data)
		return -EINVAL;

	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
	if (!otg)
		return -ENOMEM;

	/* Copy the per-compatible match data into the instance state. */
	phy->dev = &pdev->dev;
	phy->phy.dev = phy->dev;
	phy->phy.label = phy_data->label;
	phy->phy.otg = otg;
	phy->phy.type = USB_PHY_TYPE_USB2;
	phy->mask = phy_data->mask;
	phy->power_on = phy_data->power_on;
	phy->power_off = phy_data->power_off;
	phy->flags = phy_data->flags;
	/* May set OMAP_USB2_DISABLE_CHRG_DET on affected AM65x SR1.0 parts. */
	omap_usb2_init_errata(phy);

	phy->phy_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(phy->phy_base))
		return PTR_ERR(phy->phy_base);

	/*
	 * Preferred power control: a syscon regmap plus register offset from
	 * the "syscon-phy-power" property.  Fall back to the legacy
	 * "ctrl-module" control device when the syscon lookup fails.
	 */
	phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node,
							"syscon-phy-power");
	if (IS_ERR(phy->syscon_phy_power)) {
		dev_dbg(&pdev->dev,
			"can't get syscon-phy-power, using control device\n");
		phy->syscon_phy_power = NULL;

		control_node = of_parse_phandle(node, "ctrl-module", 0);
		if (!control_node) {
			dev_err(&pdev->dev,
				"Failed to get control device phandle\n");
			return -EINVAL;
		}

		control_pdev = of_find_device_by_node(control_node);
		if (!control_pdev) {
			dev_err(&pdev->dev, "Failed to get control device\n");
			return -EINVAL;
		}
		phy->control_dev = &control_pdev->dev;
	} else {
		/* Second cell of "syscon-phy-power" is the register offset. */
		if (of_property_read_u32_index(node,
					       "syscon-phy-power", 1,
					       &phy->power_reg)) {
			dev_err(&pdev->dev,
				"couldn't get power reg. offset\n");
			return -EINVAL;
		}
	}

	/* Wakeup clock; older DTs used the name "usb_phy_cm_clk32k". */
	phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
	if (IS_ERR(phy->wkupclk)) {
		if (PTR_ERR(phy->wkupclk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		dev_warn(&pdev->dev, "unable to get wkupclk %ld, trying old name\n",
			 PTR_ERR(phy->wkupclk));
		phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");

		if (IS_ERR(phy->wkupclk))
			return dev_err_probe(&pdev->dev, PTR_ERR(phy->wkupclk),
					     "unable to get usb_phy_cm_clk32k\n");

		dev_warn(&pdev->dev,
			 "found usb_phy_cm_clk32k, please fix DTS\n");
	}

	/*
	 * Optional reference clock; legacy DT name "usb_otg_ss_refclk960m".
	 * A missing refclk is tolerated (only debug-logged), unlike wkupclk.
	 */
	phy->optclk = devm_clk_get(phy->dev, "refclk");
	if (IS_ERR(phy->optclk)) {
		if (PTR_ERR(phy->optclk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		dev_dbg(&pdev->dev, "unable to get refclk, trying old name\n");
		phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");

		if (IS_ERR(phy->optclk)) {
			if (PTR_ERR(phy->optclk) != -EPROBE_DEFER) {
				dev_dbg(&pdev->dev,
					"unable to get usb_otg_ss_refclk960m\n");
			}
		} else {
			dev_warn(&pdev->dev,
				 "found usb_otg_ss_refclk960m, please fix DTS\n");
		}
	}

	/* OTG callbacks; vbus/srp only where the match data advertises them. */
	otg->set_host = omap_usb_set_host;
	otg->set_peripheral = omap_usb_set_peripheral;
	if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS)
		otg->set_vbus = omap_usb_set_vbus;
	if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
		otg->start_srp = omap_usb_start_srp;
	otg->usb_phy = &phy->phy;

	platform_set_drvdata(pdev, phy);
	pm_runtime_enable(phy->dev);

	generic_phy = devm_phy_create(phy->dev, NULL, &ops);
	if (IS_ERR(generic_phy)) {
		pm_runtime_disable(phy->dev);
		return PTR_ERR(generic_phy);
	}

	phy_set_drvdata(generic_phy, phy);
	/* Start powered off; consumers power the PHY on via the phy ops. */
	omap_usb_power_off(generic_phy);

	phy_provider = devm_of_phy_provider_register(phy->dev,
						     of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		pm_runtime_disable(phy->dev);
		return PTR_ERR(phy_provider);
	}

	usb_add_phy_dev(&phy->phy);

	return 0;
}

/*
 * omap_usb2_remove - unbind the PHY device; undoes usb_add_phy_dev() and
 * pm_runtime_enable() from probe (everything else is devm-managed).
 */
static void omap_usb2_remove(struct platform_device *pdev)
{
	struct omap_usb	*phy = platform_get_drvdata(pdev);

	usb_remove_phy(&phy->phy);
	pm_runtime_disable(phy->dev);
}

static struct platform_driver omap_usb2_driver = {
	.probe =
omap_usb2_probe, .remove = omap_usb2_remove, .driver = { .name = "omap-usb2", .of_match_table = omap_usb2_id_table, }, }; module_platform_driver(omap_usb2_driver); MODULE_ALIAS("platform:omap_usb2"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("OMAP USB2 phy driver"); MODULE_LICENSE("GPL v2");
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * A small micro-assembler. It is intentionally kept simple, does only * support a subset of instructions, and does not try to hide pipeline * effects like branch delay slots. * * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005, 2007 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle ([email protected]) * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. */ #include <linux/kernel.h> #include <linux/types.h> #include <asm/inst.h> #include <asm/elf.h> #include <asm/bugs.h> #include <asm/uasm.h> #define RS_MASK 0x1f #define RS_SH 16 #define RT_MASK 0x1f #define RT_SH 21 #define SCIMM_MASK 0x3ff #define SCIMM_SH 16 /* This macro sets the non-variable bits of an instruction. */ #define M(a, b, c, d, e, f) \ ((a) << OP_SH \ | (b) << RT_SH \ | (c) << RS_SH \ | (d) << RD_SH \ | (e) << RE_SH \ | (f) << FUNC_SH) #include "uasm.c" static const struct insn insn_table_MM[insn_invalid] = { [insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD}, [insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, [insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD}, [insn_andi] = {M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM}, [insn_beq] = {M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM}, [insn_beql] = {0, 0}, [insn_bgez] = {M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM}, [insn_bgezl] = {0, 0}, [insn_bltz] = {M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM}, [insn_bltzl] = {0, 0}, [insn_bne] = {M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM}, [insn_cache] = {M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM}, [insn_cfc1] = {M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS}, [insn_cfcmsa] = {M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE}, [insn_ctc1] = {M(mm_pool32f_op, 0, 0, 0, 
mm_ctc1_op, mm_32f_73_op), RT | RS}, [insn_ctcmsa] = {M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE}, [insn_daddu] = {0, 0}, [insn_daddiu] = {0, 0}, [insn_di] = {M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS}, [insn_divu] = {M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS}, [insn_dmfc0] = {0, 0}, [insn_dmtc0] = {0, 0}, [insn_dsll] = {0, 0}, [insn_dsll32] = {0, 0}, [insn_dsra] = {0, 0}, [insn_dsrl] = {0, 0}, [insn_dsrl32] = {0, 0}, [insn_drotr] = {0, 0}, [insn_drotr32] = {0, 0}, [insn_dsubu] = {0, 0}, [insn_eret] = {M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0}, [insn_ins] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE}, [insn_ext] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE}, [insn_j] = {M(mm_j32_op, 0, 0, 0, 0, 0), JIMM}, [insn_jal] = {M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM}, [insn_jalr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS}, [insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS}, [insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, [insn_ld] = {0, 0}, [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, [insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM}, [insn_lld] = {0, 0}, [insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM}, [insn_lw] = {M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, [insn_mfc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD}, [insn_mfhi] = {M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS}, [insn_mflo] = {M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS}, [insn_mtc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD}, [insn_mthi] = {M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS}, [insn_mtlo] = {M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS}, [insn_mul] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD}, [insn_or] = {M(mm_pool32a_op, 0, 0, 0, 0, 
mm_or32_op), RT | RS | RD}, [insn_ori] = {M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM}, [insn_pref] = {M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM}, [insn_rfe] = {0, 0}, [insn_sc] = {M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM}, [insn_scd] = {0, 0}, [insn_sd] = {0, 0}, [insn_sll] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD}, [insn_sllv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD}, [insn_slt] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD}, [insn_sltiu] = {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, [insn_sltu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD}, [insn_sra] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD}, [insn_srav] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srav_op), RT | RS | RD}, [insn_srl] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD}, [insn_srlv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD}, [insn_rotr] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD}, [insn_subu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD}, [insn_sw] = {M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, [insn_sync] = {M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS}, [insn_tlbp] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0}, [insn_tlbr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0}, [insn_tlbwi] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0}, [insn_tlbwr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0}, [insn_wait] = {M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM}, [insn_wsbh] = {M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS}, [insn_xor] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD}, [insn_xori] = {M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM}, [insn_dins] = {0, 0}, [insn_dinsm] = {0, 0}, [insn_syscall] = {M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM}, [insn_bbit0] = {0, 0}, 
	[insn_bbit1]	= {0, 0},
	[insn_lwx]	= {0, 0},
	[insn_ldx]	= {0, 0},
};

#undef M

/*
 * Encode a microMIPS 16-bit branch displacement: the byte offset @arg is
 * halved (instructions are 2-byte aligned) and packed as sign bit plus
 * 15 magnitude bits.  Overflow and misalignment are only WARNed about.
 */
static inline u32 build_bimm(s32 arg)
{
	WARN(arg > 0xffff || arg < -0x10000,
	     KERN_WARNING "Micro-assembler field overflow\n");

	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
}

/* Encode a jump target: halve the byte offset and mask to JIMM_MASK bits. */
static inline u32 build_jimm(u32 arg)
{
	WARN(arg & ~((JIMM_MASK << 2) | 1),
	     KERN_WARNING "Micro-assembler field overflow\n");

	return (arg >> 1) & JIMM_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void build_insn(u32 **buf, enum opcode opc, ...)
{
	const struct insn *ip;
	va_list ap;
	u32 op;

	/*
	 * Reject out-of-range opcodes, insn_daddiu on R4000-bug CPUs, and any
	 * table entry left as {0, 0} (i.e. unimplemented for microMIPS).
	 */
	if (opc < 0 || opc >= insn_invalid ||
	    (opc == insn_daddiu && r4k_daddiu_bug()) ||
	    (insn_table_MM[opc].match == 0 && insn_table_MM[opc].fields == 0))
		panic("Unsupported Micro-assembler instruction %d", opc);

	ip = &insn_table_MM[opc];

	op = ip->match;
	va_start(ap, opc);
	/*
	 * For mfc0/mtc0/cfc1/ctc1 the RS/RT encodings are swapped relative to
	 * the argument order, so cross-build the fields for those opcodes.
	 */
	if (ip->fields & RS) {
		if (opc == insn_mfc0 || opc == insn_mtc0 ||
		    opc == insn_cfc1 || opc == insn_ctc1)
			op |= build_rt(va_arg(ap, u32));
		else
			op |= build_rs(va_arg(ap, u32));
	}
	if (ip->fields & RT) {
		if (opc == insn_mfc0 || opc == insn_mtc0 ||
		    opc == insn_cfc1 || opc == insn_ctc1)
			op |= build_rs(va_arg(ap, u32));
		else
			op |= build_rt(va_arg(ap, u32));
	}
	if (ip->fields & RD)
		op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE)
		op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM)
		op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM)
		op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM)
		op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM)
		op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC)
		op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET)
		op |= build_set(va_arg(ap, u32));
	if (ip->fields & SCIMM)
		op |= build_scimm(va_arg(ap, u32));
	va_end(ap);

	/* On little-endian, the two 16-bit halfwords are stored swapped. */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	**buf = ((op & 0xffff) << 16) | (op >> 16);
#else
	**buf = op;
#endif
	(*buf)++;
}

/*
 * Patch the branch displacement of the instruction at @rel->addr so it
 * targets @lab->addr.  Only R_MIPS_PC16 relocations are supported; the
 * +4 accounts for the PC-relative base of the branch.
 */
static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		/* Displacement field sits in the upper halfword on LE. */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		*rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
#else
		*rel->addr |= build_bimm(laddr - (raddr + 4));
#endif
		break;

	default:
		panic("Unsupported Micro-assembler relocation %d",
		      rel->type);
	}
}
/* * Copyright 2023 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "priv.h" #include <subdev/bios.h> #include <subdev/bios/pmu.h> #include <nvfw/fw.h> union nvfw_falcon_appif_hdr { struct nvfw_falcon_appif_hdr_v1 { u8 ver; u8 hdr; u8 len; u8 cnt; } v1; }; union nvfw_falcon_appif { struct nvfw_falcon_appif_v1 { #define NVFW_FALCON_APPIF_ID_DMEMMAPPER 0x00000004 u32 id; u32 dmem_base; } v1; }; union nvfw_falcon_appif_dmemmapper { struct { u32 signature; u16 version; u16 size; u32 cmd_in_buffer_offset; u32 cmd_in_buffer_size; u32 cmd_out_buffer_offset; u32 cmd_out_buffer_size; u32 nvf_img_data_buffer_offset; u32 nvf_img_data_buffer_size; u32 printf_buffer_hdr; u32 ucode_build_time_stamp; u32 ucode_signature; #define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS 0x00000015 #define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB 0x00000019 u32 init_cmd; u32 ucode_feature; u32 ucode_cmd_mask0; u32 ucode_cmd_mask1; u32 multi_tgt_tbl; } v3; }; struct nvfw_fwsec_frts_cmd { struct { u32 ver; u32 hdr; u64 addr; u32 size; u32 flags; } read_vbios; struct { u32 ver; u32 hdr; u32 addr; u32 size; #define NVFW_FRTS_CMD_REGION_TYPE_FB 0x00000002 u32 type; } frts_region; }; static int nvkm_gsp_fwsec_patch(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, u32 if_offset, u32 init_cmd) { union nvfw_falcon_appif_hdr *hdr = (void *)(fw->fw.img + fw->dmem_base_img + if_offset); const u8 *dmem = fw->fw.img + fw->dmem_base_img; int i; if (WARN_ON(hdr->v1.ver != 1)) return -EINVAL; for (i = 0; i < hdr->v1.cnt; i++) { union nvfw_falcon_appif *app = (void *)((u8 *)hdr + hdr->v1.hdr + i * hdr->v1.len); union nvfw_falcon_appif_dmemmapper *dmemmap; struct nvfw_fwsec_frts_cmd *frtscmd; if (app->v1.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER) continue; dmemmap = (void *)(dmem + app->v1.dmem_base); dmemmap->v3.init_cmd = init_cmd; frtscmd = (void *)(dmem + dmemmap->v3.cmd_in_buffer_offset); frtscmd->read_vbios.ver = 1; frtscmd->read_vbios.hdr = sizeof(frtscmd->read_vbios); frtscmd->read_vbios.addr = 0; frtscmd->read_vbios.size = 0; frtscmd->read_vbios.flags = 2; if (init_cmd == 
NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS) { frtscmd->frts_region.ver = 1; frtscmd->frts_region.hdr = sizeof(frtscmd->frts_region); frtscmd->frts_region.addr = gsp->fb.wpr2.frts.addr >> 12; frtscmd->frts_region.size = gsp->fb.wpr2.frts.size >> 12; frtscmd->frts_region.type = NVFW_FRTS_CMD_REGION_TYPE_FB; } break; } if (WARN_ON(i == hdr->v1.cnt)) return -EINVAL; return 0; } union nvfw_falcon_ucode_desc { struct nvkm_falcon_ucode_desc_v2 { u32 Hdr; u32 StoredSize; u32 UncompressedSize; u32 VirtualEntry; u32 InterfaceOffset; u32 IMEMPhysBase; u32 IMEMLoadSize; u32 IMEMVirtBase; u32 IMEMSecBase; u32 IMEMSecSize; u32 DMEMOffset; u32 DMEMPhysBase; u32 DMEMLoadSize; u32 altIMEMLoadSize; u32 altDMEMLoadSize; } v2; struct nvkm_falcon_ucode_desc_v3 { u32 Hdr; u32 StoredSize; u32 PKCDataOffset; u32 InterfaceOffset; u32 IMEMPhysBase; u32 IMEMLoadSize; u32 IMEMVirtBase; u32 DMEMPhysBase; u32 DMEMLoadSize; u16 EngineIdMask; u8 UcodeId; u8 SignatureCount; u16 SignatureVersions; u16 Reserved; } v3; }; static int nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name, const struct nvkm_falcon_ucode_desc_v2 *desc, u32 size, u32 init_cmd, struct nvkm_falcon_fw *fw) { struct nvkm_subdev *subdev = &gsp->subdev; const struct firmware *bl; const struct nvfw_bin_hdr *hdr; const struct nvfw_bl_desc *bld; int ret; /* Build ucode. */ ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, subdev->device, true, (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize, &gsp->falcon, fw); if (WARN_ON(ret)) return ret; fw->nmem_base_img = 0; fw->nmem_base = desc->IMEMPhysBase; fw->nmem_size = desc->IMEMLoadSize - desc->IMEMSecSize; fw->imem_base_img = 0; fw->imem_base = desc->IMEMSecBase; fw->imem_size = desc->IMEMSecSize; fw->dmem_base_img = desc->DMEMOffset; fw->dmem_base = desc->DMEMPhysBase; fw->dmem_size = desc->DMEMLoadSize; /* Bootloader. 
*/ ret = nvkm_firmware_get(subdev, "acr/bl", 0, &bl); if (ret) return ret; hdr = nvfw_bin_hdr(subdev, bl->data); bld = nvfw_bl_desc(subdev, bl->data + hdr->header_offset); fw->boot_addr = bld->start_tag << 8; fw->boot_size = bld->code_size; fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL); if (!fw->boot) ret = -ENOMEM; nvkm_firmware_put(bl); /* Patch in interface data. */ return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd); } static int nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name, const struct nvkm_falcon_ucode_desc_v3 *desc, u32 size, u32 init_cmd, struct nvkm_falcon_fw *fw) { struct nvkm_device *device = gsp->subdev.device; struct nvkm_bios *bios = device->bios; int ret; /* Build ucode. */ ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, device, true, (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize, &gsp->falcon, fw); if (WARN_ON(ret)) return ret; fw->imem_base_img = 0; fw->imem_base = desc->IMEMPhysBase; fw->imem_size = desc->IMEMLoadSize; fw->dmem_base_img = desc->IMEMLoadSize; fw->dmem_base = desc->DMEMPhysBase; fw->dmem_size = ALIGN(desc->DMEMLoadSize, 256); fw->dmem_sign = desc->PKCDataOffset; fw->boot_addr = 0; fw->fuse_ver = desc->SignatureVersions; fw->ucode_id = desc->UcodeId; fw->engine_id = desc->EngineIdMask; /* Patch in signature. */ ret = nvkm_falcon_fw_sign(fw, fw->dmem_base_img + desc->PKCDataOffset, 96 * 4, nvbios_pointer(bios, 0), desc->SignatureCount, (u8 *)desc + 0x2c - (u8 *)nvbios_pointer(bios, 0), 0, 0); if (WARN_ON(ret)) return ret; /* Patch in interface data. 
 */
	return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
}

/*
 * Locate the FWSEC ucode (type 0x85) in the VBIOS PMU tables, build a
 * falcon firmware image for the descriptor version found (v2 or v3),
 * patch in @init_cmd, then boot it on the GSP falcon.
 *
 * Returns 0 on success or a negative errno; the temporary firmware
 * object is always destroyed after the boot attempt.
 */
static int
nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	const union nvfw_falcon_ucode_desc *desc;
	struct nvbios_pmuE flcn_ucode;
	u8 idx, ver, hdr;
	u32 data;
	u16 size, vers;
	struct nvkm_falcon_fw fw = {};
	u32 mbox0 = 0;
	int ret;

	/* Lookup in VBIOS. */
	for (idx = 0; (data = nvbios_pmuEp(bios, idx, &ver, &hdr, &flcn_ucode)); idx++) {
		if (flcn_ucode.type == 0x85)
			break;
	}

	if (WARN_ON(!data))
		return -EINVAL;

	/* Determine version. */
	desc = nvbios_pointer(bios, flcn_ucode.data);
	if (WARN_ON(!(desc->v2.Hdr & 0x00000001)))
		return -EINVAL;

	/* Hdr packs: bit 0 valid flag, bits 8..15 version, bits 16..31 size. */
	size = (desc->v2.Hdr & 0xffff0000) >> 16;
	vers = (desc->v2.Hdr & 0x0000ff00) >> 8;

	switch (vers) {
	case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break;
	case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break;
	default:
		nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers);
		return -EINVAL;
	}

	if (ret) {
		nvkm_error(subdev, "%s(v%d): %d\n", name, vers, ret);
		return ret;
	}

	/* Boot. */
	ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0);
	nvkm_falcon_fw_dtor(&fw);
	if (ret)
		return ret;

	return 0;
}

/*
 * Run the FWSEC "SB" (secure boot) command and verify its completion
 * status from the scratch register at 0x001400 + 0x15*4 (low 16 bits).
 */
int
nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvkm_device *device = subdev->device;
	int ret;
	u32 err;

	ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
	if (ret)
		return ret;

	/* Verify. */
	err = nvkm_rd32(device, 0x001400 + (0x15 * 4)) & 0x0000ffff;
	if (err) {
		nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err);
		return -EIO;
	}

	return 0;
}

/*
 * Run the FWSEC "FRTS" command and verify its completion status from the
 * scratch register at 0x001400 + 0xe*4 (high 16 bits); on success, log
 * the resulting WPR2 region bounds read back from 0x1fa824/0x1fa828.
 */
int
nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvkm_device *device = subdev->device;
	int ret;
	u32 err, wpr2_lo, wpr2_hi;

	ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
	if (ret)
		return ret;

	/* Verify. */
	err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16;
	if (err) {
		nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err);
		return -EIO;
	}

	wpr2_lo = nvkm_rd32(device, 0x1fa824);
	wpr2_hi = nvkm_rd32(device, 0x1fa828);
	nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
	return 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * platform data for the Au1550 NAND driver
 */

#ifndef _AU1550ND_H_
#define _AU1550ND_H_

#include <linux/mtd/partitions.h>

/*
 * struct au1550nd_platdata - board description of the attached NAND chip,
 * passed to the au1550nd driver via platform data.
 */
struct au1550nd_platdata {
	struct mtd_partition *parts;	/* MTD partition table for the chip */
	int num_parts;			/* number of entries in @parts */
	int devwidth;	/* 0 = 8bit device, 1 = 16bit device */
};

#endif
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005,2006,2007,2008 IBM Corporation * * Authors: * Mimi Zohar <[email protected]> * Kylene Hall <[email protected]> * * File: ima_crypto.c * Calculates md5/sha1 file hash, template hash, boot-aggreate hash */ #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/ratelimit.h> #include <linux/file.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/err.h> #include <linux/slab.h> #include <crypto/hash.h> #include "ima.h" /* minimum file size for ahash use */ static unsigned long ima_ahash_minsize; module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644); MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use"); /* default is 0 - 1 page. */ static int ima_maxorder; static unsigned int ima_bufsize = PAGE_SIZE; static int param_set_bufsize(const char *val, const struct kernel_param *kp) { unsigned long long size; int order; size = memparse(val, NULL); order = get_order(size); if (order > MAX_PAGE_ORDER) return -EINVAL; ima_maxorder = order; ima_bufsize = PAGE_SIZE << order; return 0; } static const struct kernel_param_ops param_ops_bufsize = { .set = param_set_bufsize, .get = param_get_uint, }; #define param_check_bufsize(name, p) __param_check(name, p, unsigned int) module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644); MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size"); static struct crypto_shash *ima_shash_tfm; static struct crypto_ahash *ima_ahash_tfm; int ima_sha1_idx __ro_after_init; int ima_hash_algo_idx __ro_after_init; /* * Additional number of slots reserved, as needed, for SHA1 * and IMA default algo. 
*/ int ima_extra_slots __ro_after_init; struct ima_algo_desc *ima_algo_array __ro_after_init; static int __init ima_init_ima_crypto(void) { long rc; ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0); if (IS_ERR(ima_shash_tfm)) { rc = PTR_ERR(ima_shash_tfm); pr_err("Can not allocate %s (reason: %ld)\n", hash_algo_name[ima_hash_algo], rc); return rc; } pr_info("Allocated hash algorithm: %s\n", hash_algo_name[ima_hash_algo]); return 0; } static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo) { struct crypto_shash *tfm = ima_shash_tfm; int rc, i; if (algo < 0 || algo >= HASH_ALGO__LAST) algo = ima_hash_algo; if (algo == ima_hash_algo) return tfm; for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo) return ima_algo_array[i].tfm; tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0); if (IS_ERR(tfm)) { rc = PTR_ERR(tfm); pr_err("Can not allocate %s (reason: %d)\n", hash_algo_name[algo], rc); } return tfm; } int __init ima_init_crypto(void) { enum hash_algo algo; long rc; int i; rc = ima_init_ima_crypto(); if (rc) return rc; ima_sha1_idx = -1; ima_hash_algo_idx = -1; for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) { algo = ima_tpm_chip->allocated_banks[i].crypto_id; if (algo == HASH_ALGO_SHA1) ima_sha1_idx = i; if (algo == ima_hash_algo) ima_hash_algo_idx = i; } if (ima_sha1_idx < 0) { ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++; if (ima_hash_algo == HASH_ALGO_SHA1) ima_hash_algo_idx = ima_sha1_idx; } if (ima_hash_algo_idx < 0) ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++; ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots, sizeof(*ima_algo_array), GFP_KERNEL); if (!ima_algo_array) { rc = -ENOMEM; goto out; } for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) { algo = ima_tpm_chip->allocated_banks[i].crypto_id; ima_algo_array[i].algo = algo; /* unknown TPM algorithm */ if (algo == HASH_ALGO__LAST) continue; if (algo == ima_hash_algo) { 
ima_algo_array[i].tfm = ima_shash_tfm; continue; } ima_algo_array[i].tfm = ima_alloc_tfm(algo); if (IS_ERR(ima_algo_array[i].tfm)) { if (algo == HASH_ALGO_SHA1) { rc = PTR_ERR(ima_algo_array[i].tfm); ima_algo_array[i].tfm = NULL; goto out_array; } ima_algo_array[i].tfm = NULL; } } if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) { if (ima_hash_algo == HASH_ALGO_SHA1) { ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm; } else { ima_algo_array[ima_sha1_idx].tfm = ima_alloc_tfm(HASH_ALGO_SHA1); if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) { rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm); goto out_array; } } ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1; } if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) && ima_hash_algo_idx != ima_sha1_idx) { ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm; ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo; } return 0; out_array: for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) { if (!ima_algo_array[i].tfm || ima_algo_array[i].tfm == ima_shash_tfm) continue; crypto_free_shash(ima_algo_array[i].tfm); } kfree(ima_algo_array); out: crypto_free_shash(ima_shash_tfm); return rc; } static void ima_free_tfm(struct crypto_shash *tfm) { int i; if (tfm == ima_shash_tfm) return; for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) if (ima_algo_array[i].tfm == tfm) return; crypto_free_shash(tfm); } /** * ima_alloc_pages() - Allocate contiguous pages. * @max_size: Maximum amount of memory to allocate. * @allocated_size: Returned size of actual allocation. * @last_warn: Should the min_size allocation warn or not. * * Tries to do opportunistic allocation for memory first trying to allocate * max_size amount of memory and then splitting that until zero order is * reached. Allocation is tried without generating allocation warnings unless * last_warn is set. Last_warn set affects only last allocation of zero order. 
* * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL) * * Return pointer to allocated memory, or NULL on failure. */ static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size, int last_warn) { void *ptr; int order = ima_maxorder; gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY; if (order) order = min(get_order(max_size), order); for (; order; order--) { ptr = (void *)__get_free_pages(gfp_mask, order); if (ptr) { *allocated_size = PAGE_SIZE << order; return ptr; } } /* order is zero - one page */ gfp_mask = GFP_KERNEL; if (!last_warn) gfp_mask |= __GFP_NOWARN; ptr = (void *)__get_free_pages(gfp_mask, 0); if (ptr) { *allocated_size = PAGE_SIZE; return ptr; } *allocated_size = 0; return NULL; } /** * ima_free_pages() - Free pages allocated by ima_alloc_pages(). * @ptr: Pointer to allocated pages. * @size: Size of allocated buffer. */ static void ima_free_pages(void *ptr, size_t size) { if (!ptr) return; free_pages((unsigned long)ptr, get_order(size)); } static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo) { struct crypto_ahash *tfm = ima_ahash_tfm; int rc; if (algo < 0 || algo >= HASH_ALGO__LAST) algo = ima_hash_algo; if (algo != ima_hash_algo || !tfm) { tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0); if (!IS_ERR(tfm)) { if (algo == ima_hash_algo) ima_ahash_tfm = tfm; } else { rc = PTR_ERR(tfm); pr_err("Can not allocate %s (reason: %d)\n", hash_algo_name[algo], rc); } } return tfm; } static void ima_free_atfm(struct crypto_ahash *tfm) { if (tfm != ima_ahash_tfm) crypto_free_ahash(tfm); } static inline int ahash_wait(int err, struct crypto_wait *wait) { err = crypto_wait_req(err, wait); if (err) pr_crit_ratelimited("ahash calculation failed: err: %d\n", err); return err; } static int ima_calc_file_hash_atfm(struct file *file, struct ima_digest_data *hash, struct crypto_ahash *tfm) { loff_t i_size, offset; char *rbuf[2] = { NULL, }; int rc, rbuf_len, active = 0, ahash_rc = 0; struct ahash_request *req; 
    struct scatterlist sg[1];
    struct crypto_wait wait;
    size_t rbuf_size[2];

    hash->length = crypto_ahash_digestsize(tfm);

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req)
        return -ENOMEM;

    crypto_init_wait(&wait);
    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                               CRYPTO_TFM_REQ_MAY_SLEEP,
                               crypto_req_done, &wait);

    rc = ahash_wait(crypto_ahash_init(req), &wait);
    if (rc)
        goto out1;

    i_size = i_size_read(file_inode(file));

    if (i_size == 0)
        goto out2;

    /*
     * Try to allocate maximum size of memory.
     * Fail if even a single page cannot be allocated.
     */
    rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
    if (!rbuf[0]) {
        rc = -ENOMEM;
        goto out1;
    }

    /* Only allocate one buffer if that is enough. */
    if (i_size > rbuf_size[0]) {
        /*
         * Try to allocate secondary buffer. If that fails fallback to
         * using single buffering. Use previous memory allocation size
         * as baseline for possible allocation size.
         */
        rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
                                  &rbuf_size[1], 0);
    }

    for (offset = 0; offset < i_size; offset += rbuf_len) {
        if (!rbuf[1] && offset) {
            /* Not using two buffers, and it is not the first
             * read/request, wait for the completion of the
             * previous ahash_update() request.
             */
            rc = ahash_wait(ahash_rc, &wait);
            if (rc)
                goto out3;
        }
        /* read buffer */
        rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
        rc = integrity_kernel_read(file, offset, rbuf[active],
                                   rbuf_len);
        if (rc != rbuf_len) {
            if (rc >= 0)
                rc = -EINVAL;
            /*
             * Forward current rc, do not overwrite with return value
             * from ahash_wait()
             */
            ahash_wait(ahash_rc, &wait);
            goto out3;
        }

        if (rbuf[1] && offset) {
            /* Using two buffers, and it is not the first
             * read/request, wait for the completion of the
             * previous ahash_update() request.
             */
            rc = ahash_wait(ahash_rc, &wait);
            if (rc)
                goto out3;
        }

        sg_init_one(&sg[0], rbuf[active], rbuf_len);
        ahash_request_set_crypt(req, sg, NULL, rbuf_len);

        /* Kick off the update; completion is awaited next iteration. */
        ahash_rc = crypto_ahash_update(req);

        if (rbuf[1])
            active = !active; /* swap buffers, if we use two */
    }
    /* wait for the last update request to complete */
    rc = ahash_wait(ahash_rc, &wait);
out3:
    ima_free_pages(rbuf[0], rbuf_size[0]);
    ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
    if (!rc) {
        ahash_request_set_crypt(req, NULL, hash->digest, 0);
        rc = ahash_wait(crypto_ahash_final(req), &wait);
    }
out1:
    ahash_request_free(req);
    return rc;
}

/* Hash @file via the async API with a transform for hash->algo. */
static int ima_calc_file_ahash(struct file *file,
                               struct ima_digest_data *hash)
{
    struct crypto_ahash *tfm;
    int rc;

    tfm = ima_alloc_atfm(hash->algo);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    rc = ima_calc_file_hash_atfm(file, hash, tfm);

    ima_free_atfm(tfm);

    return rc;
}

/*
 * Hash a file's contents synchronously, one PAGE_SIZE read at a time,
 * using the given shash transform.
 */
static int ima_calc_file_hash_tfm(struct file *file,
                                  struct ima_digest_data *hash,
                                  struct crypto_shash *tfm)
{
    loff_t i_size, offset = 0;
    char *rbuf;
    int rc;
    SHASH_DESC_ON_STACK(shash, tfm);

    shash->tfm = tfm;

    hash->length = crypto_shash_digestsize(tfm);

    rc = crypto_shash_init(shash);
    if (rc != 0)
        return rc;

    i_size = i_size_read(file_inode(file));

    /* Empty file: digest of zero bytes, skip the read loop. */
    if (i_size == 0)
        goto out;

    rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
    if (!rbuf)
        return -ENOMEM;

    while (offset < i_size) {
        int rbuf_len;

        rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
        if (rbuf_len < 0) {
            rc = rbuf_len;
            break;
        }
        if (rbuf_len == 0) {	/* unexpected EOF */
            rc = -EINVAL;
            break;
        }
        offset += rbuf_len;

        rc = crypto_shash_update(shash, rbuf, rbuf_len);
        if (rc)
            break;
    }
    kfree(rbuf);
out:
    if (!rc)
        rc = crypto_shash_final(shash, hash->digest);
    return rc;
}

/* Hash @file via the sync shash API with a transform for hash->algo. */
static int ima_calc_file_shash(struct file *file,
                               struct ima_digest_data *hash)
{
    struct crypto_shash *tfm;
    int rc;

    tfm = ima_alloc_tfm(hash->algo);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    rc = ima_calc_file_hash_tfm(file, hash, tfm);

    ima_free_tfm(tfm);

    return rc;
}

/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
    loff_t i_size;
    int rc;
    struct file *f = file;
    bool new_file_instance = false;

    /*
     * For consistency, fail file's opened with the O_DIRECT flag on
     * filesystems mounted with/without DAX option.
     */
    if (file->f_flags & O_DIRECT) {
        hash->length = hash_digest_size[ima_hash_algo];
        hash->algo = ima_hash_algo;
        return -EINVAL;
    }

    /* Open a new file instance in O_RDONLY if we cannot read */
    if (!(file->f_mode & FMODE_READ)) {
        /* Strip write/creation flags before reopening read-only. */
        int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
                    O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
        flags |= O_RDONLY;
        f = dentry_open(&file->f_path, flags, file->f_cred);
        if (IS_ERR(f))
            return PTR_ERR(f);

        new_file_instance = true;
    }

    i_size = i_size_read(file_inode(f));

    if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
        rc = ima_calc_file_ahash(f, hash);
        if (!rc)
            goto out;
    }

    /* shash path: default, and fallback when ahash failed. */
    rc = ima_calc_file_shash(f, hash);
out:
    if (new_file_instance)
        fput(f);
    return rc;
}

/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
                                         struct ima_template_entry *entry,
                                         int tfm_idx)
{
    SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
    struct ima_template_desc *td = entry->template_desc;
    int num_fields = entry->template_desc->num_fields;
    int rc, i;

    shash->tfm = ima_algo_array[tfm_idx].tfm;

    rc = crypto_shash_init(shash);
    if (rc != 0)
        return rc;

    for (i = 0; i < num_fields; i++) {
        u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
        u8 *data_to_hash =
                field_data[i].data;
        u32 datalen = field_data[i].len;
        /* Optionally store the length little-endian for canonical logs. */
        u32 datalen_to_hash = !ima_canonical_fmt ? datalen :
                (__force u32)cpu_to_le32(datalen);

        if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
            /* Non-"ima" templates hash each field's length first. */
            rc = crypto_shash_update(shash,
                        (const u8 *) &datalen_to_hash,
                        sizeof(datalen_to_hash));
            if (rc)
                break;
        } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
            /* Legacy "ima" template: fixed-width, zero-padded name. */
            memcpy(buffer, data_to_hash, datalen);
            data_to_hash = buffer;
            datalen = IMA_EVENT_NAME_LEN_MAX + 1;
        }
        rc = crypto_shash_update(shash, data_to_hash, datalen);
        if (rc)
            break;
    }

    if (!rc)
        rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);

    return rc;
}

/*
 * Compute the template-data digest for every TPM bank (plus extra slots).
 * SHA1 is computed first since unmapped banks reuse its (padded) digest.
 */
int ima_calc_field_array_hash(struct ima_field_data *field_data,
                              struct ima_template_entry *entry)
{
    u16 alg_id;
    int rc, i;

    rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
    if (rc)
        return rc;

    entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;

    for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
        if (i == ima_sha1_idx)
            continue;

        if (i < NR_BANKS(ima_tpm_chip)) {
            alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
            entry->digests[i].alg_id = alg_id;
        }

        /* for unmapped TPM algorithms digest is still a padded SHA1 */
        if (!ima_algo_array[i].tfm) {
            memcpy(entry->digests[i].digest,
                   entry->digests[ima_sha1_idx].digest,
                   TPM_DIGEST_SIZE);
            continue;
        }

        rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
        if (rc)
            return rc;
    }

    return rc;
}

/*
 * Hash an in-memory buffer with the async hash API in a single update.
 * (Body continues past this line in the next span.)
 */
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
                                  struct ima_digest_data *hash,
                                  struct crypto_ahash *tfm)
{
    struct ahash_request *req;
    struct scatterlist sg;
    struct crypto_wait wait;
    int rc, ahash_rc = 0;

    hash->length = crypto_ahash_digestsize(tfm);

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req)
        return -ENOMEM;

    crypto_init_wait(&wait);
    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                               CRYPTO_TFM_REQ_MAY_SLEEP,
                               crypto_req_done, &wait);

    rc = ahash_wait(crypto_ahash_init(req), &wait);
    if (rc)
        goto out;

    sg_init_one(&sg, buf, len);
    ahash_request_set_crypt(req, &sg, NULL, len);
    ahash_rc = crypto_ahash_update(req);

    /* wait for the update request to complete */
    rc = ahash_wait(ahash_rc, &wait);
    if (!rc) {
        ahash_request_set_crypt(req, NULL, hash->digest, 0);
        rc = ahash_wait(crypto_ahash_final(req), &wait);
    }
out:
    ahash_request_free(req);
    return rc;
}

/* Hash @buf via the async API with a transform for hash->algo. */
static int calc_buffer_ahash(const void *buf, loff_t len,
                             struct ima_digest_data *hash)
{
    struct crypto_ahash *tfm;
    int rc;

    tfm = ima_alloc_atfm(hash->algo);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

    ima_free_atfm(tfm);

    return rc;
}

/*
 * Hash an in-memory buffer synchronously, feeding at most PAGE_SIZE
 * bytes per shash update.
 */
static int calc_buffer_shash_tfm(const void *buf, loff_t size,
                                 struct ima_digest_data *hash,
                                 struct crypto_shash *tfm)
{
    SHASH_DESC_ON_STACK(shash, tfm);
    unsigned int len;
    int rc;

    shash->tfm = tfm;

    hash->length = crypto_shash_digestsize(tfm);

    rc = crypto_shash_init(shash);
    if (rc != 0)
        return rc;

    while (size) {
        len = size < PAGE_SIZE ? size : PAGE_SIZE;
        rc = crypto_shash_update(shash, buf, len);
        if (rc)
            break;
        buf += len;
        size -= len;
    }

    if (!rc)
        rc = crypto_shash_final(shash, hash->digest);
    return rc;
}

/* Hash @buf via the sync shash API with a transform for hash->algo. */
static int calc_buffer_shash(const void *buf, loff_t len,
                             struct ima_digest_data *hash)
{
    struct crypto_shash *tfm;
    int rc;

    tfm = ima_alloc_tfm(hash->algo);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

    ima_free_tfm(tfm);
    return rc;
}

/*
 * Hash a buffer, preferring ahash for large inputs (per ima.ahash_minsize)
 * and falling back to shash on failure.
 */
int ima_calc_buffer_hash(const void *buf, loff_t len,
                         struct ima_digest_data *hash)
{
    int rc;

    if (ima_ahash_minsize && len >= ima_ahash_minsize) {
        rc = calc_buffer_ahash(buf, len, hash);
        if (!rc)
            return 0;
    }

    return calc_buffer_shash(buf, len, hash);
}

/* Read PCR @idx into @d; failure is logged but not propagated. */
static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
    if (!ima_tpm_chip)
        return;

    if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
        pr_err("Error Communicating to TPM chip\n");
}

/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7.
With * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks, * allowing firmware to configure and enable different banks. * * Knowing which TPM bank is read to calculate the boot_aggregate digest * needs to be conveyed to a verifier. For this reason, use the same * hash algorithm for reading the TPM PCRs as for calculating the boot * aggregate digest as stored in the measurement list. */ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id, struct crypto_shash *tfm) { struct tpm_digest d = { .alg_id = alg_id, .digest = {0} }; int rc; u32 i; SHASH_DESC_ON_STACK(shash, tfm); shash->tfm = tfm; pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n", d.alg_id); rc = crypto_shash_init(shash); if (rc != 0) return rc; /* cumulative digest over TPM registers 0-7 */ for (i = TPM_PCR0; i < TPM_PCR8; i++) { ima_pcrread(i, &d); /* now accumulate with current aggregate */ rc = crypto_shash_update(shash, d.digest, crypto_shash_digestsize(tfm)); if (rc != 0) return rc; } /* * Extend cumulative digest over TPM registers 8-9, which contain * measurement for the kernel command line (reg. 8) and image (reg. 9) * in a typical PCR allocation. Registers 8-9 are only included in * non-SHA1 boot_aggregate digests to avoid ambiguity. 
*/ if (alg_id != TPM_ALG_SHA1) { for (i = TPM_PCR8; i < TPM_PCR10; i++) { ima_pcrread(i, &d); rc = crypto_shash_update(shash, d.digest, crypto_shash_digestsize(tfm)); } } if (!rc) crypto_shash_final(shash, digest); return rc; } int ima_calc_boot_aggregate(struct ima_digest_data *hash) { struct crypto_shash *tfm; u16 crypto_id, alg_id; int rc, i, bank_idx = -1; for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) { crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id; if (crypto_id == hash->algo) { bank_idx = i; break; } if (crypto_id == HASH_ALGO_SHA256) bank_idx = i; if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1) bank_idx = i; } if (bank_idx == -1) { pr_err("No suitable TPM algorithm for boot aggregate\n"); return 0; } hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id; tfm = ima_alloc_tfm(hash->algo); if (IS_ERR(tfm)) return PTR_ERR(tfm); hash->length = crypto_shash_digestsize(tfm); alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id; rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm); ima_free_tfm(tfm); return rc; }
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
 *
 * Shared definitions for the ST21NFCA HCI-based NFC controller driver:
 * framing constants, gate numbers, driver state and per-feature
 * (DEP, secure element, vendor command) bookkeeping structures.
 */
#ifndef __LOCAL_ST21NFCA_H_
#define __LOCAL_ST21NFCA_H_

#include <net/nfc/hci.h>

#include <linux/skbuff.h>
#include <linux/workqueue.h>

#define HCI_MODE 0

/* framing in HCI mode */
#define ST21NFCA_SOF_EOF_LEN    2

/* Almost every time value is 0 */
#define ST21NFCA_HCI_LLC_LEN    1

/* Size in worst case :
 * In normal case CRC len = 2 but byte stuffing
 * may appear in case one CRC byte = ST21NFCA_SOF_EOF
 */
#define ST21NFCA_HCI_LLC_CRC    4

#define ST21NFCA_HCI_LLC_LEN_CRC        (ST21NFCA_SOF_EOF_LEN + \
                                        ST21NFCA_HCI_LLC_LEN + \
                                        ST21NFCA_HCI_LLC_CRC)
#define ST21NFCA_HCI_LLC_MIN_SIZE       (1 + ST21NFCA_HCI_LLC_LEN_CRC)

/* Worst case when adding byte stuffing between each byte */
#define ST21NFCA_HCI_LLC_MAX_PAYLOAD    29
#define ST21NFCA_HCI_LLC_MAX_SIZE       (ST21NFCA_HCI_LLC_LEN_CRC + 1 + \
                                        ST21NFCA_HCI_LLC_MAX_PAYLOAD)

/* Reader RF commands */
#define ST21NFCA_WR_XCHG_DATA           0x10

#define ST21NFCA_DEVICE_MGNT_GATE       0x01
#define ST21NFCA_RF_READER_F_GATE       0x14
#define ST21NFCA_RF_CARD_F_GATE         0x24
#define ST21NFCA_APDU_READER_GATE       0xf0
#define ST21NFCA_CONNECTIVITY_GATE      0x41

/*
 * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
 * sequence of at most 32 characters.
 */
#define ST21NFCA_ESE_MAX_LENGTH         33
#define ST21NFCA_ESE_HOST_ID            0xc0

#define DRIVER_DESC "HCI NFC driver for ST21NFCA"

#define ST21NFCA_HCI_MODE               0
#define ST21NFCA_NUM_DEVICES            256

#define ST21NFCA_VENDOR_OUI 0x0080E1 /* STMicroelectronics */
#define ST21NFCA_FACTORY_MODE 2

/* Which secure elements are populated on this board. */
struct st21nfca_se_status {
    bool is_ese_present;
    bool is_uicc_present;
};

enum st21nfca_state {
    ST21NFCA_ST_COLD,
    ST21NFCA_ST_READY,
};

/**
 * enum nfc_vendor_cmds - supported nfc vendor commands
 *
 * @FACTORY_MODE: Allow to set the driver into a mode where no secure element
 * are activated. It does not consider any NFC_ATTR_VENDOR_DATA.
 * @HCI_CLEAR_ALL_PIPES: Allow to execute a HCI clear all pipes command.
 * It does not consider any NFC_ATTR_VENDOR_DATA.
 * @HCI_DM_PUT_DATA: Allow to configure specific CLF registry as for example
 * RF trimmings or low level drivers configurations (I2C, SPI, SWP).
 * @HCI_DM_UPDATE_AID: Allow to configure an AID routing into the CLF routing
 * table following RF technology, CLF mode or protocol.
 * @HCI_DM_GET_INFO: Allow to retrieve CLF information.
 * @HCI_DM_GET_DATA: Allow to retrieve CLF configurable data such as low
 * level drivers configurations or RF trimmings.
 * @HCI_DM_LOAD: Allow to load a firmware into the CLF. A complete
 * packet can be more than 8KB.
 * @HCI_DM_RESET: Allow to run a CLF reset in order to "commit" CLF
 * configuration changes without CLF power off.
 * @HCI_GET_PARAM: Allow to retrieve an HCI CLF parameter (for example the
 * white list).
 * @HCI_DM_FIELD_GENERATOR: Allow to generate different kind of RF
 * technology. When using this command no anti-collision is done.
 * @HCI_LOOPBACK: Allow to echo a command and test the Dh to CLF
 * connectivity.
 */
enum nfc_vendor_cmds {
    FACTORY_MODE,
    HCI_CLEAR_ALL_PIPES,
    HCI_DM_PUT_DATA,
    HCI_DM_UPDATE_AID,
    HCI_DM_GET_INFO,
    HCI_DM_GET_DATA,
    HCI_DM_LOAD,
    HCI_DM_RESET,
    HCI_GET_PARAM,
    HCI_DM_FIELD_GENERATOR,
    HCI_LOOPBACK,
};

/* Completion/reply state for an in-flight vendor command. */
struct st21nfca_vendor_info {
    struct completion req_completion;
    struct sk_buff *rx_skb;
};

/* NFC-DEP (peer-to-peer) link state; wire-ordered, hence __packed. */
struct st21nfca_dep_info {
    struct sk_buff *tx_pending;
    struct work_struct tx_work;
    u8 curr_nfc_dep_pni;
    u32 idx;
    u8 to;
    u8 did;
    u8 bsi;
    u8 bri;
    u8 lri;
} __packed;

/* Secure-element session state: ATR, timers and I/O callback. */
struct st21nfca_se_info {
    u8 atr[ST21NFCA_ESE_MAX_LENGTH];
    struct completion req_completion;

    struct timer_list bwi_timer;
    int wt_timeout; /* in msecs */
    bool bwi_active;

    struct timer_list se_active_timer;
    bool se_active;
    int expected_pipes;
    int count_pipes;

    bool xch_error;

    se_io_cb_t cb;
    void *cb_context;
    struct work_struct timeout_work;
};

/* Top-level per-device driver context. */
struct st21nfca_hci_info {
    const struct nfc_phy_ops *phy_ops;
    void *phy_id;

    struct nfc_hci_dev *hdev;
    struct st21nfca_se_status *se_status;

    enum st21nfca_state state;

    struct mutex info_lock;

    int async_cb_type;
    data_exchange_cb_t async_cb;
    void *async_cb_context;

    struct st21nfca_dep_info dep_info;
    struct st21nfca_se_info se_info;
    struct st21nfca_vendor_info vendor_info;
};

int st21nfca_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops,
                       char *llc_name, int phy_headroom, int phy_tailroom,
                       int phy_payload, struct nfc_hci_dev **hdev,
                       struct st21nfca_se_status *se_status);
void st21nfca_hci_remove(struct nfc_hci_dev *hdev);

int st21nfca_dep_event_received(struct nfc_hci_dev *hdev,
                                u8 event, struct sk_buff *skb);
int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb);

int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len);
int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb);
void st21nfca_dep_init(struct nfc_hci_dev *hdev);
void st21nfca_dep_deinit(struct nfc_hci_dev *hdev);

int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
                                         u8 event, struct sk_buff *skb);
int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
                                        u8 event, struct sk_buff *skb);

int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev);
int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx);
int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx);
int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
                       u8 *apdu, size_t apdu_length,
                       se_io_cb_t cb, void *cb_context);
void st21nfca_se_init(struct nfc_hci_dev *hdev);
void st21nfca_se_deinit(struct nfc_hci_dev *hdev);

int st21nfca_hci_loopback_event_received(struct nfc_hci_dev *ndev, u8 event,
                                         struct sk_buff *skb);
int st21nfca_vendor_cmds_init(struct nfc_hci_dev *ndev);

#endif /* __LOCAL_ST21NFCA_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * benchmark.c:
 * Author: Konstantin Khlebnikov <[email protected]>
 *
 * Userspace benchmark harness for the radix tree test suite: times
 * insertion, tagging, (tagged) iteration and deletion over a range of
 * tree sizes and index strides.
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <time.h>
#include "test.h"

#define NSEC_PER_SEC    1000000000L

/*
 * Time one full iteration over the tree (tagged or plain) and return
 * nanoseconds per pass.  The volatile sink keeps the compiler from
 * optimizing the traversal away.  Under BENCHMARK the pass count is
 * scaled up so very fast runs still measure ~250ms of work.
 */
static long long benchmark_iter(struct radix_tree_root *root, bool tagged)
{
    volatile unsigned long sink = 0;
    struct radix_tree_iter iter;
    struct timespec start, finish;
    long long nsec;
    int l, loops = 1;
    void **slot;

#ifdef BENCHMARK
again:
#endif
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (l = 0; l < loops; l++) {
        if (tagged) {
            radix_tree_for_each_tagged(slot, root, &iter, 0, 0)
                sink ^= (unsigned long)slot;
        } else {
            radix_tree_for_each_slot(slot, root, &iter, 0)
                sink ^= (unsigned long)slot;
        }
    }
    clock_gettime(CLOCK_MONOTONIC, &finish);

    nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
           (finish.tv_nsec - start.tv_nsec);

#ifdef BENCHMARK
    if (loops == 1 && nsec * 5 < NSEC_PER_SEC) {
        loops = NSEC_PER_SEC / nsec / 4 + 1;
        goto again;
    }
#endif

    nsec /= loops;
    return nsec;
}

/* Time inserting size/step items at every step-th index. */
static void benchmark_insert(struct radix_tree_root *root,
                             unsigned long size, unsigned long step)
{
    struct timespec start, finish;
    unsigned long index;
    long long nsec;

    clock_gettime(CLOCK_MONOTONIC, &start);

    for (index = 0 ; index < size ; index += step)
        item_insert(root, index);

    clock_gettime(CLOCK_MONOTONIC, &finish);

    nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
           (finish.tv_nsec - start.tv_nsec);

    printv(2, "Size: %8ld, step: %8ld, insertion: %15lld ns\n",
           size, step, nsec);
}

/* Time setting tag 0 on every inserted index. */
static void benchmark_tagging(struct radix_tree_root *root,
                              unsigned long size, unsigned long step)
{
    struct timespec start, finish;
    unsigned long index;
    long long nsec;

    clock_gettime(CLOCK_MONOTONIC, &start);

    for (index = 0 ; index < size ; index += step)
        radix_tree_tag_set(root, index, 0);

    clock_gettime(CLOCK_MONOTONIC, &finish);

    nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
           (finish.tv_nsec - start.tv_nsec);

    printv(2, "Size: %8ld, step: %8ld, tagging: %17lld ns\n",
           size, step, nsec);
}

/* Time deleting every inserted index. */
static void benchmark_delete(struct radix_tree_root *root,
                             unsigned long size, unsigned long step)
{
    struct timespec start, finish;
    unsigned long index;
    long long nsec;

    clock_gettime(CLOCK_MONOTONIC, &start);

    for (index = 0 ; index < size ; index += step)
        item_delete(root, index);

    clock_gettime(CLOCK_MONOTONIC, &finish);

    nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
           (finish.tv_nsec - start.tv_nsec);

    printv(2, "Size: %8ld, step: %8ld, deletion: %16lld ns\n",
           size, step, nsec);
}

/* Run the full insert/tag/iterate/delete cycle for one (size, step). */
static void benchmark_size(unsigned long size, unsigned long step)
{
    RADIX_TREE(tree, GFP_KERNEL);
    long long normal, tagged;

    benchmark_insert(&tree, size, step);
    benchmark_tagging(&tree, size, step);

    tagged = benchmark_iter(&tree, true);
    normal = benchmark_iter(&tree, false);

    printv(2, "Size: %8ld, step: %8ld, tagged iteration: %8lld ns\n",
           size, step, tagged);
    printv(2, "Size: %8ld, step: %8ld, normal iteration: %8lld ns\n",
           size, step, normal);

    benchmark_delete(&tree, size, step);

    item_kill_tree(&tree);
    rcu_barrier();
}

/* Entry point: sweep the size x step matrix (zero terminates each list). */
void benchmark(void)
{
    unsigned long size[] = {1 << 10, 1 << 20, 0};
    unsigned long step[] = {1, 2, 7, 15, 63, 64, 65,
                            128, 256, 512, 12345, 0};
    int c, s;

    printv(1, "starting benchmarks\n");
    printv(1, "RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT);

    for (c = 0; size[c]; c++)
        for (s = 0; step[s]; s++)
            benchmark_size(size[c], step[s]);
}
/* SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Test handler for the s390x DIAGNOSE 0x0318 instruction.
 *
 * Copyright (C) 2020, IBM
 */

#ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER
#define SELFTEST_KVM_DIAG318_TEST_HANDLER

/*
 * Fix: the header uses uint64_t but did not include <stdint.h>, so it was
 * not self-contained and relied on include order in every consumer.
 */
#include <stdint.h>

/*
 * Return the DIAG 0x0318 information code captured by the test handler.
 * (Implementation lives in the corresponding .c file.)
 */
uint64_t get_diag318_info(void);

#endif
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 IBM Corporation.
 *
 * Boot-wrapper console backend that talks to OPAL firmware: locates the
 * OPAL base/entry addresses in the devicetree and implements putc/flush
 * over the OPAL console-write calls.
 */

#include "ops.h"
#include "stdio.h"
#include "io.h"
#include <libfdt.h>
#include "../include/asm/opal-api.h"

/* Global OPAL struct used by opal-call.S */
struct opal {
    u64 base;
    u64 entry;
} opal;

static u32 opal_con_id;

/* see opal-wrappers.S */
int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
int64_t opal_console_flush(uint64_t term_number);
int64_t opal_poll_events(uint64_t *outstanding_event_mask);

void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr);

static int opal_con_open(void)
{
    /*
     * When OPAL loads the boot kernel it stashes the OPAL base and entry
     * address in r8 and r9 so the kernel can use the OPAL console
     * before unflattening the devicetree. While executing the wrapper will
     * probably trash r8 and r9 so this kentry hook restores them before
     * entering the decompressed kernel.
     */
    platform_ops.kentry = opal_kentry;
    return 0;
}

/*
 * Write one byte to the OPAL console, spinning (and polling firmware
 * events) until the console reports buffer space.  OPAL lengths are
 * big-endian, hence the be64 conversions.
 */
static void opal_con_putc(unsigned char c)
{
    int64_t rc;
    uint64_t olen, len;

    do {
        rc = opal_console_write_buffer_space(opal_con_id, &olen);
        len = be64_to_cpu(olen);
        if (rc)
            return;
        opal_poll_events(NULL);
    } while (len < 1);


    olen = cpu_to_be64(1);
    opal_console_write(opal_con_id, &olen, &c);
}

static void opal_con_close(void)
{
    opal_console_flush(opal_con_id);
}

/* Read the OPAL base/entry addresses out of /ibm,opal in the devicetree. */
static void opal_init(void)
{
    void *opal_node;

    opal_node = finddevice("/ibm,opal");
    if (!opal_node)
        return;
    if (getprop(opal_node, "opal-base-address", &opal.base,
                sizeof(u64)) < 0)
        return;
    opal.base = be64_to_cpu(opal.base);
    if (getprop(opal_node, "opal-entry-address", &opal.entry,
                sizeof(u64)) < 0)
        return;
    opal.entry = be64_to_cpu(opal.entry);
}

/*
 * Hook this backend into the boot-wrapper serial console layer.  The
 * terminal number comes from @devp's "reg" property when present,
 * defaulting to terminal 0.  Returns 0 on success, -1 on a bad property.
 */
int opal_console_init(void *devp, struct serial_console_data *scdp)
{
    opal_init();

    if (devp) {
        int n = getprop(devp, "reg", &opal_con_id, sizeof(u32));
        if (n != sizeof(u32))
            return -1;
        opal_con_id = be32_to_cpu(opal_con_id);
    } else
        opal_con_id = 0;

    scdp->open = opal_con_open;
    scdp->putc = opal_con_putc;
    scdp->close = opal_con_close;

    return 0;
}
/* SPDX-License-Identifier: GPL-2.0 * * Copyright 2016-2018 HabanaLabs, Ltd. * All Rights Reserved. * */ /************************************ ** This is an auto-generated file ** ** DO NOT EDIT BELOW ** ************************************/ #ifndef ASIC_REG_SIF_RTR_CTRL_5_REGS_H_ #define ASIC_REG_SIF_RTR_CTRL_5_REGS_H_ /* ***************************************** * SIF_RTR_CTRL_5 (Prototype: RTR_CTRL) ***************************************** */ #define mmSIF_RTR_CTRL_5_PERM_SEL 0x356108 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_0 0x356114 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_1 0x356118 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_2 0x35611C #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_3 0x356120 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_4 0x356124 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_5 0x356128 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_6 0x35612C #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_7 0x356130 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_8 0x356134 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_9 0x356138 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_10 0x35613C #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_11 0x356140 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_12 0x356144 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_13 0x356148 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_14 0x35614C #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_15 0x356150 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_16 0x356154 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_17 0x356158 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_18 0x35615C #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_19 0x356160 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_20 0x356164 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_21 0x356168 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_22 0x35616C #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_23 0x356170 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_24 0x356174 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_25 0x356178 #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_26 0x35617C #define mmSIF_RTR_CTRL_5_HBM_POLY_H3_27 0x356180 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_0 0x356184 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_1 0x356188 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_2 
0x35618C #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_3 0x356190 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_4 0x356194 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_5 0x356198 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_6 0x35619C #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_7 0x3561A0 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_8 0x3561A4 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_9 0x3561A8 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_10 0x3561AC #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_11 0x3561B0 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_12 0x3561B4 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_13 0x3561B8 #define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_14 0x3561BC #define mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN 0x35626C #define mmSIF_RTR_CTRL_5_RL_HBM_EN 0x356274 #define mmSIF_RTR_CTRL_5_RL_HBM_SAT 0x356278 #define mmSIF_RTR_CTRL_5_RL_HBM_RST 0x35627C #define mmSIF_RTR_CTRL_5_RL_HBM_TIMEOUT 0x356280 #define mmSIF_RTR_CTRL_5_SCRAM_HBM_EN 0x356284 #define mmSIF_RTR_CTRL_5_RL_PCI_EN 0x356288 #define mmSIF_RTR_CTRL_5_RL_PCI_SAT 0x35628C #define mmSIF_RTR_CTRL_5_RL_PCI_RST 0x356290 #define mmSIF_RTR_CTRL_5_RL_PCI_TIMEOUT 0x356294 #define mmSIF_RTR_CTRL_5_RL_SRAM_EN 0x35629C #define mmSIF_RTR_CTRL_5_RL_SRAM_SAT 0x3562A0 #define mmSIF_RTR_CTRL_5_RL_SRAM_RST 0x3562A4 #define mmSIF_RTR_CTRL_5_RL_SRAM_TIMEOUT 0x3562AC #define mmSIF_RTR_CTRL_5_RL_SRAM_RED 0x3562B4 #define mmSIF_RTR_CTRL_5_E2E_HBM_EN 0x3562EC #define mmSIF_RTR_CTRL_5_E2E_PCI_EN 0x3562F0 #define mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE 0x3562F4 #define mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE 0x3562F8 #define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET_EN 0x356404 #define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET 0x356408 #define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_WRAP 0x35640C #define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_CNT 0x356410 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET_EN 0x356414 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET 0x356418 #define mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE 0x35641C #define mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE 0x356420 #define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET_EN 0x356424 #define 
mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET 0x356428 #define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_WRAP 0x35642C #define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_CNT 0x356430 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET_EN 0x356434 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET 0x356438 #define mmSIF_RTR_CTRL_5_NL_HBM_SEL_0 0x356450 #define mmSIF_RTR_CTRL_5_NL_HBM_SEL_1 0x356454 #define mmSIF_RTR_CTRL_5_NON_LIN_EN 0x356480 #define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_0 0x356500 #define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_1 0x356504 #define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_2 0x356508 #define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_3 0x35650C #define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_4 0x356510 #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_0 0x356514 #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_1 0x356520 #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_2 0x356524 #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_3 0x356528 #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_4 0x35652C #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_5 0x356530 #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_6 0x356534 #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_7 0x356538 #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_8 0x35653C #define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_9 0x356540 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_0 0x356550 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_1 0x356554 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_2 0x356558 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_3 0x35655C #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_4 0x356560 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_5 0x356564 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_6 0x356568 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_7 0x35656C #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_8 0x356570 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_9 0x356574 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_10 0x356578 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_11 0x35657C #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_12 0x356580 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_13 0x356584 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_14 0x356588 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_15 0x35658C #define 
mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_16 0x356590 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_17 0x356594 #define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18 0x356598 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0 0x3565E4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_1 0x3565E8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_2 0x3565EC #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_3 0x3565F0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_4 0x3565F4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_5 0x3565F8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_6 0x3565FC #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_7 0x356600 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_8 0x356604 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_9 0x356608 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_10 0x35660C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_11 0x356610 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_12 0x356614 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_13 0x356618 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_14 0x35661C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_15 0x356620 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0 0x356624 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_1 0x356628 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_2 0x35662C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_3 0x356630 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_4 0x356634 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_5 0x356638 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_6 0x35663C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_7 0x356640 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_8 0x356644 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_9 0x356648 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_10 0x35664C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_11 0x356650 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_12 0x356654 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_13 0x356658 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_14 0x35665C #define 
mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_15 0x356660 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0 0x356664 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_1 0x356668 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_2 0x35666C #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_3 0x356670 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_4 0x356674 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_5 0x356678 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_6 0x35667C #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_7 0x356680 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_8 0x356684 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_9 0x356688 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_10 0x35668C #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_11 0x356690 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_12 0x356694 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_13 0x356698 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_14 0x35669C #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_15 0x3566A0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0 0x3566A4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_1 0x3566A8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_2 0x3566AC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_3 0x3566B0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_4 0x3566B4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_5 0x3566B8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_6 0x3566BC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_7 0x3566C0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_8 0x3566C4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_9 0x3566C8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_10 0x3566CC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_11 0x3566D0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_12 0x3566D4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_13 0x3566D8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_14 0x3566DC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_15 0x3566E0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_0 0x3566E4 
#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_1 0x3566E8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_2 0x3566EC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_3 0x3566F0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_4 0x3566F4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_5 0x3566F8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_6 0x3566FC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_7 0x356700 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_8 0x356704 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_9 0x356708 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_10 0x35670C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_11 0x356710 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_12 0x356714 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_13 0x356718 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_14 0x35671C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_15 0x356720 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_0 0x356724 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_1 0x356728 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_2 0x35672C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_3 0x356730 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_4 0x356734 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_5 0x356738 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_6 0x35673C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_7 0x356740 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_8 0x356744 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_9 0x356748 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_10 0x35674C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_11 0x356750 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_12 0x356754 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_13 0x356758 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_14 0x35675C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_15 0x356760 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_0 0x356764 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_1 0x356768 #define 
mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_2 0x35676C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_3 0x356770 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_4 0x356774 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_5 0x356778 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_6 0x35677C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_7 0x356780 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_8 0x356784 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_9 0x356788 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_10 0x35678C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_11 0x356790 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_12 0x356794 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_13 0x356798 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_14 0x35679C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_15 0x3567A0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_0 0x3567A4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_1 0x3567A8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_2 0x3567AC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_3 0x3567B0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_4 0x3567B4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_5 0x3567B8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_6 0x3567BC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_7 0x3567C0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_8 0x3567C4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_9 0x3567C8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_10 0x3567CC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_11 0x3567D0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_12 0x3567D4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_13 0x3567D8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_14 0x3567DC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_15 0x3567E0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0 0x356824 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_1 0x356828 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_2 0x35682C #define 
mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_3 0x356830 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_4 0x356834 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_5 0x356838 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_6 0x35683C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_7 0x356840 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_8 0x356844 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_9 0x356848 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_10 0x35684C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_11 0x356850 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_12 0x356854 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_13 0x356858 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_14 0x35685C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_15 0x356860 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0 0x356864 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_1 0x356868 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_2 0x35686C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_3 0x356870 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_4 0x356874 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_5 0x356878 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_6 0x35687C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_7 0x356880 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_8 0x356884 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_9 0x356888 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_10 0x35688C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_11 0x356890 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_12 0x356894 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_13 0x356898 #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_14 0x35689C #define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_15 0x3568A0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0 0x3568A4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_1 0x3568A8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_2 0x3568AC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_3 0x3568B0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_4 0x3568B4 #define 
mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_5 0x3568B8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_6 0x3568BC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_7 0x3568C0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_8 0x3568C4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_9 0x3568C8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_10 0x3568CC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_11 0x3568D0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_12 0x3568D4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_13 0x3568D8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_14 0x3568DC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_15 0x3568E0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0 0x3568E4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_1 0x3568E8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_2 0x3568EC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_3 0x3568F0 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_4 0x3568F4 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_5 0x3568F8 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_6 0x3568FC #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_7 0x356900 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_8 0x356904 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_9 0x356908 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_10 0x35690C #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_11 0x356910 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_12 0x356914 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_13 0x356918 #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_14 0x35691C #define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_15 0x356920 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_0 0x356924 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_1 0x356928 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_2 0x35692C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_3 0x356930 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_4 0x356934 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_5 0x356938 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_6 0x35693C 
#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_7 0x356940 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_8 0x356944 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_9 0x356948 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_10 0x35694C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_11 0x356950 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_12 0x356954 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_13 0x356958 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_14 0x35695C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_15 0x356960 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_0 0x356964 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_1 0x356968 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_2 0x35696C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_3 0x356970 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_4 0x356974 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_5 0x356978 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_6 0x35697C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_7 0x356980 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_8 0x356984 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_9 0x356988 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_10 0x35698C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_11 0x356990 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_12 0x356994 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_13 0x356998 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_14 0x35699C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_15 0x3569A0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_0 0x3569A4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_1 0x3569A8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_2 0x3569AC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_3 0x3569B0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_4 0x3569B4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_5 0x3569B8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_6 0x3569BC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_7 0x3569C0 #define 
mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_8 0x3569C4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_9 0x3569C8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_10 0x3569CC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_11 0x3569D0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_12 0x3569D4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_13 0x3569D8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_14 0x3569DC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_15 0x3569E0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_0 0x3569E4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_1 0x3569E8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_2 0x3569EC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_3 0x3569F0 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_4 0x3569F4 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_5 0x3569F8 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_6 0x3569FC #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_7 0x356A00 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_8 0x356A04 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_9 0x356A08 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_10 0x356A0C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_11 0x356A10 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_12 0x356A14 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_13 0x356A18 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_14 0x356A1C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_15 0x356A20 #define mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AW 0x356A64 #define mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AR 0x356A68 #define mmSIF_RTR_CTRL_5_RANGE_PRIV_HIT_AW 0x356A6C #define mmSIF_RTR_CTRL_5_RANGE_PRIV_HIT_AR 0x356A70 #define mmSIF_RTR_CTRL_5_RGL_CFG 0x356B64 #define mmSIF_RTR_CTRL_5_RGL_SHIFT 0x356B68 #define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_0 0x356B6C #define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_1 0x356B70 #define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_2 0x356B74 #define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_3 0x356B78 #define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_4 0x356B7C 
#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_5 0x356B80 #define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_6 0x356B84 #define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_7 0x356B88 #define mmSIF_RTR_CTRL_5_RGL_TOKEN_0 0x356BAC #define mmSIF_RTR_CTRL_5_RGL_TOKEN_1 0x356BB0 #define mmSIF_RTR_CTRL_5_RGL_TOKEN_2 0x356BB4 #define mmSIF_RTR_CTRL_5_RGL_TOKEN_3 0x356BB8 #define mmSIF_RTR_CTRL_5_RGL_TOKEN_4 0x356BBC #define mmSIF_RTR_CTRL_5_RGL_TOKEN_5 0x356BC0 #define mmSIF_RTR_CTRL_5_RGL_TOKEN_6 0x356BC4 #define mmSIF_RTR_CTRL_5_RGL_TOKEN_7 0x356BC8 #define mmSIF_RTR_CTRL_5_RGL_BANK_ID_0 0x356BEC #define mmSIF_RTR_CTRL_5_RGL_BANK_ID_1 0x356BF0 #define mmSIF_RTR_CTRL_5_RGL_BANK_ID_2 0x356BF4 #define mmSIF_RTR_CTRL_5_RGL_BANK_ID_3 0x356BF8 #define mmSIF_RTR_CTRL_5_RGL_BANK_ID_4 0x356BFC #define mmSIF_RTR_CTRL_5_RGL_BANK_ID_5 0x356C00 #define mmSIF_RTR_CTRL_5_RGL_BANK_ID_6 0x356C04 #define mmSIF_RTR_CTRL_5_RGL_BANK_ID_7 0x356C08 #define mmSIF_RTR_CTRL_5_RGL_WDT 0x356C2C #define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_WRAP 0x356C30 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_WRAP 0x356C34 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_WRAP 0x356C38 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_WRAP 0x356C3C #define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_WRAP 0x356C40 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_WRAP 0x356C44 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_WRAP 0x356C48 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_WRAP 0x356C4C #define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_CNT 0x356C50 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_CNT 0x356C54 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_CNT 0x356C58 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_CNT 0x356C5C #define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_CNT 0x356C60 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_CNT 0x356C64 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_CNT 0x356C68 #define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_CNT 0x356C6C #define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_WRAP 0x356C70 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_WRAP 
0x356C74 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_WRAP 0x356C78 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_WRAP 0x356C7C #define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_WRAP 0x356C80 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_WRAP 0x356C84 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_WRAP 0x356C88 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_WRAP 0x356C8C #define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_CNT 0x356C90 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_CNT 0x356C94 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_CNT 0x356C98 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_CNT 0x356C9C #define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_CNT 0x356CA0 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_CNT 0x356CA4 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_CNT 0x356CA8 #define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_CNT 0x356CAC #define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_0 0x356CB0 #define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_1 0x356CB4 #define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_2 0x356CB8 #define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3 0x356CBC #endif /* ASIC_REG_SIF_RTR_CTRL_5_REGS_H_ */
// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#ifndef _DML21_UTILS_H_
#define _DML21_UTILS_H_

/*
 * Helper interface for the DML2.1 wrapper: translates between DC's
 * stream/plane/pipe objects and DML2.1's internal pipe/plane indexing,
 * and applies DML2.1 programming output to DC pipe contexts.
 */

/* Forward declarations only — callers pass pointers, full types not needed here. */
struct dc_state;
struct dc_plane_state;
struct pipe_ctx;
struct dml2_context;
struct dml2_display_rq_regs;
struct dml2_display_dlg_regs;
struct dml2_display_ttu_regs;

/* Map a DC stream id to the corresponding DML pipe index. */
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
/* Map a DML plane id to the corresponding DML pipe index. */
int dml21_find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int plane_id);
/* Look up the plane id for @plane in @state; returns false if not found. */
bool dml21_get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id);
/* Copy DML-computed RQ/DLG/TTU register values into the DC pipe context. */
void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
	struct dml2_display_dlg_regs *disp_dlg_regs,
	struct dml2_display_ttu_regs *disp_ttu_regs,
	struct pipe_ctx *out);
/* Record per-plane MALL (memory access at last level) allocation size in the pipe. */
void dml21_populate_mall_allocation_size(struct dc_state *context,
		struct dml2_context *in_ctx,
		struct dml2_per_plane_programming *pln_prog,
		struct pipe_ctx *dc_pipe);
/* True when the pipe's output encoder is DP 2.0 (128b/132b). */
bool check_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx);
/* Advance *dml_pipe_idx to a DML pipe belonging to @stream_index. */
void find_valid_pipe_idx_for_stream_index(const struct dml2_context *dml_ctx, unsigned int *dml_pipe_idx, unsigned int stream_index);
/* Find the index of @pipe's register set within the DML programming output. */
void find_pipe_regs_idx(const struct dml2_context *dml_ctx, struct pipe_ctx *pipe, unsigned int *pipe_regs_idx);
/*
 * Collect the DC main and phantom pipes driven by DML plane @dml_plane_idx.
 * Returns the number of main pipes found; the two arrays are filled in parallel.
 */
int dml21_find_dc_pipes_for_plane(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__], struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__], int dml_plane_idx);
/* Apply per-plane and per-stream DML programming to one DC pipe. */
void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_per_stream_programming *stream_prog);
/* Create/destroy phantom streams and planes required by subviewport MCLK switching. */
void dml21_handle_phantom_streams_planes(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
/* Convert a DML plane id back to a DC plane index. */
unsigned int dml21_get_dc_plane_idx_from_plane_id(unsigned int plane_id);
/* Build FAMS2 (firmware-assisted memory switching) programming for the state. */
void dml21_build_fams2_programming(const struct dc *dc, struct dc_state *context, struct dml2_context *dml_ctx);
/* True when the source format uses a second plane (e.g. chroma of planar YUV). */
bool dml21_is_plane1_enabled(enum dml2_source_format_class source_format);

#endif
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for AMD Queue-based DMA Subsystem
 *
 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_qdma.h>
#include <linux/regmap.h>

#include "qdma.h"

/* Direction tag used in log messages: host-to-card vs card-to-host. */
#define CHAN_STR(q)		(((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
/* MMIO offset of logical register @r in device @d's register layout table. */
#define QDMA_REG_OFF(d, r)	((d)->roffs[r].off)

/* MMIO regmap config for all QDMA registers */
static const struct regmap_config qdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

/* Recover the qdma_queue embedding a generic dmaengine channel. */
static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan)
{
	return container_of(chan, struct qdma_queue, vchan.chan);
}

/* Recover the qdma_mm_vdesc embedding a virt-dma descriptor. */
static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct qdma_mm_vdesc, vdesc);
}

/*
 * Pick the next interrupt aggregation ring in round-robin order and
 * return its hardware ring index.
 */
static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev)
{
	u32 idx;

	idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx;
	qdev->qintr_ring_idx %= qdev->qintr_ring_num;

	return idx;
}

/*
 * Extract field @field from the register image @data.  The field is
 * described by lsb/msb bit positions in qdev->rfields and may span one,
 * two, or three consecutive 32-bit words (the three branches below).
 */
static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data,
			  enum qdma_reg_fields field)
{
	const struct qdma_reg_field *f = &qdev->rfields[field];
	u16 low_pos, hi_pos, low_bit, hi_bit;
	u64 value = 0, mask;

	low_pos = f->lsb / BITS_PER_TYPE(*data);
	hi_pos = f->msb / BITS_PER_TYPE(*data);

	if (low_pos == hi_pos) {
		/* Field fits entirely inside one 32-bit word. */
		low_bit = f->lsb % BITS_PER_TYPE(*data);
		hi_bit = f->msb % BITS_PER_TYPE(*data);
		mask = GENMASK(hi_bit, low_bit);
		value = (data[low_pos] & mask) >> low_bit;
	} else if (hi_pos == low_pos + 1) {
		/* Field straddles two adjacent words: combine then mask. */
		low_bit = f->lsb % BITS_PER_TYPE(*data);
		hi_bit = low_bit + (f->msb - f->lsb);
		value = ((u64)data[hi_pos] << BITS_PER_TYPE(*data)) |
			data[low_pos];
		mask = GENMASK_ULL(hi_bit, low_bit);
		value = (value & mask) >> low_bit;
	} else {
		/*
		 * Field spans three words: take the partial top word, the
		 * full middle word, and the tail of the bottom word.
		 */
		hi_bit = f->msb % BITS_PER_TYPE(*data);
		mask = GENMASK(hi_bit, 0);
		value = data[hi_pos] & mask;
		low_bit = f->msb - f->lsb - hi_bit;
		value <<= low_bit;
		low_bit -= 32;
		value |= (u64)data[hi_pos - 1] << low_bit;
		mask = GENMASK(31, 32 - low_bit);
		value |= (data[hi_pos - 2] & mask) >> low_bit;
	}

	return value;
}

/*
 * Merge @value into field @field of the register image @data.  Only OR
 * operations are used, so callers must start from a zeroed image (the
 * qdma_prep_*_context() helpers below memset it first).
 */
static void qdma_set_field(const struct qdma_device *qdev, u32 *data,
			   enum qdma_reg_fields field, u64 value)
{
	const struct qdma_reg_field *f = &qdev->rfields[field];
	u16 low_pos, hi_pos, low_bit;

	low_pos = f->lsb / BITS_PER_TYPE(*data);
	hi_pos = f->msb / BITS_PER_TYPE(*data);
	low_bit = f->lsb % BITS_PER_TYPE(*data);

	/* Low 32 bits land in the first word; spill the rest upward. */
	data[low_pos++] |= value << low_bit;
	if (low_pos <= hi_pos)
		data[low_pos++] |= (u32)(value >> (32 - low_bit));
	if (low_pos <= hi_pos)
		data[low_pos] |= (u32)(value >> (64 - low_bit));
}

/* Write logical register @reg; bulk write when it spans multiple words. */
static inline int qdma_reg_write(const struct qdma_device *qdev,
				 const u32 *data, enum qdma_regs reg)
{
	const struct qdma_reg *r = &qdev->roffs[reg];
	int ret;

	if (r->count > 1)
		ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count);
	else
		ret = regmap_write(qdev->regmap, r->off, *data);

	return ret;
}

/* Read logical register @reg; bulk read when it spans multiple words. */
static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data,
				enum qdma_regs reg)
{
	const struct qdma_reg *r = &qdev->roffs[reg];
	int ret;

	if (r->count > 1)
		ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count);
	else
		ret = regmap_read(qdev->regmap, r->off, data);

	return ret;
}

/*
 * Issue one indirect-context command (@cmd on context @type at @index)
 * and poll until the hardware clears the BUSY flag or the poll times out.
 */
static int qdma_context_cmd_execute(const struct qdma_device *qdev,
				    enum qdma_ctxt_type type,
				    enum qdma_ctxt_cmd cmd, u16 index)
{
	u32 value = 0;
	int ret;

	qdma_set_field(qdev, &value, QDMA_REGF_CMD_INDX, index);
	qdma_set_field(qdev, &value, QDMA_REGF_CMD_CMD, cmd);
	qdma_set_field(qdev, &value, QDMA_REGF_CMD_TYPE, type);

	ret = qdma_reg_write(qdev, &value, QDMA_REGO_CTXT_CMD);
	if (ret)
		return ret;

	ret = regmap_read_poll_timeout(qdev->regmap,
				       QDMA_REG_OFF(qdev, QDMA_REGO_CTXT_CMD),
				       value,
				       !qdma_get_field(qdev, &value,
						       QDMA_REGF_CMD_BUSY),
				       QDMA_POLL_INTRVL_US,
				       QDMA_POLL_TIMEOUT_US);
	if (ret) {
		qdma_err(qdev, "Context command execution timed out");
		return ret;
	}

	return 0;
}

/*
 * Stage context payload for a subsequent WRITE command: an all-ones mask
 * (update every bit) followed by the data words.
 */
static int qdma_context_write_data(const struct qdma_device *qdev,
				   const u32 *data)
{
	u32 mask[QDMA_CTXT_REGMAP_LEN];
	int ret;

	memset(mask, ~0, sizeof(mask));

	ret = qdma_reg_write(qdev, mask, QDMA_REGO_CTXT_MASK);
	if (ret)
		return ret;

	ret = qdma_reg_write(qdev, data, QDMA_REGO_CTXT_DATA);
	if (ret)
		return ret;

	return 0;
}

/*
 * Build the software-descriptor context image for a memory-mapped queue:
 * ring base/IRQ vector from @ctxt plus fixed mode bits (32B descriptors,
 * write-back, IRQ aggregation, queue enabled, marker disabled).
 */
static void qdma_prep_sw_desc_context(const struct qdma_device *qdev,
				      const struct qdma_ctxt_sw_desc *ctxt,
				      u32 *data)
{
	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
	qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base);
	qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec);
	qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid);

	qdma_set_field(qdev, data, QDMA_REGF_DESC_SIZE, QDMA_DESC_SIZE_32B);
	qdma_set_field(qdev, data, QDMA_REGF_RING_ID, QDMA_DEFAULT_RING_ID);
	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MODE, QDMA_QUEUE_OP_MM);
	qdma_set_field(qdev, data, QDMA_REGF_IRQ_ENABLE, 1);
	qdma_set_field(qdev, data, QDMA_REGF_WBK_ENABLE, 1);
	qdma_set_field(qdev, data, QDMA_REGF_WBI_CHECK, 1);
	qdma_set_field(qdev, data, QDMA_REGF_IRQ_ARM, 1);
	qdma_set_field(qdev, data, QDMA_REGF_IRQ_AGG, 1);
	qdma_set_field(qdev, data, QDMA_REGF_WBI_INTVL_ENABLE, 1);
	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_ENABLE, 1);
	qdma_set_field(qdev, data, QDMA_REGF_MRKR_DISABLE, 1);
}

/* Build the interrupt-aggregation-ring context image from @ctxt. */
static void qdma_prep_intr_context(const struct qdma_device *qdev,
				   const struct qdma_ctxt_intr *ctxt,
				   u32 *data)
{
	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
	qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid);
}

/* Build the function-map context image (queue base and count) from @ctxt. */
static void qdma_prep_fmap_context(const struct qdma_device *qdev,
				   const struct qdma_ctxt_fmap *ctxt,
				   u32 *data)
{
	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase);
	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MAX, ctxt->qmax);
}

/*
 * Program the indirect context register space
 *
 * Once the queue is enabled, context is dynamically updated by hardware. Any
 * modification of the context through this API when the queue is enabled can
 * result in unexpected behavior. Reading the context when the queue is enabled
 * is not recommended as it can result in reduced performance.
 */
static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type,
			     enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt)
{
	int ret;

	/* ctxt_lock serializes the shared mask/data/command registers. */
	mutex_lock(&qdev->ctxt_lock);
	if (cmd == QDMA_CTXT_WRITE) {
		ret = qdma_context_write_data(qdev, ctxt);
		if (ret)
			goto failed;
	}

	ret = qdma_context_cmd_execute(qdev, type, cmd, index);
	if (ret)
		goto failed;

	if (cmd == QDMA_CTXT_READ) {
		ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA);
		if (ret)
			goto failed;
	}

failed:
	mutex_unlock(&qdev->ctxt_lock);

	return ret;
}

/*
 * Read the SW descriptor context of queue @qid and fail with -EBUSY if the
 * hardware reports the queue already enabled (i.e. claimed by someone else).
 */
static int qdma_check_queue_status(struct qdma_device *qdev,
				   enum dma_transfer_direction dir, u16 qid)
{
	u32 status, data[QDMA_CTXT_REGMAP_LEN] = {0};
	enum qdma_ctxt_type type;
	int ret;

	if (dir == DMA_MEM_TO_DEV)
		type = QDMA_CTXT_DESC_SW_H2C;
	else
		type = QDMA_CTXT_DESC_SW_C2H;
	ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data);
	if (ret)
		return ret;

	status = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE);
	if (status) {
		qdma_err(qdev, "queue %d already in use", qid);
		return -EBUSY;
	}

	return 0;
}

/*
 * Clear every hardware context (SW/HW/credit descriptor and prefetch)
 * associated with @queue for its transfer direction.
 */
static int qdma_clear_queue_context(const struct qdma_queue *queue)
{
	enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
					    QDMA_CTXT_DESC_HW_H2C,
					    QDMA_CTXT_DESC_CR_H2C,
					    QDMA_CTXT_PFTCH, };
	enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
					    QDMA_CTXT_DESC_HW_C2H,
					    QDMA_CTXT_DESC_CR_C2H,
					    QDMA_CTXT_PFTCH, };
	struct qdma_device *qdev = queue->qdev;
	enum qdma_ctxt_type *type;
	int ret, num, i;

	if (queue->dir == DMA_MEM_TO_DEV) {
		type = h2c_types;
		num = ARRAY_SIZE(h2c_types);
	} else {
		type = c2h_types;
		num = ARRAY_SIZE(c2h_types);
	}
	for (i = 0; i < num; i++) {
		ret = qdma_prog_context(qdev, type[i], QDMA_CTXT_CLEAR,
					queue->qid, NULL);
		if (ret) {
			qdma_err(qdev, "Failed to clear ctxt %d", type[i]);
			return ret;
		}
	}

	return 0;
}

/*
 * Clear and then program this function's fmap context, mapping queues
 * [0, chan_num * 2) (one H2C plus one C2H queue per channel) to the
 * device's function id.
 */
static int qdma_setup_fmap_context(struct qdma_device *qdev)
{
	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
	struct qdma_ctxt_fmap fmap;
	int ret;

	ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_CLEAR,
				qdev->fid, NULL);
	if (ret) {
		qdma_err(qdev, "Failed clearing context");
		return ret;
	}

	fmap.qbase = 0;
	fmap.qmax = qdev->chan_num * 2;
	qdma_prep_fmap_context(qdev, &fmap, ctxt);
	ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_WRITE,
				qdev->fid, ctxt);
	if (ret)
		qdma_err(qdev, "Failed setup fmap, ret %d", ret);

	return ret;
}

/* Program the SW descriptor context of queue @qid for direction @dir. */
static int qdma_setup_queue_context(struct qdma_device *qdev,
				    const struct qdma_ctxt_sw_desc *sw_desc,
				    enum dma_transfer_direction dir, u16 qid)
{
	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
	enum qdma_ctxt_type type;
	int ret;

	if (dir == DMA_MEM_TO_DEV)
		type = QDMA_CTXT_DESC_SW_H2C;
	else
		type = QDMA_CTXT_DESC_SW_C2H;

	qdma_prep_sw_desc_context(qdev, sw_desc, ctxt);
	/* Setup SW descriptor context */
	ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt);
	if (ret)
		qdma_err(qdev, "Failed setup SW desc ctxt for queue: %d", qid);

	return ret;
}

/*
 * Enable or disable memory-mapped DMA engines
 * 1: enable, 0: disable
 */
static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl)
{
	int ret;

	ret = qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_H2C_CTRL);
	ret |= qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_C2H_CTRL);

	return ret;
}

/*
 * Read hardware capabilities (queue count and function id) and validate
 * that the platform's requested channel count fits: each MM channel
 * consumes two hardware queues (H2C + C2H).
 */
static int qdma_get_hw_info(struct qdma_device *qdev)
{
	struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
	u32 value = 0;
	int ret;

	ret = qdma_reg_read(qdev, &value, QDMA_REGO_QUEUE_COUNT);
	if (ret)
		return ret;

	/* Register holds count - 1. */
	value = qdma_get_field(qdev, &value, QDMA_REGF_QUEUE_COUNT) + 1;
	if (pdata->max_mm_channels * 2 > value) {
		qdma_err(qdev, "not enough hw queues %d", value);
		return -EINVAL;
	}
	qdev->chan_num = pdata->max_mm_channels;

	ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID);
	if (ret)
		return ret;

	qdma_info(qdev, "max channel %d, function id %d",
		  qdev->chan_num, qdev->fid);

	return 0;
}

/* Ring the queue's producer-index doorbell (ARM bit requests an IRQ). */
static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx)
{
	struct qdma_device *qdev = queue->qdev;

	return regmap_write(qdev->regmap, queue->pidx_reg,
			    pidx | QDMA_QUEUE_ARM_BIT);
}

/* Acknowledge the interrupt ring: write ring index and consumer index. */
static inline int qdma_update_cidx(const struct qdma_queue *queue,
				   u16 ridx, u16 cidx)
{
	struct qdma_device *qdev = queue->qdev;

	return regmap_write(qdev->regmap, queue->cidx_reg,
			    ((u32)ridx << 16) | cidx);
}

/**
 * qdma_free_vdesc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void qdma_free_vdesc(struct virt_dma_desc *vdesc)
{
	struct qdma_mm_vdesc *vd = to_qdma_vdesc(vdesc);

	kfree(vd);
}

/*
 * Allocate and initialize the per-direction queue array (chan_num entries),
 * verifying each hardware queue is free and registering its virt-dma channel.
 */
static int qdma_alloc_queues(struct qdma_device *qdev,
			     enum dma_transfer_direction dir)
{
	struct qdma_queue *q, **queues;
	u32 i, pidx_base;
	int ret;

	if (dir == DMA_MEM_TO_DEV) {
		queues = &qdev->h2c_queues;
		pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_H2C_PIDX);
	} else {
		queues = &qdev->c2h_queues;
		pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_C2H_PIDX);
	}

	*queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q),
			       GFP_KERNEL);
	if (!*queues)
		return -ENOMEM;

	for (i = 0; i < qdev->chan_num; i++) {
		ret = qdma_check_queue_status(qdev, dir, i);
		if (ret)
			return ret;

		q = &(*queues)[i];
		q->ring_size = QDMA_DEFAULT_RING_SIZE;
		/*
		 * idx_mask = ring_size - 2: index wrap mask; the last ring
		 * slot is reserved (not used for descriptors).
		 */
		q->idx_mask = q->ring_size - 2;
		q->qdev = qdev;
		q->dir = dir;
		q->qid = i;
		q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE;
		q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) +
				i * QDMA_DMAP_REG_STRIDE;
		q->vchan.desc_free = qdma_free_vdesc;
		vchan_init(&q->vchan, &qdev->dma_dev);
	}

	return 0;
}

/*
 * Verify the QDMA identifier register and select the default register
 * field/offset tables.
 */
static int qdma_device_verify(struct qdma_device *qdev)
{
	u32 value;
	int ret;

	ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value);
	if (ret)
		return ret;

	value = FIELD_GET(QDMA_IDENTIFIER_MASK, value);
	if (value != QDMA_IDENTIFIER) {
		qdma_err(qdev, "Invalid identifier");
		return -ENODEV;
	}
	qdev->rfields = qdma_regfs_default;
	qdev->roffs = qdma_regos_default;

	return 0;
}

/*
 * One-time device bring-up: inherit DMA ops from the nearest ancestor that
 * has them, program the fmap context and global ring size, enable both MM
 * engines, and allocate the H2C and C2H queue arrays.
 */
static int qdma_device_setup(struct qdma_device *qdev)
{
	struct device *dev = &qdev->pdev->dev;
	u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
	int ret = 0;

	/* Walk up to the first parent that provides DMA mapping ops. */
	while (dev && get_dma_ops(dev))
		dev = dev->parent;
	if (!dev) {
		qdma_err(qdev, "dma device not found");
		return -EINVAL;
	}
	set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));

	ret = qdma_setup_fmap_context(qdev);
	if (ret) {
		qdma_err(qdev, "Failed setup fmap context");
		return ret;
	}

	/* Setup global ring buffer size at QDMA_DEFAULT_RING_ID index */
	ret = qdma_reg_write(qdev, &ring_sz, QDMA_REGO_RING_SIZE);
	if (ret) {
		qdma_err(qdev, "Failed to setup ring %d of size %ld",
			 QDMA_DEFAULT_RING_ID, QDMA_DEFAULT_RING_SIZE);
		return ret;
	}

	/* Enable memory-mapped DMA engine in both directions */
	ret = qdma_sgdma_control(qdev, 1);
	if (ret) {
		qdma_err(qdev, "Failed to SGDMA with error %d", ret);
		return ret;
	}

	ret = qdma_alloc_queues(qdev, DMA_MEM_TO_DEV);
	if (ret) {
		qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret);
		return ret;
	}

	ret = qdma_alloc_queues(qdev, DMA_DEV_TO_MEM);
	if (ret) {
		qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret);
		return ret;
	}

	return 0;
}

/**
 * qdma_free_queue_resources() - Free queue resources
 * @chan: DMA channel
 */
static void qdma_free_queue_resources(struct dma_chan *chan)
{
	struct qdma_queue *queue = to_qdma_queue(chan);
	struct qdma_device *qdev = queue->qdev;
	struct device *dev = qdev->dma_dev.dev;

	qdma_clear_queue_context(queue);
	vchan_free_chan_resources(&queue->vchan);
	dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
			  queue->desc_base, queue->dma_desc_base);
}

/**
 * qdma_alloc_queue_resources() - Allocate queue resources
 * @chan: DMA channel
 */
static int qdma_alloc_queue_resources(struct dma_chan *chan)
{
	struct qdma_queue *queue = to_qdma_queue(chan);
	struct qdma_device *qdev = queue->qdev;
	struct qdma_ctxt_sw_desc desc;
	size_t size;
	int ret;

	/* Start from a clean hardware context before enabling the queue. */
	ret = qdma_clear_queue_context(queue);
	if (ret)
		return ret;

	size = queue->ring_size * QDMA_MM_DESC_SIZE;
	queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
					      &queue->dma_desc_base,
					      GFP_KERNEL);
	if (!queue->desc_base) {
		qdma_err(qdev, "Failed to allocate descriptor ring");
		return -ENOMEM;
	}

	/* Setup SW descriptor queue context for DMA memory map */
	desc.vec = qdma_get_intr_ring_idx(qdev);
	desc.desc_base = queue->dma_desc_base;
	ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid);
	if (ret) {
		qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
			 chan->name);
		dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
				  queue->dma_desc_base);
		return ret;
	}

	queue->pidx = 0;
	queue->cidx = 0;

	return 0;
}

/* dma_request_chan filter: match on transfer direction only. */
static bool qdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct qdma_queue *queue = to_qdma_queue(chan);
	struct qdma_queue_info *info = param;

	return info->dir == queue->dir;
}

/*
 * Kick the hardware: if any issued descriptor is outstanding, write the
 * producer index recorded on the last issued vdesc to the doorbell.
 * Called with the vchan lock held (see qdma_issue_pending()).
 */
static int qdma_xfer_start(struct qdma_queue *queue)
{
	struct qdma_device *qdev = queue->qdev;
	int ret;

	if (!vchan_next_desc(&queue->vchan))
		return 0;

	qdma_dbg(qdev, "Tnx kickoff with P: %d for %s%d",
		 queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid);

	ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx);
	if (ret) {
		qdma_err(qdev, "Failed to update PIDX to %d for %s queue: %d",
			 queue->pidx, CHAN_STR(queue), queue->qid);
	}

	return ret;
}

/* dmaengine .device_issue_pending: promote submitted work and ring doorbell. */
static void qdma_issue_pending(struct dma_chan *chan)
{
	struct qdma_queue *queue = to_qdma_queue(chan);
	unsigned long flags;

	spin_lock_irqsave(&queue->vchan.lock, flags);
	if (vchan_issue_pending(&queue->vchan)) {
		/* Descriptors staged at submit time become "issued" now. */
		if (queue->submitted_vdesc) {
			queue->issued_vdesc = queue->submitted_vdesc;
			queue->submitted_vdesc = NULL;
		}
		qdma_xfer_start(queue);
	}

	spin_unlock_irqrestore(&queue->vchan.lock, flags);
}

/*
 * Claim the next free hardware descriptor slot, or NULL when the ring is
 * full (producer would catch up to consumer).
 */
static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
{
	struct qdma_mm_desc *desc;

	if (((q->pidx + 1) & q->idx_mask) == q->cidx)
		return NULL;

	desc = q->desc_base + q->pidx;
	q->pidx = (q->pidx + 1) & q->idx_mask;

	return desc;
}

/*
 * Translate as much of @vdesc's scatterlist as fits into hardware
 * descriptors, splitting entries larger than QDMA_MM_DESC_MAX_LEN.
 * Progress (sg_off/sg_len/pidx) is saved on the vdesc so enqueueing can
 * resume later; returns -EBUSY when the ring fills up.
 */
static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
{
	struct qdma_mm_desc *desc;
	struct scatterlist *sg;
	u64 addr, *src, *dst;
	u32 rest, len;
	int ret = 0;
	u32 i;

	if (!vdesc->sg_len)
		return 0;

	/* Fixed device address on one side, walking memory address on the other. */
	if (q->dir == DMA_MEM_TO_DEV) {
		dst = &vdesc->dev_addr;
		src = &addr;
	} else {
		dst = &addr;
		src = &vdesc->dev_addr;
	}

	for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) {
		addr = sg_dma_address(sg) + vdesc->sg_off;
		rest = sg_dma_len(sg) - vdesc->sg_off;
		while (rest) {
			len = min_t(u32, rest, QDMA_MM_DESC_MAX_LEN);
			desc = qdma_get_desc(q);
			if (!desc) {
				ret = -EBUSY;
				goto out;
			}

			desc->src_addr = cpu_to_le64(*src);
			desc->dst_addr = cpu_to_le64(*dst);
			desc->len = cpu_to_le32(len);

			vdesc->dev_addr += len;
			vdesc->sg_off += len;
			vdesc->pending_descs++;
			addr += len;
			rest -= len;
		}
		vdesc->sg_off = 0;
	}
out:
	vdesc->sg_len -= i;
	vdesc->pidx = q->pidx;
	return ret;
}

/*
 * Push descriptors for all pending vdescs into the hardware ring: first
 * resume any partially-enqueued issued descriptors, then the submitted
 * (not yet issued) list.  Caller holds the vchan lock.
 */
static void qdma_fill_pending_vdesc(struct qdma_queue *q)
{
	struct virt_dma_chan *vc = &q->vchan;
	struct qdma_mm_vdesc *vdesc = NULL;
	struct virt_dma_desc *vd;
	int ret;

	if (!list_empty(&vc->desc_issued)) {
		vd = &q->issued_vdesc->vdesc;
		list_for_each_entry_from(vd, &vc->desc_issued, node) {
			vdesc = to_qdma_vdesc(vd);
			ret = qdma_hw_enqueue(q, vdesc);
			if (ret) {
				/* Ring full: remember where to resume. */
				q->issued_vdesc = vdesc;
				return;
			}
		}
		q->issued_vdesc = vdesc;
	}

	if (list_empty(&vc->desc_submitted))
		return;

	if (q->submitted_vdesc)
		vd = &q->submitted_vdesc->vdesc;
	else
		vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node);

	list_for_each_entry_from(vd, &vc->desc_submitted, node) {
		vdesc = to_qdma_vdesc(vd);
		ret = qdma_hw_enqueue(q, vdesc);
		if (ret)
			break;
	}
	q->submitted_vdesc = vdesc;
}

/*
 * Custom tx_submit: assign a cookie, move the descriptor to the submitted
 * list, and eagerly stage its hardware descriptors (doorbell is only rung
 * from issue_pending).
 */
static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct qdma_queue *q = to_qdma_queue(&vc->chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	dma_cookie_t cookie;

	vd = container_of(tx, struct virt_dma_desc, tx);
	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	qdma_fill_pending_vdesc(q);
	spin_unlock_irqrestore(&vc->lock, flags);

	return cookie;
}

/*
 * dmaengine .device_prep_slave_sg: wrap the scatterlist in a vdesc whose
 * device-side address comes from the channel's slave config.
 */
static struct dma_async_tx_descriptor *
qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct qdma_queue *q = to_qdma_queue(chan);
	struct dma_async_tx_descriptor *tx;
	struct qdma_mm_vdesc *vdesc;

	vdesc = kzalloc(sizeof(*vdesc), GFP_NOWAIT);
	if (!vdesc)
		return NULL;
	vdesc->sgl = sgl;
	vdesc->sg_len = sg_len;
	if (dir == DMA_MEM_TO_DEV)
		vdesc->dev_addr = q->cfg.dst_addr;
	else
		vdesc->dev_addr = q->cfg.src_addr;

	tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags);
	tx->tx_submit = qdma_tx_submit;

	return tx;
}

/* dmaengine .device_config: snapshot the slave configuration. */
static int qdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct qdma_queue *q = to_qdma_queue(chan);

	memcpy(&q->cfg, cfg, sizeof(*cfg));

	return 0;
}

/* (Re-)arm the global error interrupt toward this function's error vector. */
static int qdma_arm_err_intr(const struct qdma_device *qdev)
{
	u32 value = 0;

	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid);
	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx);
	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_ARM, 1);

	return qdma_reg_write(qdev, &value, QDMA_REGO_ERR_INT);
}

/*
 * Global error IRQ handler: log and clear the error status (write-to-clear),
 * then re-arm the error interrupt on every path.
 */
static irqreturn_t qdma_error_isr(int irq, void *data)
{
	struct qdma_device *qdev = data;
	u32 err_stat = 0;
	int ret;

	ret = qdma_reg_read(qdev, &err_stat, QDMA_REGO_ERR_STAT);
	if (ret) {
		qdma_err(qdev, "read error state failed, ret %d", ret);
		goto out;
	}

	qdma_err(qdev, "global error %d", err_stat);
	ret = qdma_reg_write(qdev, &err_stat, QDMA_REGO_ERR_STAT);
	if (ret)
		qdma_err(qdev, "clear error state failed, ret %d", ret);

out:
	qdma_arm_err_intr(qdev);
	return IRQ_HANDLED;
}

/* Per-queue (aggregation ring) IRQ handler — continues beyond this chunk. */
static irqreturn_t qdma_queue_isr(int irq, void *data)
{
	struct qdma_intr_ring *intr = data;
	struct qdma_queue *q = NULL;
	struct qdma_device *qdev;
	u32 index, comp_desc;
	u64 intr_ent;
	u8
color; int ret; u16 qid; qdev = intr->qdev; index = intr->cidx; while (1) { struct virt_dma_desc *vd; struct qdma_mm_vdesc *vdesc; unsigned long flags; u32 cidx; intr_ent = le64_to_cpu(intr->base[index]); color = FIELD_GET(QDMA_INTR_MASK_COLOR, intr_ent); if (color != intr->color) break; qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent); if (FIELD_GET(QDMA_INTR_MASK_TYPE, intr_ent)) q = qdev->c2h_queues; else q = qdev->h2c_queues; q += qid; cidx = FIELD_GET(QDMA_INTR_MASK_CIDX, intr_ent); spin_lock_irqsave(&q->vchan.lock, flags); comp_desc = (cidx - q->cidx) & q->idx_mask; vd = vchan_next_desc(&q->vchan); if (!vd) goto skip; vdesc = to_qdma_vdesc(vd); while (comp_desc > vdesc->pending_descs) { list_del(&vd->node); vchan_cookie_complete(vd); comp_desc -= vdesc->pending_descs; vd = vchan_next_desc(&q->vchan); vdesc = to_qdma_vdesc(vd); } vdesc->pending_descs -= comp_desc; if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) { list_del(&vd->node); vchan_cookie_complete(vd); } q->cidx = cidx; qdma_fill_pending_vdesc(q); qdma_xfer_start(q); skip: spin_unlock_irqrestore(&q->vchan.lock, flags); /* * Wrap the index value and flip the expected color value if * interrupt aggregation PIDX has wrapped around. */ index++; index &= QDMA_INTR_RING_IDX_MASK; if (!index) intr->color = !intr->color; } /* * Update the software interrupt aggregation ring CIDX if a valid entry * was found. */ if (q) { qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index); /* * Record the last read index of status descriptor from the * interrupt aggregation ring. 
*/ intr->cidx = index; ret = qdma_update_cidx(q, intr->ridx, index); if (ret) { qdma_err(qdev, "Failed to update IRQ CIDX"); return IRQ_NONE; } } return IRQ_HANDLED; } static int qdma_init_error_irq(struct qdma_device *qdev) { struct device *dev = &qdev->pdev->dev; int ret; u32 vec; vec = qdev->queue_irq_start - 1; ret = devm_request_threaded_irq(dev, vec, NULL, qdma_error_isr, IRQF_ONESHOT, "amd-qdma-error", qdev); if (ret) { qdma_err(qdev, "Failed to request error IRQ vector: %d", vec); return ret; } ret = qdma_arm_err_intr(qdev); if (ret) qdma_err(qdev, "Failed to arm err interrupt, ret %d", ret); return ret; } static int qdmam_alloc_qintr_rings(struct qdma_device *qdev) { u32 ctxt[QDMA_CTXT_REGMAP_LEN]; struct device *dev = &qdev->pdev->dev; struct qdma_intr_ring *ring; struct qdma_ctxt_intr intr_ctxt; u32 vector; int ret, i; qdev->qintr_ring_num = qdev->queue_irq_num; qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num, sizeof(*qdev->qintr_rings), GFP_KERNEL); if (!qdev->qintr_rings) return -ENOMEM; vector = qdev->queue_irq_start; for (i = 0; i < qdev->qintr_ring_num; i++, vector++) { ring = &qdev->qintr_rings[i]; ring->qdev = qdev; ring->msix_id = qdev->err_irq_idx + i + 1; ring->ridx = i; ring->color = 1; ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE, &ring->dev_base, GFP_KERNEL); if (!ring->base) { qdma_err(qdev, "Failed to alloc intr ring %d", i); return -ENOMEM; } intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base); intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096; intr_ctxt.vec = ring->msix_id; intr_ctxt.valid = true; intr_ctxt.color = true; ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL, QDMA_CTXT_CLEAR, ring->ridx, NULL); if (ret) { qdma_err(qdev, "Failed clear intr ctx, ret %d", ret); return ret; } qdma_prep_intr_context(qdev, &intr_ctxt, ctxt); ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL, QDMA_CTXT_WRITE, ring->ridx, ctxt); if (ret) { qdma_err(qdev, "Failed setup intr ctx, ret %d", ret); return ret; } ret = 
devm_request_threaded_irq(dev, vector, NULL, qdma_queue_isr, IRQF_ONESHOT, "amd-qdma-queue", ring); if (ret) { qdma_err(qdev, "Failed to request irq %d", vector); return ret; } } return 0; } static int qdma_intr_init(struct qdma_device *qdev) { int ret; ret = qdma_init_error_irq(qdev); if (ret) { qdma_err(qdev, "Failed to init error IRQs, ret %d", ret); return ret; } ret = qdmam_alloc_qintr_rings(qdev); if (ret) { qdma_err(qdev, "Failed to init queue IRQs, ret %d", ret); return ret; } return 0; } static void amd_qdma_remove(struct platform_device *pdev) { struct qdma_device *qdev = platform_get_drvdata(pdev); qdma_sgdma_control(qdev, 0); dma_async_device_unregister(&qdev->dma_dev); mutex_destroy(&qdev->ctxt_lock); } static int amd_qdma_probe(struct platform_device *pdev) { struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev); struct qdma_device *qdev; struct resource *res; void __iomem *regs; int ret; qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL); if (!qdev) return -ENOMEM; platform_set_drvdata(pdev, qdev); qdev->pdev = pdev; mutex_init(&qdev->ctxt_lock); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { qdma_err(qdev, "Failed to get IRQ resource"); ret = -ENODEV; goto failed; } qdev->err_irq_idx = pdata->irq_index; qdev->queue_irq_start = res->start + 1; qdev->queue_irq_num = resource_size(res) - 1; regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(regs)) { ret = PTR_ERR(regs); qdma_err(qdev, "Failed to map IO resource, err %d", ret); goto failed; } qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &qdma_regmap_config); if (IS_ERR(qdev->regmap)) { ret = PTR_ERR(qdev->regmap); qdma_err(qdev, "Regmap init failed, err %d", ret); goto failed; } ret = qdma_device_verify(qdev); if (ret) goto failed; ret = qdma_get_hw_info(qdev); if (ret) goto failed; INIT_LIST_HEAD(&qdev->dma_dev.channels); ret = qdma_device_setup(qdev); if (ret) goto failed; ret = qdma_intr_init(qdev); if (ret) { qdma_err(qdev, "Failed to 
initialize IRQs %d", ret); goto failed_disable_engine; } dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask); dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask); qdev->dma_dev.dev = &pdev->dev; qdev->dma_dev.filter.map = pdata->device_map; qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2; qdev->dma_dev.filter.fn = qdma_filter_fn; qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources; qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources; qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg; qdev->dma_dev.device_config = qdma_device_config; qdev->dma_dev.device_issue_pending = qdma_issue_pending; qdev->dma_dev.device_tx_status = dma_cookie_status; qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); ret = dma_async_device_register(&qdev->dma_dev); if (ret) { qdma_err(qdev, "Failed to register AMD QDMA: %d", ret); goto failed_disable_engine; } return 0; failed_disable_engine: qdma_sgdma_control(qdev, 0); failed: mutex_destroy(&qdev->ctxt_lock); qdma_err(qdev, "Failed to probe AMD QDMA driver"); return ret; } static struct platform_driver amd_qdma_driver = { .driver = { .name = "amd-qdma", }, .probe = amd_qdma_probe, .remove = amd_qdma_remove, }; module_platform_driver(amd_qdma_driver); MODULE_DESCRIPTION("AMD QDMA driver"); MODULE_AUTHOR("XRT Team <[email protected]>"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* exynos_drm_vidi.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <[email protected]>
 */

#ifndef _EXYNOS_DRM_VIDI_H_
#define _EXYNOS_DRM_VIDI_H_

#ifdef CONFIG_DRM_EXYNOS_VIDI
/*
 * Ioctl handler exported by the VIDI (virtual display) component;
 * implementation lives in exynos_drm_vidi.c.
 */
int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
			  struct drm_file *file_priv);
#else
/* VIDI support compiled out: register no handler for the ioctl. */
#define vidi_connection_ioctl	NULL
#endif

#endif
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * IBM TrackPoint PS/2 mouse driver
 *
 * Stephen Evanchik <[email protected]>
 */

#ifndef _TRACKPOINT_H
#define _TRACKPOINT_H

/*
 * These constants are from the TrackPoint System
 * Engineering documentation Version 4 from IBM Watson
 * research:
 *	http://wwwcssrv.almaden.ibm.com/trackpoint/download.html
 */

#define TP_COMMAND		0xE2	/* Commands start with this */

#define TP_READ_ID		0xE1	/* Sent for device identification */

/*
 * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
 * 0x01 was the original IBM trackpoint, others implement very limited
 * subset of trackpoint features.
 */
#define TP_VARIANT_IBM		0x01
#define TP_VARIANT_ALPS		0x02
#define TP_VARIANT_ELAN		0x03
#define TP_VARIANT_NXP		0x04
#define TP_VARIANT_JYT_SYNAPTICS	0x05
#define TP_VARIANT_SYNAPTICS	0x06

/*
 * Commands
 */
#define TP_RECALIB		0x51	/* Recalibrate */
#define TP_POWER_DOWN		0x44	/* Can only be undone through HW reset */
#define TP_EXT_DEV		0x21	/* Determines if external device is connected (RO) */
#define TP_EXT_BTN		0x4B	/* Read extended button status */
#define TP_POR			0x7F	/* Execute Power on Reset */
#define TP_POR_RESULTS		0x25	/* Read Power on Self test results */
#define TP_DISABLE_EXT		0x40	/* Disable external pointing device */
#define TP_ENABLE_EXT		0x41	/* Enable external pointing device */

/*
 * Mode manipulation
 */
#define TP_SET_SOFT_TRANS	0x4E	/* Set mode */
#define TP_CANCEL_SOFT_TRANS	0xB9	/* Cancel mode */
#define TP_SET_HARD_TRANS	0x45	/* Mode can only be set */

/*
 * Register oriented commands/properties
 */
#define TP_WRITE_MEM		0x81
#define TP_READ_MEM		0x80	/* Not used in this implementation */

/*
 * RAM Locations for properties
 */
#define TP_SENS			0x4A	/* Sensitivity */
#define TP_MB			0x4C	/* Read Middle Button Status (RO) */
#define TP_INERTIA		0x4D	/* Negative Inertia */
#define TP_SPEED		0x60	/* Speed of TP Cursor */
#define TP_REACH		0x57	/* Backup for Z-axis press */
#define TP_DRAGHYS		0x58	/* Drag Hysteresis */
					/* (how hard it is to drag */
					/* with Z-axis pressed) */
#define TP_MINDRAG		0x59	/* Minimum amount of force needed */
					/* to trigger dragging */
#define TP_THRESH		0x5C	/* Minimum value for a Z-axis press */
#define TP_UP_THRESH		0x5A	/* Used to generate a 'click' on Z-axis */
#define TP_Z_TIME		0x5E	/* How sharp of a press */
#define TP_JENKS_CURV		0x5D	/* Minimum curvature for double click */
#define TP_DRIFT_TIME		0x5F	/* How long a 'hands off' condition */
					/* must last (x*107ms) for drift */
					/* correction to occur */

/*
 * Toggling Flag bits
 */
#define TP_TOGGLE		0x47	/* Toggle command */

#define TP_TOGGLE_MB		0x23	/* Disable/Enable Middle Button */
#define TP_MASK_MB			0x01
#define TP_TOGGLE_EXT_DEV	0x23	/* Disable external device */
#define TP_MASK_EXT_DEV			0x02
#define TP_TOGGLE_DRIFT		0x23	/* Drift Correction */
#define TP_MASK_DRIFT			0x80
#define TP_TOGGLE_BURST		0x28	/* Burst Mode */
#define TP_MASK_BURST			0x80
#define TP_TOGGLE_PTSON		0x2C	/* Press to Select */
#define TP_MASK_PTSON			0x01
#define TP_TOGGLE_HARD_TRANS	0x2C	/* Alternate method to set Hard Transparency */
#define TP_MASK_HARD_TRANS		0x80
#define TP_TOGGLE_TWOHAND	0x2D	/* Two handed */
#define TP_MASK_TWOHAND			0x01
#define TP_TOGGLE_STICKY_TWO	0x2D	/* Sticky two handed */
#define TP_MASK_STICKY_TWO		0x04
#define TP_TOGGLE_SKIPBACK	0x2D	/* Suppress movement after drag release */
#define TP_MASK_SKIPBACK		0x08
#define TP_TOGGLE_SOURCE_TAG	0x20	/* Bit 3 of the first packet will be set
					   to the origin of the packet (external or TP) */
#define TP_MASK_SOURCE_TAG		0x80
#define TP_TOGGLE_EXT_TAG	0x22	/* Bit 3 of the first packet coming from the
					   external device will be forced to 1 */
#define TP_MASK_EXT_TAG			0x04

/* Power on Self Test Results */
#define TP_POR_SUCCESS		0x3B

/*
 * Default power on values
 */
#define TP_DEF_SENS		0x80
#define TP_DEF_INERTIA		0x06
#define TP_DEF_SPEED		0x61
#define TP_DEF_REACH		0x0A

#define TP_DEF_DRAGHYS		0xFF
#define TP_DEF_MINDRAG		0x14

#define TP_DEF_THRESH		0x08
#define TP_DEF_UP_THRESH	0xFF

#define TP_DEF_Z_TIME		0x26
#define TP_DEF_JENKS_CURV	0x87
#define TP_DEF_DRIFT_TIME	0x05

/* Toggles */
#define TP_DEF_MB		0x00
#define TP_DEF_PTSON		0x00
#define TP_DEF_SKIPBACK		0x00
#define TP_DEF_EXT_DEV		0x00	/* 0 means enabled */
#define TP_DEF_TWOHAND		0x00
#define TP_DEF_SOURCE_TAG	0x00

/* Pack a PS/2 command: parameter count, result count, command byte. */
#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))

/* Per-device state cached by the driver (mirrors the RAM properties above). */
struct trackpoint_data {
	u8 variant_id;		/* one of TP_VARIANT_* */
	u8 firmware_id;

	u8 sensitivity, speed, inertia, reach;
	u8 draghys, mindrag;
	u8 thresh, upthresh;
	u8 ztime, jenks;
	u8 drift_time;

	/* toggles */
	bool press_to_select;
	bool skipback;
	bool ext_dev;
};

int trackpoint_detect(struct psmouse *psmouse, bool set_properties);

#endif /* _TRACKPOINT_H */
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#ifndef BNXT_XDP_H
#define BNXT_XDP_H

/* Static key gating XDP TX-ring locking; defined in bnxt_xdp.c. */
DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);

struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len,
				   struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
/* RX-path hook: runs the attached XDP program on one received buffer. */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
		 unsigned int *len, u8 *event);
/* ndo_bpf entry point (attach/query XDP program). */
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
/* ndo_xdp_xmit entry point (redirect transmit). */
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags);
bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);

void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			u16 cons, u8 *data_ptr, unsigned int len,
			struct xdp_buff *xdp);
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
			      struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
				   u8 num_frags, struct page_pool *pool,
				   struct xdp_buff *xdp,
				   struct rx_cmp_ext *rxcmp1);
#endif
/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
/*
 * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
 * Author: Huqiang Qin <[email protected]>
 */

#ifndef _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
#define _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H

/*
 * GPIO interrupt source IDs for the Amlogic G12A GPIO IRQ controller,
 * for use in devicetree interrupt specifiers. Each bank occupies a
 * contiguous ID range, as noted per section below.
 */

/* IRQID[11:0] - GPIOAO[11:0] */
#define IRQID_GPIOAO_0		0
#define IRQID_GPIOAO_1		1
#define IRQID_GPIOAO_2		2
#define IRQID_GPIOAO_3		3
#define IRQID_GPIOAO_4		4
#define IRQID_GPIOAO_5		5
#define IRQID_GPIOAO_6		6
#define IRQID_GPIOAO_7		7
#define IRQID_GPIOAO_8		8
#define IRQID_GPIOAO_9		9
#define IRQID_GPIOAO_10		10
#define IRQID_GPIOAO_11		11

/* IRQID[27:12] - GPIOZ[15:0] */
#define IRQID_GPIOZ_0		12
#define IRQID_GPIOZ_1		13
#define IRQID_GPIOZ_2		14
#define IRQID_GPIOZ_3		15
#define IRQID_GPIOZ_4		16
#define IRQID_GPIOZ_5		17
#define IRQID_GPIOZ_6		18
#define IRQID_GPIOZ_7		19
#define IRQID_GPIOZ_8		20
#define IRQID_GPIOZ_9		21
#define IRQID_GPIOZ_10		22
#define IRQID_GPIOZ_11		23
#define IRQID_GPIOZ_12		24
#define IRQID_GPIOZ_13		25
#define IRQID_GPIOZ_14		26
#define IRQID_GPIOZ_15		27

/* IRQID[36:28] - GPIOH[8:0] */
#define IRQID_GPIOH_0		28
#define IRQID_GPIOH_1		29
#define IRQID_GPIOH_2		30
#define IRQID_GPIOH_3		31
#define IRQID_GPIOH_4		32
#define IRQID_GPIOH_5		33
#define IRQID_GPIOH_6		34
#define IRQID_GPIOH_7		35
#define IRQID_GPIOH_8		36

/* IRQID[52:37] - BOOT[15:0] */
#define IRQID_BOOT_0		37
#define IRQID_BOOT_1		38
#define IRQID_BOOT_2		39
#define IRQID_BOOT_3		40
#define IRQID_BOOT_4		41
#define IRQID_BOOT_5		42
#define IRQID_BOOT_6		43
#define IRQID_BOOT_7		44
#define IRQID_BOOT_8		45
#define IRQID_BOOT_9		46
#define IRQID_BOOT_10		47
#define IRQID_BOOT_11		48
#define IRQID_BOOT_12		49
#define IRQID_BOOT_13		50
#define IRQID_BOOT_14		51
#define IRQID_BOOT_15		52

/* IRQID[60:53] - GPIOC[7:0] */
#define IRQID_GPIOC_0		53
#define IRQID_GPIOC_1		54
#define IRQID_GPIOC_2		55
#define IRQID_GPIOC_3		56
#define IRQID_GPIOC_4		57
#define IRQID_GPIOC_5		58
#define IRQID_GPIOC_6		59
#define IRQID_GPIOC_7		60

/* IRQID[76:61] - GPIOA[15:0] */
#define IRQID_GPIOA_0		61
#define IRQID_GPIOA_1		62
#define IRQID_GPIOA_2		63
#define IRQID_GPIOA_3		64
#define IRQID_GPIOA_4		65
#define IRQID_GPIOA_5		66
#define IRQID_GPIOA_6		67
#define IRQID_GPIOA_7		68
#define IRQID_GPIOA_8		69
#define IRQID_GPIOA_9		70
#define IRQID_GPIOA_10		71
#define IRQID_GPIOA_11		72
#define IRQID_GPIOA_12		73
#define IRQID_GPIOA_13		74
#define IRQID_GPIOA_14		75
#define IRQID_GPIOA_15		76

/* IRQID[96:77] - GPIOX[19:0] */
#define IRQID_GPIOX_0		77
#define IRQID_GPIOX_1		78
#define IRQID_GPIOX_2		79
#define IRQID_GPIOX_3		80
#define IRQID_GPIOX_4		81
#define IRQID_GPIOX_5		82
#define IRQID_GPIOX_6		83
#define IRQID_GPIOX_7		84
#define IRQID_GPIOX_8		85
#define IRQID_GPIOX_9		86
#define IRQID_GPIOX_10		87
#define IRQID_GPIOX_11		88
#define IRQID_GPIOX_12		89
#define IRQID_GPIOX_13		90
#define IRQID_GPIOX_14		91
#define IRQID_GPIOX_15		92
#define IRQID_GPIOX_16		93
#define IRQID_GPIOX_17		94
#define IRQID_GPIOX_18		95
#define IRQID_GPIOX_19		96

/* IRQID[99:97] - GPIOE[2:0] */
#define IRQID_GPIOE_0		97
#define IRQID_GPIOE_1		98
#define IRQID_GPIOE_2		99

#endif /* _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H */
/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** AudioScience HPI driver Copyright (C) 1997-2011 AudioScience Inc. <[email protected]> */ /** \file hpi.h AudioScience Hardware Programming Interface (HPI) public API definition. The HPI is a low-level hardware abstraction layer to all AudioScience digital audio adapters (C) Copyright AudioScience Inc. 1998-2010 */ #ifndef _HPI_H_ #define _HPI_H_ #include <linux/types.h> #define HPI_BUILD_KERNEL_MODE /******************************************************************************/ /******** HPI API DEFINITIONS *****/ /******************************************************************************/ /*******************************************/ /** Audio format types \ingroup stream */ enum HPI_FORMATS { /** Used internally on adapter. */ HPI_FORMAT_MIXER_NATIVE = 0, /** 8-bit unsigned PCM. Windows equivalent is WAVE_FORMAT_PCM. */ HPI_FORMAT_PCM8_UNSIGNED = 1, /** 16-bit signed PCM. Windows equivalent is WAVE_FORMAT_PCM. */ HPI_FORMAT_PCM16_SIGNED = 2, /** MPEG-1 Layer-1. */ HPI_FORMAT_MPEG_L1 = 3, /** MPEG-1 Layer-2. Windows equivalent is WAVE_FORMAT_MPEG. The following table shows what combinations of mode and bitrate are possible: <table border=1 cellspacing=0 cellpadding=5> <tr> <td><p><b>Bitrate (kbs)</b></p> <td><p><b>Mono</b></p> <td><p><b>Stereo,<br>Joint Stereo or<br>Dual Channel</b></p> <tr><td>32<td>X<td>_ <tr><td>40<td>_<td>_ <tr><td>48<td>X<td>_ <tr><td>56<td>X<td>_ <tr><td>64<td>X<td>X <tr><td>80<td>X<td>_ <tr><td>96<td>X<td>X <tr><td>112<td>X<td>X <tr><td>128<td>X<td>X <tr><td>160<td>X<td>X <tr><td>192<td>X<td>X <tr><td>224<td>_<td>X <tr><td>256<td>-<td>X <tr><td>320<td>-<td>X <tr><td>384<td>_<td>X </table> */ HPI_FORMAT_MPEG_L2 = 4, /** MPEG-1 Layer-3. Windows equivalent is WAVE_FORMAT_MPEG. 
The following table shows what combinations of mode and bitrate are possible: <table border=1 cellspacing=0 cellpadding=5> <tr> <td><p><b>Bitrate (kbs)</b></p> <td><p><b>Mono<br>Stereo @ 8,<br>11.025 and<br>12kHz*</b></p> <td><p><b>Mono<br>Stereo @ 16,<br>22.050 and<br>24kHz*</b></p> <td><p><b>Mono<br>Stereo @ 32,<br>44.1 and<br>48kHz</b></p> <tr><td>16<td>X<td>X<td>_ <tr><td>24<td>X<td>X<td>_ <tr><td>32<td>X<td>X<td>X <tr><td>40<td>X<td>X<td>X <tr><td>48<td>X<td>X<td>X <tr><td>56<td>X<td>X<td>X <tr><td>64<td>X<td>X<td>X <tr><td>80<td>_<td>X<td>X <tr><td>96<td>_<td>X<td>X <tr><td>112<td>_<td>X<td>X <tr><td>128<td>_<td>X<td>X <tr><td>144<td>_<td>X<td>_ <tr><td>160<td>_<td>X<td>X <tr><td>192<td>_<td>_<td>X <tr><td>224<td>_<td>_<td>X <tr><td>256<td>-<td>_<td>X <tr><td>320<td>-<td>_<td>X </table> \b * Available on the ASI6000 series only */ HPI_FORMAT_MPEG_L3 = 5, /** Dolby AC-2. */ HPI_FORMAT_DOLBY_AC2 = 6, /** Dolbt AC-3. */ HPI_FORMAT_DOLBY_AC3 = 7, /** 16-bit PCM big-endian. */ HPI_FORMAT_PCM16_BIGENDIAN = 8, /** TAGIT-1 algorithm - hits. */ HPI_FORMAT_AA_TAGIT1_HITS = 9, /** TAGIT-1 algorithm - inserts. */ HPI_FORMAT_AA_TAGIT1_INSERTS = 10, /** 32-bit signed PCM. Windows equivalent is WAVE_FORMAT_PCM. Each sample is a 32bit word. The most significant 24 bits contain a 24-bit sample and the least significant 8 bits are set to 0. */ HPI_FORMAT_PCM32_SIGNED = 11, /** Raw bitstream - unknown format. */ HPI_FORMAT_RAW_BITSTREAM = 12, /** TAGIT-1 algorithm hits - extended. */ HPI_FORMAT_AA_TAGIT1_HITS_EX1 = 13, /** 32-bit PCM as an IEEE float. Windows equivalent is WAVE_FORMAT_IEEE_FLOAT. Each sample is a 32bit word in IEEE754 floating point format. The range is +1.0 to -1.0, which corresponds to digital fullscale. */ HPI_FORMAT_PCM32_FLOAT = 14, /** 24-bit PCM signed. Windows equivalent is WAVE_FORMAT_PCM. */ HPI_FORMAT_PCM24_SIGNED = 15, /** OEM format 1 - private. */ HPI_FORMAT_OEM1 = 16, /** OEM format 2 - private. */ HPI_FORMAT_OEM2 = 17, /** Undefined format. 
*/ HPI_FORMAT_UNDEFINED = 0xffff }; /*******************************************/ /** Stream States \ingroup stream */ enum HPI_STREAM_STATES { /** State stopped - stream is stopped. */ HPI_STATE_STOPPED = 1, /** State playing - stream is playing audio. */ HPI_STATE_PLAYING = 2, /** State recording - stream is recording. */ HPI_STATE_RECORDING = 3, /** State drained - playing stream ran out of data to play. */ HPI_STATE_DRAINED = 4, /** State generate sine - to be implemented. */ HPI_STATE_SINEGEN = 5, /** State wait - used for inter-card sync to mean waiting for all cards to be ready. */ HPI_STATE_WAIT = 6 }; /*******************************************/ /** Source node types \ingroup mixer */ enum HPI_SOURCENODES { /** This define can be used instead of 0 to indicate that there is no valid source node. A control that exists on a destination node can be searched for using a source node value of either 0, or HPI_SOURCENODE_NONE */ HPI_SOURCENODE_NONE = 100, /** Out Stream (Play) node. */ HPI_SOURCENODE_OSTREAM = 101, /** Line in node - could be analog, AES/EBU or network. */ HPI_SOURCENODE_LINEIN = 102, HPI_SOURCENODE_AESEBU_IN = 103, /**< AES/EBU input node. */ HPI_SOURCENODE_TUNER = 104, /**< tuner node. */ HPI_SOURCENODE_RF = 105, /**< RF input node. */ HPI_SOURCENODE_CLOCK_SOURCE = 106, /**< clock source node. */ HPI_SOURCENODE_RAW_BITSTREAM = 107, /**< raw bitstream node. */ HPI_SOURCENODE_MICROPHONE = 108, /**< microphone node. */ /** Cobranet input node - Audio samples come from the Cobranet network and into the device. */ HPI_SOURCENODE_COBRANET = 109, HPI_SOURCENODE_ANALOG = 110, /**< analog input node. */ HPI_SOURCENODE_ADAPTER = 111, /**< adapter node. */ /** RTP stream input node - This node is a destination for packets of RTP audio samples from other devices. */ HPI_SOURCENODE_RTP_DESTINATION = 112, HPI_SOURCENODE_INTERNAL = 113, /**< node internal to the device. 
*/ HPI_SOURCENODE_AVB = 114, /**< AVB input stream */ HPI_SOURCENODE_BLULINK = 115, /**< BLU-link input channel */ /* !!!Update this AND hpidebug.h if you add a new sourcenode type!!! */ HPI_SOURCENODE_LAST_INDEX = 115 /**< largest ID */ /* AX6 max sourcenode types = 15 */ }; /*******************************************/ /** Destination node types \ingroup mixer */ enum HPI_DESTNODES { /** This define can be used instead of 0 to indicate that there is no valid destination node. A control that exists on a source node can be searched for using a destination node value of either 0, or HPI_DESTNODE_NONE */ HPI_DESTNODE_NONE = 200, /** In Stream (Record) node. */ HPI_DESTNODE_ISTREAM = 201, HPI_DESTNODE_LINEOUT = 202, /**< line out node. */ HPI_DESTNODE_AESEBU_OUT = 203, /**< AES/EBU output node. */ HPI_DESTNODE_RF = 204, /**< RF output node. */ HPI_DESTNODE_SPEAKER = 205, /**< speaker output node. */ /** Cobranet output node - Audio samples from the device are sent out on the Cobranet network.*/ HPI_DESTNODE_COBRANET = 206, HPI_DESTNODE_ANALOG = 207, /**< analog output node. */ /** RTP stream output node - This node is a source for packets of RTP audio samples that are sent to other devices. */ HPI_DESTNODE_RTP_SOURCE = 208, HPI_DESTNODE_AVB = 209, /**< AVB output stream */ HPI_DESTNODE_INTERNAL = 210, /**< node internal to the device. */ HPI_DESTNODE_BLULINK = 211, /**< BLU-link output channel. */ /* !!!Update this AND hpidebug.h if you add a new destnode type!!! */ HPI_DESTNODE_LAST_INDEX = 211 /**< largest ID */ /* AX6 max destnode types = 15 */ }; /*******************************************/ /** Mixer control types \ingroup mixer */ enum HPI_CONTROLS { HPI_CONTROL_GENERIC = 0, /**< generic control. */ HPI_CONTROL_CONNECTION = 1, /**< A connection between nodes. */ HPI_CONTROL_VOLUME = 2, /**< volume control - works in dB_fs. */ HPI_CONTROL_METER = 3, /**< peak meter control. */ HPI_CONTROL_MUTE = 4, /*mute control - not used at present. 
/*******************************************/
/** Adapter properties
These are used in HPI_AdapterSetProperty() and HPI_AdapterGetProperty()
\ingroup adapter
*/
enum HPI_ADAPTER_PROPERTIES {
/** \internal Used in dwProperty field of HPI_AdapterSetProperty() and
HPI_AdapterGetProperty(). This errata applies to all ASI6000 cards with both
analog and digital outputs. The CS4224 A/D+D/A has a one sample delay between
left and right channels on both its input (ADC) and output (DAC). More
details are available in Cirrus Logic errata ER284B2. PDF available from
www.cirrus.com, released by Cirrus in 2001.
*/
	HPI_ADAPTER_PROPERTY_ERRATA_1 = 1,

/** Adapter grouping property
Indicates whether the adapter supports the grouping API (for ASIO and SSX2)
*/
	HPI_ADAPTER_PROPERTY_GROUPING = 2,

/** Driver SSX2 property
Tells the kernel driver to turn on SSX2 stream mapping.
This feature is not used by the DSP. In fact the call is completely processed
by the driver and is not passed on to the DSP at all.
*/
	HPI_ADAPTER_PROPERTY_ENABLE_SSX2 = 3,

/** Adapter SSX2 property
Indicates the state of the adapter's SSX2 setting. This setting is stored in
non-volatile memory on the adapter. A typical call sequence would be to use
HPI_ADAPTER_PROPERTY_SSX2_SETTING to set SSX2 on the adapter and then to
reload the driver. The driver would query HPI_ADAPTER_PROPERTY_SSX2_SETTING
during startup and if SSX2 is set, it would then call
HPI_ADAPTER_PROPERTY_ENABLE_SSX2 to enable SSX2 stream mapping within the
kernel level of the driver.
*/
	HPI_ADAPTER_PROPERTY_SSX2_SETTING = 4,

/** Enables/disables PCI(e) IRQ.
A setting of 0 indicates that no interrupts are being generated.
At DSP boot this property is set to 0. Setting to a non-zero value specifies
the number of frames of audio that should be processed between interrupts.
This property should be set to a multiple of the mixer interval as read back
from the HPI_ADAPTER_PROPERTY_INTERVAL property.
*/
	HPI_ADAPTER_PROPERTY_IRQ_RATE = 5,

/** Base number for readonly properties */
	HPI_ADAPTER_PROPERTY_READONLYBASE = 256,

/** Readonly adapter latency property.
This property returns the input and output latency in samples.
Property 1 is the estimated input latency in samples, while Property 2 is
the output latency in samples.
*/
	HPI_ADAPTER_PROPERTY_LATENCY = 256,

/** Readonly adapter granularity property.
The granularity is the smallest size chunk of stereo samples that is
processed by the adapter. This property returns the record granularity in
samples in Property 1. Property 2 returns the play granularity.
*/
	HPI_ADAPTER_PROPERTY_GRANULARITY = 257,

/** Readonly adapter number of current channels property.
Property 1 is the number of record channels per record device.
Property 2 is the number of play channels per playback device.
*/
	HPI_ADAPTER_PROPERTY_CURCHANNELS = 258,

/** Readonly adapter software version.
The SOFTWARE_VERSION property returns the version of the software running
on the adapter as Major.Minor.Release.
Property 1 contains Major in bits 15..8 and Minor in bits 7..0.
Property 2 contains Release in bits 7..0.
*/
	HPI_ADAPTER_PROPERTY_SOFTWARE_VERSION = 259,

/** Readonly adapter MAC address MSBs.
The MAC_ADDRESS_MSB property returns the most significant 32 bits of the
MAC address.
Property 1 contains bits 47..32 of the MAC address.
Property 2 contains bits 31..16 of the MAC address.
*/
	HPI_ADAPTER_PROPERTY_MAC_ADDRESS_MSB = 260,

/** Readonly adapter MAC address LSBs
The MAC_ADDRESS_LSB property returns the least significant 16 bits of the
MAC address.
Property 1 contains bits 15..0 of the MAC address.
*/
	HPI_ADAPTER_PROPERTY_MAC_ADDRESS_LSB = 261,

/** Readonly extended adapter type number
The EXTENDED_ADAPTER_TYPE property returns the 4 digits of an extended
adapter type, i.e ASI8920-0022, 0022 is the extended type.
The digits are returned as ASCII characters rather than the hex digits
that are returned for the main type.
Property 1 returns the 1st two (left most) digits, i.e "00" in the example
above, the upper byte being the left most digit.
Property 2 returns the 2nd two digits, i.e "22" in the example above.
*/
	HPI_ADAPTER_PROPERTY_EXTENDED_ADAPTER_TYPE = 262,

/** Readonly debug log buffer information */
	HPI_ADAPTER_PROPERTY_LOGTABLEN = 263,
	HPI_ADAPTER_PROPERTY_LOGTABBEG = 264,

/** Readonly adapter IP address
For 192.168.1.101
Property 1 returns the 1st two (left most) digits, i.e 192*256 + 168
in the example above, the upper byte being the left most digit.
Property 2 returns the 2nd two digits, i.e 1*256 + 101 in the example above.
*/
	HPI_ADAPTER_PROPERTY_IP_ADDRESS = 265,

/** Readonly adapter buffer processed count.
Returns a buffer processed count that is incremented every time all buffers
for all streams are updated. This is useful for checking completion of all
stream operations across the adapter when using grouped streams.
*/
	HPI_ADAPTER_PROPERTY_BUFFER_UPDATE_COUNT = 266,

/** Readonly mixer and stream intervals
These intervals are measured in mixer frames.
To convert to time, divide by the adapter samplerate.
The mixer interval is the number of frames processed in one mixer iteration.
The stream update interval is the interval at which streams check for and
process data, and BBM host buffer counters are updated.
Property 1 is the mixer interval in mixer frames.
Property 2 is the stream update interval in mixer frames.
*/
	HPI_ADAPTER_PROPERTY_INTERVAL = 267,

/** Adapter capabilities 1
Property 1 - adapter can do multichannel (SSX1)
Property 2 - adapter can do stream grouping (supports SSX2)
*/
	HPI_ADAPTER_PROPERTY_CAPS1 = 268,

/** Adapter capabilities 2
Property 1 - adapter can do samplerate conversion (MRX)
Property 2 - adapter can do timestretch (TSX)
*/
	HPI_ADAPTER_PROPERTY_CAPS2 = 269,

/** Readonly adapter sync header connection count. */
	HPI_ADAPTER_PROPERTY_SYNC_HEADER_CONNECTIONS = 270,

/** Readonly supports SSX2 property.
Indicates the adapter supports SSX2 in some mode setting. The
return value is true (1) or false (0). If the current adapter
mode is MONO SSX2 is disabled, even though this property will
return true.
*/
	HPI_ADAPTER_PROPERTY_SUPPORTS_SSX2 = 271,

/** Readonly supports PCI(e) IRQ.
Indicates that the adapter in its current mode supports interrupts
across the host bus. Note, this does not imply that interrupts are
enabled. Instead it indicates that they can be enabled.
*/
	HPI_ADAPTER_PROPERTY_SUPPORTS_IRQ = 272,

/** Readonly supports firmware updating.
Indicates that the adapter implements an interface to update firmware
on the adapter.
*/
	HPI_ADAPTER_PROPERTY_SUPPORTS_FW_UPDATE = 273,

/** Readonly Firmware IDs
Identify firmware independent of individual adapter type.
May be used as a filter for firmware update images.
Property 1 = Bootloader ID
Property 2 = Main program ID
*/
	HPI_ADAPTER_PROPERTY_FIRMWARE_ID = 274
};

/** Adapter mode commands

Used in wQueryOrSet parameter of HPI_AdapterSetModeEx().
\ingroup adapter
*/
enum HPI_ADAPTER_MODE_CMDS {
	/** Set the mode to the given parameter */
	HPI_ADAPTER_MODE_SET = 0,
	/** Return 0 or error depending whether mode is valid,
	but don't set the mode */
	HPI_ADAPTER_MODE_QUERY = 1
};

/** Adapter Modes
These are used by HPI_AdapterSetModeEx()

\warning - more than 16 possible modes breaks
a bitmask in the Windows WAVE DLL
\ingroup adapter
*/
enum HPI_ADAPTER_MODES {
/** 4 outstream mode.
- ASI6114: 1 instream
- ASI6044: 4 instreams
- ASI6012: 1 instream
- ASI6102: no instreams
- ASI6022, ASI6122: 2 instreams
- ASI5111, ASI5101: 2 instreams
- ASI652x, ASI662x: 2 instreams
- ASI654x, ASI664x: 4 instreams
*/
	HPI_ADAPTER_MODE_4OSTREAM = 1,

/** 6 outstream mode.
- ASI6012: 1 instream
- ASI6022, ASI6122: 2 instreams
- ASI652x, ASI662x: 4 instreams
*/
	HPI_ADAPTER_MODE_6OSTREAM = 2,

/** 8 outstream mode.
- ASI6114: 8 instreams
- ASI6118: 8 instreams
- ASI6585: 8 instreams
*/
	HPI_ADAPTER_MODE_8OSTREAM = 3,

/** 16 outstream mode.
- ASI6416 16 instreams
- ASI6518, ASI6618 16 instreams
- ASI6118 16 mono out and in streams
*/
	HPI_ADAPTER_MODE_16OSTREAM = 4,

/** one outstream mode.
- ASI5111 1 outstream, 1 instream
*/
	HPI_ADAPTER_MODE_1OSTREAM = 5,

/** ASI504X mode 1. 12 outstream, 4 instream 0 to 48kHz sample rates
	(see ASI504X datasheet for more info).
*/
	HPI_ADAPTER_MODE_1 = 6,

/** ASI504X mode 2. 4 outstreams, 4 instreams at 0 to 192kHz sample rates
	(see ASI504X datasheet for more info).
*/
	HPI_ADAPTER_MODE_2 = 7,

/** ASI504X mode 3. 4 outstreams, 4 instreams at 0 to 192kHz sample rates
	(see ASI504X datasheet for more info).
*/
	HPI_ADAPTER_MODE_3 = 8,

/** ASI504X multichannel mode.
	2 outstreams -> 4 line outs = 1 to 8 channel streams),
	4 lineins -> 1 instream (1 to 8 channel streams) at 0-48kHz.
	For more info see the SSX Specification.
*/
	HPI_ADAPTER_MODE_MULTICHANNEL = 9,

/** 12 outstream mode.
- ASI6514, ASI6614: 2 instreams
- ASI6540,ASI6544: 8 instreams
- ASI6640,ASI6644: 8 instreams
*/
	HPI_ADAPTER_MODE_12OSTREAM = 10,

/** 9 outstream mode.
- ASI6044: 8 instreams
*/
	HPI_ADAPTER_MODE_9OSTREAM = 11,

/** mono mode.
- ASI6416: 16 outstreams/instreams
- ASI5402: 2 outstreams/instreams
*/
	HPI_ADAPTER_MODE_MONO = 12,

/** Low latency mode.
- ASI6416/ASI6316: 1 16 channel outstream and instream
*/
	HPI_ADAPTER_MODE_LOW_LATENCY = 13
};

/* Note, adapters can have more than one capability -
encoding as bitfield is recommended. */
#define HPI_CAPABILITY_NONE (0)
#define HPI_CAPABILITY_MPEG_LAYER3 (1)

/* Set this equal to maximum capability index,
Must not be greater than 32 - see axnvdef.h */
#define HPI_CAPABILITY_MAX 1
/* #define HPI_CAPABILITY_AAC 2 */
/******************************************* STREAM ATTRIBUTES ****/

/** MPEG Ancillary Data modes

The mode for the ancillary data insertion or extraction to operate in.
\ingroup stream
*/
enum HPI_MPEG_ANC_MODES {
	/** the MPEG frames have energy information stored in them (5 bytes
	per stereo frame, 3 per mono) */
	HPI_MPEG_ANC_HASENERGY = 0,
	/** the entire ancillary data field is taken up by data from the
	Anc data buffer. On encode, the encoder will insert the energy bytes
	before filling the remainder of the ancillary data space with data
	from the ancillary data buffer. */
	HPI_MPEG_ANC_RAW = 1
};

/** Ancillary Data Alignment
\ingroup instream
*/
enum HPI_ISTREAM_MPEG_ANC_ALIGNS {
	/** data is packed against the end of data, then padded to the
	end of frame */
	HPI_MPEG_ANC_ALIGN_LEFT = 0,
	/** data is packed against the end of the frame */
	HPI_MPEG_ANC_ALIGN_RIGHT = 1
};

/** MPEG modes

MPEG modes - can be used optionally for HPI_FormatCreate()
parameter dwAttributes.

Using any mode setting other than HPI_MPEG_MODE_DEFAULT
with single channel format will return an error.
\ingroup stream
*/
enum HPI_MPEG_MODES {
	/** Causes the MPEG-1 Layer II bitstream to be recorded
	in single_channel mode when the number of channels is 1
	and in stereo when the number of channels is 2. */
	HPI_MPEG_MODE_DEFAULT = 0,
	/** Standard stereo without joint-stereo compression */
	HPI_MPEG_MODE_STEREO = 1,
	/** Joint stereo */
	HPI_MPEG_MODE_JOINTSTEREO = 2,
	/** Left and Right channels are completely independent */
	HPI_MPEG_MODE_DUALCHANNEL = 3
};

/******************************************* MIXER ATTRIBUTES ****/

/* \defgroup mixer_flags Mixer flags for
HPI_MIXER_GET_CONTROL_MULTIPLE_VALUES
{
*/
#define HPI_MIXER_GET_CONTROL_MULTIPLE_CHANGED (0)
#define HPI_MIXER_GET_CONTROL_MULTIPLE_RESET (1)
/*}*/

/** Commands used by HPI_MixerStore()
\ingroup mixer
*/
enum HPI_MIXER_STORE_COMMAND {
	/** Save all mixer control settings. */
	HPI_MIXER_STORE_SAVE = 1,
	/** Restore all controls from saved. */
	HPI_MIXER_STORE_RESTORE = 2,
	/** Delete saved control settings. */
	HPI_MIXER_STORE_DELETE = 3,
	/** Enable auto storage of some control settings. */
	HPI_MIXER_STORE_ENABLE = 4,
	/** Disable auto storage of some control settings. */
	HPI_MIXER_STORE_DISABLE = 5,
	/** Unimplemented - save the attributes of a single control. */
	HPI_MIXER_STORE_SAVE_SINGLE = 6
};

/****************************/
/* CONTROL ATTRIBUTE VALUES */
/****************************/

/** Used by mixer plugin enable functions

E.g. HPI_ParametricEq_SetState()
\ingroup mixer
*/
enum HPI_SWITCH_STATES {
	HPI_SWITCH_OFF = 0,	/**< turn the mixer plugin off. */
	HPI_SWITCH_ON = 1	/**< turn the mixer plugin on. */
};

/* Volume control special gain values */

/** volume units are 100ths of a dB
\ingroup volume
*/
#define HPI_UNITS_PER_dB 100
/** turns volume control OFF or MUTE
\ingroup volume
*/
#define HPI_GAIN_OFF (-100 * HPI_UNITS_PER_dB)

/** channel mask specifying all channels
\ingroup volume
*/
#define HPI_BITMASK_ALL_CHANNELS (0xFFFFFFFF)

/** value returned for no signal
\ingroup meter
*/
#define HPI_METER_MINIMUM (-150 * HPI_UNITS_PER_dB)

/** autofade profiles
\ingroup volume
*/
enum HPI_VOLUME_AUTOFADES {
	/** log fade - dB attenuation changes linearly over time */
	HPI_VOLUME_AUTOFADE_LOG = 2,
	/** linear fade - amplitude changes linearly */
	HPI_VOLUME_AUTOFADE_LINEAR = 3
};
/** The physical encoding format of the AESEBU I/O.

Used in HPI_Aesebu_Transmitter_SetFormat(), HPI_Aesebu_Receiver_SetFormat()
along with related Get and Query functions.
\ingroup aestx
*/
enum HPI_AESEBU_FORMATS {
	/** AES/EBU physical format - AES/EBU balanced "professional" */
	HPI_AESEBU_FORMAT_AESEBU = 1,
	/** AES/EBU physical format - S/PDIF unbalanced "consumer" */
	HPI_AESEBU_FORMAT_SPDIF = 2
};

/** AES/EBU error status bits

Returned by HPI_Aesebu_Receiver_GetErrorStatus()
\ingroup aesrx
*/
enum HPI_AESEBU_ERRORS {
	/** bit0: 1 when PLL is not locked */
	HPI_AESEBU_ERROR_NOT_LOCKED = 0x01,
	/** bit1: 1 when signal quality is poor */
	HPI_AESEBU_ERROR_POOR_QUALITY = 0x02,
	/** bit2: 1 when there is a parity error */
	HPI_AESEBU_ERROR_PARITY_ERROR = 0x04,
	/** bit3: 1 when there is a bi-phase coding violation */
	HPI_AESEBU_ERROR_BIPHASE_VIOLATION = 0x08,
	/** bit4: 1 when the validity bit is high */
	HPI_AESEBU_ERROR_VALIDITY = 0x10,
	/** bit5: 1 when the CRC error bit is high */
	HPI_AESEBU_ERROR_CRC = 0x20
};

/** \addtogroup pad
\{
*/
/** The text string containing the station/channel combination. */
#define HPI_PAD_CHANNEL_NAME_LEN 16
/** The text string containing the artist. */
#define HPI_PAD_ARTIST_LEN 64
/** The text string containing the title. */
#define HPI_PAD_TITLE_LEN 64
/** The text string containing the comment. */
#define HPI_PAD_COMMENT_LEN 256
/** The PTY when the tuner has not received any PTY. */
#define HPI_PAD_PROGRAM_TYPE_INVALID 0xffff
/** \} */
/** Data types for PTY string translation.
\ingroup rds
*/
enum eHPI_RDS_type {
	HPI_RDS_DATATYPE_RDS = 0,	/**< RDS bitstream.*/
	HPI_RDS_DATATYPE_RBDS = 1	/**< RBDS bitstream.*/
};

/** Tuner bands

Used for HPI_Tuner_SetBand(),HPI_Tuner_GetBand()
\ingroup tuner
*/
enum HPI_TUNER_BAND {
	HPI_TUNER_BAND_AM = 1,	/**< AM band */
	HPI_TUNER_BAND_FM = 2,	/**< FM band (mono) */
	HPI_TUNER_BAND_TV_NTSC_M = 3,	/**< NTSC-M TV band*/
	HPI_TUNER_BAND_TV = 3,	/* use TV_NTSC_M */
	HPI_TUNER_BAND_FM_STEREO = 4,	/**< FM band (stereo) */
	HPI_TUNER_BAND_AUX = 5,	/**< auxiliary input */
	HPI_TUNER_BAND_TV_PAL_BG = 6,	/**< PAL-B/G TV band*/
	HPI_TUNER_BAND_TV_PAL_I = 7,	/**< PAL-I TV band*/
	HPI_TUNER_BAND_TV_PAL_DK = 8,	/**< PAL-D/K TV band*/
	HPI_TUNER_BAND_TV_SECAM_L = 9,	/**< SECAM-L TV band*/
	HPI_TUNER_BAND_DAB = 10,	/**< DAB band */
	HPI_TUNER_BAND_LAST = 10	/**< the index of the last tuner band. */
};

/** Tuner mode attributes

Used by HPI_Tuner_SetMode(), HPI_Tuner_GetMode()
\ingroup tuner
*/
enum HPI_TUNER_MODES {
	HPI_TUNER_MODE_RSS = 1,	/**< control RSS */
	HPI_TUNER_MODE_RDS = 2	/**< control RBDS/RDS */
};

/** Tuner mode attribute values

Used by HPI_Tuner_SetMode(), HPI_Tuner_GetMode()
\ingroup tuner
*/
enum HPI_TUNER_MODE_VALUES {
/* RSS attribute values */
	HPI_TUNER_MODE_RSS_DISABLE = 0,	/**< RSS disable */
	HPI_TUNER_MODE_RSS_ENABLE = 1,	/**< RSS enable */

/* RDS mode attributes */
	HPI_TUNER_MODE_RDS_DISABLE = 0,	/**< RDS - disabled */
	HPI_TUNER_MODE_RDS_RDS = 1,	/**< RDS - RDS mode */
	HPI_TUNER_MODE_RDS_RBDS = 2	/**< RDS - RBDS mode */
};

/** Tuner Status Bits

These bitfield values are returned by a call to HPI_Tuner_GetStatus().
Multiple fields are returned from a single call.
\ingroup tuner
*/
enum HPI_TUNER_STATUS_BITS {
	HPI_TUNER_VIDEO_COLOR_PRESENT = 0x0001,	/**< video color is present. */
	HPI_TUNER_VIDEO_IS_60HZ = 0x0020,	/**< 60 hz video detected. */
	HPI_TUNER_VIDEO_HORZ_SYNC_MISSING = 0x0040,	/**< video HSYNC is missing. */
	HPI_TUNER_VIDEO_STATUS_VALID = 0x0100,	/**< video status is valid. */
	HPI_TUNER_DIGITAL = 0x0200,	/**< tuner reports digital programming. */
	HPI_TUNER_MULTIPROGRAM = 0x0400,	/**< tuner reports multiple programs. */
	HPI_TUNER_PLL_LOCKED = 0x1000,	/**< the tuner's PLL is locked. */
	HPI_TUNER_FM_STEREO = 0x2000	/**< tuner reports back FM stereo. */
};

/** Channel Modes
Used for HPI_ChannelModeSet/Get()
\ingroup channelmode
*/
enum HPI_CHANNEL_MODES {
	/** Left channel out = left channel in,
	Right channel out = right channel in. */
	HPI_CHANNEL_MODE_NORMAL = 1,
	/** Left channel out = right channel in,
	Right channel out = left channel in. */
	HPI_CHANNEL_MODE_SWAP = 2,
	/** Left channel out = left channel in,
	Right channel out = left channel in. */
	HPI_CHANNEL_MODE_LEFT_TO_STEREO = 3,
	/** Left channel out = right channel in,
	Right channel out = right channel in. */
	HPI_CHANNEL_MODE_RIGHT_TO_STEREO = 4,
	/** Left channel out = (left channel in + right channel in)/2,
	Right channel out = mute. */
	HPI_CHANNEL_MODE_STEREO_TO_LEFT = 5,
	/** Left channel out = mute,
	Right channel out = (right channel in + left channel in)/2. */
	HPI_CHANNEL_MODE_STEREO_TO_RIGHT = 6,
	HPI_CHANNEL_MODE_LAST = 6	/**< the index of the last channel mode. */
};

/** SampleClock source values
\ingroup sampleclock
*/
enum HPI_SAMPLECLOCK_SOURCES {
	/** The sampleclock output is derived from its local samplerate
	generator. The local samplerate may be set using
	HPI_SampleClock_SetLocalRate(). */
	HPI_SAMPLECLOCK_SOURCE_LOCAL = 1,
	/** The adapter is clocked from a dedicated AES/EBU SampleClock
	input. */
	HPI_SAMPLECLOCK_SOURCE_AESEBU_SYNC = 2,
	/** From external wordclock connector */
	HPI_SAMPLECLOCK_SOURCE_WORD = 3,
	/** Board-to-board header */
	HPI_SAMPLECLOCK_SOURCE_WORD_HEADER = 4,
	/** FUTURE - SMPTE clock. */
	HPI_SAMPLECLOCK_SOURCE_SMPTE = 5,
	/** One of the aesebu inputs */
	HPI_SAMPLECLOCK_SOURCE_AESEBU_INPUT = 6,
	/** From a network interface e.g. Cobranet or Livewire at either
	48 or 96kHz */
	HPI_SAMPLECLOCK_SOURCE_NETWORK = 8,
	/** From previous adjacent module (ASI2416 only) */
	HPI_SAMPLECLOCK_SOURCE_PREV_MODULE = 10,
	/** Blu link sample clock */
	HPI_SAMPLECLOCK_SOURCE_BLULINK = 11,
	/*! Update this if you add a new clock source. */
	HPI_SAMPLECLOCK_SOURCE_LAST = 11
};
/** Equalizer filter types. Used by HPI_ParametricEq_SetBand()
\ingroup parmeq
*/
enum HPI_FILTER_TYPE {
	HPI_FILTER_TYPE_BYPASS = 0,	/**< filter is turned off */
	HPI_FILTER_TYPE_LOWSHELF = 1,	/**< EQ low shelf */
	HPI_FILTER_TYPE_HIGHSHELF = 2,	/**< EQ high shelf */
	HPI_FILTER_TYPE_EQ_BAND = 3,	/**< EQ gain */
	HPI_FILTER_TYPE_LOWPASS = 4,	/**< standard low pass */
	HPI_FILTER_TYPE_HIGHPASS = 5,	/**< standard high pass */
	HPI_FILTER_TYPE_BANDPASS = 6,	/**< standard band pass */
	HPI_FILTER_TYPE_BANDSTOP = 7	/**< standard band stop/notch */
};

/** Async Event sources
\ingroup async
*/
enum ASYNC_EVENT_SOURCES {
	HPI_ASYNC_EVENT_GPIO = 1,	/**< GPIO event. */
	HPI_ASYNC_EVENT_SILENCE = 2,	/**< silence event detected. */
	HPI_ASYNC_EVENT_TONE = 3	/**< tone event detected. */
};

/*******************************************/
/** HPI Error codes

Almost all HPI functions return an error code.
A return value of zero means there was no error.
Otherwise one of these error codes is returned.
Error codes can be converted to a descriptive string using
HPI_GetErrorText().

\note When a new error code is added HPI_GetErrorText() MUST be updated.
\note Codes 1-100 are reserved for driver use.
\ingroup utility
*/
enum HPI_ERROR_CODES {
	/** Message type does not exist. */
	HPI_ERROR_INVALID_TYPE = 100,
	/** Object type does not exist. */
	HPI_ERROR_INVALID_OBJ = 101,
	/** Function does not exist. */
	HPI_ERROR_INVALID_FUNC = 102,
	/** The specified object does not exist. */
	HPI_ERROR_INVALID_OBJ_INDEX = 103,
	/** Trying to access an object that has not been opened yet. */
	HPI_ERROR_OBJ_NOT_OPEN = 104,
	/** Trying to open an already open object. */
	HPI_ERROR_OBJ_ALREADY_OPEN = 105,
	/** PCI, ISA resource not valid. */
	HPI_ERROR_INVALID_RESOURCE = 106,
	/* HPI_ERROR_SUBSYSFINDADAPTERS_GETINFO= 107 */
	/** Default response was never updated with actual error code. */
	HPI_ERROR_INVALID_RESPONSE = 108,
	/** wSize field of response was not updated,
	indicating that the message was not processed. */
	HPI_ERROR_PROCESSING_MESSAGE = 109,
	/** The network did not respond in a timely manner. */
	HPI_ERROR_NETWORK_TIMEOUT = 110,
	/** An HPI handle is invalid (uninitialised?). */
	HPI_ERROR_INVALID_HANDLE = 111,
	/** A function or attribute has not been implemented yet. */
	HPI_ERROR_UNIMPLEMENTED = 112,
	/** There are too many clients attempting
	to access a network resource. */
	HPI_ERROR_NETWORK_TOO_MANY_CLIENTS = 113,
	/** Response buffer passed to HPI_Message was smaller than
	returned response.
	wSpecificError field of hpi response contains the required size. */
	HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL = 114,
	/** The returned response did not match the sent message. */
	HPI_ERROR_RESPONSE_MISMATCH = 115,
	/** A control setting that should have been cached was not. */
	HPI_ERROR_CONTROL_CACHING = 116,
	/** A message buffer in the path to the adapter was smaller than
	the message size.
	wSpecificError field of hpi response contains the actual size. */
	HPI_ERROR_MESSAGE_BUFFER_TOO_SMALL = 117,

	/* HPI_ERROR_TOO_MANY_ADAPTERS= 200 */
	/** Bad adapter. */
	HPI_ERROR_BAD_ADAPTER = 201,
	/** Adapter number out of range or not set properly. */
	HPI_ERROR_BAD_ADAPTER_NUMBER = 202,
	/** 2 adapters with the same adapter number. */
	HPI_ERROR_DUPLICATE_ADAPTER_NUMBER = 203,
	/** DSP code failed to bootload. Usually a DSP memory test failure. */
	HPI_ERROR_DSP_BOOTLOAD = 204,
	/** Couldn't find or open the DSP code file. */
	HPI_ERROR_DSP_FILE_NOT_FOUND = 206,
	/** Internal DSP hardware error. */
	HPI_ERROR_DSP_HARDWARE = 207,
	/** Could not allocate memory */
	HPI_ERROR_MEMORY_ALLOC = 208,
	/** Failed to correctly load/config PLD. (unused) */
	HPI_ERROR_PLD_LOAD = 209,
	/** Unexpected end of file, block length too big etc. */
	HPI_ERROR_DSP_FILE_FORMAT = 210,
	/** Found but could not open DSP code file. */
	HPI_ERROR_DSP_FILE_ACCESS_DENIED = 211,
	/** First DSP code section header not found in DSP file. */
	HPI_ERROR_DSP_FILE_NO_HEADER = 212,
	/* HPI_ERROR_DSP_FILE_READ_ERROR= 213, */
	/** DSP code for adapter family not found. */
	HPI_ERROR_DSP_SECTION_NOT_FOUND = 214,
	/** Other OS specific error opening DSP file. */
	HPI_ERROR_DSP_FILE_OTHER_ERROR = 215,
	/** Sharing violation opening DSP code file. */
	HPI_ERROR_DSP_FILE_SHARING_VIOLATION = 216,
	/** DSP code section header had size == 0. */
	HPI_ERROR_DSP_FILE_NULL_HEADER = 217,

	/* HPI_ERROR_FLASH = 220, */
	/** Flash has bad checksum */
	HPI_ERROR_BAD_CHECKSUM = 221,
	HPI_ERROR_BAD_SEQUENCE = 222,
	HPI_ERROR_FLASH_ERASE = 223,
	HPI_ERROR_FLASH_PROGRAM = 224,
	HPI_ERROR_FLASH_VERIFY = 225,
	HPI_ERROR_FLASH_TYPE = 226,
	HPI_ERROR_FLASH_START = 227,
	HPI_ERROR_FLASH_READ = 228,
	HPI_ERROR_FLASH_READ_NO_FILE = 229,
	HPI_ERROR_FLASH_SIZE = 230,

	/** Reserved for OEMs. */
	HPI_ERROR_RESERVED_1 = 290,

	/* HPI_ERROR_INVALID_STREAM = 300 use HPI_ERROR_INVALID_OBJ_INDEX */
	/** Invalid compression format. */
	HPI_ERROR_INVALID_FORMAT = 301,
	/** Invalid format samplerate */
	HPI_ERROR_INVALID_SAMPLERATE = 302,
	/** Invalid format number of channels. */
	HPI_ERROR_INVALID_CHANNELS = 303,
	/** Invalid format bitrate. */
	HPI_ERROR_INVALID_BITRATE = 304,
	/** Invalid datasize used for stream read/write. */
	HPI_ERROR_INVALID_DATASIZE = 305,
	/* HPI_ERROR_BUFFER_FULL = 306 use HPI_ERROR_INVALID_DATASIZE */
	/* HPI_ERROR_BUFFER_EMPTY = 307 use HPI_ERROR_INVALID_DATASIZE */
	/** Null data pointer used for stream read/write. */
	HPI_ERROR_INVALID_DATA_POINTER = 308,
	/** Packet ordering error for stream read/write. */
	HPI_ERROR_INVALID_PACKET_ORDER = 309,

	/** Object can't do requested operation in its current
	state, eg set format, change rec mux state while recording. */
	HPI_ERROR_INVALID_OPERATION = 310,
	/** Where a SRG is shared amongst streams, an incompatible samplerate
	is one that is different to any currently active stream. */
	HPI_ERROR_INCOMPATIBLE_SAMPLERATE = 311,
	/** Adapter mode is illegal. */
	HPI_ERROR_BAD_ADAPTER_MODE = 312,

	/** There have been too many attempts to set the adapter's
	capabilities (using bad keys). The card should be returned
	to ASI if further capabilities updates are required. */
	HPI_ERROR_TOO_MANY_CAPABILITY_CHANGE_ATTEMPTS = 313,
	/** Streams on different adapters cannot be grouped. */
	HPI_ERROR_NO_INTERADAPTER_GROUPS = 314,
	/** Streams on different DSPs cannot be grouped. */
	HPI_ERROR_NO_INTERDSP_GROUPS = 315,
	/** Stream wait cancelled before threshold reached. */
	HPI_ERROR_WAIT_CANCELLED = 316,
	/** A character string is invalid. */
	HPI_ERROR_INVALID_STRING = 317,

	/** Invalid mixer node for this adapter. */
	HPI_ERROR_INVALID_NODE = 400,
	/** Invalid control. */
	HPI_ERROR_INVALID_CONTROL = 401,
	/** Invalid control value was passed. */
	HPI_ERROR_INVALID_CONTROL_VALUE = 402,
	/** Control attribute not supported by this control. */
	HPI_ERROR_INVALID_CONTROL_ATTRIBUTE = 403,
	/** Control is disabled. */
	HPI_ERROR_CONTROL_DISABLED = 404,
	/** I2C transaction failed due to a missing ACK. */
	HPI_ERROR_CONTROL_I2C_MISSING_ACK = 405,
	HPI_ERROR_I2C_MISSING_ACK = 405,
	/** Control is busy, or coming out of
	reset and cannot be accessed at this time. */
	HPI_ERROR_CONTROL_NOT_READY = 407,

	/** Non volatile memory */
	HPI_ERROR_NVMEM_BUSY = 450,
	HPI_ERROR_NVMEM_FULL = 451,
	HPI_ERROR_NVMEM_FAIL = 452,

	/** I2C */
	HPI_ERROR_I2C_BAD_ADR = 460,

	/** Entity type did not match requested type */
	HPI_ERROR_ENTITY_TYPE_MISMATCH = 470,
	/** Entity item count did not match requested count */
	HPI_ERROR_ENTITY_ITEM_COUNT = 471,
	/** Entity type is not one of the valid types */
	HPI_ERROR_ENTITY_TYPE_INVALID = 472,
	/** Entity role is not one of the valid roles */
	HPI_ERROR_ENTITY_ROLE_INVALID = 473,
	/** Entity size doesn't match target size */
	HPI_ERROR_ENTITY_SIZE_MISMATCH = 474,

	/* AES18 specific errors were 500..507 */

	/** custom error to use for debugging */
	HPI_ERROR_CUSTOM = 600,

	/** hpioct32.c can't obtain mutex */
	HPI_ERROR_MUTEX_TIMEOUT = 700,

	/** Backend errors used to be greater than this.
	\deprecated Now, all backends return only errors defined here in
	hpi.h */
	HPI_ERROR_BACKEND_BASE = 900,

	/** Communication with DSP failed */
	HPI_ERROR_DSP_COMMUNICATION = 900
		/* Note that the dsp communication error is set to this value
		so that it remains compatible with any software that expects
		such errors to be backend errors i.e. >= 900.
		Do not define any new error codes with values > 900. */
};

/** \defgroup maximums HPI maximum values
\{
*/
/** Maximum number of PCI HPI adapters */
#define HPI_MAX_ADAPTERS 20
/** Maximum number of in or out streams per adapter */
#define HPI_MAX_STREAMS 16
#define HPI_MAX_CHANNELS 2	/* per stream */
#define HPI_MAX_NODES 8	/* per mixer ? */
#define HPI_MAX_CONTROLS 4	/* per node ? */
/** maximum number of ancillary bytes per MPEG frame */
#define HPI_MAX_ANC_BYTES_PER_FRAME (64)
#define HPI_STRING_LEN 16

/** Networked adapters have index >= 100 */
#define HPI_MIN_NETWORK_ADAPTER_IDX 100

/** Velocity units */
#define HPI_OSTREAM_VELOCITY_UNITS 4096
/** OutStream timescale units */
#define HPI_OSTREAM_TIMESCALE_UNITS 10000
/** OutStream timescale passthrough - turns timescaling on in passthrough
mode */
#define HPI_OSTREAM_TIMESCALE_PASSTHROUGH 99999
/**\}*/

/**************/
/* STRUCTURES */
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

/** Structure containing sample format information.
    See also HPI_FormatCreate().
*/
struct hpi_format {
	u32 sample_rate;	/**< 11025, 32000, 44100 ... */
	u32 bit_rate;	/**< for MPEG */
	u32 attributes;	/**< Stereo/JointStereo/Mono */
	u16 mode_legacy;	/**< Legacy ancillary mode or idle bit */
	u16 unused;	/**< Unused */
	u16 channels;	/**< 1,2..., (or ancillary mode or idle bit */
	u16 format;	/**< HPI_FORMAT_PCM16, _MPEG etc. see #HPI_FORMATS. */
};

/** A single frame of ancillary data (used by the ancillary read/write
    functions below). */
struct hpi_anc_frame {
	u32 valid_bits_in_this_frame;	/**< count of valid bits in b_data */
	u8 b_data[HPI_MAX_ANC_BYTES_PER_FRAME];	/**< ancillary data bytes */
};

/** An object for containing a single async event.
*/
struct hpi_async_event {
	u16 event_type;	/**< type of event. \sa async_event */
	u16 sequence;	/**< Sequence number, allows lost event detection */
	u32 state;	/**< New state */
	u32 h_object;	/**< handle to the object returning the event. */
	union {
		struct {
			u16 index;	/**< GPIO bit index. */
		} gpio;
		struct {
			u16 node_index;	/**< what node is the control on ? */
			u16 node_type;	/**< what type of node is the
					control on ? */
		} control;
	} u;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/*****************/
/* HPI FUNCTIONS */
/*****************/
/* All functions return 0 on success, otherwise one of the
   HPI_ERROR_CODES values above. */

/* Stream */
u16 hpi_stream_estimate_buffer_size(struct hpi_format *pF,
	u32 host_polling_rate_in_milli_seconds, u32 *recommended_buffer_size);

/*************/
/* SubSystem */
/*************/

u16 hpi_subsys_get_version_ex(u32 *pversion_ex);

u16 hpi_subsys_get_num_adapters(int *pn_num_adapters);

u16 hpi_subsys_get_adapter(int iterator, u32 *padapter_index,
	u16 *pw_adapter_type);

/***********/
/* Adapter */
/***********/

u16 hpi_adapter_open(u16 adapter_index);

u16 hpi_adapter_close(u16 adapter_index);

u16 hpi_adapter_get_info(u16 adapter_index, u16 *pw_num_outstreams,
	u16 *pw_num_instreams, u16 *pw_version, u32 *pserial_number,
	u16 *pw_adapter_type);

u16 hpi_adapter_get_module_by_index(u16 adapter_index, u16 module_index,
	u16 *pw_num_outputs, u16 *pw_num_inputs, u16 *pw_version,
	u32 *pserial_number, u16 *pw_module_type, u32 *ph_module);

u16 hpi_adapter_set_mode(u16 adapter_index, u32 adapter_mode);

u16 hpi_adapter_set_mode_ex(u16 adapter_index, u32 adapter_mode,
	u16 query_or_set);

u16 hpi_adapter_get_mode(u16 adapter_index, u32 *padapter_mode);

/* NOTE(review): "paramter" below is misspelled in the existing API;
   the names are kept as-is because they are part of the declaration. */
u16 hpi_adapter_set_property(u16 adapter_index, u16 property, u16 paramter1,
	u16 paramter2);

u16 hpi_adapter_get_property(u16 adapter_index, u16 property,
	u16 *pw_paramter1, u16 *pw_paramter2);

u16 hpi_adapter_enumerate_property(u16 adapter_index, u16 index,
	u16 what_to_enumerate, u16 property_index, u32 *psetting);

/*************/
/* OutStream */
/*************/
u16 hpi_outstream_open(u16 adapter_index, u16 outstream_index,
	u32 *ph_outstream);

u16 hpi_outstream_close(u32 h_outstream);

u16 hpi_outstream_get_info_ex(u32 h_outstream, u16 *pw_state,
	u32 *pbuffer_size, u32 *pdata_to_play, u32 *psamples_played,
	u32 *pauxiliary_data_to_play);

u16 hpi_outstream_write_buf(u32 h_outstream, const u8 *pb_write_buf,
	u32 bytes_to_write, const struct hpi_format *p_format);

u16 hpi_outstream_start(u32 h_outstream);
u16 hpi_outstream_wait_start(u32 h_outstream);

u16 hpi_outstream_stop(u32 h_outstream);

u16 hpi_outstream_sinegen(u32 h_outstream);

u16 hpi_outstream_reset(u32 h_outstream);

u16 hpi_outstream_query_format(u32 h_outstream, struct hpi_format *p_format);

u16 hpi_outstream_set_format(u32 h_outstream, struct hpi_format *p_format);

u16 hpi_outstream_set_punch_in_out(u32 h_outstream, u32 punch_in_sample,
	u32 punch_out_sample);

u16 hpi_outstream_set_velocity(u32 h_outstream, short velocity);

u16 hpi_outstream_ancillary_reset(u32 h_outstream, u16 mode);

u16 hpi_outstream_ancillary_get_info(u32 h_outstream,
	u32 *pframes_available);

u16 hpi_outstream_ancillary_read(u32 h_outstream,
	struct hpi_anc_frame *p_anc_frame_buffer,
	u32 anc_frame_buffer_size_in_bytes,
	u32 number_of_ancillary_frames_to_read);

u16 hpi_outstream_set_time_scale(u32 h_outstream, u32 time_scaleX10000);

u16 hpi_outstream_host_buffer_allocate(u32 h_outstream, u32 size_in_bytes);

u16 hpi_outstream_host_buffer_free(u32 h_outstream);

u16 hpi_outstream_group_add(u32 h_outstream, u32 h_stream);

u16 hpi_outstream_group_get_map(u32 h_outstream, u32 *poutstream_map,
	u32 *pinstream_map);

u16 hpi_outstream_group_reset(u32 h_outstream);

/************/
/* InStream */
/************/
u16 hpi_instream_open(u16 adapter_index, u16 instream_index,
	u32 *ph_instream);

u16 hpi_instream_close(u32 h_instream);

u16 hpi_instream_query_format(u32 h_instream,
	const struct hpi_format *p_format);

u16 hpi_instream_set_format(u32 h_instream,
	const struct hpi_format *p_format);

u16 hpi_instream_read_buf(u32 h_instream, u8 *pb_read_buf,
	u32 bytes_to_read);

u16 hpi_instream_start(u32 h_instream);

u16 hpi_instream_wait_start(u32 h_instream);

u16 hpi_instream_stop(u32 h_instream);

u16 hpi_instream_reset(u32 h_instream);

u16 hpi_instream_get_info_ex(u32 h_instream, u16 *pw_state,
	u32 *pbuffer_size, u32 *pdata_recorded, u32 *psamples_recorded,
	u32 *pauxiliary_data_recorded);

u16 hpi_instream_ancillary_reset(u32 h_instream, u16 bytes_per_frame,
	u16 mode, u16 alignment, u16 idle_bit);

u16 hpi_instream_ancillary_get_info(u32 h_instream, u32 *pframe_space);

u16 hpi_instream_ancillary_write(u32 h_instream,
	const struct hpi_anc_frame *p_anc_frame_buffer,
	u32 anc_frame_buffer_size_in_bytes,
	u32 number_of_ancillary_frames_to_write);

u16 hpi_instream_host_buffer_allocate(u32 h_instream, u32 size_in_bytes);

u16 hpi_instream_host_buffer_free(u32 h_instream);

u16 hpi_instream_group_add(u32 h_instream, u32 h_stream);

u16 hpi_instream_group_get_map(u32 h_instream, u32 *poutstream_map,
	u32 *pinstream_map);

u16 hpi_instream_group_reset(u32 h_instream);

/*********/
/* Mixer */
/*********/
u16 hpi_mixer_open(u16 adapter_index, u32 *ph_mixer);

u16 hpi_mixer_close(u32 h_mixer);

u16 hpi_mixer_get_control(u32 h_mixer, u16 src_node_type,
	u16 src_node_type_index, u16 dst_node_type, u16 dst_node_type_index,
	u16 control_type, u32 *ph_control);

u16 hpi_mixer_get_control_by_index(u32 h_mixer, u16 control_index,
	u16 *pw_src_node_type, u16 *pw_src_node_index, u16 *pw_dst_node_type,
	u16 *pw_dst_node_index, u16 *pw_control_type, u32 *ph_control);

u16 hpi_mixer_store(u32 h_mixer, enum HPI_MIXER_STORE_COMMAND command,
	u16 index);
/************/
/* Controls */
/************/
/******************/
/* Volume control */
/******************/
/* Gains are in 100ths of a dB (see HPI_UNITS_PER_dB above). */
u16 hpi_volume_set_gain(u32 h_control,
	short an_gain0_01dB[HPI_MAX_CHANNELS]
	);

u16 hpi_volume_get_gain(u32 h_control,
	short an_gain0_01dB_out[HPI_MAX_CHANNELS]
	);

u16 hpi_volume_set_mute(u32 h_control, u32 mute);

u16 hpi_volume_get_mute(u32 h_control, u32 *mute);

#define hpi_volume_get_range hpi_volume_query_range
u16 hpi_volume_query_range(u32 h_control, short *min_gain_01dB,
	short *max_gain_01dB, short *step_gain_01dB);

u16 hpi_volume_query_channels(const u32 h_control, u32 *p_channels);

u16 hpi_volume_auto_fade(u32 h_control,
	short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms);

u16 hpi_volume_auto_fade_profile(u32 h_control,
	short an_stop_gain0_01dB[HPI_MAX_CHANNELS], u32 duration_ms,
	u16 profile);

u16 hpi_volume_query_auto_fade_profile(const u32 h_control, const u32 i,
	u16 *profile);

/*****************/
/* Level control */
/*****************/
u16 hpi_level_query_range(u32 h_control, short *min_gain_01dB,
	short *max_gain_01dB, short *step_gain_01dB);

u16 hpi_level_set_gain(u32 h_control,
	short an_gain0_01dB[HPI_MAX_CHANNELS]
	);

u16 hpi_level_get_gain(u32 h_control,
	short an_gain0_01dB_out[HPI_MAX_CHANNELS]
	);

/*****************/
/* Meter control */
/*****************/
u16 hpi_meter_query_channels(const u32 h_meter, u32 *p_channels);

u16 hpi_meter_get_peak(u32 h_control,
	short an_peak0_01dB_out[HPI_MAX_CHANNELS]
	);

u16 hpi_meter_get_rms(u32 h_control,
	short an_peak0_01dB_out[HPI_MAX_CHANNELS]
	);

u16 hpi_meter_set_peak_ballistics(u32 h_control, u16 attack, u16 decay);

u16 hpi_meter_set_rms_ballistics(u32 h_control, u16 attack, u16 decay);

u16 hpi_meter_get_peak_ballistics(u32 h_control, u16 *attack, u16 *decay);

u16 hpi_meter_get_rms_ballistics(u32 h_control, u16 *attack, u16 *decay);

/************************/
/* ChannelMode control */
/************************/
u16 hpi_channel_mode_query_mode(const u32 h_mode, const u32 index,
	u16 *pw_mode);

u16 hpi_channel_mode_set(u32 h_control, u16 mode);

u16 hpi_channel_mode_get(u32 h_control, u16 *mode);

/*****************/
/* Tuner control */
/*****************/
u16 hpi_tuner_query_band(const u32 h_tuner, const u32 index, u16 *pw_band);

u16 hpi_tuner_set_band(u32 h_control, u16 band);

u16 hpi_tuner_get_band(u32 h_control, u16 *pw_band);

u16 hpi_tuner_query_frequency(const u32 h_tuner, const u32 index,
	const u16 band, u32 *pfreq);

u16 hpi_tuner_set_frequency(u32 h_control, u32 freq_ink_hz);

u16 hpi_tuner_get_frequency(u32 h_control, u32 *pw_freq_ink_hz);

u16 hpi_tuner_get_rf_level(u32 h_control, short *pw_level);

u16 hpi_tuner_get_raw_rf_level(u32 h_control, short *pw_level);

u16 hpi_tuner_query_gain(const u32 h_tuner, const u32 index, u16 *pw_gain);
gain); u16 hpi_tuner_get_gain(u32 h_control, short *pn_gain); u16 hpi_tuner_get_status(u32 h_control, u16 *pw_status_mask, u16 *pw_status); u16 hpi_tuner_set_mode(u32 h_control, u32 mode, u32 value); u16 hpi_tuner_get_mode(u32 h_control, u32 mode, u32 *pn_value); u16 hpi_tuner_get_rds(u32 h_control, char *p_rds_data); u16 hpi_tuner_query_deemphasis(const u32 h_tuner, const u32 index, const u16 band, u32 *pdeemphasis); u16 hpi_tuner_set_deemphasis(u32 h_control, u32 deemphasis); u16 hpi_tuner_get_deemphasis(u32 h_control, u32 *pdeemphasis); u16 hpi_tuner_query_program(const u32 h_tuner, u32 *pbitmap_program); u16 hpi_tuner_set_program(u32 h_control, u32 program); u16 hpi_tuner_get_program(u32 h_control, u32 *pprogram); u16 hpi_tuner_get_hd_radio_dsp_version(u32 h_control, char *psz_dsp_version, const u32 string_size); u16 hpi_tuner_get_hd_radio_sdk_version(u32 h_control, char *psz_sdk_version, const u32 string_size); u16 hpi_tuner_get_hd_radio_signal_quality(u32 h_control, u32 *pquality); u16 hpi_tuner_get_hd_radio_signal_blend(u32 h_control, u32 *pblend); u16 hpi_tuner_set_hd_radio_signal_blend(u32 h_control, const u32 blend); /***************/ /* PAD control */ /***************/ u16 hpi_pad_get_channel_name(u32 h_control, char *psz_string, const u32 string_length); u16 hpi_pad_get_artist(u32 h_control, char *psz_string, const u32 string_length); u16 hpi_pad_get_title(u32 h_control, char *psz_string, const u32 string_length); u16 hpi_pad_get_comment(u32 h_control, char *psz_string, const u32 string_length); u16 hpi_pad_get_program_type(u32 h_control, u32 *ppTY); u16 hpi_pad_get_rdsPI(u32 h_control, u32 *ppI); /****************************/ /* AES/EBU Receiver control */ /****************************/ u16 hpi_aesebu_receiver_query_format(const u32 h_aes_rx, const u32 index, u16 *pw_format); u16 hpi_aesebu_receiver_set_format(u32 h_control, u16 source); u16 hpi_aesebu_receiver_get_format(u32 h_control, u16 *pw_source); u16 hpi_aesebu_receiver_get_sample_rate(u32 
h_control, u32 *psample_rate); u16 hpi_aesebu_receiver_get_user_data(u32 h_control, u16 index, u16 *pw_data); u16 hpi_aesebu_receiver_get_channel_status(u32 h_control, u16 index, u16 *pw_data); u16 hpi_aesebu_receiver_get_error_status(u32 h_control, u16 *pw_error_data); /*******************************/ /* AES/EBU Transmitter control */ /*******************************/ u16 hpi_aesebu_transmitter_set_sample_rate(u32 h_control, u32 sample_rate); u16 hpi_aesebu_transmitter_set_user_data(u32 h_control, u16 index, u16 data); u16 hpi_aesebu_transmitter_set_channel_status(u32 h_control, u16 index, u16 data); u16 hpi_aesebu_transmitter_get_channel_status(u32 h_control, u16 index, u16 *pw_data); u16 hpi_aesebu_transmitter_query_format(const u32 h_aes_tx, const u32 index, u16 *pw_format); u16 hpi_aesebu_transmitter_set_format(u32 h_control, u16 output_format); u16 hpi_aesebu_transmitter_get_format(u32 h_control, u16 *pw_output_format); /***********************/ /* Multiplexer control */ /***********************/ u16 hpi_multiplexer_set_source(u32 h_control, u16 source_node_type, u16 source_node_index); u16 hpi_multiplexer_get_source(u32 h_control, u16 *source_node_type, u16 *source_node_index); u16 hpi_multiplexer_query_source(u32 h_control, u16 index, u16 *source_node_type, u16 *source_node_index); /***************/ /* Vox control */ /***************/ u16 hpi_vox_set_threshold(u32 h_control, short an_gain0_01dB); u16 hpi_vox_get_threshold(u32 h_control, short *an_gain0_01dB); /*********************/ /* Bitstream control */ /*********************/ u16 hpi_bitstream_set_clock_edge(u32 h_control, u16 edge_type); u16 hpi_bitstream_set_data_polarity(u32 h_control, u16 polarity); u16 hpi_bitstream_get_activity(u32 h_control, u16 *pw_clk_activity, u16 *pw_data_activity); /***********************/ /* SampleClock control */ /***********************/ u16 hpi_sample_clock_query_source(const u32 h_clock, const u32 index, u16 *pw_source); u16 hpi_sample_clock_set_source(u32 h_control, 
u16 source); u16 hpi_sample_clock_get_source(u32 h_control, u16 *pw_source); u16 hpi_sample_clock_query_source_index(const u32 h_clock, const u32 index, const u32 source, u16 *pw_source_index); u16 hpi_sample_clock_set_source_index(u32 h_control, u16 source_index); u16 hpi_sample_clock_get_source_index(u32 h_control, u16 *pw_source_index); u16 hpi_sample_clock_get_sample_rate(u32 h_control, u32 *psample_rate); u16 hpi_sample_clock_query_local_rate(const u32 h_clock, const u32 index, u32 *psource); u16 hpi_sample_clock_set_local_rate(u32 h_control, u32 sample_rate); u16 hpi_sample_clock_get_local_rate(u32 h_control, u32 *psample_rate); u16 hpi_sample_clock_set_auto(u32 h_control, u32 enable); u16 hpi_sample_clock_get_auto(u32 h_control, u32 *penable); u16 hpi_sample_clock_set_local_rate_lock(u32 h_control, u32 lock); u16 hpi_sample_clock_get_local_rate_lock(u32 h_control, u32 *plock); /***********************/ /* Microphone control */ /***********************/ u16 hpi_microphone_set_phantom_power(u32 h_control, u16 on_off); u16 hpi_microphone_get_phantom_power(u32 h_control, u16 *pw_on_off); /********************************/ /* Parametric Equalizer control */ /********************************/ u16 hpi_parametric_eq_get_info(u32 h_control, u16 *pw_number_of_bands, u16 *pw_enabled); u16 hpi_parametric_eq_set_state(u32 h_control, u16 on_off); u16 hpi_parametric_eq_set_band(u32 h_control, u16 index, u16 type, u32 frequency_hz, short q100, short gain0_01dB); u16 hpi_parametric_eq_get_band(u32 h_control, u16 index, u16 *pn_type, u32 *pfrequency_hz, short *pnQ100, short *pn_gain0_01dB); u16 hpi_parametric_eq_get_coeffs(u32 h_control, u16 index, short coeffs[5] ); /*******************************/ /* Compressor Expander control */ /*******************************/ u16 hpi_compander_set_enable(u32 h_control, u32 on); u16 hpi_compander_get_enable(u32 h_control, u32 *pon); u16 hpi_compander_set_makeup_gain(u32 h_control, short makeup_gain0_01dB); u16 
hpi_compander_get_makeup_gain(u32 h_control, short *pn_makeup_gain0_01dB); u16 hpi_compander_set_attack_time_constant(u32 h_control, u32 index, u32 attack); u16 hpi_compander_get_attack_time_constant(u32 h_control, u32 index, u32 *pw_attack); u16 hpi_compander_set_decay_time_constant(u32 h_control, u32 index, u32 decay); u16 hpi_compander_get_decay_time_constant(u32 h_control, u32 index, u32 *pw_decay); u16 hpi_compander_set_threshold(u32 h_control, u32 index, short threshold0_01dB); u16 hpi_compander_get_threshold(u32 h_control, u32 index, short *pn_threshold0_01dB); u16 hpi_compander_set_ratio(u32 h_control, u32 index, u32 ratio100); u16 hpi_compander_get_ratio(u32 h_control, u32 index, u32 *pw_ratio100); /********************/ /* Cobranet control */ /********************/ u16 hpi_cobranet_hmi_write(u32 h_control, u32 hmi_address, u32 byte_count, u8 *pb_data); u16 hpi_cobranet_hmi_read(u32 h_control, u32 hmi_address, u32 max_byte_count, u32 *pbyte_count, u8 *pb_data); u16 hpi_cobranet_hmi_get_status(u32 h_control, u32 *pstatus, u32 *preadable_size, u32 *pwriteable_size); u16 hpi_cobranet_get_ip_address(u32 h_control, u32 *pdw_ip_address); u16 hpi_cobranet_set_ip_address(u32 h_control, u32 dw_ip_address); u16 hpi_cobranet_get_static_ip_address(u32 h_control, u32 *pdw_ip_address); u16 hpi_cobranet_set_static_ip_address(u32 h_control, u32 dw_ip_address); u16 hpi_cobranet_get_macaddress(u32 h_control, u32 *p_mac_msbs, u32 *p_mac_lsbs); /*************************/ /* Tone Detector control */ /*************************/ u16 hpi_tone_detector_get_state(u32 hC, u32 *state); u16 hpi_tone_detector_set_enable(u32 hC, u32 enable); u16 hpi_tone_detector_get_enable(u32 hC, u32 *enable); u16 hpi_tone_detector_set_event_enable(u32 hC, u32 event_enable); u16 hpi_tone_detector_get_event_enable(u32 hC, u32 *event_enable); u16 hpi_tone_detector_set_threshold(u32 hC, int threshold); u16 hpi_tone_detector_get_threshold(u32 hC, int *threshold); u16 hpi_tone_detector_get_frequency(u32 
hC, u32 index, u32 *frequency); /****************************/ /* Silence Detector control */ /****************************/ u16 hpi_silence_detector_get_state(u32 hC, u32 *state); u16 hpi_silence_detector_set_enable(u32 hC, u32 enable); u16 hpi_silence_detector_get_enable(u32 hC, u32 *enable); u16 hpi_silence_detector_set_event_enable(u32 hC, u32 event_enable); u16 hpi_silence_detector_get_event_enable(u32 hC, u32 *event_enable); u16 hpi_silence_detector_set_delay(u32 hC, u32 delay); u16 hpi_silence_detector_get_delay(u32 hC, u32 *delay); u16 hpi_silence_detector_set_threshold(u32 hC, int threshold); u16 hpi_silence_detector_get_threshold(u32 hC, int *threshold); /*********************/ /* Utility functions */ /*********************/ u16 hpi_format_create(struct hpi_format *p_format, u16 channels, u16 format, u32 sample_rate, u32 bit_rate, u32 attributes); #endif /*_HPI_H_ */
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
 * Device Tree Bindings for Cisco Meraki MX65 series (Alamo).
 *
 * Copyright (C) 2020-2021 Matthew Hagan <[email protected]>
 */

/* Board-family common nodes shared by the MX6x devices. */
#include "bcm958625-meraki-mx6x-common.dtsi"

/ {
	/* Single front-panel reset button, polled every 20 ms. */
	keys {
		compatible = "gpio-keys-polled";
		autorepeat;
		poll-interval = <20>;

		button-reset {
			label = "reset";
			linux,code = <KEY_RESTART>;
			gpios = <&gpioa 8 GPIO_ACTIVE_LOW>;
		};
	};

	/*
	 * Front-panel LEDs. The four green activity LEDs (two per WAN
	 * port) are active-low; the amber fault and white status LEDs
	 * are active-high.
	 */
	leds {
		compatible = "gpio-leds";

		led-0 {
			/* green:wan1-left */
			function = LED_FUNCTION_ACTIVITY;
			function-enumerator = <0>;
			color = <LED_COLOR_ID_GREEN>;
			gpios = <&gpioa 25 GPIO_ACTIVE_LOW>;
		};

		led-1 {
			/* green:wan1-right */
			function = LED_FUNCTION_ACTIVITY;
			function-enumerator = <1>;
			color = <LED_COLOR_ID_GREEN>;
			gpios = <&gpioa 24 GPIO_ACTIVE_LOW>;
		};

		led-2 {
			/* green:wan2-left */
			function = LED_FUNCTION_ACTIVITY;
			function-enumerator = <2>;
			color = <LED_COLOR_ID_GREEN>;
			gpios = <&gpioa 27 GPIO_ACTIVE_LOW>;
		};

		led-3 {
			/* green:wan2-right */
			function = LED_FUNCTION_ACTIVITY;
			function-enumerator = <3>;
			color = <LED_COLOR_ID_GREEN>;
			gpios = <&gpioa 26 GPIO_ACTIVE_LOW>;
		};

		led-4 {
			/* amber:power */
			function = LED_FUNCTION_FAULT;
			color = <LED_COLOR_ID_AMBER>;
			gpios = <&gpioa 3 GPIO_ACTIVE_HIGH>;
		};

		led-5 {
			/* white:status */
			function = LED_FUNCTION_STATUS;
			color = <LED_COLOR_ID_WHITE>;
			gpios = <&gpioa 31 GPIO_ACTIVE_HIGH>;
		};
	};
};

&axi {
	/*
	 * Register-based MDIO mux: bit 0x2000 of the register at 0x3f1c0
	 * selects between the two external MDIO buses, each carrying five
	 * PHYs and one QCA8337 switch.
	 */
	mdio-mux@3f1c0 {
		compatible = "mdio-mux-mmioreg", "mdio-mux";
		reg = <0x3f1c0 0x4>;
		mux-mask = <0x2000>;
		mdio-parent-bus = <&mdio_ext>;
		#address-cells = <1>;
		#size-cells = <0>;

		/* Mux value 0: bus for DSA switch 1 (ports lan8-lan12). */
		mdio@0 {
			reg = <0x0>;
			#address-cells = <1>;
			#size-cells = <0>;

			phy_port6: phy@0 {
				reg = <0>;
			};

			phy_port7: phy@1 {
				reg = <1>;
			};

			phy_port8: phy@2 {
				reg = <2>;
			};

			phy_port9: phy@3 {
				reg = <3>;
			};

			phy_port10: phy@4 {
				reg = <4>;
			};

			switch@10 {
				compatible = "qca,qca8337";
				reg = <0x10>;
				dsa,member = <1 0>;

				ports {
					#address-cells = <1>;
					#size-cells = <0>;

					/* CPU-facing port, SGMII link to &sgmii1. */
					port@0 {
						reg = <0>;
						ethernet = <&sgmii1>;
						phy-mode = "sgmii";
						qca,sgmii-enable-pll;
						qca,sgmii-txclk-falling-edge;

						fixed-link {
							speed = <1000>;
							full-duplex;
						};
					};

					port@1 {
						reg = <1>;
						label = "lan8";
						phy-handle = <&phy_port6>;
					};

					port@2 {
						reg = <2>;
						label = "lan9";
						phy-handle = <&phy_port7>;
					};

					port@3 {
						reg = <3>;
						label = "lan10";
						phy-handle = <&phy_port8>;
					};

					port@4 {
						reg = <4>;
						label = "lan11";
						phy-handle = <&phy_port9>;
					};

					port@5 {
						reg = <5>;
						label = "lan12";
						phy-handle = <&phy_port10>;
					};
				};
			};
		};

		/* Mux value 0x2000: bus for DSA switch 2 (ports lan3-lan7). */
		mdio-mii@2000 {
			reg = <0x2000>;
			#address-cells = <1>;
			#size-cells = <0>;

			phy_port1: phy@0 {
				reg = <0>;
			};

			phy_port2: phy@1 {
				reg = <1>;
			};

			phy_port3: phy@2 {
				reg = <2>;
			};

			phy_port4: phy@3 {
				reg = <3>;
			};

			phy_port5: phy@4 {
				reg = <4>;
			};

			switch@10 {
				compatible = "qca,qca8337";
				reg = <0x10>;
				dsa,member = <2 0>;

				ports {
					#address-cells = <1>;
					#size-cells = <0>;

					/* CPU-facing port, SGMII link to &sgmii0. */
					port@0 {
						reg = <0>;
						ethernet = <&sgmii0>;
						phy-mode = "sgmii";
						qca,sgmii-enable-pll;
						qca,sgmii-txclk-falling-edge;

						fixed-link {
							speed = <1000>;
							full-duplex;
						};
					};

					port@1 {
						reg = <1>;
						label = "lan3";
						phy-handle = <&phy_port1>;
					};

					port@2 {
						reg = <2>;
						label = "lan4";
						phy-handle = <&phy_port2>;
					};

					port@3 {
						reg = <3>;
						label = "lan5";
						phy-handle = <&phy_port3>;
					};

					port@4 {
						reg = <4>;
						label = "lan6";
						phy-handle = <&phy_port4>;
					};

					port@5 {
						reg = <5>;
						label = "lan7";
						phy-handle = <&phy_port5>;
					};
				};
			};
		};
	};
};

/*
 * Integrated NSP switch: DSA tree member 0. Ports 4 and 5 are the SGMII
 * uplinks into the two QCA8337 switches above; port 8 connects to the
 * third SoC MAC (&amac2).
 */
&srab {
	compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
	status = "okay";
	dsa,member = <0 0>;

	ports {
		port@0 {
			label = "wan1";
			reg = <0>;
		};

		port@1 {
			label = "wan2";
			reg = <1>;
		};

		sgmii0: port@4 {
			label = "sw0";
			reg = <4>;

			fixed-link {
				speed = <1000>;
				full-duplex;
			};
		};

		sgmii1: port@5 {
			label = "sw1";
			reg = <5>;

			fixed-link {
				speed = <1000>;
				full-duplex;
			};
		};

		port@8 {
			ethernet = <&amac2>;
			reg = <8>;

			fixed-link {
				speed = <1000>;
				full-duplex;
			};
		};
	};
};
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H

#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "super.h"

/*
 * Byte-swap a 40-bit value held in the low five bytes of a u64:
 * bytes 0<->4 and 1<->3 are exchanged, byte 2 stays in place.
 */
static inline u64 swab40(u64 x)
{
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >> 0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
}

int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, enum bch_validate_flags);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);

/* bkey_ops vtable entry for KEY_TYPE_backpointer keys. */
#define bch2_bkey_ops_backpointer ((struct bkey_ops) {	\
	.key_validate	= bch2_backpointer_validate,	\
	.val_to_text	= bch2_backpointer_k_to_text,	\
	.swab		= bch2_backpointer_swab,	\
	.min_val_size	= 32,				\
})

/*
 * Offsets in the backpointers btree are bucket sector offsets scaled up
 * by this shift (see bp_pos_to_bucket()/bucket_pos_to_bp_noerror()),
 * which leaves room for sub-sector positions of compressed extents.
 */
#define MAX_EXTENT_COMPRESS_RATIO_SHIFT		10

/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_dev *ca, struct bpos bp_pos)
{
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}

/*
 * As bp_pos_to_bucket(), but looks the device up from bp_pos.inode under
 * RCU; returns false (leaving *bucket untouched) when the device does not
 * exist.
 */
static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, bp_pos.inode);
	if (ca)
		*bucket = bp_pos_to_bucket(ca, bp_pos);
	rcu_read_unlock();
	return ca != NULL;
}

/*
 * As above, but a missing device is treated as filesystem inconsistency
 * (logged via bch2_fs_inconsistent_on()).
 */
static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
{
	return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket),
					c, "backpointer for missing device %llu", bp_pos.inode);
}

static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca,
						   struct bpos bucket,
						   u64 bucket_offset)
{
	return POS(bucket.inode,
		   (bucket_to_sector(ca, bucket.offset) <<
		    MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
}

/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_dev *ca,
					   struct bpos bucket,
					   u64 bucket_offset)
{
	struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);
	/* Debug-build check that the conversion round-trips. */
	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret)));
	return ret;
}

int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bch_dev *,
				struct bpos bucket, struct bch_backpointer,
				struct bkey_s_c, bool);

/*
 * Insert (insert == true) or delete a backpointer for @bucket. Normally
 * goes through the btree write buffer; falls back to a direct btree
 * update when bch2_backpointers_no_use_write_buffer is set. Deletion is
 * encoded as a zero-size KEY_TYPE_deleted key at the backpointer's pos.
 */
static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bpos bucket,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k,
				bool insert)
{
	if (unlikely(bch2_backpointers_no_use_write_buffer))
		return bch2_bucket_backpointer_mod_nowritebuffer(trans, ca, bucket, bp, orig_k, insert);

	struct bkey_i_backpointer bp_k;

	bkey_backpointer_init(&bp_k.k_i);
	bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
	bp_k.v = bp;

	if (!insert) {
		bp_k.k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&bp_k.k, 0);
	}

	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
}

/*
 * Classify the data type referenced by pointer @p/@entry of key @k.
 * For stripe keys, pointers in the trailing nr_redundant slots of the
 * ptrs array are parity blocks, the rest are user data; erasure-coded
 * extent pointers (p.has_ec) count as stripe data.
 */
static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
							 struct extent_ptr_decoded p,
							 const union bch_extent_entry *entry)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return BCH_DATA_btree;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
	case KEY_TYPE_stripe: {
		const struct bch_extent_ptr *ptr = &entry->ptr;
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		BUG_ON(ptr < s.v->ptrs ||
		       ptr >= s.v->ptrs + s.v->nr_blocks);

		return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
			? BCH_DATA_parity
			: BCH_DATA_user;
	}
	default:
		BUG();
	}
}

/*
 * Build the backpointer (*bp) and its bucket pos (*bucket_pos) for
 * pointer @p of key @k, given an explicit length @sectors. The stored
 * bucket_offset is scaled by MAX_EXTENT_COMPRESS_RATIO_SHIFT with the
 * checksum-entry offset (p.crc.offset) added in the low bits.
 */
static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c k, struct extent_ptr_decoded p,
			   const union bch_extent_entry *entry,
			   struct bpos *bucket_pos, struct bch_backpointer *bp,
			   u64 sectors)
{
	u32 bucket_offset;
	*bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
	*bp = (struct bch_backpointer) {
		.btree_id	= btree_id,
		.level		= level,
		.data_type	= bch2_bkey_ptr_data_type(k, p, entry),
		.bucket_offset	= ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
			p.crc.offset,
		.bucket_len	= sectors,
		.pos		= k.k->p,
	};
}

/*
 * As __bch2_extent_ptr_to_bp(), computing the on-disk length itself:
 * btree nodes (level != 0) use btree_sectors(c), leaf extents use the
 * key's size.
 */
static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c k, struct extent_ptr_decoded p,
			   const union bch_extent_entry *entry,
			   struct bpos *bucket_pos, struct bch_backpointer *bp)
{
	u64 sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);

	__bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors);
}

int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int,
			      struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
					 struct bpos, struct bch_backpointer,
					 unsigned);
struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
					struct bpos, struct bch_backpointer);

int bch2_check_btree_backpointers(struct bch_fs *);
int bch2_check_extents_to_backpointers(struct bch_fs *);
int bch2_check_backpointers_to_extents(struct bch_fs *);

#endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */
/* SPDX-License-Identifier: GPL-2.0 * * Copyright 2016-2018 HabanaLabs, Ltd. * All Rights Reserved. * */ /************************************ ** This is an auto-generated file ** ** DO NOT EDIT BELOW ** ************************************/ #ifndef ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_ #define ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_ /* ***************************************** * DMA_IF_W_N_DOWN_CH1 (Prototype: RTR_CTRL) ***************************************** */ #define mmDMA_IF_W_N_DOWN_CH1_PERM_SEL 0x4C2108 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_0 0x4C2114 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_1 0x4C2118 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_2 0x4C211C #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_3 0x4C2120 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_4 0x4C2124 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_5 0x4C2128 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_6 0x4C212C #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_7 0x4C2130 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_8 0x4C2134 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_9 0x4C2138 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_10 0x4C213C #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_11 0x4C2140 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_12 0x4C2144 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_13 0x4C2148 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_14 0x4C214C #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_15 0x4C2150 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_16 0x4C2154 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_17 0x4C2158 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_18 0x4C215C #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_19 0x4C2160 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_20 0x4C2164 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_21 0x4C2168 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_22 0x4C216C #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_23 0x4C2170 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_24 0x4C2174 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_25 0x4C2178 #define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_26 0x4C217C #define 
mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_27 0x4C2180 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_0 0x4C2184 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_1 0x4C2188 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_2 0x4C218C #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_3 0x4C2190 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_4 0x4C2194 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_5 0x4C2198 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_6 0x4C219C #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_7 0x4C21A0 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_8 0x4C21A4 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_9 0x4C21A8 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_10 0x4C21AC #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_11 0x4C21B0 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_12 0x4C21B4 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_13 0x4C21B8 #define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_14 0x4C21BC #define mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN 0x4C226C #define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN 0x4C2274 #define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT 0x4C2278 #define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST 0x4C227C #define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT 0x4C2280 #define mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN 0x4C2284 #define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_EN 0x4C2288 #define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_SAT 0x4C228C #define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_RST 0x4C2290 #define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_TIMEOUT 0x4C2294 #define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN 0x4C229C #define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT 0x4C22A0 #define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST 0x4C22A4 #define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT 0x4C22AC #define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RED 0x4C22B4 #define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN 0x4C22EC #define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN 0x4C22F0 #define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE 0x4C22F4 #define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE 0x4C22F8 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4C2404 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4C2408 #define 
mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4C240C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4C2410 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4C2414 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4C2418 #define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE 0x4C241C #define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE 0x4C2420 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4C2424 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4C2428 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4C242C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4C2430 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4C2434 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4C2438 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0 0x4C2450 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1 0x4C2454 #define mmDMA_IF_W_N_DOWN_CH1_NON_LIN_EN 0x4C2480 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_0 0x4C2500 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_1 0x4C2504 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_2 0x4C2508 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_3 0x4C250C #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_4 0x4C2510 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_0 0x4C2514 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_1 0x4C2520 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_2 0x4C2524 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_3 0x4C2528 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_4 0x4C252C #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_5 0x4C2530 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_6 0x4C2534 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_7 0x4C2538 #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_8 0x4C253C #define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_9 0x4C2540 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_0 0x4C2550 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_1 0x4C2554 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_2 0x4C2558 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_3 0x4C255C #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_4 0x4C2560 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_5 
0x4C2564 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_6 0x4C2568 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_7 0x4C256C #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_8 0x4C2570 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_9 0x4C2574 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_10 0x4C2578 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_11 0x4C257C #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_12 0x4C2580 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_13 0x4C2584 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_14 0x4C2588 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_15 0x4C258C #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_16 0x4C2590 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_17 0x4C2594 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18 0x4C2598 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4C25E4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4C25E8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4C25EC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4C25F0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4C25F4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4C25F8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4C25FC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4C2600 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4C2604 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4C2608 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4C260C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4C2610 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4C2614 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4C2618 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4C261C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4C2620 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4C2624 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4C2628 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4C262C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 
0x4C2630 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4C2634 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4C2638 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4C263C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4C2640 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4C2644 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4C2648 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4C264C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4C2650 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4C2654 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4C2658 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4C265C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4C2660 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4C2664 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4C2668 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4C266C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4C2670 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4C2674 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4C2678 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4C267C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4C2680 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4C2684 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4C2688 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4C268C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4C2690 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4C2694 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4C2698 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4C269C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4C26A0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4C26A4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4C26A8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4C26AC #define 
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4C26B0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4C26B4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4C26B8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4C26BC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4C26C0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4C26C4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4C26C8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4C26CC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4C26D0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4C26D4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4C26D8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4C26DC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4C26E0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4C26E4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4C26E8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4C26EC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4C26F0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4C26F4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4C26F8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4C26FC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4C2700 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4C2704 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4C2708 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4C270C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4C2710 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4C2714 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4C2718 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4C271C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4C2720 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4C2724 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4C2728 #define 
mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4C272C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4C2730 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4C2734 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4C2738 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4C273C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4C2740 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4C2744 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4C2748 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4C274C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4C2750 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4C2754 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4C2758 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4C275C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4C2760 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4C2764 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4C2768 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4C276C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4C2770 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4C2774 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4C2778 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4C277C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4C2780 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4C2784 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4C2788 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4C278C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4C2790 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4C2794 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4C2798 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4C279C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4C27A0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 
0x4C27A4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4C27A8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4C27AC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4C27B0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4C27B4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4C27B8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4C27BC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4C27C0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4C27C4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4C27C8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4C27CC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4C27D0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4C27D4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4C27D8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4C27DC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4C27E0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4C2824 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4C2828 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4C282C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4C2830 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4C2834 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4C2838 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4C283C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4C2840 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4C2844 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4C2848 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4C284C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4C2850 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4C2854 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4C2858 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4C285C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 
0x4C2860 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4C2864 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4C2868 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4C286C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4C2870 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4C2874 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4C2878 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4C287C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4C2880 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4C2884 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4C2888 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4C288C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4C2890 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4C2894 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4C2898 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4C289C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4C28A0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4C28A4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4C28A8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4C28AC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4C28B0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4C28B4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4C28B8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4C28BC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4C28C0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4C28C4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4C28C8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4C28CC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4C28D0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4C28D4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4C28D8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4C28DC #define 
mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4C28E0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4C28E4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4C28E8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4C28EC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4C28F0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4C28F4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4C28F8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4C28FC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4C2900 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4C2904 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4C2908 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4C290C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4C2910 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4C2914 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4C2918 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4C291C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4C2920 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4C2924 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4C2928 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4C292C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4C2930 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4C2934 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4C2938 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4C293C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4C2940 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4C2944 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4C2948 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4C294C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4C2950 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4C2954 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4C2958 #define 
mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4C295C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4C2960 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4C2964 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4C2968 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4C296C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4C2970 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4C2974 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4C2978 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4C297C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4C2980 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4C2984 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4C2988 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4C298C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4C2990 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4C2994 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4C2998 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4C299C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4C29A0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4C29A4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4C29A8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4C29AC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4C29B0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4C29B4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4C29B8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4C29BC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4C29C0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4C29C4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4C29C8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4C29CC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4C29D0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 
0x4C29D4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4C29D8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4C29DC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4C29E0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4C29E4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4C29E8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4C29EC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4C29F0 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4C29F4 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4C29F8 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4C29FC #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4C2A00 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4C2A04 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4C2A08 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4C2A0C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4C2A10 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4C2A14 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4C2A18 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4C2A1C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4C2A20 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AW 0x4C2A64 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AR 0x4C2A68 #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4C2A6C #define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4C2A70 #define mmDMA_IF_W_N_DOWN_CH1_RGL_CFG 0x4C2B64 #define mmDMA_IF_W_N_DOWN_CH1_RGL_SHIFT 0x4C2B68 #define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4C2B6C #define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4C2B70 #define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4C2B74 #define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4C2B78 #define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4C2B7C #define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4C2B80 #define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4C2B84 #define 
mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4C2B88 #define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_0 0x4C2BAC #define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_1 0x4C2BB0 #define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_2 0x4C2BB4 #define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_3 0x4C2BB8 #define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_4 0x4C2BBC #define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_5 0x4C2BC0 #define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_6 0x4C2BC4 #define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_7 0x4C2BC8 #define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_0 0x4C2BEC #define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_1 0x4C2BF0 #define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_2 0x4C2BF4 #define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_3 0x4C2BF8 #define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_4 0x4C2BFC #define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_5 0x4C2C00 #define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_6 0x4C2C04 #define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_7 0x4C2C08 #define mmDMA_IF_W_N_DOWN_CH1_RGL_WDT 0x4C2C2C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4C2C30 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4C2C34 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4C2C38 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4C2C3C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4C2C40 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4C2C44 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4C2C48 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4C2C4C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4C2C50 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4C2C54 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4C2C58 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4C2C5C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4C2C60 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4C2C64 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4C2C68 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4C2C6C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 
0x4C2C70 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4C2C74 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4C2C78 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4C2C7C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4C2C80 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4C2C84 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4C2C88 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4C2C8C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4C2C90 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4C2C94 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4C2C98 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4C2C9C #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4C2CA0 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4C2CA4 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4C2CA8 #define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4C2CAC #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_0 0x4C2CB0 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_1 0x4C2CB4 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_2 0x4C2CB8 #define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3 0x4C2CBC #endif /* ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_ */
// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Copyright (C) 2015-2016 The fiat-crypto Authors. * Copyright (C) 2018-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved. * * This is a machine-generated formally verified implementation of Curve25519 * ECDH from: <https://github.com/mit-plv/fiat-crypto>. Though originally * machine generated, it has been tweaked to be suitable for use in the kernel. * It is optimized for 32-bit machines and machines that cannot work efficiently * with 128-bit integer types. */ #include <linux/unaligned.h> #include <crypto/curve25519.h> #include <linux/string.h> /* fe means field element. Here the field is \Z/(2^255-19). An element t, * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 * t[3]+2^102 t[4]+...+2^230 t[9]. * fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc. * Multiplication and carrying produce fe from fe_loose. */ typedef struct fe { u32 v[10]; } fe; /* fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc * Addition and subtraction produce fe_loose from (fe, fe). */ typedef struct fe_loose { u32 v[10]; } fe_loose; static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s) { /* Ignores top bit of s. */ u32 a0 = get_unaligned_le32(s); u32 a1 = get_unaligned_le32(s+4); u32 a2 = get_unaligned_le32(s+8); u32 a3 = get_unaligned_le32(s+12); u32 a4 = get_unaligned_le32(s+16); u32 a5 = get_unaligned_le32(s+20); u32 a6 = get_unaligned_le32(s+24); u32 a7 = get_unaligned_le32(s+28); h[0] = a0&((1<<26)-1); /* 26 used, 32-26 left. 
26 */ h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 = 6+19 = 25 */ h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */ h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) + 6 = 19+ 6 = 25 */ h[4] = (a3>> 6); /* (32- 6) = 26 */ h[5] = a4&((1<<25)-1); /* 25 */ h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 = 7+19 = 26 */ h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */ h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) + 6 = 20+ 6 = 26 */ h[9] = (a7>> 6)&((1<<25)-1); /* 25 */ } static __always_inline void fe_frombytes(fe *h, const u8 *s) { fe_frombytes_impl(h->v, s); } static __always_inline u8 /*bool*/ addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low) { /* This function extracts 25 bits of result and 1 bit of carry * (26 total), so a 32-bit intermediate is sufficient. */ u32 x = a + b + c; *low = x & ((1 << 25) - 1); return (x >> 25) & 1; } static __always_inline u8 /*bool*/ addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low) { /* This function extracts 26 bits of result and 1 bit of carry * (27 total), so a 32-bit intermediate is sufficient. */ u32 x = a + b + c; *low = x & ((1 << 26) - 1); return (x >> 26) & 1; } static __always_inline u8 /*bool*/ subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low) { /* This function extracts 25 bits of result and 1 bit of borrow * (26 total), so a 32-bit intermediate is sufficient. */ u32 x = a - b - c; *low = x & ((1 << 25) - 1); return x >> 31; } static __always_inline u8 /*bool*/ subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low) { /* This function extracts 26 bits of result and 1 bit of borrow *(27 total), so a 32-bit intermediate is sufficient. 
*/ u32 x = a - b - c; *low = x & ((1 << 26) - 1); return x >> 31; } static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz) { t = -!!t; /* all set if nonzero, 0 if 0 */ return (t&nz) | ((~t)&z); } static __always_inline void fe_freeze(u32 out[10], const u32 in1[10]) { { const u32 x17 = in1[9]; { const u32 x18 = in1[8]; { const u32 x16 = in1[7]; { const u32 x14 = in1[6]; { const u32 x12 = in1[5]; { const u32 x10 = in1[4]; { const u32 x8 = in1[3]; { const u32 x6 = in1[2]; { const u32 x4 = in1[1]; { const u32 x2 = in1[0]; { u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20); { u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23); { u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26); { u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29); { u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32); { u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35); { u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38); { u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41); { u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44); { u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47); { u32 x49 = cmovznz32(x48, 0x0, 0xffffffff); { u32 x50 = (x49 & 0x3ffffed); { u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52); { u32 x54 = (x49 & 0x1ffffff); { u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56); { u32 x58 = (x49 & 0x3ffffff); { u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60); { u32 x62 = (x49 & 0x1ffffff); { u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64); { u32 x66 = (x49 & 0x3ffffff); { u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68); { u32 x70 = (x49 & 0x1ffffff); { u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72); { u32 x74 = (x49 & 0x3ffffff); { u32 x76; u8/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76); { u32 x78 = (x49 & 0x1ffffff); { u32 x80; u8/*bool*/ x81 = 
addcarryx_u25(x77, x41, x78, &x80); { u32 x82 = (x49 & 0x3ffffff); { u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84); { u32 x86 = (x49 & 0x1ffffff); { u32 x88; addcarryx_u25(x85, x47, x86, &x88); out[0] = x52; out[1] = x56; out[2] = x60; out[3] = x64; out[4] = x68; out[5] = x72; out[6] = x76; out[7] = x80; out[8] = x84; out[9] = x88; }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} } static __always_inline void fe_tobytes(u8 s[32], const fe *f) { u32 h[10]; fe_freeze(h, f->v); s[0] = h[0] >> 0; s[1] = h[0] >> 8; s[2] = h[0] >> 16; s[3] = (h[0] >> 24) | (h[1] << 2); s[4] = h[1] >> 6; s[5] = h[1] >> 14; s[6] = (h[1] >> 22) | (h[2] << 3); s[7] = h[2] >> 5; s[8] = h[2] >> 13; s[9] = (h[2] >> 21) | (h[3] << 5); s[10] = h[3] >> 3; s[11] = h[3] >> 11; s[12] = (h[3] >> 19) | (h[4] << 6); s[13] = h[4] >> 2; s[14] = h[4] >> 10; s[15] = h[4] >> 18; s[16] = h[5] >> 0; s[17] = h[5] >> 8; s[18] = h[5] >> 16; s[19] = (h[5] >> 24) | (h[6] << 1); s[20] = h[6] >> 7; s[21] = h[6] >> 15; s[22] = (h[6] >> 23) | (h[7] << 3); s[23] = h[7] >> 5; s[24] = h[7] >> 13; s[25] = (h[7] >> 21) | (h[8] << 4); s[26] = h[8] >> 4; s[27] = h[8] >> 12; s[28] = (h[8] >> 20) | (h[9] << 6); s[29] = h[9] >> 2; s[30] = h[9] >> 10; s[31] = h[9] >> 18; } /* h = f */ static __always_inline void fe_copy(fe *h, const fe *f) { memmove(h, f, sizeof(u32) * 10); } static __always_inline void fe_copy_lt(fe_loose *h, const fe *f) { memmove(h, f, sizeof(u32) * 10); } /* h = 0 */ static __always_inline void fe_0(fe *h) { memset(h, 0, sizeof(u32) * 10); } /* h = 1 */ static __always_inline void fe_1(fe *h) { memset(h, 0, sizeof(u32) * 10); h->v[0] = 1; } static noinline void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) { { const u32 x20 = in1[9]; { const u32 x21 = in1[8]; { const u32 x19 = in1[7]; { const u32 x17 = in1[6]; { const u32 x15 = in1[5]; { const u32 x13 = in1[4]; { const u32 x11 = in1[3]; { const u32 x9 = in1[2]; { const u32 x7 = in1[1]; { const u32 x5 = in1[0]; { const u32 x38 = 
in2[9]; { const u32 x39 = in2[8]; { const u32 x37 = in2[7]; { const u32 x35 = in2[6]; { const u32 x33 = in2[5]; { const u32 x31 = in2[4]; { const u32 x29 = in2[3]; { const u32 x27 = in2[2]; { const u32 x25 = in2[1]; { const u32 x23 = in2[0]; out[0] = (x5 + x23); out[1] = (x7 + x25); out[2] = (x9 + x27); out[3] = (x11 + x29); out[4] = (x13 + x31); out[5] = (x15 + x33); out[6] = (x17 + x35); out[7] = (x19 + x37); out[8] = (x21 + x39); out[9] = (x20 + x38); }}}}}}}}}}}}}}}}}}}} } /* h = f + g * Can overlap h with f or g. */ static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g) { fe_add_impl(h->v, f->v, g->v); } static noinline void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) { { const u32 x20 = in1[9]; { const u32 x21 = in1[8]; { const u32 x19 = in1[7]; { const u32 x17 = in1[6]; { const u32 x15 = in1[5]; { const u32 x13 = in1[4]; { const u32 x11 = in1[3]; { const u32 x9 = in1[2]; { const u32 x7 = in1[1]; { const u32 x5 = in1[0]; { const u32 x38 = in2[9]; { const u32 x39 = in2[8]; { const u32 x37 = in2[7]; { const u32 x35 = in2[6]; { const u32 x33 = in2[5]; { const u32 x31 = in2[4]; { const u32 x29 = in2[3]; { const u32 x27 = in2[2]; { const u32 x25 = in2[1]; { const u32 x23 = in2[0]; out[0] = ((0x7ffffda + x5) - x23); out[1] = ((0x3fffffe + x7) - x25); out[2] = ((0x7fffffe + x9) - x27); out[3] = ((0x3fffffe + x11) - x29); out[4] = ((0x7fffffe + x13) - x31); out[5] = ((0x3fffffe + x15) - x33); out[6] = ((0x7fffffe + x17) - x35); out[7] = ((0x3fffffe + x19) - x37); out[8] = ((0x7fffffe + x21) - x39); out[9] = ((0x3fffffe + x20) - x38); }}}}}}}}}}}}}}}}}}}} } /* h = f - g * Can overlap h with f or g. 
*/ static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g) { fe_sub_impl(h->v, f->v, g->v); } static noinline void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) { { const u32 x20 = in1[9]; { const u32 x21 = in1[8]; { const u32 x19 = in1[7]; { const u32 x17 = in1[6]; { const u32 x15 = in1[5]; { const u32 x13 = in1[4]; { const u32 x11 = in1[3]; { const u32 x9 = in1[2]; { const u32 x7 = in1[1]; { const u32 x5 = in1[0]; { const u32 x38 = in2[9]; { const u32 x39 = in2[8]; { const u32 x37 = in2[7]; { const u32 x35 = in2[6]; { const u32 x33 = in2[5]; { const u32 x31 = in2[4]; { const u32 x29 = in2[3]; { const u32 x27 = in2[2]; { const u32 x25 = in2[1]; { const u32 x23 = in2[0]; { u64 x40 = ((u64)x23 * x5); { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5)); { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); { u64 x50 = (((((0x2 * 
((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21)); { u64 x58 = ((u64)(0x2 * x38) * x20); { u64 x59 = (x48 + (x58 << 0x4)); { u64 x60 = (x59 + (x58 << 0x1)); { u64 x61 = (x60 + x58); { u64 x62 = (x47 + (x57 << 0x4)); { u64 x63 = (x62 + (x57 << 0x1)); { u64 x64 = (x63 + x57); { u64 x65 = (x46 + (x56 << 0x4)); { u64 x66 = (x65 + (x56 << 0x1)); { u64 x67 = (x66 + x56); { u64 x68 = (x45 + (x55 << 0x4)); { u64 x69 = (x68 + (x55 << 0x1)); { u64 x70 = (x69 + x55); { u64 x71 = (x44 + (x54 << 0x4)); { u64 x72 = (x71 + (x54 << 0x1)); { u64 x73 = (x72 + x54); { u64 x74 = (x43 + (x53 << 0x4)); { u64 x75 = (x74 + (x53 << 0x1)); { u64 x76 = (x75 + x53); { u64 x77 = (x42 + (x52 << 0x4)); { u64 x78 = (x77 + (x52 << 0x1)); { u64 x79 = (x78 + x52); { u64 x80 = (x41 + (x51 << 0x4)); { u64 x81 = (x80 + (x51 << 0x1)); { u64 x82 = (x81 + x51); { u64 x83 = (x40 + (x50 << 0x4)); { u64 x84 = (x83 + (x50 << 0x1)); { u64 x85 = (x84 + x50); { u64 x86 = (x85 >> 0x1a); { u32 x87 = ((u32)x85 & 0x3ffffff); { u64 x88 = (x86 + x82); { u64 x89 = (x88 >> 0x19); { u32 
x90 = ((u32)x88 & 0x1ffffff); { u64 x91 = (x89 + x79); { u64 x92 = (x91 >> 0x1a); { u32 x93 = ((u32)x91 & 0x3ffffff); { u64 x94 = (x92 + x76); { u64 x95 = (x94 >> 0x19); { u32 x96 = ((u32)x94 & 0x1ffffff); { u64 x97 = (x95 + x73); { u64 x98 = (x97 >> 0x1a); { u32 x99 = ((u32)x97 & 0x3ffffff); { u64 x100 = (x98 + x70); { u64 x101 = (x100 >> 0x19); { u32 x102 = ((u32)x100 & 0x1ffffff); { u64 x103 = (x101 + x67); { u64 x104 = (x103 >> 0x1a); { u32 x105 = ((u32)x103 & 0x3ffffff); { u64 x106 = (x104 + x64); { u64 x107 = (x106 >> 0x19); { u32 x108 = ((u32)x106 & 0x1ffffff); { u64 x109 = (x107 + x61); { u64 x110 = (x109 >> 0x1a); { u32 x111 = ((u32)x109 & 0x3ffffff); { u64 x112 = (x110 + x49); { u64 x113 = (x112 >> 0x19); { u32 x114 = ((u32)x112 & 0x1ffffff); { u64 x115 = (x87 + (0x13 * x113)); { u32 x116 = (u32) (x115 >> 0x1a); { u32 x117 = ((u32)x115 & 0x3ffffff); { u32 x118 = (x116 + x90); { u32 x119 = (x118 >> 0x19); { u32 x120 = (x118 & 0x1ffffff); out[0] = x117; out[1] = x120; out[2] = (x119 + x93); out[3] = x96; out[4] = x99; out[5] = x102; out[6] = x105; out[7] = x108; out[8] = x111; out[9] = x114; }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} } static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g) { fe_mul_impl(h->v, f->v, g->v); } static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g) { fe_mul_impl(h->v, f->v, g->v); } static __always_inline void fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g) { fe_mul_impl(h->v, f->v, g->v); } static noinline void fe_sqr_impl(u32 out[10], const u32 in1[10]) { { const u32 x17 = in1[9]; { const u32 x18 = in1[8]; { const u32 x16 = in1[7]; { const u32 x14 = in1[6]; { const u32 x12 = in1[5]; { const u32 x10 = in1[4]; { const u32 x8 = in1[3]; { const u32 x6 = in1[2]; { const u32 x4 = in1[1]; { const u32 x2 = in1[0]; { u64 x19 = ((u64)x2 * x2); { u64 x20 = ((u64)(0x2 * x2) * x4); { u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * 
x6))); { u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8))); { u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10)); { u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12))); { u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12))); { u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16))); { u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12)))))); { u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17))); { u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17))))); { u64 x30 = (0x2 * (((((u64)x12 * x14) + ((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17))); { u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17)))))); { u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17))); { u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17))); { u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17))); { u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17)); { u64 x36 = ((u64)(0x2 * x18) * x17); { u64 x37 = ((u64)(0x2 * x17) * x17); { u64 x38 = (x27 + (x37 << 0x4)); { u64 x39 = (x38 + (x37 << 0x1)); { u64 x40 = (x39 + x37); { u64 x41 = (x26 + (x36 << 0x4)); { u64 x42 = (x41 + (x36 << 0x1)); { u64 x43 = (x42 + x36); { u64 x44 = (x25 + (x35 << 0x4)); { u64 x45 = (x44 + (x35 << 0x1)); { u64 x46 = (x45 + x35); { u64 x47 = (x24 + (x34 << 0x4)); { u64 x48 = (x47 + (x34 << 0x1)); { u64 x49 = (x48 + x34); { u64 x50 = (x23 + (x33 << 0x4)); { u64 x51 = (x50 + (x33 << 0x1)); { u64 x52 = (x51 + x33); { u64 x53 = (x22 + (x32 << 0x4)); { u64 x54 = (x53 + (x32 << 0x1)); { u64 x55 = (x54 + x32); { u64 x56 = (x21 + (x31 << 0x4)); { u64 x57 = (x56 + (x31 << 0x1)); { u64 x58 = 
(x57 + x31); { u64 x59 = (x20 + (x30 << 0x4)); { u64 x60 = (x59 + (x30 << 0x1)); { u64 x61 = (x60 + x30); { u64 x62 = (x19 + (x29 << 0x4)); { u64 x63 = (x62 + (x29 << 0x1)); { u64 x64 = (x63 + x29); { u64 x65 = (x64 >> 0x1a); { u32 x66 = ((u32)x64 & 0x3ffffff); { u64 x67 = (x65 + x61); { u64 x68 = (x67 >> 0x19); { u32 x69 = ((u32)x67 & 0x1ffffff); { u64 x70 = (x68 + x58); { u64 x71 = (x70 >> 0x1a); { u32 x72 = ((u32)x70 & 0x3ffffff); { u64 x73 = (x71 + x55); { u64 x74 = (x73 >> 0x19); { u32 x75 = ((u32)x73 & 0x1ffffff); { u64 x76 = (x74 + x52); { u64 x77 = (x76 >> 0x1a); { u32 x78 = ((u32)x76 & 0x3ffffff); { u64 x79 = (x77 + x49); { u64 x80 = (x79 >> 0x19); { u32 x81 = ((u32)x79 & 0x1ffffff); { u64 x82 = (x80 + x46); { u64 x83 = (x82 >> 0x1a); { u32 x84 = ((u32)x82 & 0x3ffffff); { u64 x85 = (x83 + x43); { u64 x86 = (x85 >> 0x19); { u32 x87 = ((u32)x85 & 0x1ffffff); { u64 x88 = (x86 + x40); { u64 x89 = (x88 >> 0x1a); { u32 x90 = ((u32)x88 & 0x3ffffff); { u64 x91 = (x89 + x28); { u64 x92 = (x91 >> 0x19); { u32 x93 = ((u32)x91 & 0x1ffffff); { u64 x94 = (x66 + (0x13 * x92)); { u32 x95 = (u32) (x94 >> 0x1a); { u32 x96 = ((u32)x94 & 0x3ffffff); { u32 x97 = (x95 + x69); { u32 x98 = (x97 >> 0x19); { u32 x99 = (x97 & 0x1ffffff); out[0] = x96; out[1] = x99; out[2] = (x98 + x72); out[3] = x75; out[4] = x78; out[5] = x81; out[6] = x84; out[7] = x87; out[8] = x90; out[9] = x93; }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} } static __always_inline void fe_sq_tl(fe *h, const fe_loose *f) { fe_sqr_impl(h->v, f->v); } static __always_inline void fe_sq_tt(fe *h, const fe *f) { fe_sqr_impl(h->v, f->v); } static __always_inline void fe_loose_invert(fe *out, const fe_loose *z) { fe t0; fe t1; fe t2; fe t3; int i; fe_sq_tl(&t0, z); fe_sq_tt(&t1, &t0); for (i = 1; i < 2; ++i) fe_sq_tt(&t1, &t1); fe_mul_tlt(&t1, z, &t1); fe_mul_ttt(&t0, &t0, &t1); fe_sq_tt(&t2, &t0); fe_mul_ttt(&t1, &t1, &t2); fe_sq_tt(&t2, &t1); for (i = 1; i < 5; ++i) 
fe_sq_tt(&t2, &t2); fe_mul_ttt(&t1, &t2, &t1); fe_sq_tt(&t2, &t1); for (i = 1; i < 10; ++i) fe_sq_tt(&t2, &t2); fe_mul_ttt(&t2, &t2, &t1); fe_sq_tt(&t3, &t2); for (i = 1; i < 20; ++i) fe_sq_tt(&t3, &t3); fe_mul_ttt(&t2, &t3, &t2); fe_sq_tt(&t2, &t2); for (i = 1; i < 10; ++i) fe_sq_tt(&t2, &t2); fe_mul_ttt(&t1, &t2, &t1); fe_sq_tt(&t2, &t1); for (i = 1; i < 50; ++i) fe_sq_tt(&t2, &t2); fe_mul_ttt(&t2, &t2, &t1); fe_sq_tt(&t3, &t2); for (i = 1; i < 100; ++i) fe_sq_tt(&t3, &t3); fe_mul_ttt(&t2, &t3, &t2); fe_sq_tt(&t2, &t2); for (i = 1; i < 50; ++i) fe_sq_tt(&t2, &t2); fe_mul_ttt(&t1, &t2, &t1); fe_sq_tt(&t1, &t1); for (i = 1; i < 5; ++i) fe_sq_tt(&t1, &t1); fe_mul_ttt(out, &t1, &t0); } static __always_inline void fe_invert(fe *out, const fe *z) { fe_loose l; fe_copy_lt(&l, z); fe_loose_invert(out, &l); } /* Replace (f,g) with (g,f) if b == 1; * replace (f,g) with (f,g) if b == 0. * * Preconditions: b in {0,1} */ static noinline void fe_cswap(fe *f, fe *g, unsigned int b) { unsigned i; b = 0 - b; for (i = 0; i < 10; i++) { u32 x = f->v[i] ^ g->v[i]; x &= b; f->v[i] ^= x; g->v[i] ^= x; } } /* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0.*/ static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10]) { { const u32 x20 = in1[9]; { const u32 x21 = in1[8]; { const u32 x19 = in1[7]; { const u32 x17 = in1[6]; { const u32 x15 = in1[5]; { const u32 x13 = in1[4]; { const u32 x11 = in1[3]; { const u32 x9 = in1[2]; { const u32 x7 = in1[1]; { const u32 x5 = in1[0]; { const u32 x38 = 0; { const u32 x39 = 0; { const u32 x37 = 0; { const u32 x35 = 0; { const u32 x33 = 0; { const u32 x31 = 0; { const u32 x29 = 0; { const u32 x27 = 0; { const u32 x25 = 0; { const u32 x23 = 121666; { u64 x40 = ((u64)x23 * x5); { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); { u64 x44 = (((((u64)x27 * 
x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5)); { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * 
x21)); { u64 x58 = ((u64)(0x2 * x38) * x20); { u64 x59 = (x48 + (x58 << 0x4)); { u64 x60 = (x59 + (x58 << 0x1)); { u64 x61 = (x60 + x58); { u64 x62 = (x47 + (x57 << 0x4)); { u64 x63 = (x62 + (x57 << 0x1)); { u64 x64 = (x63 + x57); { u64 x65 = (x46 + (x56 << 0x4)); { u64 x66 = (x65 + (x56 << 0x1)); { u64 x67 = (x66 + x56); { u64 x68 = (x45 + (x55 << 0x4)); { u64 x69 = (x68 + (x55 << 0x1)); { u64 x70 = (x69 + x55); { u64 x71 = (x44 + (x54 << 0x4)); { u64 x72 = (x71 + (x54 << 0x1)); { u64 x73 = (x72 + x54); { u64 x74 = (x43 + (x53 << 0x4)); { u64 x75 = (x74 + (x53 << 0x1)); { u64 x76 = (x75 + x53); { u64 x77 = (x42 + (x52 << 0x4)); { u64 x78 = (x77 + (x52 << 0x1)); { u64 x79 = (x78 + x52); { u64 x80 = (x41 + (x51 << 0x4)); { u64 x81 = (x80 + (x51 << 0x1)); { u64 x82 = (x81 + x51); { u64 x83 = (x40 + (x50 << 0x4)); { u64 x84 = (x83 + (x50 << 0x1)); { u64 x85 = (x84 + x50); { u64 x86 = (x85 >> 0x1a); { u32 x87 = ((u32)x85 & 0x3ffffff); { u64 x88 = (x86 + x82); { u64 x89 = (x88 >> 0x19); { u32 x90 = ((u32)x88 & 0x1ffffff); { u64 x91 = (x89 + x79); { u64 x92 = (x91 >> 0x1a); { u32 x93 = ((u32)x91 & 0x3ffffff); { u64 x94 = (x92 + x76); { u64 x95 = (x94 >> 0x19); { u32 x96 = ((u32)x94 & 0x1ffffff); { u64 x97 = (x95 + x73); { u64 x98 = (x97 >> 0x1a); { u32 x99 = ((u32)x97 & 0x3ffffff); { u64 x100 = (x98 + x70); { u64 x101 = (x100 >> 0x19); { u32 x102 = ((u32)x100 & 0x1ffffff); { u64 x103 = (x101 + x67); { u64 x104 = (x103 >> 0x1a); { u32 x105 = ((u32)x103 & 0x3ffffff); { u64 x106 = (x104 + x64); { u64 x107 = (x106 >> 0x19); { u32 x108 = ((u32)x106 & 0x1ffffff); { u64 x109 = (x107 + x61); { u64 x110 = (x109 >> 0x1a); { u32 x111 = ((u32)x109 & 0x3ffffff); { u64 x112 = (x110 + x49); { u64 x113 = (x112 >> 0x19); { u32 x114 = ((u32)x112 & 0x1ffffff); { u64 x115 = (x87 + (0x13 * x113)); { u32 x116 = (u32) (x115 >> 0x1a); { u32 x117 = ((u32)x115 & 0x3ffffff); { u32 x118 = (x116 + x90); { u32 x119 = (x118 >> 0x19); { u32 x120 = (x118 & 0x1ffffff); out[0] = x117; out[1] = x120; 
out[2] = (x119 + x93); out[3] = x96; out[4] = x99; out[5] = x102; out[6] = x105; out[7] = x108; out[8] = x111; out[9] = x114; }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} } static __always_inline void fe_mul121666(fe *h, const fe_loose *f) { fe_mul_121666_impl(h->v, f->v); } void curve25519_generic(u8 out[CURVE25519_KEY_SIZE], const u8 scalar[CURVE25519_KEY_SIZE], const u8 point[CURVE25519_KEY_SIZE]) { fe x1, x2, z2, x3, z3; fe_loose x2l, z2l, x3l; unsigned swap = 0; int pos; u8 e[32]; memcpy(e, scalar, 32); curve25519_clamp_secret(e); /* The following implementation was transcribed to Coq and proven to * correspond to unary scalar multiplication in affine coordinates given * that x1 != 0 is the x coordinate of some point on the curve. It was * also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives * z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was * quantified over the underlying field, so it applies to Curve25519 * itself and the quadratic twist of Curve25519. It was not proven in * Coq that prime-field arithmetic correctly simulates extension-field * arithmetic on prime-field values. The decoding of the byte array * representation of e was not considered. 
* * Specification of Montgomery curves in affine coordinates: * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27> * * Proof that these form a group that is isomorphic to a Weierstrass * curve: * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35> * * Coq transcription and correctness proof of the loop * (where scalarbits=255): * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118> * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278> * preconditions: 0 <= e < 2^255 (not necessarily e < order), * fe_invert(0) = 0 */ fe_frombytes(&x1, point); fe_1(&x2); fe_0(&z2); fe_copy(&x3, &x1); fe_1(&z3); for (pos = 254; pos >= 0; --pos) { fe tmp0, tmp1; fe_loose tmp0l, tmp1l; /* loop invariant as of right before the test, for the case * where x1 != 0: * pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3 * is nonzero * let r := e >> (pos+1) in the following equalities of * projective points: * to_xz (r*P) === if swap then (x3, z3) else (x2, z2) * to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3) * x1 is the nonzero x coordinate of the nonzero * point (r*P-(r+1)*P) */ unsigned b = 1 & (e[pos / 8] >> (pos & 7)); swap ^= b; fe_cswap(&x2, &x3, swap); fe_cswap(&z2, &z3, swap); swap = b; /* Coq transcription of ladderstep formula (called from * transcribed loop): * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89> * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131> * x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217> * x1 = 0 
<https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147> */ fe_sub(&tmp0l, &x3, &z3); fe_sub(&tmp1l, &x2, &z2); fe_add(&x2l, &x2, &z2); fe_add(&z2l, &x3, &z3); fe_mul_tll(&z3, &tmp0l, &x2l); fe_mul_tll(&z2, &z2l, &tmp1l); fe_sq_tl(&tmp0, &tmp1l); fe_sq_tl(&tmp1, &x2l); fe_add(&x3l, &z3, &z2); fe_sub(&z2l, &z3, &z2); fe_mul_ttt(&x2, &tmp1, &tmp0); fe_sub(&tmp1l, &tmp1, &tmp0); fe_sq_tl(&z2, &z2l); fe_mul121666(&z3, &tmp1l); fe_sq_tl(&x3, &x3l); fe_add(&tmp0l, &tmp0, &z3); fe_mul_ttt(&z3, &x1, &z2); fe_mul_tll(&z2, &tmp1l, &tmp0l); } /* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3) * else (x2, z2) */ fe_cswap(&x2, &x3, swap); fe_cswap(&z2, &z3, swap); fe_invert(&z2, &z2); fe_mul_ttt(&x2, &x2, &z2); fe_tobytes(out, &x2); memzero_explicit(&x1, sizeof(x1)); memzero_explicit(&x2, sizeof(x2)); memzero_explicit(&z2, sizeof(z2)); memzero_explicit(&x3, sizeof(x3)); memzero_explicit(&z3, sizeof(z3)); memzero_explicit(&x2l, sizeof(x2l)); memzero_explicit(&z2l, sizeof(z2l)); memzero_explicit(&x3l, sizeof(x3l)); memzero_explicit(&e, sizeof(e)); }
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
 * Copyright 2017-2022 Toradex
 */

/* Colibri AD0 to AD3 */
&adc1 {
	status = "okay";
};

/* Colibri SSP */
&ecspi3 {
	cs-gpios = <
		&gpio4 11 GPIO_ACTIVE_LOW /* SODIMM 86 / regular SSPFRM as UNO_SPI_CS or */
		&gpio4 23 GPIO_ACTIVE_LOW /* SODIMM 65 / already muxed pinctrl_gpio2 as SPI_CE0_N */
		&gpio4 22 GPIO_ACTIVE_LOW /* SODIMM 85 / already muxed pinctrl_gpio2 as SPI_CE1_N */
	>;
	status = "okay";
};

/* Colibri Fast Ethernet */
&fec1 {
	status = "okay";
};

/* Colibri I2C: I2C3_SDA/SCL on SODIMM 194/196 */
&i2c4 {
	status = "okay";
};

/* Colibri PWM<A> */
&pwm1 {
	status = "okay";
};

/* Colibri PWM<B> */
&pwm2 {
	status = "okay";
};

/* Colibri PWM<C> */
&pwm3 {
	status = "okay";
};

/* Colibri PWM<D> */
&pwm4 {
	status = "okay";
};

/* M41T0M6 real time clock */
&rtc {
	status = "okay";
};

/* Colibri UART_A */
&uart1 {
	status = "okay";
};

/* Colibri UART_B */
&uart2 {
	status = "okay";
};

/* Colibri UART_C */
&uart3 {
	status = "okay";
};

/* Colibri USBC */
&usbotg1 {
	disable-over-current;
	status = "okay";
};

/* Colibri MMC/SD */
&usdhc1 {
	status = "okay";
};
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for CS4231 sound chips found on Sparcs. * Copyright (C) 2002, 2008 David S. Miller <[email protected]> * * Based entirely upon drivers/sbus/audio/cs4231.c which is: * Copyright (C) 1996, 1997, 1998 Derrick J Brashear ([email protected]) * and also sound/isa/cs423x/cs4231_lib.c which is: * Copyright (c) by Jaroslav Kysela <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/info.h> #include <sound/control.h> #include <sound/timer.h> #include <sound/initval.h> #include <sound/pcm_params.h> #ifdef CONFIG_SBUS #define SBUS_SUPPORT #endif #if defined(CONFIG_PCI) && defined(CONFIG_SPARC64) #define EBUS_SUPPORT #include <linux/pci.h> #include <asm/ebus_dma.h> #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ /* Enable this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Sun CS4231 soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Sun CS4231 soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Sun CS4231 soundcard."); MODULE_AUTHOR("Jaroslav Kysela, Derrick J. Brashear and David S. 
Miller"); MODULE_DESCRIPTION("Sun CS4231"); MODULE_LICENSE("GPL"); #ifdef SBUS_SUPPORT struct sbus_dma_info { spinlock_t lock; /* DMA access lock */ int dir; void __iomem *regs; }; #endif struct snd_cs4231; struct cs4231_dma_control { void (*prepare)(struct cs4231_dma_control *dma_cont, int dir); void (*enable)(struct cs4231_dma_control *dma_cont, int on); int (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len); unsigned int (*address)(struct cs4231_dma_control *dma_cont); #ifdef EBUS_SUPPORT struct ebus_dma_info ebus_info; #endif #ifdef SBUS_SUPPORT struct sbus_dma_info sbus_info; #endif }; struct snd_cs4231 { spinlock_t lock; /* registers access lock */ void __iomem *port; struct cs4231_dma_control p_dma; struct cs4231_dma_control c_dma; u32 flags; #define CS4231_FLAG_EBUS 0x00000001 #define CS4231_FLAG_PLAYBACK 0x00000002 #define CS4231_FLAG_CAPTURE 0x00000004 struct snd_card *card; struct snd_pcm *pcm; struct snd_pcm_substream *playback_substream; unsigned int p_periods_sent; struct snd_pcm_substream *capture_substream; unsigned int c_periods_sent; struct snd_timer *timer; unsigned short mode; #define CS4231_MODE_NONE 0x0000 #define CS4231_MODE_PLAY 0x0001 #define CS4231_MODE_RECORD 0x0002 #define CS4231_MODE_TIMER 0x0004 #define CS4231_MODE_OPEN (CS4231_MODE_PLAY | CS4231_MODE_RECORD | \ CS4231_MODE_TIMER) unsigned char image[32]; /* registers image */ int mce_bit; int calibrate_mute; struct mutex mce_mutex; /* mutex for mce register */ struct mutex open_mutex; /* mutex for ALSA open/close */ struct platform_device *op; unsigned int irq[2]; unsigned int regs_size; struct snd_cs4231 *next; }; /* Eventually we can use sound/isa/cs423x/cs4231_lib.c directly, but for * now.... -DaveM */ /* IO ports */ #include <sound/cs4231-regs.h> /* XXX offsets are different than PC ISA chips... */ #define CS4231U(chip, x) ((chip)->port + ((c_d_c_CS4231##x) << 2)) /* SBUS DMA register defines. 
*/ #define APCCSR 0x10UL /* APC DMA CSR */ #define APCCVA 0x20UL /* APC Capture DMA Address */ #define APCCC 0x24UL /* APC Capture Count */ #define APCCNVA 0x28UL /* APC Capture DMA Next Address */ #define APCCNC 0x2cUL /* APC Capture Next Count */ #define APCPVA 0x30UL /* APC Play DMA Address */ #define APCPC 0x34UL /* APC Play Count */ #define APCPNVA 0x38UL /* APC Play DMA Next Address */ #define APCPNC 0x3cUL /* APC Play Next Count */ /* Defines for SBUS DMA-routines */ #define APCVA 0x0UL /* APC DMA Address */ #define APCC 0x4UL /* APC Count */ #define APCNVA 0x8UL /* APC DMA Next Address */ #define APCNC 0xcUL /* APC Next Count */ #define APC_PLAY 0x30UL /* Play registers start at 0x30 */ #define APC_RECORD 0x20UL /* Record registers start at 0x20 */ /* APCCSR bits */ #define APC_INT_PENDING 0x800000 /* Interrupt Pending */ #define APC_PLAY_INT 0x400000 /* Playback interrupt */ #define APC_CAPT_INT 0x200000 /* Capture interrupt */ #define APC_GENL_INT 0x100000 /* General interrupt */ #define APC_XINT_ENA 0x80000 /* General ext int. enable */ #define APC_XINT_PLAY 0x40000 /* Playback ext intr */ #define APC_XINT_CAPT 0x20000 /* Capture ext intr */ #define APC_XINT_GENL 0x10000 /* Error ext intr */ #define APC_XINT_EMPT 0x8000 /* Pipe empty interrupt (0 write to pva) */ #define APC_XINT_PEMP 0x4000 /* Play pipe empty (pva and pnva not set) */ #define APC_XINT_PNVA 0x2000 /* Playback NVA dirty */ #define APC_XINT_PENA 0x1000 /* play pipe empty Int enable */ #define APC_XINT_COVF 0x800 /* Cap data dropped on floor */ #define APC_XINT_CNVA 0x400 /* Capture NVA dirty */ #define APC_XINT_CEMP 0x200 /* Capture pipe empty (cva and cnva not set) */ #define APC_XINT_CENA 0x100 /* Cap. 
pipe empty int enable */ #define APC_PPAUSE 0x80 /* Pause the play DMA */ #define APC_CPAUSE 0x40 /* Pause the capture DMA */ #define APC_CDC_RESET 0x20 /* CODEC RESET */ #define APC_PDMA_READY 0x08 /* Play DMA Go */ #define APC_CDMA_READY 0x04 /* Capture DMA Go */ #define APC_CHIP_RESET 0x01 /* Reset the chip */ /* EBUS DMA register offsets */ #define EBDMA_CSR 0x00UL /* Control/Status */ #define EBDMA_ADDR 0x04UL /* DMA Address */ #define EBDMA_COUNT 0x08UL /* DMA Count */ /* * Some variables */ static const unsigned char freq_bits[14] = { /* 5510 */ 0x00 | CS4231_XTAL2, /* 6620 */ 0x0E | CS4231_XTAL2, /* 8000 */ 0x00 | CS4231_XTAL1, /* 9600 */ 0x0E | CS4231_XTAL1, /* 11025 */ 0x02 | CS4231_XTAL2, /* 16000 */ 0x02 | CS4231_XTAL1, /* 18900 */ 0x04 | CS4231_XTAL2, /* 22050 */ 0x06 | CS4231_XTAL2, /* 27042 */ 0x04 | CS4231_XTAL1, /* 32000 */ 0x06 | CS4231_XTAL1, /* 33075 */ 0x0C | CS4231_XTAL2, /* 37800 */ 0x08 | CS4231_XTAL2, /* 44100 */ 0x0A | CS4231_XTAL2, /* 48000 */ 0x0C | CS4231_XTAL1 }; static const unsigned int rates[14] = { 5510, 6620, 8000, 9600, 11025, 16000, 18900, 22050, 27042, 32000, 33075, 37800, 44100, 48000 }; static const struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, }; static int snd_cs4231_xrate(struct snd_pcm_runtime *runtime) { return snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); } static const unsigned char snd_cs4231_original_image[32] = { 0x00, /* 00/00 - lic */ 0x00, /* 01/01 - ric */ 0x9f, /* 02/02 - la1ic */ 0x9f, /* 03/03 - ra1ic */ 0x9f, /* 04/04 - la2ic */ 0x9f, /* 05/05 - ra2ic */ 0xbf, /* 06/06 - loc */ 0xbf, /* 07/07 - roc */ 0x20, /* 08/08 - pdfr */ CS4231_AUTOCALIB, /* 09/09 - ic */ 0x00, /* 0a/10 - pc */ 0x00, /* 0b/11 - ti */ CS4231_MODE2, /* 0c/12 - mi */ 0x00, /* 0d/13 - lbc */ 0x00, /* 0e/14 - pbru */ 0x00, /* 0f/15 - pbrl */ 0x80, /* 10/16 - afei */ 0x01, /* 11/17 - afeii */ 0x9f, /* 12/18 - llic */ 0x9f, /* 13/19 - rlic */ 
0x00,	/* 14/20 - tlb */
0x00,	/* 15/21 - thb */
0x00,	/* 16/22 - la3mic/reserved */
0x00,	/* 17/23 - ra3mic/reserved */
0x00,	/* 18/24 - afs */
0x00,	/* 19/25 - lamoc/version */
0x00,	/* 1a/26 - mioc */
0x00,	/* 1b/27 - ramoc/reserved */
0x20,	/* 1c/28 - cdfr */
0x00,	/* 1d/29 - res4 */
0x00,	/* 1e/30 - cbru */
0x00,	/* 1f/31 - cbrl */
};

/*
 * Bus-aware register read: the same codec can sit behind EBUS (PCI,
 * little-endian accessors) or SBUS (big-endian accessors), selected by
 * the CS4231_FLAG_EBUS flag set at probe time.
 */
static u8 __cs4231_readb(struct snd_cs4231 *cp, void __iomem *reg_addr)
{
	if (cp->flags & CS4231_FLAG_EBUS)
		return readb(reg_addr);
	else
		return sbus_readb(reg_addr);
}

/* Bus-aware register write; counterpart of __cs4231_readb() above. */
static void __cs4231_writeb(struct snd_cs4231 *cp, u8 val,
			    void __iomem *reg_addr)
{
	if (cp->flags & CS4231_FLAG_EBUS)
		return writeb(val, reg_addr);
	else
		return sbus_writeb(val, reg_addr);
}

/*
 *  Basic I/O functions
 */

/*
 * Busy-wait (up to 250 * 100us) for the codec to clear the INIT bit in
 * REGSEL, i.e. to become ready to accept indirect register accesses.
 * Gives up silently on timeout; callers log via CONFIG_SND_DEBUG checks.
 */
static void snd_cs4231_ready(struct snd_cs4231 *chip)
{
	int timeout;

	for (timeout = 250; timeout > 0; timeout--) {
		int val = __cs4231_readb(chip, CS4231U(chip, REGSEL));
		if ((val & CS4231_INIT) == 0)
			break;
		udelay(100);
	}
}

/*
 * Raw indirect register write: select register @reg (preserving the MCE
 * bit the driver currently holds in chip->mce_bit), then write @value.
 * Does not update the shadow image; see snd_cs4231_out() for that.
 */
static void snd_cs4231_dout(struct snd_cs4231 *chip, unsigned char reg,
			    unsigned char value)
{
	snd_cs4231_ready(chip);
#ifdef CONFIG_SND_DEBUG
	if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT)
		dev_dbg(chip->card->dev,
			"out: auto calibration time out - reg = 0x%x, value = 0x%x\n",
			reg, value);
#endif
	/* wmb()/mb() order the index-select write before the data write */
	__cs4231_writeb(chip, chip->mce_bit | reg, CS4231U(chip, REGSEL));
	wmb();
	__cs4231_writeb(chip, value, CS4231U(chip, REG));
	mb();
}

/*
 * Read-modify-write via the shadow image: keep the bits in @mask, OR in
 * @value, and only touch the hardware when not in calibrate-mute mode
 * (during mute the image is updated and flushed later).
 */
static inline void snd_cs4231_outm(struct snd_cs4231 *chip, unsigned char reg,
				   unsigned char mask, unsigned char value)
{
	unsigned char tmp = (chip->image[reg] & mask) | value;

	chip->image[reg] = tmp;
	if (!chip->calibrate_mute)
		snd_cs4231_dout(chip, reg, tmp);
}

/* Indirect register write that also updates the shadow image. */
static void snd_cs4231_out(struct snd_cs4231 *chip, unsigned char reg,
			   unsigned char value)
{
	snd_cs4231_dout(chip, reg, value);
	chip->image[reg] = value;
	mb();
}

/* Indirect register read: select @reg, then read the data port. */
static unsigned char snd_cs4231_in(struct snd_cs4231 *chip, unsigned char reg)
{
	snd_cs4231_ready(chip);
#ifdef CONFIG_SND_DEBUG
	if (__cs4231_readb(chip, CS4231U(chip,
REGSEL)) & CS4231_INIT) dev_dbg(chip->card->dev, "in: auto calibration time out - reg = 0x%x\n", reg); #endif __cs4231_writeb(chip, chip->mce_bit | reg, CS4231U(chip, REGSEL)); mb(); return __cs4231_readb(chip, CS4231U(chip, REG)); } /* * CS4231 detection / MCE routines */ static void snd_cs4231_busy_wait(struct snd_cs4231 *chip) { int timeout; /* looks like this sequence is proper for CS4231A chip (GUS MAX) */ for (timeout = 5; timeout > 0; timeout--) __cs4231_readb(chip, CS4231U(chip, REGSEL)); /* end of cleanup sequence */ for (timeout = 500; timeout > 0; timeout--) { int val = __cs4231_readb(chip, CS4231U(chip, REGSEL)); if ((val & CS4231_INIT) == 0) break; msleep(1); } } static void snd_cs4231_mce_up(struct snd_cs4231 *chip) { unsigned long flags; int timeout; spin_lock_irqsave(&chip->lock, flags); snd_cs4231_ready(chip); #ifdef CONFIG_SND_DEBUG if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT) dev_dbg(chip->card->dev, "mce_up - auto calibration time out (0)\n"); #endif chip->mce_bit |= CS4231_MCE; timeout = __cs4231_readb(chip, CS4231U(chip, REGSEL)); if (timeout == 0x80) dev_dbg(chip->card->dev, "mce_up [%p]: serious init problem - codec still busy\n", chip->port); if (!(timeout & CS4231_MCE)) __cs4231_writeb(chip, chip->mce_bit | (timeout & 0x1f), CS4231U(chip, REGSEL)); spin_unlock_irqrestore(&chip->lock, flags); } static void snd_cs4231_mce_down(struct snd_cs4231 *chip) { unsigned long flags, timeout; int reg; snd_cs4231_busy_wait(chip); spin_lock_irqsave(&chip->lock, flags); #ifdef CONFIG_SND_DEBUG if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT) dev_dbg(chip->card->dev, "mce_down [%p] - auto calibration time out (0)\n", CS4231U(chip, REGSEL)); #endif chip->mce_bit &= ~CS4231_MCE; reg = __cs4231_readb(chip, CS4231U(chip, REGSEL)); __cs4231_writeb(chip, chip->mce_bit | (reg & 0x1f), CS4231U(chip, REGSEL)); if (reg == 0x80) dev_dbg(chip->card->dev, "mce_down [%p]: serious init problem - codec still busy\n", chip->port); if ((reg 
& CS4231_MCE) == 0) {
		spin_unlock_irqrestore(&chip->lock, flags);
		return;
	}

	/*
	 * Wait for auto-calibration (AC) process to finish, i.e. ACI to go low.
	 */
	timeout = jiffies + msecs_to_jiffies(250);
	do {
		/* drop the lock while sleeping; re-read ACI each iteration */
		spin_unlock_irqrestore(&chip->lock, flags);
		msleep(1);
		spin_lock_irqsave(&chip->lock, flags);
		reg = snd_cs4231_in(chip, CS4231_TEST_INIT);
		reg &= CS4231_CALIB_IN_PROGRESS;
	} while (reg && time_before(jiffies, timeout));
	spin_unlock_irqrestore(&chip->lock, flags);

	if (reg)
		dev_err(chip->card->dev,
			"mce_down - auto calibration time out (2)\n");
}

/*
 * Queue period-sized DMA requests until the controller refuses more
 * (request() returning non-zero means its hardware queue is full).
 * *periods_sent tracks the next period index, wrapping at
 * runtime->periods.  Periods >= 2^24 bytes cannot be programmed into
 * the DMA engine, hence the WARN_ON guard.
 */
static void snd_cs4231_advance_dma(struct cs4231_dma_control *dma_cont,
				   struct snd_pcm_substream *substream,
				   unsigned int *periods_sent)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	while (1) {
		unsigned int period_size = snd_pcm_lib_period_bytes(substream);
		unsigned int offset = period_size * (*periods_sent);

		if (WARN_ON(period_size >= (1 << 24)))
			return;

		if (dma_cont->request(dma_cont,
				      runtime->dma_addr + offset, period_size))
			return;
		(*periods_sent) = ((*periods_sent) + 1) % runtime->periods;
	}
}

/*
 * Start or stop the playback and/or capture DMA engines selected by
 * @what (CS4231_PLAYBACK_ENABLE / CS4231_RECORD_ENABLE).  On start the
 * engine is prepared (dir 0 = play, 1 = record), enabled, and primed
 * with as many periods as it will accept.
 */
static void cs4231_dma_trigger(struct snd_pcm_substream *substream,
			       unsigned int what, int on)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	struct cs4231_dma_control *dma_cont;

	if (what & CS4231_PLAYBACK_ENABLE) {
		dma_cont = &chip->p_dma;
		if (on) {
			dma_cont->prepare(dma_cont, 0);
			dma_cont->enable(dma_cont, 1);
			snd_cs4231_advance_dma(dma_cont,
				chip->playback_substream,
				&chip->p_periods_sent);
		} else {
			dma_cont->enable(dma_cont, 0);
		}
	}
	if (what & CS4231_RECORD_ENABLE) {
		dma_cont = &chip->c_dma;
		if (on) {
			dma_cont->prepare(dma_cont, 1);
			dma_cont->enable(dma_cont, 1);
			snd_cs4231_advance_dma(dma_cont,
				chip->capture_substream,
				&chip->c_periods_sent);
		} else {
			dma_cont->enable(dma_cont, 0);
		}
	}
}

/*
 * ALSA trigger callback: handles START/STOP for all substreams linked
 * to this one, flipping the DMA engines and the IFACE_CTRL enable bits
 * together under chip->lock.
 */
static int snd_cs4231_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
	int result = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case
SNDRV_PCM_TRIGGER_STOP: { unsigned int what = 0; struct snd_pcm_substream *s; unsigned long flags; snd_pcm_group_for_each_entry(s, substream) { if (s == chip->playback_substream) { what |= CS4231_PLAYBACK_ENABLE; snd_pcm_trigger_done(s, substream); } else if (s == chip->capture_substream) { what |= CS4231_RECORD_ENABLE; snd_pcm_trigger_done(s, substream); } } spin_lock_irqsave(&chip->lock, flags); if (cmd == SNDRV_PCM_TRIGGER_START) { cs4231_dma_trigger(substream, what, 1); chip->image[CS4231_IFACE_CTRL] |= what; } else { cs4231_dma_trigger(substream, what, 0); chip->image[CS4231_IFACE_CTRL] &= ~what; } snd_cs4231_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]); spin_unlock_irqrestore(&chip->lock, flags); break; } default: result = -EINVAL; break; } return result; } /* * CODEC I/O */ static unsigned char snd_cs4231_get_rate(unsigned int rate) { int i; for (i = 0; i < 14; i++) if (rate == rates[i]) return freq_bits[i]; return freq_bits[13]; } static unsigned char snd_cs4231_get_format(struct snd_cs4231 *chip, int format, int channels) { unsigned char rformat; rformat = CS4231_LINEAR_8; switch (format) { case SNDRV_PCM_FORMAT_MU_LAW: rformat = CS4231_ULAW_8; break; case SNDRV_PCM_FORMAT_A_LAW: rformat = CS4231_ALAW_8; break; case SNDRV_PCM_FORMAT_S16_LE: rformat = CS4231_LINEAR_16; break; case SNDRV_PCM_FORMAT_S16_BE: rformat = CS4231_LINEAR_16_BIG; break; case SNDRV_PCM_FORMAT_IMA_ADPCM: rformat = CS4231_ADPCM_16; break; } if (channels > 1) rformat |= CS4231_STEREO; return rformat; } static void snd_cs4231_calibrate_mute(struct snd_cs4231 *chip, int mute) { unsigned long flags; mute = mute ? 
1 : 0; spin_lock_irqsave(&chip->lock, flags); if (chip->calibrate_mute == mute) { spin_unlock_irqrestore(&chip->lock, flags); return; } if (!mute) { snd_cs4231_dout(chip, CS4231_LEFT_INPUT, chip->image[CS4231_LEFT_INPUT]); snd_cs4231_dout(chip, CS4231_RIGHT_INPUT, chip->image[CS4231_RIGHT_INPUT]); snd_cs4231_dout(chip, CS4231_LOOPBACK, chip->image[CS4231_LOOPBACK]); } snd_cs4231_dout(chip, CS4231_AUX1_LEFT_INPUT, mute ? 0x80 : chip->image[CS4231_AUX1_LEFT_INPUT]); snd_cs4231_dout(chip, CS4231_AUX1_RIGHT_INPUT, mute ? 0x80 : chip->image[CS4231_AUX1_RIGHT_INPUT]); snd_cs4231_dout(chip, CS4231_AUX2_LEFT_INPUT, mute ? 0x80 : chip->image[CS4231_AUX2_LEFT_INPUT]); snd_cs4231_dout(chip, CS4231_AUX2_RIGHT_INPUT, mute ? 0x80 : chip->image[CS4231_AUX2_RIGHT_INPUT]); snd_cs4231_dout(chip, CS4231_LEFT_OUTPUT, mute ? 0x80 : chip->image[CS4231_LEFT_OUTPUT]); snd_cs4231_dout(chip, CS4231_RIGHT_OUTPUT, mute ? 0x80 : chip->image[CS4231_RIGHT_OUTPUT]); snd_cs4231_dout(chip, CS4231_LEFT_LINE_IN, mute ? 0x80 : chip->image[CS4231_LEFT_LINE_IN]); snd_cs4231_dout(chip, CS4231_RIGHT_LINE_IN, mute ? 0x80 : chip->image[CS4231_RIGHT_LINE_IN]); snd_cs4231_dout(chip, CS4231_MONO_CTRL, mute ? 0xc0 : chip->image[CS4231_MONO_CTRL]); chip->calibrate_mute = mute; spin_unlock_irqrestore(&chip->lock, flags); } static void snd_cs4231_playback_format(struct snd_cs4231 *chip, struct snd_pcm_hw_params *params, unsigned char pdfr) { unsigned long flags; mutex_lock(&chip->mce_mutex); snd_cs4231_calibrate_mute(chip, 1); snd_cs4231_mce_up(chip); spin_lock_irqsave(&chip->lock, flags); snd_cs4231_out(chip, CS4231_PLAYBK_FORMAT, (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) ? 
(pdfr & 0xf0) | (chip->image[CS4231_REC_FORMAT] & 0x0f) : pdfr); spin_unlock_irqrestore(&chip->lock, flags); snd_cs4231_mce_down(chip); snd_cs4231_calibrate_mute(chip, 0); mutex_unlock(&chip->mce_mutex); } static void snd_cs4231_capture_format(struct snd_cs4231 *chip, struct snd_pcm_hw_params *params, unsigned char cdfr) { unsigned long flags; mutex_lock(&chip->mce_mutex); snd_cs4231_calibrate_mute(chip, 1); snd_cs4231_mce_up(chip); spin_lock_irqsave(&chip->lock, flags); if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE)) { snd_cs4231_out(chip, CS4231_PLAYBK_FORMAT, ((chip->image[CS4231_PLAYBK_FORMAT]) & 0xf0) | (cdfr & 0x0f)); spin_unlock_irqrestore(&chip->lock, flags); snd_cs4231_mce_down(chip); snd_cs4231_mce_up(chip); spin_lock_irqsave(&chip->lock, flags); } snd_cs4231_out(chip, CS4231_REC_FORMAT, cdfr); spin_unlock_irqrestore(&chip->lock, flags); snd_cs4231_mce_down(chip); snd_cs4231_calibrate_mute(chip, 0); mutex_unlock(&chip->mce_mutex); } /* * Timer interface */ static unsigned long snd_cs4231_timer_resolution(struct snd_timer *timer) { struct snd_cs4231 *chip = snd_timer_chip(timer); return chip->image[CS4231_PLAYBK_FORMAT] & 1 ? 
/* Tail of snd_cs4231_timer_resolution(): ternary result selecting the
 * timer tick length in nanoseconds. */
9969 : 9920;
}

/*
 * snd_timer start callback: load the 16-bit tick count into the chip's
 * TIMER_HIGH/TIMER_LOW registers and set the timer-enable bit, but only
 * when the timer is currently off or the programmed period changed.
 */
static int snd_cs4231_timer_start(struct snd_timer *timer)
{
    unsigned long flags;
    unsigned int ticks;
    struct snd_cs4231 *chip = snd_timer_chip(timer);

    spin_lock_irqsave(&chip->lock, flags);
    ticks = timer->sticks;
    /* Skip the register writes when nothing would change. */
    if ((chip->image[CS4231_ALT_FEATURE_1] & CS4231_TIMER_ENABLE) == 0 ||
        (unsigned char)(ticks >> 8) != chip->image[CS4231_TIMER_HIGH] ||
        (unsigned char)ticks != chip->image[CS4231_TIMER_LOW]) {
        /* The register-image cache is updated in the same expression. */
        snd_cs4231_out(chip, CS4231_TIMER_HIGH,
                       chip->image[CS4231_TIMER_HIGH] =
                       (unsigned char) (ticks >> 8));
        snd_cs4231_out(chip, CS4231_TIMER_LOW,
                       chip->image[CS4231_TIMER_LOW] =
                       (unsigned char) ticks);
        snd_cs4231_out(chip, CS4231_ALT_FEATURE_1,
                       chip->image[CS4231_ALT_FEATURE_1] |
                       CS4231_TIMER_ENABLE);
    }
    spin_unlock_irqrestore(&chip->lock, flags);
    return 0;
}

/* snd_timer stop callback: clear the timer-enable bit in ALT_FEATURE_1. */
static int snd_cs4231_timer_stop(struct snd_timer *timer)
{
    unsigned long flags;
    struct snd_cs4231 *chip = snd_timer_chip(timer);

    spin_lock_irqsave(&chip->lock, flags);
    chip->image[CS4231_ALT_FEATURE_1] &= ~CS4231_TIMER_ENABLE;
    snd_cs4231_out(chip, CS4231_ALT_FEATURE_1,
                   chip->image[CS4231_ALT_FEATURE_1]);
    spin_unlock_irqrestore(&chip->lock, flags);
    return 0;
}

/*
 * Bring the codec into a known state: disable playback/record, request
 * auto-calibration, then push the cached ALT_FEATURE and format registers
 * out to the hardware.  Each register group is written inside an MCE
 * (Mode Change Enable) up/down bracket, as the chip requires for these
 * registers.  Statement order here follows the hardware's required
 * sequencing — do not reorder.
 */
static void snd_cs4231_init(struct snd_cs4231 *chip)
{
    unsigned long flags;

    snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
    pr_debug("init: (1)\n");
#endif
    snd_cs4231_mce_up(chip);
    spin_lock_irqsave(&chip->lock, flags);
    chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_PLAYBACK_ENABLE |
                                        CS4231_PLAYBACK_PIO |
                                        CS4231_RECORD_ENABLE |
                                        CS4231_RECORD_PIO |
                                        CS4231_CALIB_MODE);
    chip->image[CS4231_IFACE_CTRL] |= CS4231_AUTOCALIB;
    snd_cs4231_out(chip, CS4231_IFACE_CTRL, chip->image[CS4231_IFACE_CTRL]);
    spin_unlock_irqrestore(&chip->lock, flags);
    snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
    pr_debug("init: (2)\n");
#endif

    snd_cs4231_mce_up(chip);
    spin_lock_irqsave(&chip->lock, flags);
    snd_cs4231_out(chip, CS4231_ALT_FEATURE_1,
                   chip->image[CS4231_ALT_FEATURE_1]);
    spin_unlock_irqrestore(&chip->lock, flags);
    snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
    pr_debug("init: (3) - afei = 0x%x\n",
             chip->image[CS4231_ALT_FEATURE_1]);
#endif

    /* ALT_FEATURE_2 does not need the MCE bracket. */
    spin_lock_irqsave(&chip->lock, flags);
    snd_cs4231_out(chip, CS4231_ALT_FEATURE_2,
                   chip->image[CS4231_ALT_FEATURE_2]);
    spin_unlock_irqrestore(&chip->lock, flags);

    snd_cs4231_mce_up(chip);
    spin_lock_irqsave(&chip->lock, flags);
    snd_cs4231_out(chip, CS4231_PLAYBK_FORMAT,
                   chip->image[CS4231_PLAYBK_FORMAT]);
    spin_unlock_irqrestore(&chip->lock, flags);
    snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
    pr_debug("init: (4)\n");
#endif

    snd_cs4231_mce_up(chip);
    spin_lock_irqsave(&chip->lock, flags);
    snd_cs4231_out(chip, CS4231_REC_FORMAT, chip->image[CS4231_REC_FORMAT]);
    spin_unlock_irqrestore(&chip->lock, flags);
    snd_cs4231_mce_down(chip);

#ifdef SNDRV_DEBUG_MCE
    pr_debug("init: (5)\n");
#endif
}

/*
 * Claim the device for @mode (play/record/timer).  Returns -EAGAIN if the
 * requested mode is already taken.  The first opener also enables and
 * acknowledges the codec IRQ sources; later openers just OR in their mode
 * bit.
 */
static int snd_cs4231_open(struct snd_cs4231 *chip, unsigned int mode)
{
    unsigned long flags;

    mutex_lock(&chip->open_mutex);
    if ((chip->mode & mode)) {
        mutex_unlock(&chip->open_mutex);
        return -EAGAIN;
    }
    if (chip->mode & CS4231_MODE_OPEN) {
        chip->mode |= mode;
        mutex_unlock(&chip->open_mutex);
        return 0;
    }
    /* ok. now enable and ack CODEC IRQ */
    spin_lock_irqsave(&chip->lock, flags);
    snd_cs4231_out(chip, CS4231_IRQ_STATUS, CS4231_PLAYBACK_IRQ |
                   CS4231_RECORD_IRQ |
                   CS4231_TIMER_IRQ);
    snd_cs4231_out(chip, CS4231_IRQ_STATUS, 0);
    __cs4231_writeb(chip, 0, CS4231U(chip, STATUS));    /* clear IRQ */
    __cs4231_writeb(chip, 0, CS4231U(chip, STATUS));    /* clear IRQ */

    /* Repeat the enable/ack cycle — presumably to flush latched state;
     * TODO confirm against CS4231 datasheet. */
    snd_cs4231_out(chip, CS4231_IRQ_STATUS, CS4231_PLAYBACK_IRQ |
                   CS4231_RECORD_IRQ |
                   CS4231_TIMER_IRQ);
    snd_cs4231_out(chip, CS4231_IRQ_STATUS, 0);
    spin_unlock_irqrestore(&chip->lock, flags);

    chip->mode = mode;
    mutex_unlock(&chip->open_mutex);
    return 0;
}

/*
 * Release @mode.  The last closer mutes the codec, disables the IRQ
 * sources and shuts down any active playback/record transfer.
 * (Function body continues on the next source line.)
 */
static void snd_cs4231_close(struct snd_cs4231 *chip, unsigned int mode)
{
    unsigned long flags;

    mutex_lock(&chip->open_mutex);
    chip->mode &= ~mode;
    if (chip->mode & CS4231_MODE_OPEN) {
        mutex_unlock(&chip->open_mutex);
        return;
    }
    /* Mute while we tear things down to avoid audible artifacts. */
    snd_cs4231_calibrate_mute(chip, 1);

    /* disable IRQ */
    spin_lock_irqsave(&chip->lock, flags);
    snd_cs4231_out(chip, CS4231_IRQ_STATUS, 0);
    __cs4231_writeb(chip, 0, CS4231U(chip, STATUS));    /* clear IRQ */
    __cs4231_writeb(chip, 0, CS4231U(chip, STATUS));    /* clear IRQ */

    /* now disable record & playback */
    if (chip->image[CS4231_IFACE_CTRL] &
        (CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO |
         CS4231_RECORD_ENABLE | CS4231_RECORD_PIO)) {
        /* IFACE_CTRL changes need the MCE bracket, which must run
         * without the spinlock held. */
        spin_unlock_irqrestore(&chip->lock, flags);
        snd_cs4231_mce_up(chip);
        spin_lock_irqsave(&chip->lock, flags);
        chip->image[CS4231_IFACE_CTRL] &=
            ~(CS4231_PLAYBACK_ENABLE | CS4231_PLAYBACK_PIO |
              CS4231_RECORD_ENABLE | CS4231_RECORD_PIO);
        snd_cs4231_out(chip, CS4231_IFACE_CTRL,
                       chip->image[CS4231_IFACE_CTRL]);
        spin_unlock_irqrestore(&chip->lock, flags);
        snd_cs4231_mce_down(chip);
        spin_lock_irqsave(&chip->lock, flags);
    }

    /* clear IRQ again */
    snd_cs4231_out(chip, CS4231_IRQ_STATUS, 0);
    __cs4231_writeb(chip, 0, CS4231U(chip, STATUS));    /* clear IRQ */
    __cs4231_writeb(chip, 0, CS4231U(chip, STATUS));    /* clear IRQ */
    spin_unlock_irqrestore(&chip->lock, flags);

    snd_cs4231_calibrate_mute(chip, 0);

    chip->mode = 0;
    mutex_unlock(&chip->open_mutex);
}

/*
 * timer open/close
 */
/* Timer open: claim the device in timer mode. */
static int snd_cs4231_timer_open(struct snd_timer *timer)
{
    struct snd_cs4231 *chip = snd_timer_chip(timer);
    snd_cs4231_open(chip, CS4231_MODE_TIMER);
    return 0;
}

/* Timer close: release the timer-mode claim. */
static int snd_cs4231_timer_close(struct snd_timer *timer)
{
    struct snd_cs4231 *chip = snd_timer_chip(timer);
    snd_cs4231_close(chip, CS4231_MODE_TIMER);
    return 0;
}

/* Hardware description/callbacks handed to the ALSA timer core. */
static const struct snd_timer_hardware snd_cs4231_timer_table = {
    .flags      =   SNDRV_TIMER_HW_AUTO,
    .resolution =   9945,
    .ticks      =   65535,
    .open       =   snd_cs4231_timer_open,
    .close      =   snd_cs4231_timer_close,
    .c_resolution = snd_cs4231_timer_resolution,
    .start      =   snd_cs4231_timer_start,
    .stop       =   snd_cs4231_timer_stop,
};

/*
 *  ok.. exported functions..
 */

/* Compute and program the playback data format register from hw_params. */
static int snd_cs4231_playback_hw_params(struct snd_pcm_substream *substream,
                                         struct snd_pcm_hw_params *hw_params)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
    unsigned char new_pdfr;

    new_pdfr = snd_cs4231_get_format(chip, params_format(hw_params),
                                     params_channels(hw_params)) |
        snd_cs4231_get_rate(params_rate(hw_params));
    snd_cs4231_playback_format(chip, hw_params, new_pdfr);

    return 0;
}

/*
 * Playback prepare: clear the playback enable/PIO bits in the cached
 * IFACE_CTRL and reset the sent-period counter.  Rejects period sizes
 * that do not fit the chip's 16-bit sample counter.
 */
static int snd_cs4231_playback_prepare(struct snd_pcm_substream *substream)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&chip->lock, flags);

    chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_PLAYBACK_ENABLE |
                                        CS4231_PLAYBACK_PIO);

    if (WARN_ON(runtime->period_size > 0xffff + 1)) {
        ret = -EINVAL;
        goto out;
    }

    chip->p_periods_sent = 0;

out:
    spin_unlock_irqrestore(&chip->lock, flags);

    return ret;
}

/* Compute and program the capture data format register from hw_params.
 * (Continues on the next source line.) */
static int snd_cs4231_capture_hw_params(struct snd_pcm_substream *substream,
                                        struct snd_pcm_hw_params *hw_params)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
    unsigned char new_cdfr;

    new_cdfr = snd_cs4231_get_format(chip, params_format(hw_params),
                                     params_channels(hw_params)) |
        snd_cs4231_get_rate(params_rate(hw_params));
    snd_cs4231_capture_format(chip, hw_params, new_cdfr);

    return 0;
}

/* Capture prepare: clear record enable/PIO bits, reset period counter. */
static int snd_cs4231_capture_prepare(struct snd_pcm_substream *substream)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
    unsigned long flags;

    spin_lock_irqsave(&chip->lock, flags);
    chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_RECORD_ENABLE |
                                        CS4231_RECORD_PIO);

    chip->c_periods_sent = 0;
    spin_unlock_irqrestore(&chip->lock, flags);

    return 0;
}

/* Bump the runtime overrange counter when the ADC reports clipping. */
static void snd_cs4231_overrange(struct snd_cs4231 *chip)
{
    unsigned long flags;
    unsigned char res;

    spin_lock_irqsave(&chip->lock, flags);
    res = snd_cs4231_in(chip, CS4231_TEST_INIT);
    spin_unlock_irqrestore(&chip->lock, flags);

    /* detect overrange only above 0dB; may be user selectable? */
    if (res & (0x08 | 0x02))
        chip->capture_substream->runtime->overrange++;
}

/* IRQ-path hook: report an elapsed playback period and queue more DMA. */
static void snd_cs4231_play_callback(struct snd_cs4231 *chip)
{
    if (chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE) {
        snd_pcm_period_elapsed(chip->playback_substream);
        snd_cs4231_advance_dma(&chip->p_dma, chip->playback_substream,
                               &chip->p_periods_sent);
    }
}

/* IRQ-path hook: report an elapsed capture period and queue more DMA. */
static void snd_cs4231_capture_callback(struct snd_cs4231 *chip)
{
    if (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) {
        snd_pcm_period_elapsed(chip->capture_substream);
        snd_cs4231_advance_dma(&chip->c_dma, chip->capture_substream,
                               &chip->c_periods_sent);
    }
}

/* Current playback position: DMA address relative to the buffer start. */
static snd_pcm_uframes_t snd_cs4231_playback_pointer(
    struct snd_pcm_substream *substream)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
    struct cs4231_dma_control *dma_cont = &chip->p_dma;
    size_t ptr;

    if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE))
        return 0;
    ptr = dma_cont->address(dma_cont);
    if (ptr != 0)
        ptr -= substream->runtime->dma_addr;

    return bytes_to_frames(substream->runtime, ptr);
}

/* Current capture position: DMA address relative to the buffer start. */
static snd_pcm_uframes_t snd_cs4231_capture_pointer(
    struct snd_pcm_substream *substream)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
    struct cs4231_dma_control *dma_cont = &chip->c_dma;
    size_t ptr;

    if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE))
        return 0;
    ptr = dma_cont->address(dma_cont);
    if (ptr != 0)
        ptr -= substream->runtime->dma_addr;

    return bytes_to_frames(substream->runtime, ptr);
}

/*
 * Detect the codec: wait for the INIT bit to clear, check the chip-ID
 * nibble (0x0a is the only accepted value), then load the whole 32-entry
 * register image into the hardware and run an MCE cycle.
 */
static int snd_cs4231_probe(struct snd_cs4231 *chip)
{
    unsigned long flags;
    int i;
    int id = 0;
    int vers = 0;
    unsigned char *ptr;

    /* Poll up to 50 times (2ms apart) for the chip to leave INIT state. */
    for (i = 0; i < 50; i++) {
        mb();
        if (__cs4231_readb(chip, CS4231U(chip, REGSEL)) & CS4231_INIT)
            msleep(2);
        else {
            spin_lock_irqsave(&chip->lock, flags);
            snd_cs4231_out(chip, CS4231_MISC_INFO, CS4231_MODE2);
            id = snd_cs4231_in(chip, CS4231_MISC_INFO) & 0x0f;
            vers = snd_cs4231_in(chip, CS4231_VERSION);
            spin_unlock_irqrestore(&chip->lock, flags);
            if (id == 0x0a)
                break;  /* this is valid value */
        }
    }
    dev_dbg(chip->card->dev, "cs4231: port = %p, id = 0x%x\n",
            chip->port, id);
    if (id != 0x0a)
        return -ENODEV; /* no valid device found */

    spin_lock_irqsave(&chip->lock, flags);

    /* clear any pending IRQ */
    __cs4231_readb(chip, CS4231U(chip, STATUS));
    __cs4231_writeb(chip, 0, CS4231U(chip, STATUS));
    mb();

    spin_unlock_irqrestore(&chip->lock, flags);

    chip->image[CS4231_MISC_INFO] = CS4231_MODE2;
    chip->image[CS4231_IFACE_CTRL] =
        chip->image[CS4231_IFACE_CTRL] & ~CS4231_SINGLE_DMA;
    chip->image[CS4231_ALT_FEATURE_1] = 0x80;
    chip->image[CS4231_ALT_FEATURE_2] = 0x01;
    if (vers & 0x20)
        chip->image[CS4231_ALT_FEATURE_2] |= 0x02;

    ptr = (unsigned char *) &chip->image;

    snd_cs4231_mce_down(chip);

    spin_lock_irqsave(&chip->lock, flags);

    for (i = 0; i < 32; i++)    /* ok.. fill all CS4231 registers */
        snd_cs4231_out(chip, i, *ptr++);

    spin_unlock_irqrestore(&chip->lock, flags);

    snd_cs4231_mce_up(chip);

    snd_cs4231_mce_down(chip);

    mdelay(2);

    return 0;   /* all things are ok..
             */
}

/* Playback stream capabilities advertised to the ALSA PCM core. */
static const struct snd_pcm_hardware snd_cs4231_playback = {
    .info       = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
                  SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START,
    .formats    = SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW |
                  SNDRV_PCM_FMTBIT_IMA_ADPCM |
                  SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
                  SNDRV_PCM_FMTBIT_S16_BE,
    .rates      = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000,
    .rate_min   = 5510,
    .rate_max   = 48000,
    .channels_min   = 1,
    .channels_max   = 2,
    .buffer_bytes_max   = 32 * 1024,
    .period_bytes_min   = 64,
    .period_bytes_max   = 32 * 1024,
    .periods_min    = 1,
    .periods_max    = 1024,
};

/* Capture stream capabilities — identical limits to playback. */
static const struct snd_pcm_hardware snd_cs4231_capture = {
    .info       = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
                  SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START,
    .formats    = SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW |
                  SNDRV_PCM_FMTBIT_IMA_ADPCM |
                  SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
                  SNDRV_PCM_FMTBIT_S16_BE,
    .rates      = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000,
    .rate_min   = 5510,
    .rate_max   = 48000,
    .channels_min   = 1,
    .channels_max   = 2,
    .buffer_bytes_max   = 32 * 1024,
    .period_bytes_min   = 64,
    .period_bytes_max   = 32 * 1024,
    .periods_min    = 1,
    .periods_max    = 1024,
};

/* PCM playback open: claim play mode and install rate constraints. */
static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    int err;

    runtime->hw = snd_cs4231_playback;

    err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
    if (err < 0)
        return err;
    chip->playback_substream = substream;
    chip->p_periods_sent = 0;
    snd_pcm_set_sync(substream);
    snd_cs4231_xrate(runtime);

    return 0;
}

/* PCM capture open: claim record mode and install rate constraints.
 * (Continues on the next source line.) */
static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    int err;

    runtime->hw = snd_cs4231_capture;

    err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
    if (err < 0)
        return err;
    chip->capture_substream = substream;
    chip->c_periods_sent = 0;
    snd_pcm_set_sync(substream);
    snd_cs4231_xrate(runtime);

    return 0;
}

/* PCM playback close: release play mode and drop the substream pointer. */
static int snd_cs4231_playback_close(struct snd_pcm_substream *substream)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);

    snd_cs4231_close(chip, CS4231_MODE_PLAY);
    chip->playback_substream = NULL;

    return 0;
}

/* PCM capture close: release record mode and drop the substream pointer. */
static int snd_cs4231_capture_close(struct snd_pcm_substream *substream)
{
    struct snd_cs4231 *chip = snd_pcm_substream_chip(substream);

    snd_cs4231_close(chip, CS4231_MODE_RECORD);
    chip->capture_substream = NULL;

    return 0;
}

/* XXX We can do some power-management, in particular on EBUS using
 * XXX the audio AUXIO register...
 */
static const struct snd_pcm_ops snd_cs4231_playback_ops = {
    .open       =   snd_cs4231_playback_open,
    .close      =   snd_cs4231_playback_close,
    .hw_params  =   snd_cs4231_playback_hw_params,
    .prepare    =   snd_cs4231_playback_prepare,
    .trigger    =   snd_cs4231_trigger,
    .pointer    =   snd_cs4231_playback_pointer,
};

static const struct snd_pcm_ops snd_cs4231_capture_ops = {
    .open       =   snd_cs4231_capture_open,
    .close      =   snd_cs4231_capture_close,
    .hw_params  =   snd_cs4231_capture_hw_params,
    .prepare    =   snd_cs4231_capture_prepare,
    .trigger    =   snd_cs4231_trigger,
    .pointer    =   snd_cs4231_capture_pointer,
};

/*
 * Create the PCM device (one playback, one capture substream) and attach
 * the managed DMA buffers.
 */
static int snd_cs4231_pcm(struct snd_card *card)
{
    struct snd_cs4231 *chip = card->private_data;
    struct snd_pcm *pcm;
    int err;

    err = snd_pcm_new(card, "CS4231", 0, 1, 1, &pcm);
    if (err < 0)
        return err;

    snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
                    &snd_cs4231_playback_ops);
    snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
                    &snd_cs4231_capture_ops);

    /* global setup */
    pcm->private_data = chip;
    pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
    strcpy(pcm->name, "CS4231");

    snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
                                   &chip->op->dev, 64 * 1024, 128 * 1024);

    chip->pcm = pcm;

    return 0;
}

/* Register the on-chip interval timer with the ALSA timer core.
 * (Body continues on the next source line.) */
static int snd_cs4231_timer(struct snd_card *card)
{
    struct snd_cs4231 *chip = card->private_data;
    struct snd_timer *timer;
    struct snd_timer_id tid;
    int err;

    /* Timer initialization */
    tid.dev_class = SNDRV_TIMER_CLASS_CARD;
    tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
    tid.card = card->number;
    tid.device = 0;
    tid.subdevice = 0;
    err = snd_timer_new(card, "CS4231", &tid, &timer);
    if (err < 0)
        return err;
    strcpy(timer->name, "CS4231");
    timer->private_data = chip;
    timer->hw = snd_cs4231_timer_table;
    chip->timer = timer;

    return 0;
}

/*
 *  MIXER part
 */

/* Enumerated control info: capture source, per channel (stereo). */
static int snd_cs4231_info_mux(struct snd_kcontrol *kcontrol,
                               struct snd_ctl_elem_info *uinfo)
{
    static const char * const texts[4] = {
        "Line", "CD", "Mic", "Mix"
    };

    return snd_ctl_enum_info(uinfo, 2, 4, texts);
}

/* Read the capture source selection from bits 7:6 of each input register. */
static int snd_cs4231_get_mux(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
{
    struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
    unsigned long flags;

    spin_lock_irqsave(&chip->lock, flags);
    ucontrol->value.enumerated.item[0] =
        (chip->image[CS4231_LEFT_INPUT] & CS4231_MIXS_ALL) >> 6;
    ucontrol->value.enumerated.item[1] =
        (chip->image[CS4231_RIGHT_INPUT] & CS4231_MIXS_ALL) >> 6;
    spin_unlock_irqrestore(&chip->lock, flags);

    return 0;
}

/*
 * Write the capture source selection.  Returns 1 when either channel
 * changed, 0 otherwise, as the control API requires.
 */
static int snd_cs4231_put_mux(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
{
    struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
    unsigned long flags;
    unsigned short left, right;
    int change;

    if (ucontrol->value.enumerated.item[0] > 3 ||
        ucontrol->value.enumerated.item[1] > 3)
        return -EINVAL;
    left = ucontrol->value.enumerated.item[0] << 6;
    right = ucontrol->value.enumerated.item[1] << 6;

    spin_lock_irqsave(&chip->lock, flags);

    /* Merge the new source bits into the cached register values. */
    left = (chip->image[CS4231_LEFT_INPUT] & ~CS4231_MIXS_ALL) | left;
    right = (chip->image[CS4231_RIGHT_INPUT] & ~CS4231_MIXS_ALL) | right;
    change = left != chip->image[CS4231_LEFT_INPUT] ||
             right != chip->image[CS4231_RIGHT_INPUT];
    snd_cs4231_out(chip, CS4231_LEFT_INPUT, left);
    snd_cs4231_out(chip, CS4231_RIGHT_INPUT, right);

    spin_unlock_irqrestore(&chip->lock, flags);

    return change;
}

/*
 * Mono control info.  private_value packs: reg | shift<<8 | mask<<16 |
 * invert<<24 (see the CS4231_SINGLE macro below).
 */
static int snd_cs4231_info_single(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_info *uinfo)
{
    int mask = (kcontrol->private_value >> 16) & 0xff;

    uinfo->type = (mask == 1) ?
        SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
    uinfo->count = 1;
    uinfo->value.integer.min = 0;
    uinfo->value.integer.max = mask;

    return 0;
}

/* Read a mono control value from the cached register image. */
static int snd_cs4231_get_single(struct snd_kcontrol *kcontrol,
                                 struct snd_ctl_elem_value *ucontrol)
{
    struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
    unsigned long flags;
    int reg = kcontrol->private_value & 0xff;
    int shift = (kcontrol->private_value >> 8) & 0xff;
    int mask = (kcontrol->private_value >> 16) & 0xff;
    int invert = (kcontrol->private_value >> 24) & 0xff;

    spin_lock_irqsave(&chip->lock, flags);

    ucontrol->value.integer.value[0] = (chip->image[reg] >> shift) & mask;

    spin_unlock_irqrestore(&chip->lock, flags);

    if (invert)
        ucontrol->value.integer.value[0] =
            (mask - ucontrol->value.integer.value[0]);

    return 0;
}

/* Write a mono control value; returns 1 when the register changed. */
static int snd_cs4231_put_single(struct snd_kcontrol *kcontrol,
                                 struct snd_ctl_elem_value *ucontrol)
{
    struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
    unsigned long flags;
    int reg = kcontrol->private_value & 0xff;
    int shift = (kcontrol->private_value >> 8) & 0xff;
    int mask = (kcontrol->private_value >> 16) & 0xff;
    int invert = (kcontrol->private_value >> 24) & 0xff;
    int change;
    unsigned short val;

    val = (ucontrol->value.integer.value[0] & mask);
    if (invert)
        val = mask - val;
    val <<= shift;

    spin_lock_irqsave(&chip->lock, flags);

    val = (chip->image[reg] & ~(mask << shift)) | val;
    change = val != chip->image[reg];
    snd_cs4231_out(chip, reg, val);

    spin_unlock_irqrestore(&chip->lock, flags);

    return change;
}

/*
 * Stereo control info.  private_value packs: left_reg | right_reg<<8 |
 * shift_left<<16 | shift_right<<19 | invert<<22 | mask<<24 (see the
 * CS4231_DOUBLE macro below).  (Continues on the next source line.)
 */
static int snd_cs4231_info_double(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_info *uinfo)
{
    int mask = (kcontrol->private_value >> 24) & 0xff;

    uinfo->type = mask == 1 ?
        SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
    uinfo->count = 2;
    uinfo->value.integer.min = 0;
    uinfo->value.integer.max = mask;

    return 0;
}

/* Read a stereo control pair from the cached register image. */
static int snd_cs4231_get_double(struct snd_kcontrol *kcontrol,
                                 struct snd_ctl_elem_value *ucontrol)
{
    struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
    unsigned long flags;
    int left_reg = kcontrol->private_value & 0xff;
    int right_reg = (kcontrol->private_value >> 8) & 0xff;
    int shift_left = (kcontrol->private_value >> 16) & 0x07;
    int shift_right = (kcontrol->private_value >> 19) & 0x07;
    int mask = (kcontrol->private_value >> 24) & 0xff;
    int invert = (kcontrol->private_value >> 22) & 1;

    spin_lock_irqsave(&chip->lock, flags);

    ucontrol->value.integer.value[0] =
        (chip->image[left_reg] >> shift_left) & mask;
    ucontrol->value.integer.value[1] =
        (chip->image[right_reg] >> shift_right) & mask;

    spin_unlock_irqrestore(&chip->lock, flags);

    if (invert) {
        ucontrol->value.integer.value[0] =
            (mask - ucontrol->value.integer.value[0]);
        ucontrol->value.integer.value[1] =
            (mask - ucontrol->value.integer.value[1]);
    }

    return 0;
}

/* Write a stereo control pair; returns 1 when either register changed.
 * (Continues on the next source line.) */
static int snd_cs4231_put_double(struct snd_kcontrol *kcontrol,
                                 struct snd_ctl_elem_value *ucontrol)
{
    struct snd_cs4231 *chip = snd_kcontrol_chip(kcontrol);
    unsigned long flags;
    int left_reg = kcontrol->private_value & 0xff;
    int right_reg = (kcontrol->private_value >> 8) & 0xff;
    int shift_left = (kcontrol->private_value >> 16) & 0x07;
    int shift_right = (kcontrol->private_value >> 19) & 0x07;
    int mask = (kcontrol->private_value >> 24) & 0xff;
    int invert = (kcontrol->private_value >> 22) & 1;
    int change;
    unsigned short val1, val2;

    val1 = ucontrol->value.integer.value[0] & mask;
    val2 = ucontrol->value.integer.value[1] & mask;
    if (invert) {
        val1 = mask - val1;
        val2 = mask - val2;
    }
    val1 <<= shift_left;
    val2 <<= shift_right;

    spin_lock_irqsave(&chip->lock, flags);

    val1 = (chip->image[left_reg] & ~(mask << shift_left)) | val1;
    val2 = (chip->image[right_reg] & ~(mask << shift_right)) | val2;
    change = val1 != chip->image[left_reg];
    change |= val2 != chip->image[right_reg];
    snd_cs4231_out(chip, left_reg, val1);
    snd_cs4231_out(chip, right_reg, val2);

    spin_unlock_irqrestore(&chip->lock, flags);

    return change;
}

/* Build a mono mixer control; field packing matches snd_cs4231_*_single. */
#define CS4231_SINGLE(xname, xindex, reg, shift, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), .index = (xindex), \
  .info = snd_cs4231_info_single, \
  .get = snd_cs4231_get_single, .put = snd_cs4231_put_single, \
  .private_value = (reg) | ((shift) << 8) | ((mask) << 16) | ((invert) << 24) }

/* Build a stereo mixer control; field packing matches snd_cs4231_*_double. */
#define CS4231_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, \
                      shift_right, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), .index = (xindex), \
  .info = snd_cs4231_info_double, \
  .get = snd_cs4231_get_double, .put = snd_cs4231_put_double, \
  .private_value = (left_reg) | ((right_reg) << 8) | ((shift_left) << 16) | \
                   ((shift_right) << 19) | ((mask) << 24) | ((invert) << 22) }

/* Full mixer control set exposed to userspace. */
static const struct snd_kcontrol_new snd_cs4231_controls[] = {
    CS4231_DOUBLE("PCM Playback Switch", 0, CS4231_LEFT_OUTPUT,
                  CS4231_RIGHT_OUTPUT, 7, 7, 1, 1),
    CS4231_DOUBLE("PCM Playback Volume", 0, CS4231_LEFT_OUTPUT,
                  CS4231_RIGHT_OUTPUT, 0, 0, 63, 1),
    CS4231_DOUBLE("Line Playback Switch", 0, CS4231_LEFT_LINE_IN,
                  CS4231_RIGHT_LINE_IN, 7, 7, 1, 1),
    CS4231_DOUBLE("Line Playback Volume", 0, CS4231_LEFT_LINE_IN,
                  CS4231_RIGHT_LINE_IN, 0, 0, 31, 1),
    CS4231_DOUBLE("Aux Playback Switch", 0, CS4231_AUX1_LEFT_INPUT,
                  CS4231_AUX1_RIGHT_INPUT, 7, 7, 1, 1),
    CS4231_DOUBLE("Aux Playback Volume", 0, CS4231_AUX1_LEFT_INPUT,
                  CS4231_AUX1_RIGHT_INPUT, 0, 0, 31, 1),
    CS4231_DOUBLE("Aux Playback Switch", 1, CS4231_AUX2_LEFT_INPUT,
                  CS4231_AUX2_RIGHT_INPUT, 7, 7, 1, 1),
    CS4231_DOUBLE("Aux Playback Volume", 1, CS4231_AUX2_LEFT_INPUT,
                  CS4231_AUX2_RIGHT_INPUT, 0, 0, 31, 1),
    CS4231_SINGLE("Mono Playback Switch", 0, CS4231_MONO_CTRL, 7, 1, 1),
    CS4231_SINGLE("Mono Playback Volume", 0, CS4231_MONO_CTRL, 0, 15, 1),
    CS4231_SINGLE("Mono Output Playback Switch", 0, CS4231_MONO_CTRL, 6, 1,
                  1),
    CS4231_SINGLE("Mono Output Playback Bypass", 0, CS4231_MONO_CTRL, 5, 1,
                  0),
    CS4231_DOUBLE("Capture Volume", 0, CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT,
                  0, 0, 15, 0),
    {
        .iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name   = "Capture Source",
        .info   = snd_cs4231_info_mux,
        .get    = snd_cs4231_get_mux,
        .put    = snd_cs4231_put_mux,
    },
    CS4231_DOUBLE("Mic Boost", 0, CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT,
                  5, 5, 1, 0),
    CS4231_SINGLE("Loopback Capture Switch", 0, CS4231_LOOPBACK, 0, 1, 0),
    CS4231_SINGLE("Loopback Capture Volume", 0, CS4231_LOOPBACK, 2, 63, 1),
    /* SPARC specific uses of XCTL{0,1} general purpose outputs. */
    CS4231_SINGLE("Line Out Switch", 0, CS4231_PIN_CTRL, 6, 1, 1),
    CS4231_SINGLE("Headphone Out Switch", 0, CS4231_PIN_CTRL, 7, 1, 1)
};

/* Register every control in snd_cs4231_controls[] with the card. */
static int snd_cs4231_mixer(struct snd_card *card)
{
    struct snd_cs4231 *chip = card->private_data;
    int err, idx;

    if (snd_BUG_ON(!chip || !chip->pcm))
        return -EINVAL;

    strcpy(card->mixername, chip->pcm->name);

    for (idx = 0; idx < ARRAY_SIZE(snd_cs4231_controls); idx++) {
        err = snd_ctl_add(card,
                          snd_ctl_new1(&snd_cs4231_controls[idx], chip));
        if (err < 0)
            return err;
    }
    return 0;
}

/* Index of the next card instance; bumped on each successful attach. */
static int dev;

/*
 * Common first attach stage: allocate the snd_card (with the snd_cs4231
 * embedded as private data) and fill in the identification strings.
 */
static int cs4231_attach_begin(struct platform_device *op,
                               struct snd_card **rcard)
{
    struct snd_card *card;
    struct snd_cs4231 *chip;
    int err;

    *rcard = NULL;

    if (dev >= SNDRV_CARDS)
        return -ENODEV;

    if (!enable[dev]) {
        dev++;
        return -ENOENT;
    }

    err = snd_card_new(&op->dev, index[dev], id[dev], THIS_MODULE,
                       sizeof(struct snd_cs4231), &card);
    if (err < 0)
        return err;

    strcpy(card->driver, "CS4231");
    strcpy(card->shortname, "Sun CS4231");

    chip = card->private_data;
    chip->card = card;

    *rcard = card;
    return 0;
}

/*
 * Common last attach stage: create PCM, mixer and timer devices and
 * register the card.  Frees the card on any failure.
 * (Continues on the next source line.)
 */
static int cs4231_attach_finish(struct snd_card *card)
{
    struct snd_cs4231 *chip = card->private_data;
    int err;

    err = snd_cs4231_pcm(card);
    if (err < 0)
        goto out_err;

    err = snd_cs4231_mixer(card);
    if (err < 0)
        goto out_err;

    err = snd_cs4231_timer(card);
    if (err < 0)
        goto out_err;

    err = snd_card_register(card);
    if (err < 0)
        goto out_err;
    dev_set_drvdata(&chip->op->dev, chip);

    dev++;
    return 0;

out_err:
    snd_card_free(card);
    return err;
}

#ifdef SBUS_SUPPORT

/*
 * SBUS interrupt handler: ACK the APC DMA controller, dispatch play and
 * capture period completions, service the on-chip timer, and finally ACK
 * the CS4231's own interrupt sources.
 */
static irqreturn_t snd_cs4231_sbus_interrupt(int irq, void *dev_id)
{
    unsigned long flags;
    unsigned char status;
    u32 csr;
    struct snd_cs4231 *chip = dev_id;

    /* This IRQ was not raised by the cs4231 — shared line, bail out. */
    if (!(__cs4231_readb(chip, CS4231U(chip, STATUS)) & CS4231_GLOBALIRQ))
        return IRQ_NONE;

    /* ACK the APC interrupt. */
    csr = sbus_readl(chip->port + APCCSR);
    sbus_writel(csr, chip->port + APCCSR);

    if ((csr & APC_PDMA_READY) &&
        (csr & APC_PLAY_INT) &&
        (csr & APC_XINT_PNVA) &&
        !(csr & APC_XINT_EMPT))
        snd_cs4231_play_callback(chip);

    if ((csr & APC_CDMA_READY) &&
        (csr & APC_CAPT_INT) &&
        (csr & APC_XINT_CNVA) &&
        !(csr & APC_XINT_EMPT))
        snd_cs4231_capture_callback(chip);

    status = snd_cs4231_in(chip, CS4231_IRQ_STATUS);

    if (status & CS4231_TIMER_IRQ) {
        if (chip->timer)
            snd_timer_interrupt(chip->timer, chip->timer->sticks);
    }

    if ((status & CS4231_RECORD_IRQ) && (csr & APC_CDMA_READY))
        snd_cs4231_overrange(chip);

    /* ACK the CS4231 interrupt. */
    spin_lock_irqsave(&chip->lock, flags);
    snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0);
    spin_unlock_irqrestore(&chip->lock, flags);

    return IRQ_HANDLED;
}

/*
 * SBUS DMA routines
 */

/*
 * Queue one DMA transfer on the APC: write the next buffer address and
 * byte count, but only when the engine is ready and the "next address"
 * slot is free.  Length is limited to the 24-bit count register.
 */
static int sbus_dma_request(struct cs4231_dma_control *dma_cont,
                            dma_addr_t bus_addr, size_t len)
{
    unsigned long flags;
    u32 test, csr;
    int err;
    struct sbus_dma_info *base = &dma_cont->sbus_info;

    if (len >= (1 << 24))
        return -EINVAL;
    spin_lock_irqsave(&base->lock, flags);
    csr = sbus_readl(base->regs + APCCSR);
    err = -EINVAL;
    test = APC_CDMA_READY;
    if (base->dir == APC_PLAY)
        test = APC_PDMA_READY;
    if (!(csr & test))
        goto out;
    err = -EBUSY;
    test = APC_XINT_CNVA;
    if (base->dir == APC_PLAY)
        test = APC_XINT_PNVA;
    if (!(csr & test))
        goto out;
    err = 0;
    sbus_writel(bus_addr, base->regs + base->dir + APCNVA);
    sbus_writel(len, base->regs + base->dir + APCNC);
out:
    spin_unlock_irqrestore(&base->lock, flags);
    return err;
}

/* Enable the interrupt sources for this DMA direction in the APC CSR. */
static void sbus_dma_prepare(struct cs4231_dma_control *dma_cont, int d)
{
    unsigned long flags;
    u32 csr, test;
    struct sbus_dma_info *base = &dma_cont->sbus_info;

    spin_lock_irqsave(&base->lock, flags);
    csr = sbus_readl(base->regs + APCCSR);
    test = APC_GENL_INT | APC_PLAY_INT | APC_XINT_ENA |
           APC_XINT_PLAY | APC_XINT_PEMP | APC_XINT_GENL |
           APC_XINT_PENA;
    if (base->dir == APC_RECORD)
        test = APC_GENL_INT | APC_CAPT_INT | APC_XINT_ENA |
               APC_XINT_CAPT | APC_XINT_CEMP | APC_XINT_GENL;
    csr |= test;
    sbus_writel(csr, base->regs + APCCSR);
    spin_unlock_irqrestore(&base->lock, flags);
}

/*
 * Pause/resume the APC DMA engine.  On disable, the address and count
 * registers are cleared first and the engine given time to drain.
 * (Body continues on the next source line.)
 */
static void sbus_dma_enable(struct cs4231_dma_control *dma_cont, int on)
{
    unsigned long flags;
    u32 csr, shift;
    struct sbus_dma_info *base = &dma_cont->sbus_info;

    spin_lock_irqsave(&base->lock, flags);
    if (!on) {
        sbus_writel(0, base->regs + base->dir + APCNC);
        sbus_writel(0, base->regs + base->dir + APCNVA);
        if (base->dir == APC_PLAY) {
            sbus_writel(0, base->regs + base->dir + APCC);
            sbus_writel(0, base->regs + base->dir + APCVA);
        }

        udelay(1200);
    }
    csr = sbus_readl(base->regs + APCCSR);
    shift = 0;
    if (base->dir == APC_PLAY)
        shift = 1;
    if (on)
        csr &= ~(APC_CPAUSE << shift);
    else
        csr |= (APC_CPAUSE << shift);
    sbus_writel(csr, base->regs + APCCSR);
    if (on)
        csr |= (APC_CDMA_READY << shift);
    else
        csr &= ~(APC_CDMA_READY << shift);
    sbus_writel(csr, base->regs + APCCSR);

    spin_unlock_irqrestore(&base->lock, flags);
}

/* Return the APC's current DMA address for this direction. */
static unsigned int sbus_dma_addr(struct cs4231_dma_control *dma_cont)
{
    struct sbus_dma_info *base = &dma_cont->sbus_info;

    return sbus_readl(base->regs + base->dir + APCVA);
}

/*
 * Init and exit routines
 */

/* Release the IRQ and unmap the registers taken by the SBUS variant. */
static int snd_cs4231_sbus_free(struct snd_cs4231 *chip)
{
    struct platform_device *op = chip->op;

    if (chip->irq[0])
        free_irq(chip->irq[0], chip);

    if (chip->port)
        of_iounmap(&op->resource[0], chip->port, chip->regs_size);

    return 0;
}

static int snd_cs4231_sbus_dev_free(struct snd_device *device)
{
    struct snd_cs4231 *cp = device->device_data;

    return snd_cs4231_sbus_free(cp);
}

static const struct snd_device_ops snd_cs4231_sbus_dev_ops = {
    .dev_free   =   snd_cs4231_sbus_dev_free,
};

/*
 * Set up an SBUS-attached codec: map registers, wire the sbus_dma_*
 * callbacks into both DMA control blocks, grab the shared IRQ, probe and
 * initialise the chip, then register it as an ALSA low-level device.
 */
static int snd_cs4231_sbus_create(struct snd_card *card,
                                  struct platform_device *op,
                                  int dev)
{
    struct snd_cs4231 *chip = card->private_data;
    int err;

    spin_lock_init(&chip->lock);
    spin_lock_init(&chip->c_dma.sbus_info.lock);
    spin_lock_init(&chip->p_dma.sbus_info.lock);
    mutex_init(&chip->mce_mutex);
    mutex_init(&chip->open_mutex);
    chip->op = op;
    chip->regs_size = resource_size(&op->resource[0]);
    memcpy(&chip->image, &snd_cs4231_original_image,
           sizeof(snd_cs4231_original_image));

    chip->port = of_ioremap(&op->resource[0], 0,
                            chip->regs_size, "cs4231");
    if (!chip->port) {
        dev_dbg(chip->card->dev,
                "cs4231-%d: Unable to map chip registers.\n", dev);
        return -EIO;
    }

    /* Play and capture share one register block; direction selects
     * the APC register offsets. */
    chip->c_dma.sbus_info.regs = chip->port;
    chip->p_dma.sbus_info.regs = chip->port;
    chip->c_dma.sbus_info.dir = APC_RECORD;
    chip->p_dma.sbus_info.dir = APC_PLAY;

    chip->p_dma.prepare = sbus_dma_prepare;
    chip->p_dma.enable = sbus_dma_enable;
    chip->p_dma.request = sbus_dma_request;
    chip->p_dma.address = sbus_dma_addr;

    chip->c_dma.prepare = sbus_dma_prepare;
    chip->c_dma.enable = sbus_dma_enable;
    chip->c_dma.request = sbus_dma_request;
    chip->c_dma.address = sbus_dma_addr;

    if (request_irq(op->archdata.irqs[0], snd_cs4231_sbus_interrupt,
                    IRQF_SHARED, "cs4231", chip)) {
        dev_dbg(chip->card->dev,
                "cs4231-%d: Unable to grab SBUS IRQ %d\n",
                dev, op->archdata.irqs[0]);
        snd_cs4231_sbus_free(chip);
        return -EBUSY;
    }
    chip->irq[0] = op->archdata.irqs[0];

    if (snd_cs4231_probe(chip) < 0) {
        snd_cs4231_sbus_free(chip);
        return -ENODEV;
    }
    snd_cs4231_init(chip);

    err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip,
                         &snd_cs4231_sbus_dev_ops);
    if (err < 0) {
        snd_cs4231_sbus_free(chip);
        return err;
    }

    return 0;
}

/* Platform probe path for SBUS devices. */
static int cs4231_sbus_probe(struct platform_device *op)
{
    struct resource *rp = &op->resource[0];
    struct snd_card *card;
    int err;

    err = cs4231_attach_begin(op, &card);
    if (err)
        return err;

    sprintf(card->longname, "%s at 0x%02lx:0x%016Lx, irq %d",
            card->shortname,
            rp->flags & 0xffL,
            (unsigned long long)rp->start,
            op->archdata.irqs[0]);

    err = snd_cs4231_sbus_create(card, op, dev);
    if (err < 0) {
        snd_card_free(card);
        return err;
    }

    return cs4231_attach_finish(card);
}
#endif

#ifdef EBUS_SUPPORT

/* EBUS DMA completion callbacks: forward to the common period handlers. */
static void snd_cs4231_ebus_play_callback(struct ebus_dma_info *p,
                                          int event, void *cookie)
{
    struct snd_cs4231 *chip = cookie;

    snd_cs4231_play_callback(chip);
}

static void snd_cs4231_ebus_capture_callback(struct ebus_dma_info *p,
                                             int event, void *cookie)
{
    struct snd_cs4231 *chip = cookie;

    snd_cs4231_capture_callback(chip);
}

/*
 * EBUS DMA wrappers
 */

/* Thin adapters from the generic cs4231_dma_control interface to the
 * EBUS DMA helper library. */
static int _ebus_dma_request(struct cs4231_dma_control *dma_cont,
                             dma_addr_t bus_addr, size_t len)
{
    return ebus_dma_request(&dma_cont->ebus_info, bus_addr, len);
}

static void _ebus_dma_enable(struct cs4231_dma_control *dma_cont, int on)
{
    ebus_dma_enable(&dma_cont->ebus_info, on);
}

static void _ebus_dma_prepare(struct cs4231_dma_control *dma_cont, int dir)
{
    ebus_dma_prepare(&dma_cont->ebus_info, dir);
}

/* (Declaration continues on the next source line.) */
static unsigned int _ebus_dma_addr(struct
                                   cs4231_dma_control *dma_cont)
{
    return ebus_dma_addr(&dma_cont->ebus_info);
}

/*
 * Init and exit routines
 */

/* Unregister the EBUS DMA channels and unmap all register windows. */
static int snd_cs4231_ebus_free(struct snd_cs4231 *chip)
{
    struct platform_device *op = chip->op;

    if (chip->c_dma.ebus_info.regs) {
        ebus_dma_unregister(&chip->c_dma.ebus_info);
        of_iounmap(&op->resource[2], chip->c_dma.ebus_info.regs, 0x10);
    }
    if (chip->p_dma.ebus_info.regs) {
        ebus_dma_unregister(&chip->p_dma.ebus_info);
        of_iounmap(&op->resource[1], chip->p_dma.ebus_info.regs, 0x10);
    }

    if (chip->port)
        of_iounmap(&op->resource[0], chip->port, 0x10);

    return 0;
}

static int snd_cs4231_ebus_dev_free(struct snd_device *device)
{
    struct snd_cs4231 *cp = device->device_data;

    return snd_cs4231_ebus_free(cp);
}

static const struct snd_device_ops snd_cs4231_ebus_dev_ops = {
    .dev_free   =   snd_cs4231_ebus_dev_free,
};

/*
 * Set up an EBUS-attached codec: separate play/capture DMA register
 * windows and IRQs, routed through the ebus_dma helper library, then the
 * same probe/init/register sequence as the SBUS variant.
 */
static int snd_cs4231_ebus_create(struct snd_card *card,
                                  struct platform_device *op,
                                  int dev)
{
    struct snd_cs4231 *chip = card->private_data;
    int err;

    spin_lock_init(&chip->lock);
    spin_lock_init(&chip->c_dma.ebus_info.lock);
    spin_lock_init(&chip->p_dma.ebus_info.lock);
    mutex_init(&chip->mce_mutex);
    mutex_init(&chip->open_mutex);
    chip->flags |= CS4231_FLAG_EBUS;
    chip->op = op;
    memcpy(&chip->image, &snd_cs4231_original_image,
           sizeof(snd_cs4231_original_image));
    strcpy(chip->c_dma.ebus_info.name, "cs4231(capture)");
    chip->c_dma.ebus_info.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
    chip->c_dma.ebus_info.callback = snd_cs4231_ebus_capture_callback;
    chip->c_dma.ebus_info.client_cookie = chip;
    chip->c_dma.ebus_info.irq = op->archdata.irqs[0];
    strcpy(chip->p_dma.ebus_info.name, "cs4231(play)");
    chip->p_dma.ebus_info.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
    chip->p_dma.ebus_info.callback = snd_cs4231_ebus_play_callback;
    chip->p_dma.ebus_info.client_cookie = chip;
    chip->p_dma.ebus_info.irq = op->archdata.irqs[1];

    chip->p_dma.prepare = _ebus_dma_prepare;
    chip->p_dma.enable = _ebus_dma_enable;
    chip->p_dma.request = _ebus_dma_request;
    chip->p_dma.address = _ebus_dma_addr;

    chip->c_dma.prepare = _ebus_dma_prepare;
    chip->c_dma.enable = _ebus_dma_enable;
    chip->c_dma.request = _ebus_dma_request;
    chip->c_dma.address = _ebus_dma_addr;

    chip->port = of_ioremap(&op->resource[0], 0, 0x10, "cs4231");
    chip->p_dma.ebus_info.regs =
        of_ioremap(&op->resource[1], 0, 0x10, "cs4231_pdma");
    chip->c_dma.ebus_info.regs =
        of_ioremap(&op->resource[2], 0, 0x10, "cs4231_cdma");
    if (!chip->port || !chip->p_dma.ebus_info.regs ||
        !chip->c_dma.ebus_info.regs) {
        snd_cs4231_ebus_free(chip);
        dev_dbg(chip->card->dev,
                "cs4231-%d: Unable to map chip registers.\n", dev);
        return -EIO;
    }

    if (ebus_dma_register(&chip->c_dma.ebus_info)) {
        snd_cs4231_ebus_free(chip);
        dev_dbg(chip->card->dev,
                "cs4231-%d: Unable to register EBUS capture DMA\n", dev);
        return -EBUSY;
    }
    if (ebus_dma_irq_enable(&chip->c_dma.ebus_info, 1)) {
        snd_cs4231_ebus_free(chip);
        dev_dbg(chip->card->dev,
                "cs4231-%d: Unable to enable EBUS capture IRQ\n", dev);
        return -EBUSY;
    }

    if (ebus_dma_register(&chip->p_dma.ebus_info)) {
        snd_cs4231_ebus_free(chip);
        dev_dbg(chip->card->dev,
                "cs4231-%d: Unable to register EBUS play DMA\n", dev);
        return -EBUSY;
    }
    if (ebus_dma_irq_enable(&chip->p_dma.ebus_info, 1)) {
        snd_cs4231_ebus_free(chip);
        dev_dbg(chip->card->dev,
                "cs4231-%d: Unable to enable EBUS play IRQ\n", dev);
        return -EBUSY;
    }

    if (snd_cs4231_probe(chip) < 0) {
        snd_cs4231_ebus_free(chip);
        return -ENODEV;
    }
    snd_cs4231_init(chip);

    err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip,
                         &snd_cs4231_ebus_dev_ops);
    if (err < 0) {
        snd_cs4231_ebus_free(chip);
        return err;
    }

    return 0;
}

/* Platform probe path for EBUS devices. */
static int cs4231_ebus_probe(struct platform_device *op)
{
    struct snd_card *card;
    int err;

    err = cs4231_attach_begin(op, &card);
    if (err)
        return err;

    sprintf(card->longname, "%s at 0x%llx, irq %d",
            card->shortname,
            op->resource[0].start,
            op->archdata.irqs[0]);

    err = snd_cs4231_ebus_create(card, op, dev);
    if (err < 0) {
        snd_card_free(card);
        return err;
    }

    return cs4231_attach_finish(card);
}
#endif

/* Dispatch to the EBUS or SBUS attach path based on the parent node name. */
static int cs4231_probe(struct platform_device *op)
{
#ifdef EBUS_SUPPORT
    if (of_node_name_eq(op->dev.of_node->parent, "ebus"))
        return cs4231_ebus_probe(op);
#endif
#ifdef SBUS_SUPPORT
    if (of_node_name_eq(op->dev.of_node->parent, "sbus") ||
        of_node_name_eq(op->dev.of_node->parent, "sbi"))
        return cs4231_sbus_probe(op);
#endif
    return -ENODEV;
}

/* Platform remove: snd_card_free tears everything down via dev_free ops. */
static void cs4231_remove(struct platform_device *op)
{
    struct snd_cs4231 *chip = dev_get_drvdata(&op->dev);

    snd_card_free(chip->card);
}

static const struct of_device_id cs4231_match[] = {
    {
        .name = "SUNW,CS4231",
    },
    {
        .name = "audio",
        .compatible = "SUNW,CS4231",
    },
    {},
};

MODULE_DEVICE_TABLE(of, cs4231_match);

static struct platform_driver cs4231_driver = {
    .driver = {
        .name = "audio",
        .of_match_table = cs4231_match,
    },
    .probe      = cs4231_probe,
    .remove     = cs4231_remove,
};

module_platform_driver(cs4231_driver);
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2017 NXP * Copyright 2016 Freescale Semiconductor, Inc. */ #include <linux/bitfield.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/perf_event.h> #include <linux/platform_device.h> #include <linux/slab.h> #define COUNTER_CNTL 0x0 #define COUNTER_READ 0x20 #define COUNTER_DPCR1 0x30 #define COUNTER_MUX_CNTL 0x50 #define COUNTER_MASK_COMP 0x54 #define CNTL_OVER 0x1 #define CNTL_CLEAR 0x2 #define CNTL_EN 0x4 #define CNTL_EN_MASK 0xFFFFFFFB #define CNTL_CLEAR_MASK 0xFFFFFFFD #define CNTL_OVER_MASK 0xFFFFFFFE #define CNTL_CP_SHIFT 16 #define CNTL_CP_MASK (0xFF << CNTL_CP_SHIFT) #define CNTL_CSV_SHIFT 24 #define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT) #define READ_PORT_SHIFT 0 #define READ_PORT_MASK (0x7 << READ_PORT_SHIFT) #define READ_CHANNEL_REVERT 0x00000008 /* bit 3 for read channel select */ #define WRITE_PORT_SHIFT 8 #define WRITE_PORT_MASK (0x7 << WRITE_PORT_SHIFT) #define WRITE_CHANNEL_REVERT 0x00000800 /* bit 11 for write channel select */ #define EVENT_CYCLES_ID 0 #define EVENT_CYCLES_COUNTER 0 #define NUM_COUNTERS 4 /* For removing bias if cycle counter CNTL.CP is set to 0xf0 */ #define CYCLES_COUNTER_MASK 0x0FFFFFFF #define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */ #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu) #define DDR_PERF_DEV_NAME "imx8_ddr" #define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu" static DEFINE_IDA(ddr_ida); /* DDR Perf hardware feature */ #define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */ #define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */ #define DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER 0x4 /* support AXI ID PORT CHANNEL filter */ struct fsl_ddr_devtype_data { unsigned int quirks; /* quirks needed for different DDR Perf core */ const char *identifier; /* system PMU identifier for 
userspace */ }; static const struct fsl_ddr_devtype_data imx8_devtype_data; static const struct fsl_ddr_devtype_data imx8m_devtype_data = { .quirks = DDR_CAP_AXI_ID_FILTER, }; static const struct fsl_ddr_devtype_data imx8mq_devtype_data = { .quirks = DDR_CAP_AXI_ID_FILTER, .identifier = "i.MX8MQ", }; static const struct fsl_ddr_devtype_data imx8mm_devtype_data = { .quirks = DDR_CAP_AXI_ID_FILTER, .identifier = "i.MX8MM", }; static const struct fsl_ddr_devtype_data imx8mn_devtype_data = { .quirks = DDR_CAP_AXI_ID_FILTER, .identifier = "i.MX8MN", }; static const struct fsl_ddr_devtype_data imx8mp_devtype_data = { .quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED, .identifier = "i.MX8MP", }; static const struct fsl_ddr_devtype_data imx8dxl_devtype_data = { .quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER, .identifier = "i.MX8DXL", }; static const struct of_device_id imx_ddr_pmu_dt_ids[] = { { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data}, { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data}, { .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data}, { .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data}, { .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data}, { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data}, { .compatible = "fsl,imx8dxl-ddr-pmu", .data = &imx8dxl_devtype_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); struct ddr_pmu { struct pmu pmu; void __iomem *base; unsigned int cpu; struct hlist_node node; struct device *dev; struct perf_event *events[NUM_COUNTERS]; enum cpuhp_state cpuhp_state; const struct fsl_ddr_devtype_data *devtype_data; int irq; int id; int active_counter; }; static ssize_t ddr_perf_identifier_show(struct device *dev, struct device_attribute *attr, char *page) { struct ddr_pmu *pmu = dev_get_drvdata(dev); return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier); } static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj, 
struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); struct ddr_pmu *pmu = dev_get_drvdata(dev); if (!pmu->devtype_data->identifier) return 0; return attr->mode; }; static struct device_attribute ddr_perf_identifier_attr = __ATTR(identifier, 0444, ddr_perf_identifier_show, NULL); static struct attribute *ddr_perf_identifier_attrs[] = { &ddr_perf_identifier_attr.attr, NULL, }; static const struct attribute_group ddr_perf_identifier_attr_group = { .attrs = ddr_perf_identifier_attrs, .is_visible = ddr_perf_identifier_attr_visible, }; enum ddr_perf_filter_capabilities { PERF_CAP_AXI_ID_FILTER = 0, PERF_CAP_AXI_ID_FILTER_ENHANCED, PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER, PERF_CAP_AXI_ID_FEAT_MAX, }; static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap) { u32 quirks = pmu->devtype_data->quirks; switch (cap) { case PERF_CAP_AXI_ID_FILTER: return !!(quirks & DDR_CAP_AXI_ID_FILTER); case PERF_CAP_AXI_ID_FILTER_ENHANCED: quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED; return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED; case PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER: return !!(quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER); default: WARN(1, "unknown filter cap %d\n", cap); } return 0; } static ssize_t ddr_perf_filter_cap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ddr_pmu *pmu = dev_get_drvdata(dev); struct dev_ext_attribute *ea = container_of(attr, struct dev_ext_attribute, attr); int cap = (long)ea->var; return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap)); } #define PERF_EXT_ATTR_ENTRY(_name, _func, _var) \ (&((struct dev_ext_attribute) { \ __ATTR(_name, 0444, _func, NULL), (void *)_var \ }).attr.attr) #define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var) \ PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var) static struct attribute *ddr_perf_filter_cap_attr[] = { PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER), PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED), 
PERF_FILTER_EXT_ATTR_ENTRY(super_filter, PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER), NULL, }; static const struct attribute_group ddr_perf_filter_cap_attr_group = { .name = "caps", .attrs = ddr_perf_filter_cap_attr, }; static ssize_t ddr_perf_cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ddr_pmu *pmu = dev_get_drvdata(dev); return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); } static struct device_attribute ddr_perf_cpumask_attr = __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL); static struct attribute *ddr_perf_cpumask_attrs[] = { &ddr_perf_cpumask_attr.attr, NULL, }; static const struct attribute_group ddr_perf_cpumask_attr_group = { .attrs = ddr_perf_cpumask_attrs, }; static ssize_t ddr_pmu_event_show(struct device *dev, struct device_attribute *attr, char *page) { struct perf_pmu_events_attr *pmu_attr; pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); } #define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \ PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id) static struct attribute *ddr_perf_events_attrs[] = { IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID), IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01), IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04), IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05), IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08), IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09), IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10), IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11), IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12), IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20), IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21), IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22), IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23), IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24), IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25), IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26), IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27), IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 
0x29), IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a), IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b), IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30), IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31), IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32), IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33), IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34), IMX8_DDR_PMU_EVENT_ATTR(read, 0x35), IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36), IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37), IMX8_DDR_PMU_EVENT_ATTR(write, 0x38), IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39), IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41), IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42), NULL, }; static const struct attribute_group ddr_perf_events_attr_group = { .name = "events", .attrs = ddr_perf_events_attrs, }; PMU_FORMAT_ATTR(event, "config:0-7"); PMU_FORMAT_ATTR(axi_id, "config1:0-15"); PMU_FORMAT_ATTR(axi_mask, "config1:16-31"); PMU_FORMAT_ATTR(axi_port, "config2:0-2"); PMU_FORMAT_ATTR(axi_channel, "config2:3-3"); static struct attribute *ddr_perf_format_attrs[] = { &format_attr_event.attr, &format_attr_axi_id.attr, &format_attr_axi_mask.attr, &format_attr_axi_port.attr, &format_attr_axi_channel.attr, NULL, }; static const struct attribute_group ddr_perf_format_attr_group = { .name = "format", .attrs = ddr_perf_format_attrs, }; static const struct attribute_group *attr_groups[] = { &ddr_perf_events_attr_group, &ddr_perf_format_attr_group, &ddr_perf_cpumask_attr_group, &ddr_perf_filter_cap_attr_group, &ddr_perf_identifier_attr_group, NULL, }; static bool ddr_perf_is_filtered(struct perf_event *event) { return event->attr.config == 0x41 || event->attr.config == 0x42; } static u32 ddr_perf_filter_val(struct perf_event *event) { return event->attr.config1; } static bool ddr_perf_filters_compatible(struct perf_event *a, struct perf_event *b) { if (!ddr_perf_is_filtered(a)) return true; if (!ddr_perf_is_filtered(b)) return true; return ddr_perf_filter_val(a) == ddr_perf_filter_val(b); } static bool ddr_perf_is_enhanced_filtered(struct perf_event *event) { 
unsigned int filt; struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED; return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) && ddr_perf_is_filtered(event); } static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event) { int i; /* * Always map cycle event to counter 0 * Cycles counter is dedicated for cycle event * can't used for the other events */ if (event == EVENT_CYCLES_ID) { if (pmu->events[EVENT_CYCLES_COUNTER] == NULL) return EVENT_CYCLES_COUNTER; else return -ENOENT; } for (i = 1; i < NUM_COUNTERS; i++) { if (pmu->events[i] == NULL) return i; } return -ENOENT; } static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter) { pmu->events[counter] = NULL; } static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter) { struct perf_event *event = pmu->events[counter]; void __iomem *base = pmu->base; /* * return bytes instead of bursts from ddr transaction for * axid-read and axid-write event if PMU core supports enhanced * filter. */ base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 : COUNTER_READ; return readl_relaxed(base + counter * 4); } static int ddr_perf_event_init(struct perf_event *event) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct perf_event *sibling; if (event->attr.type != event->pmu->type) return -ENOENT; if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EOPNOTSUPP; if (event->cpu < 0) { dev_warn(pmu->dev, "Can't provide per-task data!\n"); return -EOPNOTSUPP; } /* * We must NOT create groups containing mixed PMUs, although software * events are acceptable (for example to create a CCN group * periodically read when a hrtimer aka cpu-clock leader triggers). 
*/ if (event->group_leader->pmu != event->pmu && !is_software_event(event->group_leader)) return -EINVAL; if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { if (!ddr_perf_filters_compatible(event, event->group_leader)) return -EINVAL; for_each_sibling_event(sibling, event->group_leader) { if (!ddr_perf_filters_compatible(event, sibling)) return -EINVAL; } } for_each_sibling_event(sibling, event->group_leader) { if (sibling->pmu != event->pmu && !is_software_event(sibling)) return -EINVAL; } event->cpu = pmu->cpu; hwc->idx = -1; return 0; } static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, int counter, bool enable) { u8 reg = counter * 4 + COUNTER_CNTL; int val; if (enable) { /* * cycle counter is special which should firstly write 0 then * write 1 into CLEAR bit to clear it. Other counters only * need write 0 into CLEAR bit and it turns out to be 1 by * hardware. Below enable flow is harmless for all counters. */ writel(0, pmu->base + reg); val = CNTL_EN | CNTL_CLEAR; val |= FIELD_PREP(CNTL_CSV_MASK, config); /* * On i.MX8MP we need to bias the cycle counter to overflow more often. * We do this by initializing bits [23:16] of the counter value via the * COUNTER_CTRL Counter Parameter (CP) field. 
*/ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) { if (counter == EVENT_CYCLES_COUNTER) val |= FIELD_PREP(CNTL_CP_MASK, 0xf0); } writel(val, pmu->base + reg); } else { /* Disable counter */ val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK; writel(val, pmu->base + reg); } } static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter) { int val; val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL); return val & CNTL_OVER; } static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter) { u8 reg = counter * 4 + COUNTER_CNTL; int val; val = readl_relaxed(pmu->base + reg); val &= ~CNTL_CLEAR; writel(val, pmu->base + reg); val |= CNTL_CLEAR; writel(val, pmu->base + reg); } static void ddr_perf_event_update(struct perf_event *event) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; u64 new_raw_count; int counter = hwc->idx; int ret; new_raw_count = ddr_perf_read_counter(pmu, counter); /* Remove the bias applied in ddr_perf_counter_enable(). */ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) { if (counter == EVENT_CYCLES_COUNTER) new_raw_count &= CYCLES_COUNTER_MASK; } local64_add(new_raw_count, &event->count); /* * For legacy SoCs: event counter continue counting when overflow, * no need to clear the counter. * For new SoCs: event counter stop counting when overflow, need * clear counter to let it count again. 
*/ if (counter != EVENT_CYCLES_COUNTER) { ret = ddr_perf_counter_overflow(pmu, counter); if (ret) dev_warn_ratelimited(pmu->dev, "events lost due to counter overflow (config 0x%llx)\n", event->attr.config); } /* clear counter every time for both cycle counter and event counter */ ddr_perf_counter_clear(pmu, counter); } static void ddr_perf_event_start(struct perf_event *event, int flags) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; local64_set(&hwc->prev_count, 0); ddr_perf_counter_enable(pmu, event->attr.config, counter, true); if (!pmu->active_counter++) ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID, EVENT_CYCLES_COUNTER, true); hwc->state = 0; } static int ddr_perf_event_add(struct perf_event *event, int flags) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int counter; int cfg = event->attr.config; int cfg1 = event->attr.config1; int cfg2 = event->attr.config2; if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { int i; for (i = 1; i < NUM_COUNTERS; i++) { if (pmu->events[i] && !ddr_perf_filters_compatible(event, pmu->events[i])) return -EINVAL; } if (ddr_perf_is_filtered(event)) { /* revert axi id masking(axi_mask) value */ cfg1 ^= AXI_MASKING_REVERT; writel(cfg1, pmu->base + COUNTER_DPCR1); } } counter = ddr_perf_alloc_counter(pmu, cfg); if (counter < 0) { dev_dbg(pmu->dev, "There are not enough counters\n"); return -EOPNOTSUPP; } if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER) { if (ddr_perf_is_filtered(event)) { /* revert axi id masking(axi_mask) value */ cfg1 ^= AXI_MASKING_REVERT; writel(cfg1, pmu->base + COUNTER_MASK_COMP + ((counter - 1) << 4)); if (cfg == 0x41) { /* revert axi read channel(axi_channel) value */ cfg2 ^= READ_CHANNEL_REVERT; cfg2 |= FIELD_PREP(READ_PORT_MASK, cfg2); } else { /* revert axi write channel(axi_channel) value */ cfg2 ^= WRITE_CHANNEL_REVERT; cfg2 |= FIELD_PREP(WRITE_PORT_MASK, cfg2); } 
writel(cfg2, pmu->base + COUNTER_MUX_CNTL + ((counter - 1) << 4)); } } pmu->events[counter] = event; hwc->idx = counter; hwc->state |= PERF_HES_STOPPED; if (flags & PERF_EF_START) ddr_perf_event_start(event, flags); return 0; } static void ddr_perf_event_stop(struct perf_event *event, int flags) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; ddr_perf_counter_enable(pmu, event->attr.config, counter, false); ddr_perf_event_update(event); if (!--pmu->active_counter) ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID, EVENT_CYCLES_COUNTER, false); hwc->state |= PERF_HES_STOPPED; } static void ddr_perf_event_del(struct perf_event *event, int flags) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; ddr_perf_event_stop(event, PERF_EF_UPDATE); ddr_perf_free_counter(pmu, counter); hwc->idx = -1; } static void ddr_perf_pmu_enable(struct pmu *pmu) { } static void ddr_perf_pmu_disable(struct pmu *pmu) { } static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, struct device *dev) { *pmu = (struct ddr_pmu) { .pmu = (struct pmu) { .module = THIS_MODULE, .parent = dev, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, .task_ctx_nr = perf_invalid_context, .attr_groups = attr_groups, .event_init = ddr_perf_event_init, .add = ddr_perf_event_add, .del = ddr_perf_event_del, .start = ddr_perf_event_start, .stop = ddr_perf_event_stop, .read = ddr_perf_event_update, .pmu_enable = ddr_perf_pmu_enable, .pmu_disable = ddr_perf_pmu_disable, }, .base = base, .dev = dev, }; pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL); return pmu->id; } static irqreturn_t ddr_perf_irq_handler(int irq, void *p) { int i; struct ddr_pmu *pmu = (struct ddr_pmu *) p; struct perf_event *event; /* all counter will stop if cycle counter disabled */ ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID, EVENT_CYCLES_COUNTER, false); /* * When the cycle counter overflows, all counters are stopped, * and an IRQ 
is raised. If any other counter overflows, it * continues counting, and no IRQ is raised. But for new SoCs, * such as i.MX8MP, event counter would stop when overflow, so * we need use cycle counter to stop overflow of event counter. * * Cycles occur at least 4 times as often as other events, so we * can update all events on a cycle counter overflow and not * lose events. * */ for (i = 0; i < NUM_COUNTERS; i++) { if (!pmu->events[i]) continue; event = pmu->events[i]; ddr_perf_event_update(event); } ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID, EVENT_CYCLES_COUNTER, true); return IRQ_HANDLED; } static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node) { struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node); int target; if (cpu != pmu->cpu) return 0; target = cpumask_any_but(cpu_online_mask, cpu); if (target >= nr_cpu_ids) return 0; perf_pmu_migrate_context(&pmu->pmu, cpu, target); pmu->cpu = target; WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu))); return 0; } static int ddr_perf_probe(struct platform_device *pdev) { struct ddr_pmu *pmu; struct device_node *np; void __iomem *base; char *name; int num; int ret; int irq; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); np = pdev->dev.of_node; pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); if (!pmu) return -ENOMEM; num = ddr_perf_init(pmu, base, &pdev->dev); platform_set_drvdata(pdev, pmu); name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", num); if (!name) { ret = -ENOMEM; goto cpuhp_state_err; } pmu->devtype_data = of_device_get_match_data(&pdev->dev); pmu->cpu = raw_smp_processor_id(); ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DDR_CPUHP_CB_NAME, NULL, ddr_perf_offline_cpu); if (ret < 0) { dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n"); goto cpuhp_state_err; } pmu->cpuhp_state = ret; /* Register the pmu instance for cpu hotplug */ ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, 
					       &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq, ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME, pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	/* Keep the IRQ on the CPU that owns the perf context. */
	ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

	/* Unwind in reverse order of setup; each label falls through. */
ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_free(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static void ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	/* Mirror of the probe unwind path, plus PMU unregistration. */
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);

	perf_pmu_unregister(&pmu->pmu);

	ida_free(&ddr_ida, pmu->id);
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver = {
		.name = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
		.suppress_bind_attrs = true,
	},
	.probe = ddr_perf_probe,
	.remove = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_DESCRIPTION("Freescale i.MX8 DDR Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
/* * Cryptographic API. * * Glue code for the SHA512 Secure Hash Algorithm assembler * implementation using supplemental SSE3 / AVX / AVX2 instructions. * * This file is based on sha512_generic.c * * Copyright (C) 2013 Intel Corporation * Author: Tim Chen <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> #include <asm/cpu_device_id.h> #include <asm/simd.h> asmlinkage void sha512_transform_ssse3(struct sha512_state *state, const u8 *data, int blocks); static int sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len, sha512_block_fn *sha512_xform) { struct sha512_state *sctx = shash_desc_ctx(desc); if (!crypto_simd_usable() || (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) return crypto_sha512_update(desc, data, len); /* * Make sure struct sha512_state begins directly with the SHA512 * 512-bit internal state, as this is what the asm functions expect. 
*/ BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0); kernel_fpu_begin(); sha512_base_do_update(desc, data, len, sha512_xform); kernel_fpu_end(); return 0; } static int sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out, sha512_block_fn *sha512_xform) { if (!crypto_simd_usable()) return crypto_sha512_finup(desc, data, len, out); kernel_fpu_begin(); if (len) sha512_base_do_update(desc, data, len, sha512_xform); sha512_base_do_finalize(desc, sha512_xform); kernel_fpu_end(); return sha512_base_finish(desc, out); } static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { return sha512_update(desc, data, len, sha512_transform_ssse3); } static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return sha512_finup(desc, data, len, out, sha512_transform_ssse3); } /* Add padding and return the message digest. */ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out) { return sha512_ssse3_finup(desc, NULL, 0, out); } static struct shash_alg sha512_ssse3_algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_ssse3_update, .final = sha512_ssse3_final, .finup = sha512_ssse3_finup, .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-ssse3", .cra_priority = 150, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }, { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_ssse3_update, .final = sha512_ssse3_final, .finup = sha512_ssse3_finup, .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name = "sha384-ssse3", .cra_priority = 150, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } } }; static int register_sha512_ssse3(void) { if (boot_cpu_has(X86_FEATURE_SSSE3)) return crypto_register_shashes(sha512_ssse3_algs, ARRAY_SIZE(sha512_ssse3_algs)); return 0; } static void 
unregister_sha512_ssse3(void) { if (boot_cpu_has(X86_FEATURE_SSSE3)) crypto_unregister_shashes(sha512_ssse3_algs, ARRAY_SIZE(sha512_ssse3_algs)); } asmlinkage void sha512_transform_avx(struct sha512_state *state, const u8 *data, int blocks); static bool avx_usable(void) { if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { if (boot_cpu_has(X86_FEATURE_AVX)) pr_info("AVX detected but unusable.\n"); return false; } return true; } static int sha512_avx_update(struct shash_desc *desc, const u8 *data, unsigned int len) { return sha512_update(desc, data, len, sha512_transform_avx); } static int sha512_avx_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return sha512_finup(desc, data, len, out, sha512_transform_avx); } /* Add padding and return the message digest. */ static int sha512_avx_final(struct shash_desc *desc, u8 *out) { return sha512_avx_finup(desc, NULL, 0, out); } static struct shash_alg sha512_avx_algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_avx_update, .final = sha512_avx_final, .finup = sha512_avx_finup, .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-avx", .cra_priority = 160, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }, { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_avx_update, .final = sha512_avx_final, .finup = sha512_avx_finup, .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name = "sha384-avx", .cra_priority = 160, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } } }; static int register_sha512_avx(void) { if (avx_usable()) return crypto_register_shashes(sha512_avx_algs, ARRAY_SIZE(sha512_avx_algs)); return 0; } static void unregister_sha512_avx(void) { if (avx_usable()) crypto_unregister_shashes(sha512_avx_algs, ARRAY_SIZE(sha512_avx_algs)); } asmlinkage void sha512_transform_rorx(struct sha512_state 
*state, const u8 *data, int blocks); static int sha512_avx2_update(struct shash_desc *desc, const u8 *data, unsigned int len) { return sha512_update(desc, data, len, sha512_transform_rorx); } static int sha512_avx2_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return sha512_finup(desc, data, len, out, sha512_transform_rorx); } /* Add padding and return the message digest. */ static int sha512_avx2_final(struct shash_desc *desc, u8 *out) { return sha512_avx2_finup(desc, NULL, 0, out); } static struct shash_alg sha512_avx2_algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_avx2_update, .final = sha512_avx2_final, .finup = sha512_avx2_finup, .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-avx2", .cra_priority = 170, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }, { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_avx2_update, .final = sha512_avx2_final, .finup = sha512_avx2_finup, .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name = "sha384-avx2", .cra_priority = 170, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } } }; static bool avx2_usable(void) { if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2)) return true; return false; } static int register_sha512_avx2(void) { if (avx2_usable()) return crypto_register_shashes(sha512_avx2_algs, ARRAY_SIZE(sha512_avx2_algs)); return 0; } static const struct x86_cpu_id module_cpu_ids[] = { X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids); static void unregister_sha512_avx2(void) { if (avx2_usable()) crypto_unregister_shashes(sha512_avx2_algs, ARRAY_SIZE(sha512_avx2_algs)); } static int __init sha512_ssse3_mod_init(void) { if 
(!x86_match_cpu(module_cpu_ids))
		return -ENODEV;

	/*
	 * Register implementations in increasing-priority order
	 * (ssse3 -> avx -> avx2); on any failure, unregister the
	 * variants already registered before bailing out.
	 */
	if (register_sha512_ssse3())
		goto fail;

	if (register_sha512_avx()) {
		unregister_sha512_ssse3();
		goto fail;
	}

	if (register_sha512_avx2()) {
		unregister_sha512_avx();
		unregister_sha512_ssse3();
		goto fail;
	}
	return 0;
fail:
	return -ENODEV;
}

static void __exit sha512_ssse3_mod_fini(void)
{
	/* Each helper re-checks its CPU feature before unregistering. */
	unregister_sha512_avx2();
	unregister_sha512_avx();
	unregister_sha512_ssse3();
}

module_init(sha512_ssse3_mod_init);
module_exit(sha512_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS_CRYPTO("sha512");
MODULE_ALIAS_CRYPTO("sha512-ssse3");
MODULE_ALIAS_CRYPTO("sha512-avx");
MODULE_ALIAS_CRYPTO("sha512-avx2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha384-ssse3");
MODULE_ALIAS_CRYPTO("sha384-avx");
MODULE_ALIAS_CRYPTO("sha384-avx2");
/* * QorIQ I2C device tree stub [ controller @ offset 0x118000 ] * * Copyright 2011 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* I2C controller 0 @ CCSR offset 0x118000 (see file header) */
i2c@118000 {
	#address-cells = <1>;
	#size-cells = <0>;
	cell-index = <0>;
	compatible = "fsl-i2c";
	reg = <0x118000 0x100>;
	/* NOTE(review): both controllers here report IRQ 38, i.e. they
	 * appear to share one interrupt line — confirm against the SoC
	 * reference manual. */
	interrupts = <38 2 0 0>;
	dfsrr;	/* presumably enables the digital filter sampling rate
		 * register — verify against the fsl-i2c binding */
};

/* I2C controller 1 @ CCSR offset 0x118100 */
i2c@118100 {
	#address-cells = <1>;
	#size-cells = <0>;
	cell-index = <1>;
	compatible = "fsl-i2c";
	reg = <0x118100 0x100>;
	interrupts = <38 2 0 0>;	/* shared with i2c@118000 — see above */
	dfsrr;
};
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
 */

#ifndef __SOC_QCOM_TCS_H__
#define __SOC_QCOM_TCS_H__

/* Upper bound on tcs_cmd entries carried in one request payload —
 * presumably sized to the hardware TCS depth; confirm against the
 * rpmh-rsc driver. */
#define MAX_RPMH_PAYLOAD	16

/**
 * rpmh_state: state for the request
 *
 * RPMH_SLEEP_STATE: State of the resource when the processor subsystem
 *                   is powered down. There is no client using the
 *                   resource actively.
 * RPMH_WAKE_ONLY_STATE: Resume resource state to the value previously
 *                       requested before the processor was powered down.
 * RPMH_ACTIVE_ONLY_STATE: Active or AMC mode requests. Resource state
 *                         is aggregated immediately.
 */
enum rpmh_state {
	RPMH_SLEEP_STATE,
	RPMH_WAKE_ONLY_STATE,
	RPMH_ACTIVE_ONLY_STATE,
};

/**
 * struct tcs_cmd: an individual request to RPMH.
 *
 * @addr: the address of the resource slv_id:18:16 | offset:0:15
 * @data: the resource state request
 * @wait: ensure that this command is complete before returning.
 *	  Setting "wait" here only makes sense during rpmh_write_batch() for
 *	  active-only transfers, this is because:
 *	  rpmh_write() - Always waits.
 *			(DEFINE_RPMH_MSG_ONSTACK will set .wait_for_compl)
 *	  rpmh_write_async() - Never waits.
 *			(There's no request completion callback)
 */
struct tcs_cmd {
	u32 addr;
	u32 data;
	u32 wait;
};

/**
 * struct tcs_request: A set of tcs_cmds sent together in a TCS
 *
 * @state: state for the request.
* @wait_for_compl: wait until we get a response from the h/w accelerator * (same as setting cmd->wait for all commands in the request) * @num_cmds: the number of @cmds in this request * @cmds: an array of tcs_cmds */ struct tcs_request { enum rpmh_state state; u32 wait_for_compl; u32 num_cmds; struct tcs_cmd *cmds; }; #define BCM_TCS_CMD_COMMIT_SHFT 30 #define BCM_TCS_CMD_COMMIT_MASK 0x40000000 #define BCM_TCS_CMD_VALID_SHFT 29 #define BCM_TCS_CMD_VALID_MASK 0x20000000 #define BCM_TCS_CMD_VOTE_X_SHFT 14 #define BCM_TCS_CMD_VOTE_MASK 0x3fff #define BCM_TCS_CMD_VOTE_Y_SHFT 0 #define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000 /* Construct a Bus Clock Manager (BCM) specific TCS command */ #define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \ ((valid) << BCM_TCS_CMD_VALID_SHFT) | \ ((cpu_to_le32(vote_x) & \ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \ ((cpu_to_le32(vote_y) & \ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) #endif /* __SOC_QCOM_TCS_H__ */
/* bnx2x_sriov.h: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation * Copyright 2014 QLogic Corporation * All rights reserved * * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other QLogic software provided under a * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior <[email protected]> * Written by: Shmulik Ravid * Ariel Elior <[email protected]> */ #ifndef BNX2X_SRIOV_H #define BNX2X_SRIOV_H #include "bnx2x_vfpf.h" #include "bnx2x.h" enum sample_bulletin_result { PFVF_BULLETIN_UNCHANGED, PFVF_BULLETIN_UPDATED, PFVF_BULLETIN_CRC_ERR }; #ifdef CONFIG_BNX2X_SRIOV extern struct workqueue_struct *bnx2x_iov_wq; /* The bnx2x device structure holds vfdb structure described below. * The VF array is indexed by the relative vfid. 
*/ #define BNX2X_VF_MAX_QUEUES 16 #define BNX2X_VF_MAX_TPA_AGG_QUEUES 8 struct bnx2x_sriov { u32 first_vf_in_pf; /* standard SRIOV capability fields, mostly for debugging */ int pos; /* capability position */ int nres; /* number of resources */ u32 cap; /* SR-IOV Capabilities */ u16 ctrl; /* SR-IOV Control */ u16 total; /* total VFs associated with the PF */ u16 initial; /* initial VFs associated with the PF */ u16 nr_virtfn; /* number of VFs available */ u16 offset; /* first VF Routing ID offset */ u16 stride; /* following VF stride */ u32 pgsz; /* page size for BAR alignment */ u8 link; /* Function Dependency Link */ }; /* bars */ struct bnx2x_vf_bar { u64 bar; u32 size; }; struct bnx2x_vf_bar_info { struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; u8 nr_bars; }; /* vf queue (used both for rx or tx) */ struct bnx2x_vf_queue { struct eth_context *cxt; /* MACs object */ struct bnx2x_vlan_mac_obj mac_obj; /* VLANs object */ struct bnx2x_vlan_mac_obj vlan_obj; /* VLAN-MACs object */ struct bnx2x_vlan_mac_obj vlan_mac_obj; unsigned long accept_flags; /* last accept flags configured */ /* Queue Slow-path State object */ struct bnx2x_queue_sp_obj sp_obj; u32 cid; u16 index; u16 sb_idx; bool is_leading; bool sp_initialized; }; /* struct bnx2x_vf_queue_construct_params - prepare queue construction * parameters: q-init, q-setup and SB index */ struct bnx2x_vf_queue_construct_params { struct bnx2x_queue_state_params qstate; struct bnx2x_queue_setup_params prep_qsetup; }; /* forward */ struct bnx2x_virtf; /* VFOP definitions */ struct bnx2x_vf_mac_vlan_filter { int type; #define BNX2X_VF_FILTER_MAC BIT(0) #define BNX2X_VF_FILTER_VLAN BIT(1) #define BNX2X_VF_FILTER_VLAN_MAC \ (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/ bool add; bool applied; u8 *mac; u16 vid; }; struct bnx2x_vf_mac_vlan_filters { int count; struct bnx2x_vf_mac_vlan_filter filters[]; }; /* vf context */ struct bnx2x_virtf { u16 cfg_flags; #define VF_CFG_STATS_COALESCE 0x1 #define 
VF_CFG_EXT_BULLETIN 0x2 #define VF_CFG_VLAN_FILTER 0x4 u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO * IFLA_VF_LINK_STATE_ENABLE * IFLA_VF_LINK_STATE_DISABLE */ u8 state; #define VF_FREE 0 /* VF ready to be acquired holds no resc */ #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ #define VF_LOST 4 /* Recovery while VFs are loaded */ bool flr_clnup_stage; /* true during flr cleanup */ bool malicious; /* true if FW indicated so, until FLR */ /* 1(true) if spoof check is enabled */ u8 spoofchk; /* dma */ dma_addr_t fw_stat_map; u16 stats_stride; dma_addr_t bulletin_map; /* Allocated resources counters. Before the VF is acquired, the * counters hold the following values: * * - xxq_count = 0 as the queues memory is not allocated yet. * * - sb_count = The number of status blocks configured for this VF in * the IGU CAM. Initially read during probe. * * - xx_rules_count = The number of rules statically and equally * allocated for each VF, during PF load. 
*/ struct vf_pf_resc_request alloc_resc; #define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs) #define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs) #define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs) #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) u8 sb_count; /* actual number of SBs */ u8 igu_base_id; /* base igu status block id */ struct bnx2x_vf_queue *vfqs; #define LEADING_IDX 0 #define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX) #define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) #define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) u8 index; /* index in the vf array */ u8 abs_vfid; u8 sp_cl_id; u32 error; /* 0 means all's-well */ /* BDF */ unsigned int domain; unsigned int bus; unsigned int devfn; /* bars */ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; /* set-mac ramrod state 1-pending, 0-done */ unsigned long filter_state; /* leading rss client id ~~ the client id of the first rxq, must be * set for each txq. 
*/ int leading_rss; /* MCAST object */ struct bnx2x_mcast_obj mcast_obj; /* RSS configuration object */ struct bnx2x_rss_config_obj rss_conf_obj; /* slow-path operations */ struct mutex op_mutex; /* one vfop at a time mutex */ enum channel_tlvs op_current; u8 fp_hsi; struct bnx2x_credit_pool_obj vf_vlans_pool; struct bnx2x_credit_pool_obj vf_macs_pool; }; #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) #define for_each_vf(bp, var) \ for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++) #define for_each_vfq(vf, var) \ for ((var) = 0; (var) < vf_rxq_count(vf); (var)++) #define for_each_vf_sb(vf, var) \ for ((var) = 0; (var) < vf_sb_count(vf); (var)++) #define is_vf_multi(vf) (vf_rxq_count(vf) > 1) #define HW_VF_HANDLE(bp, abs_vfid) \ (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4)) #define FW_PF_MAX_HANDLE 8 #define FW_VF_HANDLE(abs_vfid) \ (abs_vfid + FW_PF_MAX_HANDLE) #define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */ #define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? (bp)->vfdb->sriov.total \ : 0) #define VF_MAC_CREDIT_CNT 1 #define VF_VLAN_CREDIT_CNT 2 /* VLAN0 + 'real' VLAN */ /* locking and unlocking the channel mutex */ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv); void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs expected_tlv); /* VF mail box (aka vf-pf channel) */ /* a container for the bi-directional vf<-->pf messages. 
* The actual response will be placed according to the offset parameter * provided in the request */ #define MBX_MSG_ALIGN 8 #define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \ MBX_MSG_ALIGN)) struct bnx2x_vf_mbx_msg { union vfpf_tlvs req; union pfvf_tlvs resp; }; struct bnx2x_vf_mbx { struct bnx2x_vf_mbx_msg *msg; dma_addr_t msg_mapping; /* VF GPA address */ u32 vf_addr_lo; u32 vf_addr_hi; struct vfpf_first_tlv first_tlv; /* saved VF request header */ }; struct bnx2x_vf_sp { union { struct eth_classify_rules_ramrod_data e2; } mac_rdata; union { struct eth_classify_rules_ramrod_data e2; } vlan_rdata; union { struct eth_classify_rules_ramrod_data e2; } vlan_mac_rdata; union { struct eth_filter_rules_ramrod_data e2; } rx_mode_rdata; union { struct eth_multicast_rules_ramrod_data e2; } mcast_rdata; union { struct client_init_ramrod_data init_data; struct client_update_ramrod_data update_data; } q_data; union { struct eth_rss_update_ramrod_data e2; } rss_rdata; }; struct hw_dma { void *addr; dma_addr_t mapping; size_t size; }; struct bnx2x_vfdb { #define BP_VFDB(bp) ((bp)->vfdb) /* vf array */ struct bnx2x_virtf *vfs; #define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? 
\ &((bp)->vfdb->vfs[idx]) : NULL) #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var) /* queue array - for all vfs */ struct bnx2x_vf_queue *vfqs; /* vf HW contexts */ struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS]; #define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[i]) /* SR-IOV information */ struct bnx2x_sriov sriov; struct hw_dma mbx_dma; #define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS]; #define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[vfid])) struct hw_dma bulletin_dma; #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) #define BP_VF_BULLETIN(bp, vf) \ (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \ + (vf)) struct hw_dma sp_dma; #define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \ (vf)->index * sizeof(struct bnx2x_vf_sp) + \ offsetof(struct bnx2x_vf_sp, field)) #define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \ (vf)->index * sizeof(struct bnx2x_vf_sp) + \ offsetof(struct bnx2x_vf_sp, field)) #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) u32 flrd_vfs[FLRD_VFS_DWORDS]; /* the number of msix vectors belonging to this PF designated for VFs */ u16 vf_sbs_pool; u16 first_vf_igu_entry; /* sp_rtnl synchronization */ struct mutex event_mutex; u64 event_occur; /* bulletin board update synchronization */ struct mutex bulletin_mutex; }; /* queue access */ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) { return &(vf->vfqs[index]); } /* FW ids */ static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) { return vf->igu_base_id + sb_idx; } static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx) { return vf_igu_sb(vf, sb_idx); } static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) { return vf->igu_base_id + q->index; } static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) { if (vf->cfg_flags & VF_CFG_STATS_COALESCE) return vf->leading_rss; else return 
vfq_cl_id(vf, q); } static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) { return vfq_cl_id(vf, q); } /* global iov routines */ int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line); int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param); void bnx2x_iov_remove_one(struct bnx2x *bp); void bnx2x_iov_free_mem(struct bnx2x *bp); int bnx2x_iov_alloc_mem(struct bnx2x *bp); int bnx2x_iov_nic_init(struct bnx2x *bp); int bnx2x_iov_chip_cleanup(struct bnx2x *bp); void bnx2x_iov_init_dq(struct bnx2x *bp); void bnx2x_iov_init_dmae(struct bnx2x *bp); void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj); int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); void bnx2x_iov_storm_stats_update(struct bnx2x *bp); /* global vf mailbox routines */ void bnx2x_vf_mbx(struct bnx2x *bp); void bnx2x_vf_mbx_schedule(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); /* CORE VF API */ typedef u8 bnx2x_mac_addr_t[ETH_ALEN]; /* acquire */ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, struct vf_pf_resc_request *resc); /* init */ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map); /* VFOP queue construction helpers */ void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_params, struct bnx2x_queue_setup_params *setup_params, u16 q_idx, u16 sb_idx); void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_params, struct bnx2x_queue_setup_params *setup_params, u16 q_idx, u16 sb_idx); void bnx2x_vfop_qctor_prep(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q, struct bnx2x_vf_queue_construct_params *p, unsigned long q_type); int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, struct 
bnx2x_vf_mac_vlan_filters *filters, int qid, bool drv_only); int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, struct bnx2x_vf_queue_construct_params *qctor); int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only); int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, unsigned long accept_flags); int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_config_rss_params *rss); int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, struct vfpf_tpa_tlv *tlv, struct bnx2x_queue_update_tpa_params *params); /* VF release ~ VF close + VF release-resources * * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. */ int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf); int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); /* FLR routines */ /* VF FLR helpers */ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); /* Handles an FLR (or VF_DISABLE) notification form the MCP */ void bnx2x_vf_handle_flr_event(struct bnx2x *bp); bool bnx2x_tlv_supported(u16 tlvtype); u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin); int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, bool support_long); enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); /* VF side vfpf channel functions */ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count); int bnx2x_vfpf_release(struct bnx2x *bp); int bnx2x_vfpf_init(struct bnx2x *bp); void 
bnx2x_vfpf_close_vf(struct bnx2x *bp); int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading); int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set); int bnx2x_vfpf_config_rss(struct bnx2x *bp, struct bnx2x_config_rss_params *params); int bnx2x_vfpf_set_mcast(struct net_device *dev); int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) { strscpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len); } static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, struct bnx2x_fastpath *fp) { return PXP_VF_ADDR_USDM_QUEUES_START + bp->acquire_resp.resc.hw_qid[fp->index] * sizeof(struct ustorm_queue_zone_data); } enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); void bnx2x_timer_sriov(struct bnx2x *bp); void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); void bnx2x_vf_pci_dealloc(struct bnx2x *bp); int bnx2x_vf_pci_alloc(struct bnx2x *bp); int bnx2x_enable_sriov(struct bnx2x *bp); void bnx2x_disable_sriov(struct bnx2x *bp); static inline int bnx2x_vf_headroom(struct bnx2x *bp) { return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; } void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); void bnx2x_iov_channel_down(struct bnx2x *bp); void bnx2x_iov_task(struct work_struct *work); void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); void bnx2x_iov_link_update(struct bnx2x *bp); int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx); int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state); int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add); #else /* CONFIG_BNX2X_SRIOV */ #define GET_NUM_VFS_PER_PATH(bp) 0 #define GET_NUM_VFS_PER_PF(bp) 0 #define VF_MAC_CREDIT_CNT 0 #define VF_VLAN_CREDIT_CNT 0 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj) 
{} static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) {return 1; } static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) {} static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {} static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; } static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param) {return 0; } static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; } static inline void bnx2x_disable_sriov(struct bnx2x *bp) {} static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) {return 0; } static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set) {return 0; } static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, struct bnx2x_config_rss_params *params) {return 0; } static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; } static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; } static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {} static inline 
void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) {} static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, struct bnx2x_fastpath *fp) {return 0; } static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) { return PFVF_BULLETIN_UNCHANGED; } static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) { return NULL; } static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {} static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} static inline void bnx2x_iov_task(struct work_struct *work) {} static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} static inline void bnx2x_iov_link_update(struct bnx2x *bp) {} static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; } static inline int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state) {return 0; } struct pf_vf_bulletin_content; static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, bool support_long) {} static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; } #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ #define REQ_TX_DESCRIPTOR_MULTIPLE 8 #define REQ_RX_DESCRIPTOR_MULTIPLE 8 /* Definitions for power management and wakeup registers */ /* Wake Up Control */ #define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ /* Wake Up Filter Control */ #define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ #define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ #define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ #define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ /* Wake Up Status */ #define E1000_WUS_EX 0x00000004 /* Directed Exact */ #define E1000_WUS_ARPD 0x00000020 /* Directed ARP Request */ #define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 */ #define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 */ #define E1000_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ /* Packet types that are enabled for wake packet delivery */ #define WAKE_PKT_WUS ( \ E1000_WUS_EX | \ E1000_WUS_ARPD | \ E1000_WUS_IPV4 | \ E1000_WUS_IPV6 | \ E1000_WUS_NSD) /* Wake Up Packet Length */ #define E1000_WUPL_MASK 0x00000FFF /* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ #define E1000_WUPM_BYTES 128 /* Extended Device Control */ #define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ #define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ #define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ /* Physical Func Reset Done Indication */ #define E1000_CTRL_EXT_PFRSTD 0x00004000 #define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 #define 
E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 #define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 #define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_IRCA 0x00000001 /* Interrupt delay cancellation */ /* Driver loaded bit for FW */ #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Interrupt acknowledge Auto-mask */ /* Clear Interrupt timers after IMS clear */ /* packet buffer parity error detection enabled */ /* descriptor FIFO parity error detection enable */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ #define E1000_CTRL_EXT_PHYPDEN 0x00100000 #define E1000_I2CCMD_REG_ADDR_SHIFT 16 #define E1000_I2CCMD_PHY_ADDR_SHIFT 24 #define E1000_I2CCMD_OPCODE_READ 0x08000000 #define E1000_I2CCMD_OPCODE_WRITE 0x00000000 #define E1000_I2CCMD_READY 0x20000000 #define E1000_I2CCMD_ERROR 0x80000000 #define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) #define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) #define E1000_MAX_SGMII_PHY_REG_ADDR 255 #define E1000_I2CCMD_PHY_TIMEOUT 200 #define E1000_IVAR_VALID 0x80 #define E1000_GPIE_NSICR 0x00000001 #define E1000_GPIE_MSIX_MODE 0x00000010 #define E1000_GPIE_EIAME 0x40000000 #define E1000_GPIE_PBA 0x80000000 /* Receive Descriptor bit definitions */ #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ #define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ #define E1000_RXDEXT_STATERR_LB 0x00040000 #define E1000_RXDEXT_STATERR_CE 0x01000000 #define E1000_RXDEXT_STATERR_SE 0x02000000 #define E1000_RXDEXT_STATERR_SEQ 0x04000000 #define E1000_RXDEXT_STATERR_CXE 0x10000000 #define E1000_RXDEXT_STATERR_TCPE 0x20000000 #define 
E1000_RXDEXT_STATERR_IPE 0x40000000 #define E1000_RXDEXT_STATERR_RXE 0x80000000 /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ E1000_RXDEXT_STATERR_CE | \ E1000_RXDEXT_STATERR_SE | \ E1000_RXDEXT_STATERR_SEQ | \ E1000_RXDEXT_STATERR_CXE | \ E1000_RXDEXT_STATERR_RXE) #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 #define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 #define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 #define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 #define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 /* Management Control */ #define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ #define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ #define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ /* Enable Neighbor Discovery Filtering */ #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ /* Enable MAC address filtering */ #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Receive Control */ #define E1000_RCTL_EN 0x00000002 /* enable */ #define E1000_RCTL_SBP 0x00000004 /* store bad packet */ #define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ #define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ #define E1000_RCTL_LPE 0x00000020 /* long packet enable */ #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ #define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ #define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ #define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ #define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames 
*/ #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ /* Use byte values for the following shift parameters * Usage: * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & * E1000_PSRCTL_BSIZE0_MASK) | * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & * E1000_PSRCTL_BSIZE1_MASK) | * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & * E1000_PSRCTL_BSIZE2_MASK) | * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; * E1000_PSRCTL_BSIZE3_MASK)) * where value0 = [128..16256], default=256 * value1 = [1024..64512], default=4096 * value2 = [0..64512], default=4096 * value3 = [0..64512], default=0 */ #define E1000_PSRCTL_BSIZE0_MASK 0x0000007F #define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 #define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 #define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 #define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ #define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ /* SWFW_SYNC Definitions */ #define E1000_SWFW_EEP_SM 0x1 #define E1000_SWFW_PHY0_SM 0x2 #define E1000_SWFW_PHY1_SM 0x4 #define E1000_SWFW_PHY2_SM 0x20 #define E1000_SWFW_PHY3_SM 0x40 /* FACTPS Definitions */ /* Device Control */ #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ #define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ #define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */
/* Device Control Register (CTRL) bit definitions */
#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
/* Defined polarity of Dock/Undock indication in SDP[0] */
/* Reset both PHY ports, through PHYRST_N pin */
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
/* Initiate an interrupt to manageability engine */
#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
/* Bit definitions for the Management Data IO (MDIO) and Management Data
 * Clock (MDC) pins in the Device Control Register.
 */
/* Copper/fiber media auto-sense (CONNSW) register bits */
#define E1000_CONNSW_ENRGSRC 0x4
#define E1000_CONNSW_PHYSD 0x400
#define E1000_CONNSW_PHY_PDN 0x800
#define E1000_CONNSW_SERDESD 0x200
#define E1000_CONNSW_AUTOSENSE_CONF 0x2
#define E1000_CONNSW_AUTOSENSE_EN 0x1
/* PCS configuration and link-control (PCS_CFG / PCS_LCTL) bits */
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
#define E1000_PCS_LCTL_FSV_1000 4
#define E1000_PCS_LCTL_FDV_FULL 8
#define E1000_PCS_LCTL_FSD 0x10
#define E1000_PCS_LCTL_FORCE_LINK 0x20
#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
#define E1000_PCS_LCTL_AN_ENABLE 0x10000
#define E1000_PCS_LCTL_AN_RESTART 0x20000
#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
/* PCS link-status (PCS_LSTS) bits */
#define E1000_PCS_LSTS_LINK_OK 1
#define E1000_PCS_LSTS_SPEED_100 2
#define E1000_PCS_LSTS_SPEED_1000 4
#define E1000_PCS_LSTS_DUPLEX_FULL 8
#define E1000_PCS_LSTS_SYNK_OK 0x10
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
/* Change in Dock/Undock state. Clear on write '0'. */
/* Status of Master requests. */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
/* BMC external code execution disabled */
#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
/* Constants used to interpret the masked PCI-X bus speed.
 */
/* Link speed/duplex values and autoneg advertisement masks */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL 0x0020
/* 1000/H is not supported, nor spec-compliant. */
#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
	ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
	ADVERTISE_1000_FULL)
#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
	ADVERTISE_100_HALF | ADVERTISE_100_FULL)
#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
	ADVERTISE_1000_FULL)
#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
#define E1000_LEDCTL_LED0_BLINK 0x00000080
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_IVRT 0x00000040
#define E1000_LEDCTL_MODE_LED_ON 0xE
#define E1000_LEDCTL_MODE_LED_OFF 0xF
/* Transmit Descriptor bit definitions */
#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
/* Extended desc bits for Linksec and timesync */
/* Transmit Control */
#define E1000_TCTL_EN 0x00000002 /* enable tx */
#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
#define E1000_TCTL_CT 0x00000ff0 /*
collision threshold */
#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* DMA Coalescing register fields */
#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
/* DMA Coalescing BMC-to-OS Watchdog Enable */
#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
/* Timestamp in Rx buffer */
#define E1000_RXPBS_CFG_TS_EN 0x80000000
/* I210 packet buffer size defaults (Rx/Tx partitioning) */
#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I210_RXPBSIZE_MASK 0x0000003F
#define I210_RXPBSIZE_PB_30KB 0x0000001E
#define I210_RXPBSIZE_PB_32KB 0x00000020
#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define I210_TXPBSIZE_MASK 0xC0FFFFFF
#define I210_TXPBSIZE_PB0_6KB (6 << 0)
#define I210_TXPBSIZE_PB1_6KB (6 << 6)
#define I210_TXPBSIZE_PB2_6KB (6 << 12)
#define I210_TXPBSIZE_PB3_6KB (6 << 18)
#define I210_DTXMXPKTSZ_DEFAULT 0x00000098
#define I210_SR_QUEUES_NUM 2
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
/* Receive Checksum Control */
#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Header split receive */
#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
#define E1000_RFCTL_LEF 0x00040000
/* Collision related configuration parameters */
#define E1000_COLLISION_THRESHOLD 15
#define E1000_CT_SHIFT 4
#define E1000_COLLISION_DISTANCE 63
#define E1000_COLD_SHIFT 12
/* Ethertype field values */
#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
#define MAX_STD_JUMBO_FRAME_SIZE 9216
/* PBA constants */
#define E1000_PBA_34K 0x0022
#define E1000_PBA_64K 0x0040 /* 64KB */
/* SW Semaphore Register */
#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_INT_ASSERTED 0x80000000
/* LAN connected device generates an interrupt */
#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
/* Extended Interrupt Cause Read */
#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
/* TCP Timer */
/* This defines the bits that are set in the Interrupt Mask
 * Set/Read Register. Each bit is documented below:
 * o RXT0 = Receiver Timer Interrupt (ring 0)
 * o TXDW = Transmit Descriptor Written Back
 * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
 * o RXSEQ = Receive Sequence Error
 * o LSC = Link Status Change
 */
#define IMS_ENABLE_MASK ( \
	E1000_IMS_RXT0 | \
	E1000_IMS_TXDW | \
	E1000_IMS_RXDMT0 | \
	E1000_IMS_RXSEQ | \
	E1000_IMS_LSC | \
	E1000_IMS_DOUTSYNC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
/* Extended Interrupt Mask Set */
#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
/* Interrupt Cause Set */
#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
/* Extended Interrupt Cause Set */
/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
/* Transmit Descriptor Control */
/* Enable the counting of descriptors still to be processed.
 */
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
/* Transmit Config Word */
#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
/* Receive Address */
/* Number of high/low register pairs in the RAR. The RAR (Receive Address
 * Registers) holds the directed and multicast addresses that we monitor.
 * Technically, we have 16 spots. However, we reserve one of these spots
 * (RAR[15]) for our directed address used by controllers with
 * manageability enabled, allowing us room for 15 multicast addresses.
 */
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
#define E1000_RAH_ASEL_SRC_ADDR 0x00010000
#define E1000_RAH_QSEL_ENABLE 0x10000000
#define E1000_RAL_MAC_ADDR_LEN 4
#define E1000_RAH_MAC_ADDR_LEN 2
#define E1000_RAH_POOL_MASK 0x03FC0000
#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
#define E1000_ERR_PARAM 4
#define E1000_ERR_MAC_INIT 5
#define E1000_ERR_RESET 9
#define E1000_ERR_MASTER_REQUESTS_PENDING 10
#define E1000_BLK_PHY_RESET 12
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
#define E1000_ERR_INVALID_ARGUMENT 16
#define E1000_ERR_NO_SPACE 17
#define E1000_ERR_NVM_PBA_SECTION 18
#define E1000_ERR_INVM_VALUE_NOT_FOUND 19
#define E1000_ERR_I2C 20
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
#define PHY_FORCE_LIMIT 20
/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
#define PHY_CFG_TIMEOUT 100
/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
/* Number of milliseconds for NVM auto read done after MAC reset. */
#define AUTO_READ_DONE_TIMEOUT 10
/* Flow Control */
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
/* IEEE 1588 time-sync Tx/Rx control (TSYNCTXCTL / TSYNCRXCTL) */
#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */
#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */
/* PTP v1/v2 message-type matching for Rx timestamp filtering */
#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
#define E1000_TIMINCA_16NS_SHIFT 24
/* Time Sync Interrupt Cause/Mask Register Bits */
#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */
#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */
#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */
#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */
#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */
#define TSYNC_INTERRUPTS TSINTR_TXTS
#define E1000_TSICR_TXTS TSINTR_TXTS
/* TSAUXC Configuration Bits */
#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */
#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */
#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */
#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */
#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */
#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */
#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */
#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */
#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */
#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */
#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 1. */
#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */
#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */
#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */
/* SDP Configuration Bits */
#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */
#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */
#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */
#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */
#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */
#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */
#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1.
 */
#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */
#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */
#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */
#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */
#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */
#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */
#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */
#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */
#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */
#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */
#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */
#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */
#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */
#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */
#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */
#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */
#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */
#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */
#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */
#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */
#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */
#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */
#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */
/* MDI configuration (MDICNFG) register bits */
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
#define E1000_MDICNFG_PHY_MASK 0x03E00000
#define E1000_MDICNFG_PHY_SHIFT 21
#define E1000_MEDIA_PORT_COPPER 1
#define E1000_MEDIA_PORT_OTHER 2
/* Marvell 88E1112/88E1512 PHY-specific registers and fields */
#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
#define E1000_M88E1112_MAC_CTRL_1 0x10
#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
#define E1000_M88E1112_PAGE_ADDR 0x16
#define E1000_M88E1112_STATUS 0x01
#define E1000_M88E1512_CFG_REG_1 0x0010
#define E1000_M88E1512_CFG_REG_2 0x0011
#define E1000_M88E1512_CFG_REG_3 0x0007
#define E1000_M88E1512_MODE 0x0014
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
#define E1000_GCR_CAP_VER2 0x00040000
/* mPHY Address Control and Data Registers */
#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
/* mPHY PCS CLK Register */
#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
/* mPHY Near End Digital Loopback Override Bit */
#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
/* NOTE(review): E1000_PCS_LCTL_FORCE_FCTRL is also defined earlier in this
 * file with the same value (0x80); the identical redefinition is harmless
 * but one copy could be dropped.
 */
#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
/* Autoneg Advertisement Register */
#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
/* Link Partner Ability Register (Base Page) */
#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
/* Autoneg Expansion Register */
/* 1000BASE-T Control Register */
#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
/* 0=Configure PHY as Slave */
#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
/* 0=Automatic Master/Slave config */
/* 1000BASE-T Status Register */
#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
#define PHY_CONTROL 0x00 /* Control Register */
#define PHY_STATUS 0x01 /* Status Register */
#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
#define E1000_EECD_DI 0x00000004 /* NVM Data In */ #define E1000_EECD_DO 0x00000008 /* NVM Data Out */ #define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ #define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ #define E1000_EECD_PRES 0x00000100 /* NVM Present */ /* NVM Addressing bits based on type 0=small, 1=large */ #define E1000_EECD_ADDR_BITS 0x00000400 #define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ #define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ #define E1000_EECD_SIZE_EX_SHIFT 11 #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ #define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ #define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ #define E1000_FLUDONE_ATTEMPTS 20000 #define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ #define E1000_I210_FIFO_SEL_RX 0x00 #define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) #define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) #define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 #define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 #define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ /* Secure FLASH mode requires removing MSb */ #define E1000_I210_FW_PTR_MASK 0x7FFF /* Firmware code revision field word offset*/ #define E1000_I210_FW_VER_OFFSET 328 #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ #define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ #define E1000_FLUDONE_ATTEMPTS 20000 #define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ #define E1000_I210_FIFO_SEL_RX 0x00 #define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) #define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) #define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 #define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 /* Offset to data in NVM read/write registers */ #define E1000_NVM_RW_REG_DATA 16 #define E1000_NVM_RW_REG_DONE 2 /* Offset to 
READ/WRITE done bit */ #define E1000_NVM_RW_REG_START 1 /* Start operation */ #define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ #define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ /* NVM Word Offsets */ #define NVM_COMPAT 0x0003 #define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ #define NVM_VERSION 0x0005 #define NVM_INIT_CONTROL2_REG 0x000F #define NVM_INIT_CONTROL3_PORT_B 0x0014 #define NVM_INIT_CONTROL3_PORT_A 0x0024 #define NVM_ALT_MAC_ADDR_PTR 0x0037 #define NVM_CHECKSUM_REG 0x003F #define NVM_COMPATIBILITY_REG_3 0x0003 #define NVM_COMPATIBILITY_BIT_MASK 0x8000 #define NVM_MAC_ADDR 0x0000 #define NVM_SUB_DEV_ID 0x000B #define NVM_SUB_VEN_ID 0x000C #define NVM_DEV_ID 0x000D #define NVM_VEN_ID 0x000E #define NVM_INIT_CTRL_2 0x000F #define NVM_INIT_CTRL_4 0x0013 #define NVM_LED_1_CFG 0x001C #define NVM_LED_0_2_CFG 0x001F #define NVM_ETRACK_WORD 0x0042 #define NVM_ETRACK_HIWORD 0x0043 #define NVM_COMB_VER_OFF 0x0083 #define NVM_COMB_VER_PTR 0x003d /* NVM version defines */ #define NVM_MAJOR_MASK 0xF000 #define NVM_MINOR_MASK 0x0FF0 #define NVM_IMAGE_ID_MASK 0x000F #define NVM_COMB_VER_MASK 0x00FF #define NVM_MAJOR_SHIFT 12 #define NVM_MINOR_SHIFT 4 #define NVM_COMB_VER_SHFT 8 #define NVM_VER_INVALID 0xFFFF #define NVM_ETRACK_SHIFT 16 #define NVM_ETRACK_VALID 0x8000 #define NVM_NEW_DEC_MASK 0x0F00 #define NVM_HEX_CONV 16 #define NVM_HEX_TENS 10 #define NVM_ETS_CFG 0x003E #define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 #define NVM_ETS_LTHRES_DELTA_SHIFT 6 #define NVM_ETS_TYPE_MASK 0x0038 #define NVM_ETS_TYPE_SHIFT 3 #define NVM_ETS_TYPE_EMC 0x000 #define NVM_ETS_NUM_SENSORS_MASK 0x0007 #define NVM_ETS_DATA_LOC_MASK 0x3C00 #define NVM_ETS_DATA_LOC_SHIFT 10 #define NVM_ETS_DATA_INDEX_MASK 0x0300 #define NVM_ETS_DATA_INDEX_SHIFT 8 #define NVM_ETS_DATA_HTHRESH_MASK 0x00FF #define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ #define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ #define 
E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ #define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ #define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) /* Mask bits for fields in Word 0x24 of the NVM */ #define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ #define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ /* Mask bits for fields in Word 0x0f of the NVM */ #define NVM_WORD0F_PAUSE_MASK 0x3000 #define NVM_WORD0F_ASM_DIR 0x2000 /* Mask bits for fields in Word 0x1a of the NVM */ /* length of string needed to store part num */ #define E1000_PBANUM_LENGTH 11 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ #define NVM_SUM 0xBABA #define NVM_PBA_OFFSET_0 8 #define NVM_PBA_OFFSET_1 9 #define NVM_RESERVED_WORD 0xFFFF #define NVM_PBA_PTR_GUARD 0xFAFA #define NVM_WORD_SIZE_BASE_SHIFT 6 /* NVM Commands - Microwire */ /* NVM Commands - SPI */ #define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ #define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ #define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ #define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ #define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ #define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ /* SPI NVM Status Register */ #define NVM_STATUS_RDY_SPI 0x01 /* Word definitions for ID LED Settings */ #define ID_LED_RESERVED_0000 0x0000 #define ID_LED_RESERVED_FFFF 0xFFFF #define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ (ID_LED_OFF1_OFF2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_DEF1_DEF2)) #define ID_LED_DEF1_DEF2 0x1 #define ID_LED_DEF1_ON2 0x2 #define ID_LED_DEF1_OFF2 0x3 #define ID_LED_ON1_DEF2 0x4 #define ID_LED_ON1_ON2 0x5 #define ID_LED_ON1_OFF2 0x6 #define ID_LED_OFF1_DEF2 0x7 #define ID_LED_OFF1_ON2 0x8 #define ID_LED_OFF1_OFF2 0x9 #define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF #define IGP_ACTIVITY_LED_ENABLE 0x0300 #define IGP_LED3_MODE 0x07000000 /* 
PCI/PCI-X/PCI-EX Config space */ #define PCIE_DEVICE_CONTROL2 0x28 #define PCIE_DEVICE_CONTROL2_16ms 0x0005 #define PHY_REVISION_MASK 0xFFFFFFF0 #define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ #define MAX_PHY_MULTI_PAGE_REG 0xF /* Bit definitions for valid PHY IDs. */ /* I = Integrated * E = External */ #define M88E1111_I_PHY_ID 0x01410CC0 #define M88E1112_E_PHY_ID 0x01410C90 #define I347AT4_E_PHY_ID 0x01410DC0 #define IGP03E1000_E_PHY_ID 0x02A80390 #define I82580_I_PHY_ID 0x015403A0 #define I350_I_PHY_ID 0x015403B0 #define M88_VENDOR 0x0141 #define I210_I_PHY_ID 0x01410C00 #define M88E1543_E_PHY_ID 0x01410EA0 #define M88E1512_E_PHY_ID 0x01410DD0 #define BCM54616_E_PHY_ID 0x03625D10 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ #define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ #define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ #define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ #define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ /* M88E1000 PHY Specific Control Register */ #define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ /* 1=CLK125 low, 0=CLK125 toggling */ #define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ /* Manual MDI configuration */ #define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ #define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* Auto crossover enabled all speeds */ #define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold * 0=Normal 10BASE-T Rx Threshold */ /* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ /* M88E1000 PHY Specific Status Register */ #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 
1=Polarity reversed */ #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ /* 0 = <50M * 1 = 50-80M * 2 = 80-110M * 3 = 110-140M * 4 = >140M */ #define M88E1000_PSSR_CABLE_LENGTH 0x0380 #define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ #define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 /* M88E1000 Extended PHY Specific Control Register */ /* 1 = Lost lock detect enabled. * Will assert lost lock and bring * link down if idle not seen * within 1ms in 1000BASE-T */ /* Number of times we will attempt to autonegotiate before downshifting if we * are the master */ #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 /* Number of times we will attempt to autonegotiate before downshifting if we * are the slave */ #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ /* Intel i347-AT4 Registers */ #define I347AT4_PCDL0 0x10 /* Pair 0 PHY Cable Diagnostics Length */ #define I347AT4_PCDL1 0x11 /* Pair 1 PHY Cable Diagnostics Length */ #define I347AT4_PCDL2 0x12 /* Pair 2 PHY Cable Diagnostics Length */ #define I347AT4_PCDL3 0x13 /* Pair 3 PHY Cable Diagnostics Length */ #define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ #define I347AT4_PAGE_SELECT 0x16 /* i347-AT4 Extended PHY Specific Control Register */ /* Number of times we will attempt to autonegotiate before downshifting if we * are the master */ #define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 #define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 #define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 #define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 #define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 #define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 #define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 #define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 #define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 #define 
I347AT4_PSCR_DOWNSHIFT_8X 0x7000 /* i347-AT4 PHY Cable Diagnostics Control */ #define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ /* Marvell 1112 only registers */ #define M88E1112_VCT_DSP_DISTANCE 0x001A /* M88EC018 Rev 2 specific DownShift settings */ #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 /* MDI Control */ #define E1000_MDIC_DATA_MASK 0x0000FFFF #define E1000_MDIC_REG_MASK 0x001F0000 #define E1000_MDIC_REG_SHIFT 16 #define E1000_MDIC_PHY_MASK 0x03E00000 #define E1000_MDIC_PHY_SHIFT 21 #define E1000_MDIC_OP_WRITE 0x04000000 #define E1000_MDIC_OP_READ 0x08000000 #define E1000_MDIC_READY 0x10000000 #define E1000_MDIC_INT_EN 0x20000000 #define E1000_MDIC_ERROR 0x40000000 #define E1000_MDIC_DEST 0x80000000 /* Thermal Sensor */ #define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ #define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ /* Energy Efficient Ethernet */ #define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ #define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ #define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ #define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ #define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ #define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ #define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ #define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ #define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ #define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ #define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ #define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ #define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ #define E1000_M88E1543_EEE_CTRL_1 0x0 #define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ #define E1000_M88E1543_FIBER_CTRL 0x0 #define 
E1000_EEE_ADV_DEV_I354 7 #define E1000_EEE_ADV_ADDR_I354 60 #define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */ #define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */ #define E1000_PCS_STATUS_DEV_I354 3 #define E1000_PCS_STATUS_ADDR_I354 1 #define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ #define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 #define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 /* SerDes Control */ #define E1000_GEN_CTL_READY 0x80000000 #define E1000_GEN_CTL_ADDRESS_SHIFT 8 #define E1000_GEN_POLL_TIMEOUT 640 #define E1000_VFTA_ENTRY_SHIFT 5 #define E1000_VFTA_ENTRY_MASK 0x7F #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F /* Tx Rate-Scheduler Config fields */ #define E1000_RTTBCNRC_RS_ENA 0x80000000 #define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF #define E1000_RTTBCNRC_RF_INT_SHIFT 14 #define E1000_RTTBCNRC_RF_INT_MASK \ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) #define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) #define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) #define E1000_VLAPQF_QUEUE_MASK 0x03 /* TX Qav Control fields */ #define E1000_TQAVCTRL_XMIT_MODE BIT(0) #define E1000_TQAVCTRL_DATAFETCHARB BIT(4) #define E1000_TQAVCTRL_DATATRANARB BIT(8) #define E1000_TQAVCTRL_DATATRANTIM BIT(9) #define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) /* Fetch Time Delta - bits 31:16 * * This field holds the value to be reduced from the launch time for * fetch time decision. The FetchTimeDelta value is defined in 32 ns * granularity. * * This field is 16 bits wide, and so the maximum value is: * * 65535 * 32 = 2097120 ~= 2.1 msec * * XXX: We are configuring the max value here since we couldn't come up * with a reason for not doing so. */ #define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) /* TX Qav Credit Control fields */ #define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF #define E1000_TQAVCC_QUEUEMODE BIT(31) /* Transmit Descriptor Control fields */ #define E1000_TXDCTL_PRIORITY BIT(27) #endif
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc. */

#include <linux/io.h>
#include <linux/types.h>
#include <linux/delay.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>

#include "vfio_dev.h"
#include "cmds.h"

/* How long to poll for a device-suspend completion before giving up. */
#define SUSPEND_TIMEOUT_S		5
/* Delay between suspend-status polls while the device reports -EAGAIN. */
#define SUSPEND_CHECK_INTERVAL_MS	1

/*
 * Wrap a client live-migration command in a PDS_AQ_CMD_CLIENT_CMD envelope
 * and post it to the PF's adminq.
 *
 * @req is copied into the envelope's client_cmd payload; the raw completion
 * is returned through @resp.  @fast_poll is passed through to
 * pdsc_adminq_post().  Returns 0 on success or a negative errno; -EAGAIN is
 * not logged because callers poll on it.
 */
static int pds_vfio_client_adminq_cmd(struct pds_vfio_pci_device *pds_vfio,
				      union pds_core_adminq_cmd *req,
				      union pds_core_adminq_comp *resp,
				      bool fast_poll)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	union pds_core_adminq_cmd cmd = {};
	struct pdsc *pdsc;
	int err;

	/* Wrap the client request */
	cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
	cmd.client_request.client_id = cpu_to_le16(pds_vfio->client_id);
	/* Copy only as much of @req as fits in the envelope payload. */
	memcpy(cmd.client_request.client_cmd, req,
	       sizeof(cmd.client_request.client_cmd));

	pdsc = pdsc_get_pf_struct(pdev);
	if (IS_ERR(pdsc))
		return PTR_ERR(pdsc);

	err = pdsc_adminq_post(pdsc, &cmd, resp, fast_poll);
	if (err && err != -EAGAIN)
		dev_err(pds_vfio_to_dev(pds_vfio),
			"client admin cmd failed: %pe\n", ERR_PTR(err));

	return err;
}

/*
 * Register this VF as a live-migration client with the PF core driver.
 *
 * Builds a "<name>.<domain>-<devid>" device name, registers it with the
 * pds_core PF, and stores the returned client id in pds_vfio->client_id.
 * Returns 0 on success or a negative errno.
 */
int pds_vfio_register_client_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	char devname[PDS_DEVNAME_LEN];
	struct pdsc *pdsc;
	int ci;

	snprintf(devname, sizeof(devname), "%s.%d-%u", PDS_VFIO_LM_DEV_NAME,
		 pci_domain_nr(pdev->bus),
		 PCI_DEVID(pdev->bus->number, pdev->devfn));

	pdsc = pdsc_get_pf_struct(pdev);
	if (IS_ERR(pdsc))
		return PTR_ERR(pdsc);

	ci = pds_client_register(pdsc, devname);
	if (ci < 0)
		return ci;

	pds_vfio->client_id = ci;

	return 0;
}

/*
 * Undo pds_vfio_register_client_cmd(): unregister from the PF core driver
 * and clear the cached client id.  Failures are only logged — there is no
 * caller that could act on them.
 */
void pds_vfio_unregister_client_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct pdsc *pdsc;
	int err;

	pdsc = pdsc_get_pf_struct(pdev);
	if (IS_ERR(pdsc))
		return;

	err = pds_client_unregister(pdsc, pds_vfio->client_id);
	if (err)
		dev_err(&pdev->dev, "unregister from DSC failed: %pe\n",
			ERR_PTR(err));

	pds_vfio->client_id = 0;
}

/*
 * Poll SUSPEND_STATUS until the firmware stops returning -EAGAIN or
 * SUSPEND_TIMEOUT_S elapses.  Returns the final command status, or
 * -ETIMEDOUT if the deadline passed (the deadline check also fires when the
 * last poll completed exactly at the limit).
 */
static int pds_vfio_suspend_wait_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
{
	union pds_core_adminq_cmd cmd = {
		.lm_suspend_status = {
			.opcode = PDS_LM_CMD_SUSPEND_STATUS,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.type = type,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	int err;

	time_start = jiffies;
	time_limit = time_start + HZ * SUSPEND_TIMEOUT_S;
	do {
		err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
		if (err != -EAGAIN)
			break;

		msleep(SUSPEND_CHECK_INTERVAL_MS);
	} while (time_before(jiffies, time_limit));

	time_done = jiffies;
	dev_dbg(dev, "%s: vf%u: Suspend comp received in %d msecs\n", __func__,
		pds_vfio->vf_id, jiffies_to_msecs(time_done - time_start));

	/* Check the results */
	if (time_after_eq(time_done, time_limit)) {
		dev_err(dev, "%s: vf%u: Suspend comp timeout\n", __func__,
			pds_vfio->vf_id);
		err = -ETIMEDOUT;
	}

	return err;
}

/*
 * Kick off a device suspend of the given @type, then wait for the firmware
 * to report the suspend complete.  Returns 0 on success or a negative errno.
 */
int pds_vfio_suspend_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
{
	union pds_core_adminq_cmd cmd = {
		.lm_suspend = {
			.opcode = PDS_LM_CMD_SUSPEND,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.type = type,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	dev_dbg(dev, "vf%u: Suspend device\n", pds_vfio->vf_id);

	/*
	 * The initial suspend request to the firmware starts the device suspend
	 * operation and the firmware returns success if it's started
	 * successfully.
	 */
	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
	if (err) {
		dev_err(dev, "vf%u: Suspend failed: %pe\n", pds_vfio->vf_id,
			ERR_PTR(err));
		return err;
	}

	/*
	 * The subsequent suspend status request(s) check if the firmware has
	 * completed the device suspend process.
	 */
	return pds_vfio_suspend_wait_device_cmd(pds_vfio, type);
}

/*
 * Resume a previously suspended device.  Fire-and-forget: the firmware's
 * completion status is the only result; there is no resume-status poll.
 */
int pds_vfio_resume_device_cmd(struct pds_vfio_pci_device *pds_vfio, u8 type)
{
	union pds_core_adminq_cmd cmd = {
		.lm_resume = {
			.opcode = PDS_LM_CMD_RESUME,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.type = type,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};

	dev_dbg(dev, "vf%u: Resume device\n", pds_vfio->vf_id);

	return pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, true);
}

/*
 * Ask the firmware how many bytes of live-migration state this VF has.
 * On success *@size is set from the completion; returns 0 or negative errno.
 */
int pds_vfio_get_lm_state_size_cmd(struct pds_vfio_pci_device *pds_vfio, u64 *size)
{
	union pds_core_adminq_cmd cmd = {
		.lm_state_size = {
			.opcode = PDS_LM_CMD_STATE_SIZE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	dev_dbg(dev, "vf%u: Get migration status\n", pds_vfio->vf_id);

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		return err;

	*size = le64_to_cpu(comp.lm_state_size.size);
	return 0;
}

/*
 * DMA-map the pages of a migration file and build the device-visible
 * scatter-gather list describing them.
 *
 * On success, lm_file->sgl / sgl_addr / num_sge are populated and the
 * mapping must later be released with pds_vfio_dma_unmap_lm_file() using
 * the same @dir.  On failure everything is unwound and a negative errno is
 * returned.
 */
static int pds_vfio_dma_map_lm_file(struct device *dev,
				    enum dma_data_direction dir,
				    struct pds_vfio_lm_file *lm_file)
{
	struct pds_lm_sg_elem *sgl, *sge;
	struct scatterlist *sg;
	dma_addr_t sgl_addr;
	size_t sgl_size;
	int err;
	int i;

	if (!lm_file)
		return -EINVAL;

	/* dma map file pages */
	err = dma_map_sgtable(dev, &lm_file->sg_table, dir, 0);
	if (err)
		return err;

	lm_file->num_sge = lm_file->sg_table.nents;

	/* alloc sgl */
	sgl_size = lm_file->num_sge * sizeof(struct pds_lm_sg_elem);
	sgl = kzalloc(sgl_size, GFP_KERNEL);
	if (!sgl) {
		err = -ENOMEM;
		goto out_unmap_sgtable;
	}

	/* fill sgl */
	sge = sgl;
	for_each_sgtable_dma_sg(&lm_file->sg_table, sg, i) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->len = cpu_to_le32(sg_dma_len(sg));
		dev_dbg(dev, "addr = %llx, len = %u\n", sge->addr, sge->len);
		sge++;
	}

	/* The sgl itself is read by the device, hence DMA_TO_DEVICE. */
	sgl_addr = dma_map_single(dev, sgl, sgl_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, sgl_addr)) {
		err = -EIO;
		goto out_free_sgl;
	}

	lm_file->sgl = sgl;
	lm_file->sgl_addr = sgl_addr;

	return 0;

out_free_sgl:
	kfree(sgl);
out_unmap_sgtable:
	lm_file->num_sge = 0;
	dma_unmap_sgtable(dev, &lm_file->sg_table, dir, 0);
	return err;
}

/*
 * Release everything pds_vfio_dma_map_lm_file() set up: the sgl mapping and
 * allocation, then the sg_table page mappings.  Safe to call with a NULL
 * @lm_file or one whose sgl was never mapped.
 */
static void pds_vfio_dma_unmap_lm_file(struct device *dev,
				       enum dma_data_direction dir,
				       struct pds_vfio_lm_file *lm_file)
{
	if (!lm_file)
		return;

	/* free sgl */
	if (lm_file->sgl) {
		dma_unmap_single(dev, lm_file->sgl_addr,
				 lm_file->num_sge * sizeof(*lm_file->sgl),
				 DMA_TO_DEVICE);
		kfree(lm_file->sgl);
		lm_file->sgl = NULL;
		lm_file->sgl_addr = DMA_MAPPING_ERROR;
		lm_file->num_sge = 0;
	}

	/* dma unmap file pages */
	dma_unmap_sgtable(dev, &lm_file->sg_table, dir, 0);
}

/*
 * SAVE: have the device write its migration state into the pages of
 * pds_vfio->save_file.  The DMA mappings are made against the PF's struct
 * device (pci_physfn()) since the PF performs the DMA on the VF's behalf.
 * Returns 0 on success or a negative errno.
 */
int pds_vfio_get_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_save = {
			.opcode = PDS_LM_CMD_SAVE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	union pds_core_adminq_comp comp = {};
	struct pds_vfio_lm_file *lm_file;
	int err;

	dev_dbg(&pdev->dev, "vf%u: Get migration state\n", pds_vfio->vf_id);

	lm_file = pds_vfio->save_file;

	err = pds_vfio_dma_map_lm_file(pdsc_dev, DMA_FROM_DEVICE, lm_file);
	if (err) {
		dev_err(&pdev->dev, "failed to map save migration file: %pe\n",
			ERR_PTR(err));
		return err;
	}

	cmd.lm_save.sgl_addr = cpu_to_le64(lm_file->sgl_addr);
	cmd.lm_save.num_sge = cpu_to_le32(lm_file->num_sge);

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_err(&pdev->dev, "failed to get migration state: %pe\n",
			ERR_PTR(err));

	/* Unmap regardless of command outcome; err is preserved. */
	pds_vfio_dma_unmap_lm_file(pdsc_dev, DMA_FROM_DEVICE, lm_file);

	return err;
}

/*
 * RESTORE: have the device read its migration state back out of the pages
 * of pds_vfio->restore_file.  Mirror image of pds_vfio_get_lm_state_cmd(),
 * with the DMA direction reversed.  Returns 0 or a negative errno.
 */
int pds_vfio_set_lm_state_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_restore = {
			.opcode = PDS_LM_CMD_RESTORE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct pci_dev *pdev = pds_vfio_to_pci_dev(pds_vfio);
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	union pds_core_adminq_comp comp = {};
	struct pds_vfio_lm_file *lm_file;
	int err;

	dev_dbg(&pdev->dev, "vf%u: Set migration state\n", pds_vfio->vf_id);

	lm_file = pds_vfio->restore_file;

	err = pds_vfio_dma_map_lm_file(pdsc_dev, DMA_TO_DEVICE, lm_file);
	if (err) {
		dev_err(&pdev->dev,
			"failed to map restore migration file: %pe\n",
			ERR_PTR(err));
		return err;
	}

	cmd.lm_restore.sgl_addr = cpu_to_le64(lm_file->sgl_addr);
	cmd.lm_restore.num_sge = cpu_to_le32(lm_file->num_sge);

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_err(&pdev->dev, "failed to set migration state: %pe\n",
			ERR_PTR(err));

	pds_vfio_dma_unmap_lm_file(pdsc_dev, DMA_TO_DEVICE, lm_file);

	return err;
}

/*
 * Tell the firmware whether a host-driven migration is in progress for this
 * VF.  Only PDS_LM_STA_IN_PROGRESS and PDS_LM_STA_NONE are accepted; other
 * values are rejected with a warning.  Best-effort: failures are only
 * logged.
 */
void pds_vfio_send_host_vf_lm_status_cmd(struct pds_vfio_pci_device *pds_vfio,
					 enum pds_lm_host_vf_status vf_status)
{
	union pds_core_adminq_cmd cmd = {
		.lm_host_vf_status = {
			.opcode = PDS_LM_CMD_HOST_VF_STATUS,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.status = vf_status,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	dev_dbg(dev, "vf%u: Set host VF LM status: %u", pds_vfio->vf_id,
		vf_status);
	if (vf_status != PDS_LM_STA_IN_PROGRESS &&
	    vf_status != PDS_LM_STA_NONE) {
		dev_warn(dev, "Invalid host VF migration status, %d\n",
			 vf_status);
		return;
	}

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err)
		dev_warn(dev, "failed to send host VF migration status: %pe\n",
			 ERR_PTR(err));
}

/*
 * Query dirty-page-tracking capability/state.  @max_regions is in/out: the
 * caller's maximum is sent to the device and overwritten with the device's
 * maximum; *@num_regions gets the currently active region count.  Fails
 * with -EOPNOTSUPP if the device does not offer the SEQ_ACK bitmap type,
 * which is the only type this driver supports.
 */
int pds_vfio_dirty_status_cmd(struct pds_vfio_pci_device *pds_vfio,
			      u64 regions_dma, u8 *max_regions, u8 *num_regions)
{
	union pds_core_adminq_cmd cmd = {
		.lm_dirty_status = {
			.opcode = PDS_LM_CMD_DIRTY_STATUS,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	dev_dbg(dev, "vf%u: Dirty status\n", pds_vfio->vf_id);

	cmd.lm_dirty_status.regions_dma = cpu_to_le64(regions_dma);
	cmd.lm_dirty_status.max_regions = *max_regions;

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err) {
		dev_err(dev, "failed to get dirty status: %pe\n",
			ERR_PTR(err));
		return err;
	}

	/* only support seq_ack approach for now */
	if (!(le32_to_cpu(comp.lm_dirty_status.bmp_type_mask) &
	      BIT(PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK))) {
		dev_err(dev, "Dirty bitmap tracking SEQ_ACK not supported\n");
		return -EOPNOTSUPP;
	}

	*num_regions = comp.lm_dirty_status.num_regions;
	*max_regions = comp.lm_dirty_status.max_regions;

	dev_dbg(dev,
		"Page Tracking Status command successful, max_regions: %d, num_regions: %d, bmp_type: %s\n",
		*max_regions, *num_regions, "PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK");

	return 0;
}

/*
 * Enable SEQ_ACK dirty-page tracking over @num_regions region descriptors
 * located at @regions_dma.  Returns 0 or a negative errno.
 */
int pds_vfio_dirty_enable_cmd(struct pds_vfio_pci_device *pds_vfio,
			      u64 regions_dma, u8 num_regions)
{
	union pds_core_adminq_cmd cmd = {
		.lm_dirty_enable = {
			.opcode = PDS_LM_CMD_DIRTY_ENABLE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.regions_dma = cpu_to_le64(regions_dma),
			.bmp_type = PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK,
			.num_regions = num_regions,
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err) {
		dev_err(dev, "failed dirty tracking enable: %pe\n",
			ERR_PTR(err));
		return err;
	}

	return 0;
}

/*
 * Disable dirty-page tracking.  The completion (read through the
 * lm_dirty_status member of the comp union) must report zero remaining
 * regions; a non-zero count is treated as failure (-EIO) even if the
 * command itself succeeded.
 */
int pds_vfio_dirty_disable_cmd(struct pds_vfio_pci_device *pds_vfio)
{
	union pds_core_adminq_cmd cmd = {
		.lm_dirty_disable = {
			.opcode = PDS_LM_CMD_DIRTY_DISABLE,
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err || comp.lm_dirty_status.num_regions != 0) {
		/* in case num_regions is still non-zero after disable */
		err = err ? err : -EIO;
		dev_err(dev,
			"failed dirty tracking disable: %pe, num_regions %d\n",
			ERR_PTR(err), comp.lm_dirty_status.num_regions);
		return err;
	}

	return 0;
}

/*
 * Transfer a chunk of the dirty bitmap: READ_SEQ pulls the device's "seq"
 * bitmap into host memory, WRITE_ACK pushes the host's "ack" bitmap back,
 * selected by @read_seq.  @sgl_dma/@num_sge describe the host buffer;
 * @offset/@total_len select the byte window within the bitmap.  Returns 0
 * or a negative errno.
 */
int pds_vfio_dirty_seq_ack_cmd(struct pds_vfio_pci_device *pds_vfio,
			       u64 sgl_dma, u16 num_sge, u32 offset,
			       u32 total_len, bool read_seq)
{
	const char *cmd_type_str = read_seq ? "read_seq" : "write_ack";
	union pds_core_adminq_cmd cmd = {
		.lm_dirty_seq_ack = {
			.vf_id = cpu_to_le16(pds_vfio->vf_id),
			.len_bytes = cpu_to_le32(total_len),
			.off_bytes = cpu_to_le32(offset),
			.sgl_addr = cpu_to_le64(sgl_dma),
			.num_sge = cpu_to_le16(num_sge),
		},
	};
	struct device *dev = pds_vfio_to_dev(pds_vfio);
	union pds_core_adminq_comp comp = {};
	int err;

	if (read_seq)
		cmd.lm_dirty_seq_ack.opcode = PDS_LM_CMD_DIRTY_READ_SEQ;
	else
		cmd.lm_dirty_seq_ack.opcode = PDS_LM_CMD_DIRTY_WRITE_ACK;

	err = pds_vfio_client_adminq_cmd(pds_vfio, &cmd, &comp, false);
	if (err) {
		dev_err(dev, "failed cmd Page Tracking %s: %pe\n", cmd_type_str,
			ERR_PTR(err));
		return err;
	}

	return 0;
}
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * IPQ5018 SoC device tree source
 *
 * Copyright (c) 2023 The Linux Foundation. All rights reserved.
 */

#include <dt-bindings/clock/qcom,apss-ipq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-ipq5018.h>
#include <dt-bindings/reset/qcom,gcc-ipq5018.h>

/ {
	interrupt-parent = <&intc>;
	#address-cells = <2>;
	#size-cells = <2>;

	clocks {
		/* Board clocks: rates are expected to be set by the board dts. */
		sleep_clk: sleep-clk {
			compatible = "fixed-clock";
			#clock-cells = <0>;
		};

		xo_board_clk: xo-board-clk {
			compatible = "fixed-clock";
			#clock-cells = <0>;
		};
	};

	cpus {
		#address-cells = <1>;
		#size-cells = <0>;

		cpu0: cpu@0 {
			device_type = "cpu";
			compatible = "arm,cortex-a53";
			reg = <0x0>;
			enable-method = "psci";
			next-level-cache = <&l2_0>;
			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
			operating-points-v2 = <&cpu_opp_table>;
		};

		cpu1: cpu@1 {
			device_type = "cpu";
			compatible = "arm,cortex-a53";
			reg = <0x1>;
			enable-method = "psci";
			next-level-cache = <&l2_0>;
			clocks = <&apcs_glb APCS_ALIAS0_CORE_CLK>;
			operating-points-v2 = <&cpu_opp_table>;
		};

		l2_0: l2-cache {
			compatible = "cache";
			cache-level = <2>;
			cache-size = <0x80000>;
			cache-unified;
		};
	};

	/* Shared OPP table for both A53 cores (opp-shared). */
	cpu_opp_table: opp-table-cpu {
		compatible = "operating-points-v2";
		opp-shared;

		opp-800000000 {
			opp-hz = /bits/ 64 <800000000>;
			opp-microvolt = <1100000>;
			clock-latency-ns = <200000>;
		};

		opp-1008000000 {
			opp-hz = /bits/ 64 <1008000000>;
			opp-microvolt = <1100000>;
			clock-latency-ns = <200000>;
		};
	};

	firmware {
		scm {
			compatible = "qcom,scm-ipq5018", "qcom,scm";
			qcom,sdi-enabled;
		};
	};

	memory@40000000 {
		device_type = "memory";
		/* We expect the bootloader to fill in the size */
		reg = <0x0 0x40000000 0x0 0x0>;
	};

	pmu {
		compatible = "arm,cortex-a53-pmu";
		interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
	};

	psci {
		compatible = "arm,psci-1.0";
		method = "smc";
	};

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		bootloader@4a800000 {
			reg = <0x0 0x4a800000 0x0 0x200000>;
			no-map;
		};

		sbl@4aa00000 {
			reg = <0x0 0x4aa00000 0x0 0x100000>;
			no-map;
		};

		smem@4ab00000 {
			compatible = "qcom,smem";
			reg = <0x0 0x4ab00000 0x0 0x100000>;
			no-map;
			hwlocks = <&tcsr_mutex 3>;
		};

		tz_region: tz@4ac00000 {
			reg = <0x0 0x4ac00000 0x0 0x200000>;
			no-map;
		};
	};

	soc: soc@0 {
		compatible = "simple-bus";
		#address-cells = <1>;
		#size-cells = <1>;
		ranges = <0 0 0 0xffffffff>;

		usbphy0: phy@5b000 {
			compatible = "qcom,ipq5018-usb-hsphy";
			reg = <0x0005b000 0x120>;
			clocks = <&gcc GCC_USB0_PHY_CFG_AHB_CLK>;
			resets = <&gcc GCC_QUSB2_0_PHY_BCR>;
			#phy-cells = <0>;
			status = "disabled";
		};

		tlmm: pinctrl@1000000 {
			compatible = "qcom,ipq5018-tlmm";
			reg = <0x01000000 0x300000>;
			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
			gpio-controller;
			#gpio-cells = <2>;
			gpio-ranges = <&tlmm 0 0 47>;
			interrupt-controller;
			#interrupt-cells = <2>;

			uart1_pins: uart1-state {
				pins = "gpio31", "gpio32", "gpio33", "gpio34";
				function = "blsp1_uart1";
				drive-strength = <8>;
				bias-pull-down;
			};
		};

		gcc: clock-controller@1800000 {
			compatible = "qcom,gcc-ipq5018";
			reg = <0x01800000 0x80000>;
			/* Trailing <0> entries are optional PCIe/UNIPHY pipe clocks. */
			clocks = <&xo_board_clk>,
				 <&sleep_clk>,
				 <0>,
				 <0>,
				 <0>,
				 <0>,
				 <0>,
				 <0>,
				 <0>;
			#clock-cells = <1>;
			#reset-cells = <1>;
		};

		tcsr_mutex: hwlock@1905000 {
			compatible = "qcom,tcsr-mutex";
			reg = <0x01905000 0x20000>;
			#hwlock-cells = <1>;
		};

		sdhc_1: mmc@7804000 {
			compatible = "qcom,ipq5018-sdhci", "qcom,sdhci-msm-v5";
			reg = <0x7804000 0x1000>;
			reg-names = "hc";
			interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
				     <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-names = "hc_irq", "pwr_irq";
			clocks = <&gcc GCC_SDCC1_AHB_CLK>,
				 <&gcc GCC_SDCC1_APPS_CLK>,
				 <&xo_board_clk>;
			clock-names = "iface", "core", "xo";
			non-removable;
			status = "disabled";
		};

		blsp_dma: dma-controller@7884000 {
			compatible = "qcom,bam-v1.7.0";
			reg = <0x07884000 0x1d000>;
			interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&gcc GCC_BLSP1_AHB_CLK>;
			clock-names = "bam_clk";
			#dma-cells = <1>;
			qcom,ee = <0>;
		};

		blsp1_uart1: serial@78af000 {
			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
			reg = <0x078af000 0x200>;
			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>,
				 <&gcc GCC_BLSP1_AHB_CLK>;
			clock-names = "core", "iface";
			status = "disabled";
		};

		blsp1_spi1: spi@78b5000 {
			compatible = "qcom,spi-qup-v2.2.1";
			#address-cells = <1>;
			#size-cells = <0>;
			reg = <0x078b5000 0x600>;
			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
				 <&gcc GCC_BLSP1_AHB_CLK>;
			clock-names = "core", "iface";
			dmas = <&blsp_dma 4>, <&blsp_dma 5>;
			dma-names = "tx", "rx";
			status = "disabled";
		};

		usb: usb@8af8800 {
			compatible = "qcom,ipq5018-dwc3", "qcom,dwc3";
			reg = <0x08af8800 0x400>;
			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-names = "hs_phy_irq";
			clocks = <&gcc GCC_USB0_MASTER_CLK>,
				 <&gcc GCC_SYS_NOC_USB0_AXI_CLK>,
				 <&gcc GCC_USB0_SLEEP_CLK>,
				 <&gcc GCC_USB0_MOCK_UTMI_CLK>;
			clock-names = "core",
				      "iface",
				      "sleep",
				      "mock_utmi";
			resets = <&gcc GCC_USB0_BCR>;
			qcom,select-utmi-as-pipe-clk;
			#address-cells = <1>;
			#size-cells = <1>;
			ranges;
			status = "disabled";

			usb_dwc: usb@8a00000 {
				compatible = "snps,dwc3";
				reg = <0x08a00000 0xe000>;
				clocks = <&gcc GCC_USB0_MOCK_UTMI_CLK>;
				clock-names = "ref";
				interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
				phy-names = "usb2-phy";
				phys = <&usbphy0>;
				tx-fifo-resize;
				snps,is-utmi-l1-suspend;
				snps,hird-threshold = /bits/ 8 <0x0>;
				snps,dis_u2_susphy_quirk;
				snps,dis_u3_susphy_quirk;
			};
		};

		intc: interrupt-controller@b000000 {
			compatible = "qcom,msm-qgic2";
			reg = <0x0b000000 0x1000>,  /* GICD */
			      <0x0b002000 0x2000>,  /* GICC */
			      <0x0b001000 0x1000>,  /* GICH */
			      <0x0b004000 0x2000>;  /* GICV */
			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
			interrupt-controller;
			#interrupt-cells = <3>;
			#address-cells = <1>;
			#size-cells = <1>;
			ranges = <0 0x0b00a000 0x1ffa>;

			v2m0: v2m@0 {
				compatible = "arm,gic-v2m-frame";
				reg = <0x00000000 0xff8>;
				msi-controller;
			};

			v2m1: v2m@1000 {
				compatible = "arm,gic-v2m-frame";
				reg = <0x00001000 0xff8>;
				msi-controller;
			};
		};

		watchdog: watchdog@b017000 {
			compatible = "qcom,apss-wdt-ipq5018", "qcom,kpss-wdt";
			reg = <0x0b017000 0x40>;
			interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
			clocks = <&sleep_clk>;
		};

		apcs_glb: mailbox@b111000 {
			compatible = "qcom,ipq5018-apcs-apps-global",
				     "qcom,ipq6018-apcs-apps-global";
			reg = <0x0b111000 0x1000>;
			#clock-cells = <1>;
			clocks = <&a53pll>, <&xo_board_clk>, <&gcc GPLL0>;
			clock-names = "pll", "xo", "gpll0";
			#mbox-cells = <1>;
		};

		a53pll: clock@b116000 {
			compatible = "qcom,ipq5018-a53pll";
			reg = <0x0b116000 0x40>;
			#clock-cells = <0>;
			clocks = <&xo_board_clk>;
			clock-names = "xo";
		};

		timer@b120000 {
			compatible = "arm,armv7-timer-mem";
			reg = <0x0b120000 0x1000>;
			#address-cells = <1>;
			#size-cells = <1>;
			ranges;

			/*
			 * NOTE(review): unit-address b120000 vs first reg
			 * 0x0b121000 — confirm against the SoC memory map.
			 */
			frame@b120000 {
				reg = <0x0b121000 0x1000>,
				      <0x0b122000 0x1000>;
				interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
					     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
				frame-number = <0>;
			};

			frame@b123000 {
				reg = <0xb123000 0x1000>;
				interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
				frame-number = <1>;
				status = "disabled";
			};

			frame@b124000 {
				frame-number = <2>;
				interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
				reg = <0x0b124000 0x1000>;
				status = "disabled";
			};

			frame@b125000 {
				reg = <0x0b125000 0x1000>;
				interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
				frame-number = <3>;
				status = "disabled";
			};

			frame@b126000 {
				reg = <0x0b126000 0x1000>;
				interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
				frame-number = <4>;
				status = "disabled";
			};

			frame@b127000 {
				reg = <0x0b127000 0x1000>;
				interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
				frame-number = <5>;
				status = "disabled";
			};

			frame@b128000 {
				reg = <0x0b128000 0x1000>;
				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
				frame-number = <6>;
				status = "disabled";
			};
		};
	};

	timer {
		compatible = "arm,armv8-timer";
		interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
	};
};
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2021 Intel Corporation
 */

#ifndef __SOUND_SOC_SOF_PCI_H
#define __SOUND_SOC_SOF_PCI_H

/* Shared PM ops for SOF PCI platform drivers; defined in the common PCI code. */
extern const struct dev_pm_ops sof_pci_pm;

/* Common PCI driver entry points shared by the per-platform SOF PCI drivers. */
int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id);
void sof_pci_remove(struct pci_dev *pci);
void sof_pci_shutdown(struct pci_dev *pci);

#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2020 Arm Ltd.
 */

#ifndef _CCU_SUN50I_H616_H_
#define _CCU_SUN50I_H616_H_

#include <dt-bindings/clock/sun50i-h616-ccu.h>
#include <dt-bindings/reset/sun50i-h616-ccu.h>

/*
 * Driver-internal clock indices.  Gaps in the numbering are clocks that are
 * part of the DT binding ABI and therefore defined in the dt-bindings
 * headers included above, not here.
 */
#define CLK_OSC12M		0
#define CLK_PLL_CPUX		1
#define CLK_PLL_DDR0		2
#define CLK_PLL_DDR1		3

/* PLL_PERIPH0 exported for PRCM */

#define CLK_PLL_PERIPH0_2X	5
#define CLK_PLL_PERIPH1		6
#define CLK_PLL_PERIPH1_2X	7
#define CLK_PLL_GPU		8
#define CLK_PLL_VIDEO0		9
#define CLK_PLL_VIDEO0_4X	10
#define CLK_PLL_VIDEO1		11
#define CLK_PLL_VIDEO1_4X	12
#define CLK_PLL_VIDEO2		13
#define CLK_PLL_VIDEO2_4X	14
#define CLK_PLL_VE		15
#define CLK_PLL_DE		16
#define CLK_PLL_AUDIO_HS	17
#define CLK_PLL_AUDIO_1X	18
#define CLK_PLL_AUDIO_2X	19
#define CLK_PLL_AUDIO_4X	20

/* CPUX clock exported for DVFS */

#define CLK_AXI			22
#define CLK_CPUX_APB		23
#define CLK_PSI_AHB1_AHB2	24
#define CLK_AHB3		25

/* APB1 clock exported for PIO */

#define CLK_APB2		27
#define CLK_MBUS		28

/* All module clocks and bus gates are exported except DRAM */

#define CLK_DRAM		49

#define CLK_BUS_DRAM		56

/* Total clock count: one past the last binding-exported index. */
#define CLK_NUMBER		(CLK_BUS_GPADC + 1)

#endif /* _CCU_SUN50I_H616_H_ */
/*
 *  Fast C2P (Chunky-to-Planar) Conversion
 *
 *  Copyright (C) 2003-2008 Geert Uytterhoeven
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive
 *  for more details.
 */

#include <linux/module.h>
#include <linux/string.h>

#include <linux/unaligned.h>

#include "c2p.h"
#include "c2p_core.h"


    /*
     *  Perform a full C2P step on 16 8-bit pixels, stored in 4 32-bit words
     *  containing
     *    - 16 8-bit chunky pixels on input
     *    - permutated planar data (2 planes per 32-bit word) on output
     */

static void c2p_16x8(u32 d[4])
{
	transp4(d, 8, 2);
	transp4(d, 1, 2);
	transp4x(d, 16, 2);
	transp4x(d, 2, 2);
	transp4(d, 4, 1);
}


    /*
     *  Array containing the permutation indices of the planar data after c2p
     */

static const int perm_c2p_16x8[4] = { 1, 3, 0, 2 };


    /*
     *  Store a full block of iplan2 data after c2p conversion
     *  (writes bpp/2 32-bit words, using the permutation table above)
     */

static inline void store_iplan2(void *dst, u32 bpp, u32 d[4])
{
	int i;

	for (i = 0; i < bpp/2; i++, dst += 4)
		put_unaligned_be32(d[perm_c2p_16x8[i]], dst);
}


    /*
     *  Store a partial block of iplan2 data after c2p conversion
     *  (only bits selected by mask are replaced; the rest are read back
     *  from the destination and preserved)
     */

static inline void store_iplan2_masked(void *dst, u32 bpp, u32 d[4], u32 mask)
{
	int i;

	for (i = 0; i < bpp/2; i++, dst += 4)
		put_unaligned_be32(comp(d[perm_c2p_16x8[i]],
					get_unaligned_be32(dst), mask),
				   dst);
}


    /*
     *  c2p_iplan2 - Copy 8-bit chunky image data to an interleaved planar
     *  frame buffer with 2 bytes of interleave
     *  @dst: Starting address of the planar frame buffer
     *  @src: Starting address of the chunky image data
     *  @dx: Horizontal destination offset (in pixels)
     *  @dy: Vertical destination offset (in pixels)
     *  @width: Image width (in pixels)
     *  @height: Image height (in pixels)
     *  @dst_nextline: Frame buffer offset to the next line (in bytes)
     *  @src_nextline: Image offset to the next line (in bytes)
     *  @bpp: Bits per pixel of the planar frame buffer (2, 4, or 8)
     */

void c2p_iplan2(void *dst, const void *src, u32 dx, u32 dy, u32 width,
		u32 height, u32 dst_nextline, u32 src_nextline, u32 bpp)
{
	union {
		u8 pixels[16];
		u32 words[4];
	} d;
	u32 dst_idx, first, last, w;
	const u8 *c;
	void *p;

	/* Position at the 16-pixel-aligned destination word containing dx. */
	dst += dy*dst_nextline+(dx & ~15)*bpp;
	dst_idx = dx % 16;
	/* Per-plane masks for the partial first and last destination words. */
	first = 0xffffU >> dst_idx;
	first |= first << 16;
	last = 0xffffU ^ (0xffffU >> ((dst_idx+width) % 16));
	last |= last << 16;
	while (height--) {
		c = src;
		p = dst;
		w = width;
		if (dst_idx+width <= 16) {
			/* Single destination word */
			first &= last;
			memset(d.pixels, 0, sizeof(d));
			memcpy(d.pixels+dst_idx, c, width);
			c += width;
			c2p_16x8(d.words);
			store_iplan2_masked(p, bpp, d.words, first);
			p += bpp*2;
		} else {
			/* Multiple destination words */
			w = width;
			/* Leading bits */
			if (dst_idx) {
				w = 16 - dst_idx;
				memset(d.pixels, 0, dst_idx);
				memcpy(d.pixels+dst_idx, c, w);
				c += w;
				c2p_16x8(d.words);
				store_iplan2_masked(p, bpp, d.words, first);
				p += bpp*2;
				w = width-w;
			}

			/* Main chunk */
			while (w >= 16) {
				memcpy(d.pixels, c, 16);
				c += 16;
				c2p_16x8(d.words);
				store_iplan2(p, bpp, d.words);
				p += bpp*2;
				w -= 16;
			}

			/* Trailing bits */
			w %= 16;
			if (w > 0) {
				memcpy(d.pixels, c, w);
				memset(d.pixels+w, 0, 16-w);
				c2p_16x8(d.words);
				store_iplan2_masked(p, bpp, d.words, last);
			}
		}
		src += src_nextline;
		dst += dst_nextline;
	}
}
EXPORT_SYMBOL_GPL(c2p_iplan2);

MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_PARAMS_H
#define PVR_PARAMS_H

#include "pvr_rogue_fwif.h"

#include <linux/cache.h>
#include <linux/compiler_attributes.h>

/*
 * This is the definitive list of types allowed in the definition of
 * %PVR_DEVICE_PARAMS.
 */
#define PVR_PARAM_TYPE_X32_C u32

/*
 * This macro defines all device-specific parameters; that is parameters which
 * are set independently per device.
 *
 * The X-macro accepts the following arguments. Arguments marked with [debugfs]
 * are ignored when debugfs is disabled; values used for these arguments may
 * safely be gated behind CONFIG_DEBUG_FS.
 *
 * @type_: The definitive list of allowed values is PVR_PARAM_TYPE_*_C.
 * @name_: Name of the parameter. This is used both as the field name in C and
 * stringified as the parameter name.
 * @value_: Initial/default value.
 * @desc_: String literal used as help text to describe the usage of this
 * parameter.
 * @mode_: [debugfs] One of {RO,RW}. The access mode of the debugfs entry for
 * this parameter.
 * @update_: [debugfs] When debugfs support is enabled, parameters may be
 * updated at runtime. When this happens, this function will be
 * called to allow changes to propagate. The signature of this
 * function is:
 *
 *    void (*)(struct pvr_device *pvr_dev, T old_val, T new_val)
 *
 * Where T is the C type associated with @type_.
 *
 * If @mode_ does not allow write access, this function will never be
 * called. In this case, or if no update callback is required, you
 * should specify NULL for this argument.
 */
#define PVR_DEVICE_PARAMS \
	X(X32, fw_trace_mask, ROGUE_FWIF_LOG_TYPE_NONE, \
	  "Enable FW trace for the specified groups. Specifying 0 disables " \
	  "all FW tracing.", \
	  RW, pvr_fw_trace_mask_update)

/*
 * One field per PVR_DEVICE_PARAMS entry; the X-macro expands each entry's
 * type and name into a struct member.
 */
struct pvr_device_params {
#define X(type_, name_, value_, desc_, ...) \
	PVR_PARAM_TYPE_##type_##_C name_;
	PVR_DEVICE_PARAMS
#undef X
};

int pvr_device_params_init(struct pvr_device_params *params);

#if defined(CONFIG_DEBUG_FS)
/* Forward declaration from "pvr_device.h". */
struct pvr_device;

/* Forward declaration from <linux/dcache.h>. */
struct dentry;

void pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
#endif /* defined(CONFIG_DEBUG_FS) */

#endif /* PVR_PARAMS_H */
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. */ #ifndef _DPU_HW_CATALOG_H #define _DPU_HW_CATALOG_H #include <linux/kernel.h> #include <linux/bug.h> #include <linux/bitmap.h> #include <linux/err.h> /** * Max hardware block count: For ex: max 12 SSPP pipes or * 5 ctl paths. In all cases, it can have max 12 hardware blocks * based on current design */ #define MAX_BLOCKS 12 #define DPU_HW_BLK_NAME_LEN 16 #define DPU_MAX_IMG_WIDTH 0x3fff #define DPU_MAX_IMG_HEIGHT 0x3fff #define CRTC_DUAL_MIXERS 2 #define MAX_XIN_COUNT 16 /** * MDP TOP BLOCK features * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats * @DPU_MDP_PERIPH_0_REMOVED Indicates that access to periph top0 block results * in a failure * @DPU_MDP_VSYNC_SEL Enables vsync source selection via MDP_VSYNC_SEL register * (moved into INTF block since DPU 5.0.0) * @DPU_MDP_MAX Maximum value */ enum { DPU_MDP_PANIC_PER_PIPE = 0x1, DPU_MDP_10BIT_SUPPORT, DPU_MDP_AUDIO_SELECT, DPU_MDP_PERIPH_0_REMOVED, DPU_MDP_VSYNC_SEL, DPU_MDP_MAX }; /** * SSPP sub-blocks/features * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support * @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible alogorithm support (includes QSEED3, QSEED3LITE and QSEED4) * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes * @DPU_SSPP_CSC, Support of Color space converion * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer * @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper * 
@DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec * @DPU_SSPP_CDP Supports client driven prefetch * @DPU_SSPP_INLINE_ROTATION Support inline rotation * @DPU_SSPP_MAX maximum value */ enum { DPU_SSPP_SCALER_QSEED2 = 0x1, DPU_SSPP_SCALER_QSEED3_COMPATIBLE, DPU_SSPP_SCALER_RGB, DPU_SSPP_CSC, DPU_SSPP_CSC_10BIT, DPU_SSPP_CURSOR, DPU_SSPP_QOS, DPU_SSPP_QOS_8LVL, DPU_SSPP_EXCL_RECT, DPU_SSPP_SMART_DMA_V1, DPU_SSPP_SMART_DMA_V2, DPU_SSPP_TS_PREFILL, DPU_SSPP_TS_PREFILL_REC1, DPU_SSPP_CDP, DPU_SSPP_INLINE_ROTATION, DPU_SSPP_MAX }; /* * MIXER sub-blocks/features * @DPU_MIXER_LAYER Layer mixer layer blend configuration, * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration * @DPU_MIXER_GC Gamma correction block * @DPU_DIM_LAYER Layer mixer supports dim layer * @DPU_MIXER_COMBINED_ALPHA Layer mixer has combined alpha register * @DPU_MIXER_MAX maximum value */ enum { DPU_MIXER_LAYER = 0x1, DPU_MIXER_SOURCESPLIT, DPU_MIXER_GC, DPU_DIM_LAYER, DPU_MIXER_COMBINED_ALPHA, DPU_MIXER_MAX }; /** * DSPP sub-blocks * @DPU_DSPP_PCC Panel color correction block */ enum { DPU_DSPP_PCC = 0x1, DPU_DSPP_MAX }; /** * PINGPONG sub-blocks * @DPU_PINGPONG_TE2 Additional tear check block for split pipes * @DPU_PINGPONG_SPLIT PP block supports split fifo * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo * @DPU_PINGPONG_DITHER Dither blocks * @DPU_PINGPONG_DSC PP block supports DSC * @DPU_PINGPONG_MAX */ enum { DPU_PINGPONG_TE2 = 0x1, DPU_PINGPONG_SPLIT, DPU_PINGPONG_SLAVE, DPU_PINGPONG_DITHER, DPU_PINGPONG_DSC, DPU_PINGPONG_MAX }; /** * CTL sub-blocks * @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs) * @DPU_CTL_VM_CFG: CTL config to support multiple VMs * @DPU_CTL_HAS_LAYER_EXT4: CTL has the CTL_LAYER_EXT4 register * @DPU_CTL_DSPP_BLOCK_FLUSH: CTL config to support dspp sub-block flush * @DPU_CTL_MAX */ enum { DPU_CTL_SPLIT_DISPLAY = 0x1, DPU_CTL_ACTIVE_CFG, 
DPU_CTL_FETCH_ACTIVE, DPU_CTL_VM_CFG, DPU_CTL_HAS_LAYER_EXT4, DPU_CTL_DSPP_SUB_BLOCK_FLUSH, DPU_CTL_MAX }; /** * INTF sub-blocks * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which * pixel data arrives to this INTF * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate * than video timing * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register * @DPU_INTF_MAX */ enum { DPU_INTF_INPUT_CTRL = 0x1, DPU_DATA_HCTL_EN, DPU_INTF_STATUS_SUPPORTED, DPU_INTF_MAX }; /** * WB sub-blocks and features * @DPU_WB_LINE_MODE Writeback module supports line/linear mode * @DPU_WB_BLOCK_MODE Writeback module supports block mode read * @DPU_WB_CHROMA_DOWN, Writeback chroma down block, * @DPU_WB_DOWNSCALE, Writeback integer downscaler, * @DPU_WB_DITHER, Dither block * @DPU_WB_TRAFFIC_SHAPER, Writeback traffic shaper bloc * @DPU_WB_UBWC, Writeback Universal bandwidth compression * @DPU_WB_YUV_CONFIG Writeback supports output of YUV colorspace * @DPU_WB_PIPE_ALPHA Writeback supports pipe alpha * @DPU_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in * the destination image * @DPU_WB_QOS, Writeback supports QoS control, danger/safe/creq * @DPU_WB_QOS_8LVL, Writeback supports 8-level QoS control * @DPU_WB_CDP Writeback supports client driven prefetch * @DPU_WB_INPUT_CTRL Writeback supports from which pp block input pixel * data arrives. 
* @DPU_WB_CROP CWB supports cropping * @DPU_WB_MAX maximum value */ enum { DPU_WB_LINE_MODE = 0x1, DPU_WB_BLOCK_MODE, DPU_WB_UBWC, DPU_WB_YUV_CONFIG, DPU_WB_PIPE_ALPHA, DPU_WB_XY_ROI_OFFSET, DPU_WB_QOS, DPU_WB_QOS_8LVL, DPU_WB_CDP, DPU_WB_INPUT_CTRL, DPU_WB_CROP, DPU_WB_MAX }; /** * VBIF sub-blocks and features * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap * @DPU_VBIF_MAX maximum value */ enum { DPU_VBIF_QOS_OTLIM = 0x1, DPU_VBIF_QOS_REMAP, DPU_VBIF_MAX }; /** * DSC sub-blocks/features * @DPU_DSC_OUTPUT_CTRL Configure which PINGPONG block gets * the pixel output from this DSC. * @DPU_DSC_HW_REV_1_2 DSC block supports DSC 1.1 and 1.2 * @DPU_DSC_NATIVE_42x_EN Supports NATIVE_422_EN and NATIVE_420_EN encoding * @DPU_DSC_MAX */ enum { DPU_DSC_OUTPUT_CTRL = 0x1, DPU_DSC_HW_REV_1_2, DPU_DSC_NATIVE_42x_EN, DPU_DSC_MAX }; /** * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU * @name: string name for debug purposes * @id: enum identifying this block * @base: register base offset to mdss * @len: length of hardware block * @features bit mask identifying sub-blocks/features */ #define DPU_HW_BLK_INFO \ char name[DPU_HW_BLK_NAME_LEN]; \ u32 id; \ u32 base; \ u32 len; \ unsigned long features /** * struct dpu_scaler_blk: Scaler information * @name: string name for debug purposes * @base: offset of this sub-block relative to the block offset * @len: register block length of this sub-block * @version: qseed block revision, on QSEED3+ platforms this is the value of * scaler_blk.base + QSEED3_HW_VERSION registers. 
*/ struct dpu_scaler_blk { char name[DPU_HW_BLK_NAME_LEN]; u32 base; u32 len; u32 version; }; struct dpu_csc_blk { char name[DPU_HW_BLK_NAME_LEN]; u32 base; u32 len; }; /** * struct dpu_pp_blk : Pixel processing sub-blk information * @name: string name for debug purposes * @base: offset of this sub-block relative to the block offset * @len: register block length of this sub-block * @version: HW Algorithm version */ struct dpu_pp_blk { char name[DPU_HW_BLK_NAME_LEN]; u32 base; u32 len; u32 version; }; /** * struct dpu_dsc_blk - DSC Encoder sub-blk information * @name: string name for debug purposes * @base: offset of this sub-block relative to the block offset * @len: register block length of this sub-block */ struct dpu_dsc_blk { char name[DPU_HW_BLK_NAME_LEN]; u32 base; u32 len; }; /** * enum dpu_qos_lut_usage - define QoS LUT use cases */ enum dpu_qos_lut_usage { DPU_QOS_LUT_USAGE_LINEAR, DPU_QOS_LUT_USAGE_MACROTILE, DPU_QOS_LUT_USAGE_NRT, DPU_QOS_LUT_USAGE_MAX, }; /** * struct dpu_qos_lut_entry - define QoS LUT table entry * @fl: fill level, or zero on last entry to indicate default lut * @lut: lut to use if equal to or less than fill level */ struct dpu_qos_lut_entry { u32 fl; u64 lut; }; /** * struct dpu_qos_lut_tbl - define QoS LUT table * @nentry: number of entry in this table * @entries: Pointer to table entries */ struct dpu_qos_lut_tbl { u32 nentry; const struct dpu_qos_lut_entry *entries; }; /** * struct dpu_rotation_cfg - define inline rotation config * @rot_maxheight: max pre rotated height allowed for rotation * @rot_num_formats: number of elements in @rot_format_list * @rot_format_list: list of supported rotator formats */ struct dpu_rotation_cfg { u32 rot_maxheight; size_t rot_num_formats; const u32 *rot_format_list; }; /** * struct dpu_caps - define DPU capabilities * @max_mixer_width max layer mixer line width support. 
* @max_mixer_blendstages max layer mixer blend stages or * supported z order * @has_src_split source split feature status * @has_dim_layer dim layer feature status * @has_idle_pc indicate if idle power collapse feature is supported * @has_3d_merge indicate if 3D merge is supported * @max_linewidth max linewidth for sspp * @pixel_ram_size size of latency hiding and de-tiling buffer in bytes * @max_hdeci_exp max horizontal decimation supported (max is 2^value) * @max_vdeci_exp max vertical decimation supported (max is 2^value) */ struct dpu_caps { u32 max_mixer_width; u32 max_mixer_blendstages; bool has_src_split; bool has_dim_layer; bool has_idle_pc; bool has_3d_merge; /* SSPP limits */ u32 max_linewidth; u32 pixel_ram_size; u32 max_hdeci_exp; u32 max_vdeci_exp; }; /** * struct dpu_sspp_sub_blks : SSPP sub-blocks * common: Pointer to common configurations shared by sub blocks * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps * @qseed_ver: qseed version * @scaler_blk: * @csc_blk: * @format_list: Pointer to list of supported formats * @num_formats: Number of supported formats * @dpu_rotation_cfg: inline rotation configuration */ struct dpu_sspp_sub_blks { u32 max_per_pipe_bw; u32 qseed_ver; struct dpu_scaler_blk scaler_blk; struct dpu_pp_blk csc_blk; const u32 *format_list; u32 num_formats; const struct dpu_rotation_cfg *rotation_cfg; }; /** * struct dpu_lm_sub_blks: information of mixer block * @maxwidth: Max pixel width supported by this mixer * @maxblendstages: Max number of blend-stages supported * @blendstage_base: Blend-stage register base offset */ struct dpu_lm_sub_blks { u32 maxwidth; u32 maxblendstages; u32 blendstage_base[MAX_BLOCKS]; }; /** * struct dpu_dspp_sub_blks: Information of DSPP block * @pcc: pixel color correction block */ struct dpu_dspp_sub_blks { struct dpu_pp_blk pcc; }; struct dpu_pingpong_sub_blks { struct dpu_pp_blk te; struct dpu_pp_blk te2; struct dpu_pp_blk dither; }; /** * struct dpu_dsc_sub_blks - DSC sub-blks * 
@enc: DSC encoder sub-block * @ctl: DSC controller sub-block */ struct dpu_dsc_sub_blks { struct dpu_dsc_blk enc; struct dpu_dsc_blk ctl; }; /** * dpu_clk_ctrl_type - Defines top level clock control signals */ enum dpu_clk_ctrl_type { DPU_CLK_CTRL_NONE, DPU_CLK_CTRL_VIG0, DPU_CLK_CTRL_VIG1, DPU_CLK_CTRL_VIG2, DPU_CLK_CTRL_VIG3, DPU_CLK_CTRL_VIG4, DPU_CLK_CTRL_RGB0, DPU_CLK_CTRL_RGB1, DPU_CLK_CTRL_RGB2, DPU_CLK_CTRL_RGB3, DPU_CLK_CTRL_DMA0, DPU_CLK_CTRL_DMA1, DPU_CLK_CTRL_DMA2, DPU_CLK_CTRL_DMA3, DPU_CLK_CTRL_DMA4, DPU_CLK_CTRL_DMA5, DPU_CLK_CTRL_CURSOR0, DPU_CLK_CTRL_CURSOR1, DPU_CLK_CTRL_INLINE_ROT0_SSPP, DPU_CLK_CTRL_REG_DMA, DPU_CLK_CTRL_WB2, DPU_CLK_CTRL_MAX, }; /* struct dpu_clk_ctrl_reg : Clock control register * @reg_off: register offset * @bit_off: bit offset */ struct dpu_clk_ctrl_reg { u32 reg_off; u32 bit_off; }; /* struct dpu_mdp_cfg : MDP TOP-BLK instance info * @id: index identifying this block * @base: register base offset to mdss * @features bit mask identifying sub-blocks/features * @clk_ctrls clock control register definition */ struct dpu_mdp_cfg { DPU_HW_BLK_INFO; struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX]; }; /* struct dpu_ctl_cfg : MDP CTL instance info * @id: index identifying this block * @base: register base offset to mdss * @features bit mask identifying sub-blocks/features * @intr_start: interrupt index for CTL_START */ struct dpu_ctl_cfg { DPU_HW_BLK_INFO; unsigned int intr_start; }; /** * struct dpu_sspp_cfg - information of source pipes * @id: index identifying this block * @base register offset of this block * @features bit mask identifying sub-blocks/features * @sblk: SSPP sub-blocks information * @xin_id: bus client identifier * @clk_ctrl clock control identifier * @type sspp type identifier */ struct dpu_sspp_cfg { DPU_HW_BLK_INFO; const struct dpu_sspp_sub_blks *sblk; u32 xin_id; enum dpu_clk_ctrl_type clk_ctrl; u32 type; }; /** * struct dpu_lm_cfg - information of layer mixer blocks * @id: index identifying this block * 
@base register offset of this block * @features bit mask identifying sub-blocks/features * @sblk: LM Sub-blocks information * @pingpong: ID of connected PingPong, PINGPONG_NONE if unsupported * @lm_pair: ID of LM that can be controlled by same CTL */ struct dpu_lm_cfg { DPU_HW_BLK_INFO; const struct dpu_lm_sub_blks *sblk; u32 pingpong; u32 dspp; unsigned long lm_pair; }; /** * struct dpu_dspp_cfg - information of DSPP blocks * @id enum identifying this block * @base register offset of this block * @features bit mask identifying sub-blocks/features * supported by this block * @sblk sub-blocks information */ struct dpu_dspp_cfg { DPU_HW_BLK_INFO; const struct dpu_dspp_sub_blks *sblk; }; /** * struct dpu_pingpong_cfg - information of PING-PONG blocks * @id enum identifying this block * @base register offset of this block * @features bit mask identifying sub-blocks/features * @intr_done: index for PINGPONG done interrupt * @intr_rdptr: index for PINGPONG readpointer done interrupt * @sblk sub-blocks information */ struct dpu_pingpong_cfg { DPU_HW_BLK_INFO; u32 merge_3d; unsigned int intr_done; unsigned int intr_rdptr; const struct dpu_pingpong_sub_blks *sblk; }; /** * struct dpu_merge_3d_cfg - information of DSPP blocks * @id enum identifying this block * @base register offset of this block * @features bit mask identifying sub-blocks/features * supported by this block * @sblk sub-blocks information */ struct dpu_merge_3d_cfg { DPU_HW_BLK_INFO; const struct dpu_merge_3d_sub_blks *sblk; }; /** * struct dpu_dsc_cfg - information of DSC blocks * @id enum identifying this block * @base register offset of this block * @len: length of hardware block * @features bit mask identifying sub-blocks/features * @sblk: sub-blocks information */ struct dpu_dsc_cfg { DPU_HW_BLK_INFO; const struct dpu_dsc_sub_blks *sblk; }; /** * struct dpu_intf_cfg - information of timing engine blocks * @id enum identifying this block * @base register offset of this block * @features bit mask 
identifying sub-blocks/features * @type: Interface type(DSI, DP, HDMI) * @controller_id: Controller Instance ID in case of multiple of intf type * @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch * @intr_underrun: index for INTF underrun interrupt * @intr_vsync: index for INTF VSYNC interrupt * @intr_tear_rd_ptr: Index for INTF TEAR_RD_PTR interrupt */ struct dpu_intf_cfg { DPU_HW_BLK_INFO; u32 type; /* interface type*/ u32 controller_id; u32 prog_fetch_lines_worst_case; unsigned int intr_underrun; unsigned int intr_vsync; unsigned int intr_tear_rd_ptr; }; /** * struct dpu_wb_cfg - information of writeback blocks * @DPU_HW_BLK_INFO: refer to the description above for DPU_HW_BLK_INFO * @vbif_idx: vbif client index * @maxlinewidth: max line width supported by writeback block * @xin_id: bus client identifier * @intr_wb_done: interrupt index for WB_DONE * @format_list: list of formats supported by this writeback block * @num_formats: number of formats supported by this writeback block * @clk_ctrl: clock control identifier */ struct dpu_wb_cfg { DPU_HW_BLK_INFO; u8 vbif_idx; u32 maxlinewidth; u32 xin_id; unsigned int intr_wb_done; const u32 *format_list; u32 num_formats; enum dpu_clk_ctrl_type clk_ctrl; }; /** * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting * @pps pixel per seconds * @ot_limit OT limit to use up to specified pixel per second */ struct dpu_vbif_dynamic_ot_cfg { u64 pps; u32 ot_limit; }; /** * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table * @count length of cfg * @cfg pointer to array of configuration settings with * ascending requirements */ struct dpu_vbif_dynamic_ot_tbl { u32 count; const struct dpu_vbif_dynamic_ot_cfg *cfg; }; /** * struct dpu_vbif_qos_tbl - QoS priority table * @npriority_lvl num of priority level * @priority_lvl pointer to array of priority level in ascending order */ struct dpu_vbif_qos_tbl { u32 npriority_lvl; const u32 *priority_lvl; }; /** * struct dpu_vbif_cfg - information of VBIF 
blocks * @id enum identifying this block * @base register offset of this block * @features bit mask identifying sub-blocks/features * @ot_rd_limit default OT read limit * @ot_wr_limit default OT write limit * @xin_halt_timeout maximum time (in usec) for xin to halt * @qos_rp_remap_size size of VBIF_XINL_QOS_RP_REMAP register space * @dynamic_ot_rd_tbl dynamic OT read configuration table * @dynamic_ot_wr_tbl dynamic OT write configuration table * @qos_rt_tbl real-time QoS priority table * @qos_nrt_tbl non-real-time QoS priority table * @memtype_count number of defined memtypes * @memtype array of xin memtype definitions */ struct dpu_vbif_cfg { DPU_HW_BLK_INFO; u32 default_ot_rd_limit; u32 default_ot_wr_limit; u32 xin_halt_timeout; u32 qos_rp_remap_size; struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl; struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl; struct dpu_vbif_qos_tbl qos_rt_tbl; struct dpu_vbif_qos_tbl qos_nrt_tbl; u32 memtype_count; u32 memtype[MAX_XIN_COUNT]; }; /** * struct dpu_cdm_cfg - information of chroma down blocks * @name string name for debug purposes * @id enum identifying this block * @base register offset of this block * @features bit mask identifying sub-blocks/features */ struct dpu_cdm_cfg { DPU_HW_BLK_INFO; }; /** * Define CDP use cases * @DPU_PERF_CDP_USAGE_RT: real-time use cases * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD */ enum { DPU_PERF_CDP_USAGE_RT, DPU_PERF_CDP_USAGE_NRT, DPU_PERF_CDP_USAGE_MAX }; /** * struct dpu_perf_cdp_cfg - define CDP use case configuration * @rd_enable: true if read pipe CDP is enabled * @wr_enable: true if write pipe CDP is enabled */ struct dpu_perf_cdp_cfg { bool rd_enable; bool wr_enable; }; /** * struct dpu_mdss_version - DPU's major and minor versions * @core_major_ver: DPU core's major version * @core_minor_ver: DPU core's minor version */ struct dpu_mdss_version { u8 core_major_ver; u8 core_minor_ver; }; /** * struct dpu_perf_cfg - performance control settings * @max_bw_low low 
threshold of maximum bandwidth (kbps) * @max_bw_high high threshold of maximum bandwidth (kbps) * @min_core_ib minimum mnoc ib vote in kbps * @min_llcc_ib minimum llcc ib vote in kbps * @min_dram_ib minimum dram ib vote in kbps * @undersized_prefill_lines undersized prefill in lines * @xtra_prefill_lines extra prefill latency in lines * @dest_scale_prefill_lines destination scaler latency in lines * @macrotile_prefill_lines macrotile latency in lines * @yuv_nv12_prefill_lines yuv_nv12 latency in lines * @linear_prefill_lines linear latency in lines * @downscaling_prefill_lines downscaling latency in lines * @amortizable_threshold minimum y position for traffic shaping prefill * @min_prefill_lines minimum pipeline latency in lines * @clk_inefficiency_factor DPU src clock inefficiency factor * @bw_inefficiency_factor DPU axi bus bw inefficiency factor * @safe_lut_tbl: LUT tables for safe signals * @danger_lut_tbl: LUT tables for danger signals * @qos_lut_tbl: LUT tables for QoS signals * @cdp_cfg cdp use case configurations */ struct dpu_perf_cfg { u32 max_bw_low; u32 max_bw_high; u32 min_core_ib; u32 min_llcc_ib; u32 min_dram_ib; u32 undersized_prefill_lines; u32 xtra_prefill_lines; u32 dest_scale_prefill_lines; u32 macrotile_prefill_lines; u32 yuv_nv12_prefill_lines; u32 linear_prefill_lines; u32 downscaling_prefill_lines; u32 amortizable_threshold; u32 min_prefill_lines; u32 clk_inefficiency_factor; u32 bw_inefficiency_factor; u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX]; }; /** * struct dpu_mdss_cfg - information of MDSS HW * This is the main catalog data structure representing * this HW version. Contains dpu's major and minor versions, * number of instances, register offsets, capabilities of * all MDSS HW sub-blocks. 
* * @dma_formats Supported formats for dma pipe * @cursor_formats Supported formats for cursor pipe * @vig_formats Supported formats for vig pipe */ struct dpu_mdss_cfg { const struct dpu_mdss_version *mdss_ver; const struct dpu_caps *caps; const struct dpu_mdp_cfg *mdp; u32 ctl_count; const struct dpu_ctl_cfg *ctl; u32 sspp_count; const struct dpu_sspp_cfg *sspp; u32 mixer_count; const struct dpu_lm_cfg *mixer; u32 pingpong_count; const struct dpu_pingpong_cfg *pingpong; u32 merge_3d_count; const struct dpu_merge_3d_cfg *merge_3d; u32 dsc_count; const struct dpu_dsc_cfg *dsc; u32 intf_count; const struct dpu_intf_cfg *intf; u32 vbif_count; const struct dpu_vbif_cfg *vbif; u32 wb_count; const struct dpu_wb_cfg *wb; const struct dpu_cdm_cfg *cdm; u32 ad_count; u32 dspp_count; const struct dpu_dspp_cfg *dspp; /* Add additional block data structures here */ const struct dpu_perf_cfg *perf; const struct dpu_format_extended *dma_formats; const struct dpu_format_extended *cursor_formats; const struct dpu_format_extended *vig_formats; }; extern const struct dpu_mdss_cfg dpu_msm8917_cfg; extern const struct dpu_mdss_cfg dpu_msm8937_cfg; extern const struct dpu_mdss_cfg dpu_msm8953_cfg; extern const struct dpu_mdss_cfg dpu_msm8996_cfg; extern const struct dpu_mdss_cfg dpu_msm8998_cfg; extern const struct dpu_mdss_cfg dpu_sdm630_cfg; extern const struct dpu_mdss_cfg dpu_sdm660_cfg; extern const struct dpu_mdss_cfg dpu_sdm845_cfg; extern const struct dpu_mdss_cfg dpu_sdm670_cfg; extern const struct dpu_mdss_cfg dpu_sm8150_cfg; extern const struct dpu_mdss_cfg dpu_sc8180x_cfg; extern const struct dpu_mdss_cfg dpu_sm7150_cfg; extern const struct dpu_mdss_cfg dpu_sm8250_cfg; extern const struct dpu_mdss_cfg dpu_sc7180_cfg; extern const struct dpu_mdss_cfg dpu_sm6115_cfg; extern const struct dpu_mdss_cfg dpu_sm6125_cfg; extern const struct dpu_mdss_cfg dpu_sm6350_cfg; extern const struct dpu_mdss_cfg dpu_qcm2290_cfg; extern const struct dpu_mdss_cfg dpu_sm6375_cfg; extern const 
struct dpu_mdss_cfg dpu_sm8350_cfg; extern const struct dpu_mdss_cfg dpu_sc7280_cfg; extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg; extern const struct dpu_mdss_cfg dpu_sm8450_cfg; extern const struct dpu_mdss_cfg dpu_sa8775p_cfg; extern const struct dpu_mdss_cfg dpu_sm8550_cfg; extern const struct dpu_mdss_cfg dpu_sm8650_cfg; extern const struct dpu_mdss_cfg dpu_x1e80100_cfg; #endif /* _DPU_HW_CATALOG_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 */

#ifndef __IA_CSS_CTC2_TYPES_H
#define __IA_CSS_CTC2_TYPES_H

/* Chroma Tone Control configuration.
 *
 * ISP block: CTC2 (CTC by polygonal approximation)
 *            (ISP1: CTC1 (CTC by look-up table) is used.)
 *            ISP2: CTC2 is used.
 *            ISP261: CTC2 (CTC by Fast Approximate Distance)
 */
struct ia_css_ctc2_config {
	/** Gains by Y(Luma) at Y = 0.0, Y_X1, Y_X2, Y_X3, Y_X4 and Y_X5
	 *  --default/ineffective value: 4096 (0.5f)
	 */
	s32 y_y0;
	s32 y_y1;
	s32 y_y2;
	s32 y_y3;
	s32 y_y4;
	s32 y_y5;

	/* 1st-4th kneepoints by Y(Luma) --default/ineffective value: n/a
	 * requirement: 0.0 < y_x1 < y_x2 < y_x3 < y_x4 < 1.0
	 */
	s32 y_x1;
	s32 y_x2;
	s32 y_x3;
	s32 y_x4;

	/* Gains by UV(Chroma) under thresholds uv_x0 and uv_x1
	 * --default/ineffective value: 4096 (0.5f)
	 */
	s32 uv_y0;
	s32 uv_y1;

	/* Minimum and Maximum Thresholds by UV(Chroma) - uv_x0 and uv_x1
	 * --default/ineffective value: n/a
	 */
	s32 uv_x0;
	s32 uv_x1;
};

#endif /* __IA_CSS_CTC2_TYPES_H */
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __SDMA_V4_0_H__
#define __SDMA_V4_0_H__

/* SDMA v4.0 IP-block hooks and version descriptor, exported for amdgpu
 * IP-block registration (presumably defined in the matching .c file —
 * confirm against sdma_v4_0.c).
 */
extern const struct amd_ip_funcs sdma_v4_0_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v4_0_ip_block;

#endif
// SPDX-License-Identifier: GPL-2.0-or-later /* * Universal Interface for Intel High Definition Audio Codec * * HD audio interface patch for Realtek ALC codecs * * Copyright (c) 2004 Kailang Yang <[email protected]> * PeiSen Hou <[email protected]> * Takashi Iwai <[email protected]> * Jonathan Woithe <[email protected]> */ #include <linux/acpi.h> #include <linux/cleanup.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/dmi.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/leds.h> #include <linux/ctype.h> #include <linux/spi/spi.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/hda_codec.h> #include "hda_local.h" #include "hda_auto_parser.h" #include "hda_jack.h" #include "hda_generic.h" #include "hda_component.h" /* keep halting ALC5505 DSP, for power saving */ #define HALT_REALTEK_ALC5505 /* extra amp-initialization sequence types */ enum { ALC_INIT_UNDEFINED, ALC_INIT_NONE, ALC_INIT_DEFAULT, }; enum { ALC_HEADSET_MODE_UNKNOWN, ALC_HEADSET_MODE_UNPLUGGED, ALC_HEADSET_MODE_HEADSET, ALC_HEADSET_MODE_MIC, ALC_HEADSET_MODE_HEADPHONE, }; enum { ALC_HEADSET_TYPE_UNKNOWN, ALC_HEADSET_TYPE_CTIA, ALC_HEADSET_TYPE_OMTP, }; enum { ALC_KEY_MICMUTE_INDEX, }; struct alc_customize_define { unsigned int sku_cfg; unsigned char port_connectivity; unsigned char check_sum; unsigned char customization; unsigned char external_amp; unsigned int enable_pcbeep:1; unsigned int platform_type:1; unsigned int swap:1; unsigned int override:1; unsigned int fixup:1; /* Means that this sku is set by driver, not read from hw */ }; struct alc_coef_led { unsigned int idx; unsigned int mask; unsigned int on; unsigned int off; }; struct alc_spec { struct hda_gen_spec gen; /* must be at head */ /* codec parameterization */ struct alc_customize_define cdefine; unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */ /* GPIO bits */ unsigned int gpio_mask; unsigned int 
gpio_dir; unsigned int gpio_data; bool gpio_write_delay; /* add a delay before writing gpio_data */ /* mute LED for HP laptops, see vref_mute_led_set() */ int mute_led_polarity; int micmute_led_polarity; hda_nid_t mute_led_nid; hda_nid_t cap_mute_led_nid; unsigned int gpio_mute_led_mask; unsigned int gpio_mic_led_mask; struct alc_coef_led mute_led_coef; struct alc_coef_led mic_led_coef; struct mutex coef_mutex; hda_nid_t headset_mic_pin; hda_nid_t headphone_mic_pin; int current_headset_mode; int current_headset_type; /* hooks */ void (*init_hook)(struct hda_codec *codec); void (*power_hook)(struct hda_codec *codec); void (*shutup)(struct hda_codec *codec); int init_amp; int codec_variant; /* flag for other variants */ unsigned int has_alc5505_dsp:1; unsigned int no_depop_delay:1; unsigned int done_hp_init:1; unsigned int no_shutup_pins:1; unsigned int ultra_low_power:1; unsigned int has_hs_key:1; unsigned int no_internal_mic_pin:1; unsigned int en_3kpull_low:1; int num_speaker_amps; /* for PLL fix */ hda_nid_t pll_nid; unsigned int pll_coef_idx, pll_coef_bit; unsigned int coef0; struct input_dev *kb_dev; u8 alc_mute_keycode_map[1]; /* component binding */ struct hda_component_parent comps; }; /* * COEF access helper functions */ static void coef_mutex_lock(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; snd_hda_power_up_pm(codec); mutex_lock(&spec->coef_mutex); } static void coef_mutex_unlock(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; mutex_unlock(&spec->coef_mutex); snd_hda_power_down_pm(codec); } static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx) { unsigned int val; snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx); val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PROC_COEF, 0); return val; } static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx) { unsigned int val; coef_mutex_lock(codec); val = 
__alc_read_coefex_idx(codec, nid, coef_idx); coef_mutex_unlock(codec); return val; } #define alc_read_coef_idx(codec, coef_idx) \ alc_read_coefex_idx(codec, 0x20, coef_idx) static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx, unsigned int coef_val) { snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx); snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, coef_val); } static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx, unsigned int coef_val) { coef_mutex_lock(codec); __alc_write_coefex_idx(codec, nid, coef_idx, coef_val); coef_mutex_unlock(codec); } #define alc_write_coef_idx(codec, coef_idx, coef_val) \ alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val) static void __alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx, unsigned int mask, unsigned int bits_set) { unsigned int val = __alc_read_coefex_idx(codec, nid, coef_idx); if (val != -1) __alc_write_coefex_idx(codec, nid, coef_idx, (val & ~mask) | bits_set); } static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx, unsigned int mask, unsigned int bits_set) { coef_mutex_lock(codec); __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set); coef_mutex_unlock(codec); } #define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \ alc_update_coefex_idx(codec, 0x20, coef_idx, mask, bits_set) /* a special bypass for COEF 0; read the cached value at the second time */ static unsigned int alc_get_coef0(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (!spec->coef0) spec->coef0 = alc_read_coef_idx(codec, 0); return spec->coef0; } /* coef writes/updates batch */ struct coef_fw { unsigned char nid; unsigned char idx; unsigned short mask; unsigned short val; }; #define UPDATE_COEFEX(_nid, _idx, _mask, _val) \ { .nid = (_nid), .idx = (_idx), .mask = (_mask), .val = (_val) } #define WRITE_COEFEX(_nid, _idx, _val) 
UPDATE_COEFEX(_nid, _idx, -1, _val) #define WRITE_COEF(_idx, _val) WRITE_COEFEX(0x20, _idx, _val) #define UPDATE_COEF(_idx, _mask, _val) UPDATE_COEFEX(0x20, _idx, _mask, _val) static void alc_process_coef_fw(struct hda_codec *codec, const struct coef_fw *fw) { coef_mutex_lock(codec); for (; fw->nid; fw++) { if (fw->mask == (unsigned short)-1) __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val); else __alc_update_coefex_idx(codec, fw->nid, fw->idx, fw->mask, fw->val); } coef_mutex_unlock(codec); } /* * GPIO setup tables, used in initialization */ /* Enable GPIO mask and set output */ static void alc_setup_gpio(struct hda_codec *codec, unsigned int mask) { struct alc_spec *spec = codec->spec; spec->gpio_mask |= mask; spec->gpio_dir |= mask; spec->gpio_data |= mask; } static void alc_write_gpio_data(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, spec->gpio_data); } static void alc_update_gpio_data(struct hda_codec *codec, unsigned int mask, bool on) { struct alc_spec *spec = codec->spec; unsigned int oldval = spec->gpio_data; if (on) spec->gpio_data |= mask; else spec->gpio_data &= ~mask; if (oldval != spec->gpio_data) alc_write_gpio_data(codec); } static void alc_write_gpio(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (!spec->gpio_mask) return; snd_hda_codec_write(codec, codec->core.afg, 0, AC_VERB_SET_GPIO_MASK, spec->gpio_mask); snd_hda_codec_write(codec, codec->core.afg, 0, AC_VERB_SET_GPIO_DIRECTION, spec->gpio_dir); if (spec->gpio_write_delay) msleep(1); alc_write_gpio_data(codec); } static void alc_fixup_gpio(struct hda_codec *codec, int action, unsigned int mask) { if (action == HDA_FIXUP_ACT_PRE_PROBE) alc_setup_gpio(codec, mask); } static void alc_fixup_gpio1(struct hda_codec *codec, const struct hda_fixup *fix, int action) { alc_fixup_gpio(codec, action, 0x01); } static void alc_fixup_gpio2(struct hda_codec *codec, const struct hda_fixup *fix, int action) 
{ alc_fixup_gpio(codec, action, 0x02); } static void alc_fixup_gpio3(struct hda_codec *codec, const struct hda_fixup *fix, int action) { alc_fixup_gpio(codec, action, 0x03); } static void alc_fixup_gpio4(struct hda_codec *codec, const struct hda_fixup *fix, int action) { alc_fixup_gpio(codec, action, 0x04); } static void alc_fixup_micmute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) snd_hda_gen_add_micmute_led_cdev(codec, NULL); } /* * Fix hardware PLL issue * On some codecs, the analog PLL gating control must be off while * the default value is 1. */ static void alc_fix_pll(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (spec->pll_nid) alc_update_coefex_idx(codec, spec->pll_nid, spec->pll_coef_idx, 1 << spec->pll_coef_bit, 0); } static void alc_fix_pll_init(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx, unsigned int coef_bit) { struct alc_spec *spec = codec->spec; spec->pll_nid = nid; spec->pll_coef_idx = coef_idx; spec->pll_coef_bit = coef_bit; alc_fix_pll(codec); } /* update the master volume per volume-knob's unsol event */ static void alc_update_knob_master(struct hda_codec *codec, struct hda_jack_callback *jack) { unsigned int val; struct snd_kcontrol *kctl; struct snd_ctl_elem_value *uctl; kctl = snd_hda_find_mixer_ctl(codec, "Master Playback Volume"); if (!kctl) return; uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); if (!uctl) return; val = snd_hda_codec_read(codec, jack->nid, 0, AC_VERB_GET_VOLUME_KNOB_CONTROL, 0); val &= HDA_AMP_VOLMASK; uctl->value.integer.value[0] = val; uctl->value.integer.value[1] = val; kctl->put(kctl, uctl); kfree(uctl); } static void alc880_unsol_event(struct hda_codec *codec, unsigned int res) { /* For some reason, the res given from ALC880 is broken. Here we adjust it properly. 
	 */
	snd_hda_jack_unsol_event(codec, res >> 2);
}

/* Change EAPD to verb control */
/* Per-vendor-id COEF tweaks that route EAPD through the normal verb
 * interface.  The indices and bit patterns are chip-specific magic
 * from Realtek; do not reorder or merge the cases.
 */
static void alc_fill_eapd_coef(struct hda_codec *codec)
{
	int coef;

	coef = alc_get_coef0(codec);
	switch (codec->core.vendor_id) {
	case 0x10ec0262:
		alc_update_coef_idx(codec, 0x7, 0, 1<<5);
		break;
	case 0x10ec0267:
	case 0x10ec0268:
		alc_update_coef_idx(codec, 0x7, 0, 1<<13);
		break;
	case 0x10ec0269:
		/* the low nibble of COEF 0 selects the chip revision */
		if ((coef & 0x00f0) == 0x0010)
			alc_update_coef_idx(codec, 0xd, 0, 1<<14);
		if ((coef & 0x00f0) == 0x0020)
			alc_update_coef_idx(codec, 0x4, 1<<15, 0);
		if ((coef & 0x00f0) == 0x0030)
			alc_update_coef_idx(codec, 0x10, 1<<9, 0);
		break;
	case 0x10ec0280:
	case 0x10ec0284:
	case 0x10ec0290:
	case 0x10ec0292:
		alc_update_coef_idx(codec, 0x4, 1<<15, 0);
		break;
	case 0x10ec0225:
	case 0x10ec0295:
	case 0x10ec0299:
		alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
		fallthrough;
	case 0x10ec0215:
	case 0x10ec0285:
	case 0x10ec0289:
		alc_update_coef_idx(codec, 0x36, 1<<13, 0);
		fallthrough;
	case 0x10ec0230:
	case 0x10ec0233:
	case 0x10ec0235:
	case 0x10ec0236:
	case 0x10ec0245:
	case 0x10ec0255:
	case 0x10ec0256:
	case 0x19e58326:
	case 0x10ec0257:
	case 0x10ec0282:
	case 0x10ec0283:
	case 0x10ec0286:
	case 0x10ec0288:
	case 0x10ec0298:
	case 0x10ec0300:
		alc_update_coef_idx(codec, 0x10, 1<<9, 0);
		break;
	case 0x10ec0275:
		alc_update_coef_idx(codec, 0xe, 0, 1<<0);
		break;
	case 0x10ec0287:
		alc_update_coef_idx(codec, 0x10, 1<<9, 0);
		alc_write_coef_idx(codec, 0x8, 0x4ab7);
		break;
	case 0x10ec0293:
		alc_update_coef_idx(codec, 0xa, 1<<13, 0);
		break;
	case 0x10ec0234:
	case 0x10ec0274:
		alc_write_coef_idx(codec, 0x6e, 0x0c25);
		fallthrough;
	case 0x10ec0294:
	case 0x10ec0700:
	case 0x10ec0701:
	case 0x10ec0703:
	case 0x10ec0711:
		alc_update_coef_idx(codec, 0x10, 1<<15, 0);
		break;
	case 0x10ec0662:
		if ((coef & 0x00f0) == 0x0030)
			alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
		break;
	case 0x10ec0272:
	case 0x10ec0273:
	case 0x10ec0663:
	case 0x10ec0665:
	case 0x10ec0670:
	case 0x10ec0671:
	case 0x10ec0672:
		alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
		break;
	case 0x10ec0222:
	case 0x10ec0623:
		alc_update_coef_idx(codec, 0x19, 1<<13, 0);
		break;
	case 0x10ec0668:
		alc_update_coef_idx(codec, 0x7, 3<<13, 0);
		break;
	case 0x10ec0867:
		alc_update_coef_idx(codec, 0x4, 1<<10, 0);
		break;
	case 0x10ec0888:
		if ((coef & 0x00f0) == 0x0020 || (coef & 0x00f0) == 0x0030)
			alc_update_coef_idx(codec, 0x7, 1<<5, 0);
		break;
	case 0x10ec0892:
	case 0x10ec0897:
		alc_update_coef_idx(codec, 0x7, 1<<5, 0);
		break;
	case 0x10ec0899:
	case 0x10ec0900:
	case 0x10ec0b00:
	case 0x10ec1168:
	case 0x10ec1220:
		alc_update_coef_idx(codec, 0x7, 1<<1, 0);
		break;
	}
}

/* additional initialization for ALC888 variants */
static void alc888_coef_init(struct hda_codec *codec)
{
	switch (alc_get_coef0(codec) & 0x00f0) {
	/* alc888-VA */
	case 0x00:
	/* alc888-VB */
	case 0x10:
		alc_update_coef_idx(codec, 7, 0, 0x2030); /* Turn EAPD to High */
		break;
	}
}

/* turn on/off EAPD control (only if available) */
static void set_eapd(struct hda_codec *codec, hda_nid_t nid, int on)
{
	/* only pin widgets can carry an EAPD control */
	if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
		return;
	if (snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_EAPD)
		snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_EAPD_BTLENABLE,
				    on ?
				    2 : 0);
}

/* turn on/off EAPD controls of the codec */
static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
{
	/* We currently only handle front, HP */
	static const hda_nid_t pins[] = {
		0x0f, 0x10, 0x14, 0x15, 0x17, 0
	};
	const hda_nid_t *p;
	for (p = pins; *p; p++)
		set_eapd(codec, *p, on);
}

static int find_ext_mic_pin(struct hda_codec *codec);

/* shut up all pins except the external mic, so that headset detection
 * state survives a suspend/shutdown
 */
static void alc_headset_mic_no_shutup(struct hda_codec *codec)
{
	const struct hda_pincfg *pin;
	int mic_pin = find_ext_mic_pin(codec);
	int i;

	/* don't shut up pins when unloading the driver; otherwise it breaks
	 * the default pin setup at the next load of the driver
	 */
	if (codec->bus->shutdown)
		return;

	snd_array_for_each(&codec->init_pins, i, pin) {
		/* use read here for syncing after issuing each verb */
		if (pin->nid != mic_pin)
			snd_hda_codec_read(codec, pin->nid, 0,
					AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
	}

	codec->pins_shutup = 1;
}

/* pin shutdown dispatcher: headset-mic aware variant for the listed
 * vendor ids, generic snd_hda_shutup_pins() otherwise
 */
static void alc_shutup_pins(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	switch (codec->core.vendor_id) {
	case 0x10ec0236:
	case 0x10ec0256:
	case 0x10ec0257:
	case 0x19e58326:
	case 0x10ec0283:
	case 0x10ec0285:
	case 0x10ec0286:
	case 0x10ec0287:
	case 0x10ec0288:
	case 0x10ec0295:
	case 0x10ec0298:
		alc_headset_mic_no_shutup(codec);
		break;
	default:
		if (!spec->no_shutup_pins)
			snd_hda_shutup_pins(codec);
		break;
	}
}

/* generic shutup callback;
 * just turning off EAPD and a little pause for avoiding pop-noise
 */
static void alc_eapd_shutup(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	alc_auto_setup_eapd(codec, false);
	if (!spec->no_depop_delay)
		msleep(200);
	alc_shutup_pins(codec);
}

/* generic EAPD initialization */
static void alc_auto_init_amp(struct hda_codec *codec, int type)
{
	alc_auto_setup_eapd(codec, true);
	alc_write_gpio(codec);
	switch (type) {
	case ALC_INIT_DEFAULT:
		/* vendor-specific COEF setup for the default amp init */
		switch (codec->core.vendor_id) {
		case 0x10ec0260:
			alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x2010);
			break;
		case 0x10ec0880:
		case 0x10ec0882:
		case 0x10ec0883:
		case 0x10ec0885:
			alc_update_coef_idx(codec, 7, 0, 0x2030);
			break;
		case 0x10ec0888:
			alc888_coef_init(codec);
			break;
		}
		break;
	}
}

/* get a primary headphone pin if available */
static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
{
	if (spec->gen.autocfg.hp_pins[0])
		return spec->gen.autocfg.hp_pins[0];
	if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
		return spec->gen.autocfg.line_out_pins[0];
	return 0;
}

/*
 * Realtek SSID verification
 */

/* Could be any non-zero and even value. When used as fixup, tells
 * the driver to ignore any present sku defines.
 */
#define ALC_FIXUP_SKU_IGNORE (2)

static void alc_fixup_sku_ignore(struct hda_codec *codec,
				 const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->cdefine.fixup = 1;
		spec->cdefine.sku_cfg = ALC_FIXUP_SKU_IGNORE;
	}
}

/* disable the post-shutup de-pop delay on machines that don't need it */
static void alc_fixup_no_depop_delay(struct hda_codec *codec,
				    const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PROBE) {
		spec->no_depop_delay = 1;
		codec->depop_delay = 0;
	}
}

/* parse the Realtek "customize define" SKU word (from the subsystem id or
 * the spare pin default config) into spec->cdefine; returns 0 on success,
 * -1 when no valid SKU information is present
 */
static int alc_auto_parse_customize_define(struct hda_codec *codec)
{
	unsigned int ass, tmp, i;
	unsigned nid = 0;
	struct alc_spec *spec = codec->spec;

	spec->cdefine.enable_pcbeep = 1; /* assume always enabled */

	if (spec->cdefine.fixup) {
		ass = spec->cdefine.sku_cfg;
		if (ass == ALC_FIXUP_SKU_IGNORE)
			return -1;
		goto do_sku;
	}

	if (!codec->bus->pci)
		return -1;
	ass = codec->core.subsystem_id & 0xffff;
	if (ass != codec->bus->pci->subsystem_device && (ass & 1))
		goto do_sku;

	/* fall back to the pin default config of the spare NID */
	nid = 0x1d;
	if (codec->core.vendor_id == 0x10ec0260)
		nid = 0x17;
	ass = snd_hda_codec_get_pincfg(codec, nid);

	if (!(ass & 1)) {
		codec_info(codec, "%s: SKU not ready 0x%08x\n",
			   codec->core.chip_name, ass);
		return -1;
	}

	/* check sum */
	tmp = 0;
	for (i = 1; i < 16; i++) {
		if ((ass >> i) & 1)
			tmp++;
	}
	if (((ass >> 16) & 0xf) != tmp)
		return -1;

	spec->cdefine.port_connectivity = ass >> 30;
	spec->cdefine.enable_pcbeep = (ass & 0x100000) >> 20;
	spec->cdefine.check_sum = (ass >> 16) & 0xf;
	spec->cdefine.customization = ass >> 8;
do_sku:
	/* decode the SKU bit-fields into the cached cdefine struct */
	spec->cdefine.sku_cfg = ass;
	spec->cdefine.external_amp = (ass & 0x38) >> 3;
	spec->cdefine.platform_type = (ass & 0x4) >> 2;
	spec->cdefine.swap = (ass & 0x2) >> 1;
	spec->cdefine.override = ass & 0x1;

	codec_dbg(codec, "SKU: Nid=0x%x sku_cfg=0x%08x\n",
		   nid, spec->cdefine.sku_cfg);
	codec_dbg(codec, "SKU: port_connectivity=0x%x\n",
		   spec->cdefine.port_connectivity);
	codec_dbg(codec, "SKU: enable_pcbeep=0x%x\n", spec->cdefine.enable_pcbeep);
	codec_dbg(codec, "SKU: check_sum=0x%08x\n", spec->cdefine.check_sum);
	codec_dbg(codec, "SKU: customization=0x%08x\n", spec->cdefine.customization);
	codec_dbg(codec, "SKU: external_amp=0x%x\n", spec->cdefine.external_amp);
	codec_dbg(codec, "SKU: platform_type=0x%x\n", spec->cdefine.platform_type);
	codec_dbg(codec, "SKU: swap=0x%x\n", spec->cdefine.swap);
	codec_dbg(codec, "SKU: override=0x%x\n", spec->cdefine.override);

	return 0;
}

/* return the position of NID in the list, or -1 if not found */
static int find_idx_in_nid_list(hda_nid_t nid, const hda_nid_t *list,
				int nums)
{
	int i;
	for (i = 0; i < nums; i++)
		if (list[i] == nid)
			return i;
	return -1;
}

/* return true if the given NID is found in the list */
static bool found_in_nid_list(hda_nid_t nid, const hda_nid_t *list, int nums)
{
	return find_idx_in_nid_list(nid, list, nums) >= 0;
}

/* check subsystem ID and set up device-specific initialization;
 * return 1 if initialized, 0 if invalid SSID
 */
/* 32-bit subsystem ID for BIOS loading in HD Audio codec.
 *	31 ~ 16 :	Manufacture ID
 *	15 ~ 8	:	SKU ID
 *	7  ~ 0	:	Assembly ID
 *	port-A --> pin 39/41, port-E --> pin 14/15, port-D --> pin 35/36
 */
static int alc_subsystem_id(struct hda_codec *codec, const hda_nid_t *ports)
{
	unsigned int ass, tmp, i;
	unsigned nid;
	struct alc_spec *spec = codec->spec;

	if (spec->cdefine.fixup) {
		ass = spec->cdefine.sku_cfg;
		if (ass == ALC_FIXUP_SKU_IGNORE)
			return 0;
		goto do_sku;
	}

	ass = codec->core.subsystem_id & 0xffff;
	if (codec->bus->pci &&
	    ass != codec->bus->pci->subsystem_device && (ass & 1))
		goto do_sku;

	/* invalid SSID, check the special NID pin defcfg instead */
	/*
	 * 31~30	: port connectivity
	 * 29~21	: reserve
	 * 20		: PCBEEP input
	 * 19~16	: Check sum (15:1)
	 * 15~1		: Custom
	 * 0		: override
	 */
	nid = 0x1d;
	if (codec->core.vendor_id == 0x10ec0260)
		nid = 0x17;
	ass = snd_hda_codec_get_pincfg(codec, nid);
	codec_dbg(codec,
		  "realtek: No valid SSID, checking pincfg 0x%08x for NID 0x%x\n",
		   ass, nid);
	if (!(ass & 1))
		return 0;
	if ((ass >> 30) != 1)	/* no physical connection */
		return 0;

	/* check sum */
	tmp = 0;
	for (i = 1; i < 16; i++) {
		if ((ass >> i) & 1)
			tmp++;
	}
	if (((ass >> 16) & 0xf) != tmp)
		return 0;
do_sku:
	codec_dbg(codec, "realtek: Enabling init ASM_ID=0x%04x CODEC_ID=%08x\n",
		   ass & 0xffff, codec->core.vendor_id);
	/*
	 * 0 : override
	 * 1 :	Swap Jack
	 * 2 : 0 --> Desktop, 1 --> Laptop
	 * 3~5 : External Amplifier control
	 * 7~6 : Reserved
	 */
	tmp = (ass & 0x38) >> 3;	/* external Amp control */
	if (spec->init_amp == ALC_INIT_UNDEFINED) {
		switch (tmp) {
		case 1:
			alc_setup_gpio(codec, 0x01);
			break;
		case 3:
			alc_setup_gpio(codec, 0x02);
			break;
		case 7:
			alc_setup_gpio(codec, 0x04);
			break;
		case 5:
		default:
			spec->init_amp = ALC_INIT_DEFAULT;
			break;
		}
	}

	/* is laptop or Desktop and enable the function "Mute internal speaker
	 * when the external headphone out jack is plugged"
	 */
	if (!(ass & 0x8000))
		return 1;
	/*
	 * 10~8 : Jack location
	 * 12~11: Headphone out -> 00: PortA, 01: PortE, 02: PortD, 03: Reserved
	 * 14~13: Reserved
	 * 15   : 1 --> enable the function "Mute internal speaker
	 *	        when the external headphone out jack is plugged"
	 */
	if (!alc_get_hp_pin(spec)) {
		hda_nid_t nid;
		tmp = (ass >> 11) & 0x3;	/* HP to chassis */
		nid = ports[tmp];
		/* don't double-register a pin that is already a line-out */
		if (found_in_nid_list(nid, spec->gen.autocfg.line_out_pins,
				      spec->gen.autocfg.line_outs))
			return 1;
		spec->gen.autocfg.hp_pins[0] = nid;
	}
	return 1;
}

/* Check the validity of ALC subsystem-id
 * ports contains an array of 4 pin NIDs for port-A, E, D and I */
static void alc_ssid_check(struct hda_codec *codec, const hda_nid_t *ports)
{
	if (!alc_subsystem_id(codec, ports)) {
		struct alc_spec *spec = codec->spec;
		if (spec->init_amp == ALC_INIT_UNDEFINED) {
			codec_dbg(codec,
				  "realtek: Enable default setup for auto mode as fallback\n");
			spec->init_amp = ALC_INIT_DEFAULT;
		}
	}
}

/*
 */

/* fixup: tell the generic parser that digital-mic channels are swapped */
static void alc_fixup_inv_dmic(struct hda_codec *codec,
			       const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	spec->gen.inv_dmic_split = 1;
}

/* build mixer controls via the generic parser, then apply BUILD fixups */
static int alc_build_controls(struct hda_codec *codec)
{
	int err;

	err = snd_hda_gen_build_controls(codec);
	if (err < 0)
		return err;

	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_BUILD);

	return 0;
}

/*
 * Common callbacks
 */

static void alc_pre_init(struct hda_codec *codec)
{
	alc_fill_eapd_coef(codec);
}

/* helpers to classify the current PM transition of the codec device */
#define is_s3_resume(codec) \
	((codec)->core.dev.power.power_state.event == PM_EVENT_RESUME)
#define is_s4_resume(codec) \
	((codec)->core.dev.power.power_state.event == PM_EVENT_RESTORE)
#define is_s4_suspend(codec) \
	((codec)->core.dev.power.power_state.event == PM_EVENT_FREEZE)

static int alc_init(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	/* hibernation resume needs the full chip initialization */
	if (is_s4_resume(codec))
		alc_pre_init(codec);

	if (spec->init_hook)
		spec->init_hook(codec);

	spec->gen.skip_verbs = 1; /* applied in below */
	snd_hda_gen_init(codec);
	alc_fix_pll(codec);
	alc_auto_init_amp(codec, spec->init_amp);
	snd_hda_apply_verbs(codec); /* apply verbs here after own init */

	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
return 0; } /* forward declaration */ static const struct component_master_ops comp_master_ops; static void alc_free(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (spec) hda_component_manager_free(&spec->comps, &comp_master_ops); snd_hda_gen_free(codec); } static inline void alc_shutup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (!snd_hda_get_bool_hint(codec, "shutup")) return; /* disabled explicitly by hints */ if (spec && spec->shutup) spec->shutup(codec); else alc_shutup_pins(codec); } static void alc_power_eapd(struct hda_codec *codec) { alc_auto_setup_eapd(codec, false); } static int alc_suspend(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; alc_shutup(codec); if (spec && spec->power_hook) spec->power_hook(codec); return 0; } static int alc_resume(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (!spec->no_depop_delay) msleep(150); /* to avoid pop noise */ codec->patch_ops.init(codec); snd_hda_regmap_sync(codec); hda_call_check_power_status(codec, 0x01); return 0; } /* */ static const struct hda_codec_ops alc_patch_ops = { .build_controls = alc_build_controls, .build_pcms = snd_hda_gen_build_pcms, .init = alc_init, .free = alc_free, .unsol_event = snd_hda_jack_unsol_event, .resume = alc_resume, .suspend = alc_suspend, .check_power_status = snd_hda_gen_check_power_status, }; #define alc_codec_rename(codec, name) snd_hda_codec_set_name(codec, name) /* * Rename codecs appropriately from COEF value or subvendor id */ struct alc_codec_rename_table { unsigned int vendor_id; unsigned short coef_mask; unsigned short coef_bits; const char *name; }; struct alc_codec_rename_pci_table { unsigned int codec_vendor_id; unsigned short pci_subvendor; unsigned short pci_subdevice; const char *name; }; static const struct alc_codec_rename_table rename_tbl[] = { { 0x10ec0221, 0xf00f, 0x1003, "ALC231" }, { 0x10ec0269, 0xfff0, 0x3010, "ALC277" }, { 0x10ec0269, 0xf0f0, 0x2010, "ALC259" }, { 0x10ec0269, 
0xf0f0, 0x3010, "ALC258" }, { 0x10ec0269, 0x00f0, 0x0010, "ALC269VB" }, { 0x10ec0269, 0xffff, 0xa023, "ALC259" }, { 0x10ec0269, 0xffff, 0x6023, "ALC281X" }, { 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" }, { 0x10ec0269, 0x00f0, 0x0030, "ALC269VD" }, { 0x10ec0662, 0xffff, 0x4020, "ALC656" }, { 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" }, { 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" }, { 0x10ec0888, 0xf0f0, 0x3020, "ALC886" }, { 0x10ec0899, 0x2000, 0x2000, "ALC899" }, { 0x10ec0892, 0xffff, 0x8020, "ALC661" }, { 0x10ec0892, 0xffff, 0x8011, "ALC661" }, { 0x10ec0892, 0xffff, 0x4011, "ALC656" }, { } /* terminator */ }; static const struct alc_codec_rename_pci_table rename_pci_tbl[] = { { 0x10ec0280, 0x1028, 0, "ALC3220" }, { 0x10ec0282, 0x1028, 0, "ALC3221" }, { 0x10ec0283, 0x1028, 0, "ALC3223" }, { 0x10ec0288, 0x1028, 0, "ALC3263" }, { 0x10ec0292, 0x1028, 0, "ALC3226" }, { 0x10ec0293, 0x1028, 0, "ALC3235" }, { 0x10ec0255, 0x1028, 0, "ALC3234" }, { 0x10ec0668, 0x1028, 0, "ALC3661" }, { 0x10ec0275, 0x1028, 0, "ALC3260" }, { 0x10ec0899, 0x1028, 0, "ALC3861" }, { 0x10ec0298, 0x1028, 0, "ALC3266" }, { 0x10ec0236, 0x1028, 0, "ALC3204" }, { 0x10ec0256, 0x1028, 0, "ALC3246" }, { 0x10ec0225, 0x1028, 0, "ALC3253" }, { 0x10ec0295, 0x1028, 0, "ALC3254" }, { 0x10ec0299, 0x1028, 0, "ALC3271" }, { 0x10ec0670, 0x1025, 0, "ALC669X" }, { 0x10ec0676, 0x1025, 0, "ALC679X" }, { 0x10ec0282, 0x1043, 0, "ALC3229" }, { 0x10ec0233, 0x1043, 0, "ALC3236" }, { 0x10ec0280, 0x103c, 0, "ALC3228" }, { 0x10ec0282, 0x103c, 0, "ALC3227" }, { 0x10ec0286, 0x103c, 0, "ALC3242" }, { 0x10ec0290, 0x103c, 0, "ALC3241" }, { 0x10ec0668, 0x103c, 0, "ALC3662" }, { 0x10ec0283, 0x17aa, 0, "ALC3239" }, { 0x10ec0292, 0x17aa, 0, "ALC3232" }, { } /* terminator */ }; static int alc_codec_rename_from_preset(struct hda_codec *codec) { const struct alc_codec_rename_table *p; const struct alc_codec_rename_pci_table *q; for (p = rename_tbl; p->vendor_id; p++) { if (p->vendor_id != codec->core.vendor_id) continue; if 
((alc_get_coef0(codec) & p->coef_mask) == p->coef_bits) return alc_codec_rename(codec, p->name); } if (!codec->bus->pci) return 0; for (q = rename_pci_tbl; q->codec_vendor_id; q++) { if (q->codec_vendor_id != codec->core.vendor_id) continue; if (q->pci_subvendor != codec->bus->pci->subsystem_vendor) continue; if (!q->pci_subdevice || q->pci_subdevice == codec->bus->pci->subsystem_device) return alc_codec_rename(codec, q->name); } return 0; } /* * Digital-beep handlers */ #ifdef CONFIG_SND_HDA_INPUT_BEEP /* additional beep mixers; private_value will be overwritten */ static const struct snd_kcontrol_new alc_beep_mixer[] = { HDA_CODEC_VOLUME("Beep Playback Volume", 0, 0, HDA_INPUT), HDA_CODEC_MUTE_BEEP("Beep Playback Switch", 0, 0, HDA_INPUT), }; /* set up and create beep controls */ static int set_beep_amp(struct alc_spec *spec, hda_nid_t nid, int idx, int dir) { struct snd_kcontrol_new *knew; unsigned int beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir); int i; for (i = 0; i < ARRAY_SIZE(alc_beep_mixer); i++) { knew = snd_hda_gen_add_kctl(&spec->gen, NULL, &alc_beep_mixer[i]); if (!knew) return -ENOMEM; knew->private_value = beep_amp; } return 0; } static const struct snd_pci_quirk beep_allow_list[] = { SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1), SND_PCI_QUIRK(0x1043, 0x115d, "ASUS", 1), SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), SND_PCI_QUIRK(0x1043, 0x8376, "EeePC", 1), SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1), SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1), SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1), SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), /* denylist -- no beep available */ SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0), SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0), {} }; static inline int has_cdefine_beep(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; const struct snd_pci_quirk *q; q = snd_pci_quirk_lookup(codec->bus->pci, beep_allow_list); if (q) return q->value; return 
spec->cdefine.enable_pcbeep; } #else #define set_beep_amp(spec, nid, idx, dir) 0 #define has_cdefine_beep(codec) 0 #endif /* parse the BIOS configuration and set up the alc_spec */ /* return 1 if successful, 0 if the proper config is not found, * or a negative error code */ static int alc_parse_auto_config(struct hda_codec *codec, const hda_nid_t *ignore_nids, const hda_nid_t *ssid_nids) { struct alc_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->gen.autocfg; int err; err = snd_hda_parse_pin_defcfg(codec, cfg, ignore_nids, spec->parse_flags); if (err < 0) return err; if (ssid_nids) alc_ssid_check(codec, ssid_nids); err = snd_hda_gen_parse_auto_config(codec, cfg); if (err < 0) return err; return 1; } /* common preparation job for alc_spec */ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid) { struct alc_spec *spec = kzalloc(sizeof(*spec), GFP_KERNEL); int err; if (!spec) return -ENOMEM; codec->spec = spec; snd_hda_gen_spec_init(&spec->gen); spec->gen.mixer_nid = mixer_nid; spec->gen.own_eapd_ctl = 1; codec->single_adc_amp = 1; /* FIXME: do we need this for all Realtek codec models? 
*/ codec->spdif_status_reset = 1; codec->forced_resume = 1; codec->patch_ops = alc_patch_ops; mutex_init(&spec->coef_mutex); err = alc_codec_rename_from_preset(codec); if (err < 0) { kfree(spec); return err; } return 0; } static int alc880_parse_auto_config(struct hda_codec *codec) { static const hda_nid_t alc880_ignore[] = { 0x1d, 0 }; static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 }; return alc_parse_auto_config(codec, alc880_ignore, alc880_ssids); } /* * ALC880 fix-ups */ enum { ALC880_FIXUP_GPIO1, ALC880_FIXUP_GPIO2, ALC880_FIXUP_MEDION_RIM, ALC880_FIXUP_LG, ALC880_FIXUP_LG_LW25, ALC880_FIXUP_W810, ALC880_FIXUP_EAPD_COEF, ALC880_FIXUP_TCL_S700, ALC880_FIXUP_VOL_KNOB, ALC880_FIXUP_FUJITSU, ALC880_FIXUP_F1734, ALC880_FIXUP_UNIWILL, ALC880_FIXUP_UNIWILL_DIG, ALC880_FIXUP_Z71V, ALC880_FIXUP_ASUS_W5A, ALC880_FIXUP_3ST_BASE, ALC880_FIXUP_3ST, ALC880_FIXUP_3ST_DIG, ALC880_FIXUP_5ST_BASE, ALC880_FIXUP_5ST, ALC880_FIXUP_5ST_DIG, ALC880_FIXUP_6ST_BASE, ALC880_FIXUP_6ST, ALC880_FIXUP_6ST_DIG, ALC880_FIXUP_6ST_AUTOMUTE, }; /* enable the volume-knob widget support on NID 0x21 */ static void alc880_fixup_vol_knob(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PROBE) snd_hda_jack_detect_enable_callback(codec, 0x21, alc_update_knob_master); } static const struct hda_fixup alc880_fixups[] = { [ALC880_FIXUP_GPIO1] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_gpio1, }, [ALC880_FIXUP_GPIO2] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_gpio2, }, [ALC880_FIXUP_MEDION_RIM] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3060 }, { } }, .chained = true, .chain_id = ALC880_FIXUP_GPIO2, }, [ALC880_FIXUP_LG] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { /* disable bogus unused pins */ { 0x16, 0x411111f0 }, { 0x18, 0x411111f0 }, { 0x1a, 0x411111f0 }, { } } }, [ALC880_FIXUP_LG_LW25] = { .type = 
HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1a, 0x0181344f }, /* line-in */ { 0x1b, 0x0321403f }, /* headphone */ { } } }, [ALC880_FIXUP_W810] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { /* disable bogus unused pins */ { 0x17, 0x411111f0 }, { } }, .chained = true, .chain_id = ALC880_FIXUP_GPIO2, }, [ALC880_FIXUP_EAPD_COEF] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* change to EAPD mode */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3060 }, {} }, }, [ALC880_FIXUP_TCL_S700] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* change to EAPD mode */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3070 }, {} }, .chained = true, .chain_id = ALC880_FIXUP_GPIO2, }, [ALC880_FIXUP_VOL_KNOB] = { .type = HDA_FIXUP_FUNC, .v.func = alc880_fixup_vol_knob, }, [ALC880_FIXUP_FUJITSU] = { /* override all pins as BIOS on old Amilo is broken */ .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x0121401f }, /* HP */ { 0x15, 0x99030120 }, /* speaker */ { 0x16, 0x99030130 }, /* bass speaker */ { 0x17, 0x411111f0 }, /* N/A */ { 0x18, 0x411111f0 }, /* N/A */ { 0x19, 0x01a19950 }, /* mic-in */ { 0x1a, 0x411111f0 }, /* N/A */ { 0x1b, 0x411111f0 }, /* N/A */ { 0x1c, 0x411111f0 }, /* N/A */ { 0x1d, 0x411111f0 }, /* N/A */ { 0x1e, 0x01454140 }, /* SPDIF out */ { } }, .chained = true, .chain_id = ALC880_FIXUP_VOL_KNOB, }, [ALC880_FIXUP_F1734] = { /* almost compatible with FUJITSU, but no bass and SPDIF */ .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x0121401f }, /* HP */ { 0x15, 0x99030120 }, /* speaker */ { 0x16, 0x411111f0 }, /* N/A */ { 0x17, 0x411111f0 }, /* N/A */ { 0x18, 0x411111f0 }, /* N/A */ { 0x19, 0x01a19950 }, /* mic-in */ { 0x1a, 0x411111f0 }, /* N/A */ { 0x1b, 0x411111f0 }, /* N/A */ { 0x1c, 0x411111f0 }, /* N/A */ { 0x1d, 0x411111f0 }, /* N/A */ { 0x1e, 0x411111f0 }, /* N/A */ { } }, .chained = 
true, .chain_id = ALC880_FIXUP_VOL_KNOB, }, [ALC880_FIXUP_UNIWILL] = { /* need to fix HP and speaker pins to be parsed correctly */ .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x0121411f }, /* HP */ { 0x15, 0x99030120 }, /* speaker */ { 0x16, 0x99030130 }, /* bass speaker */ { } }, }, [ALC880_FIXUP_UNIWILL_DIG] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { /* disable bogus unused pins */ { 0x17, 0x411111f0 }, { 0x19, 0x411111f0 }, { 0x1b, 0x411111f0 }, { 0x1f, 0x411111f0 }, { } } }, [ALC880_FIXUP_Z71V] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { /* set up the whole pins as BIOS is utterly broken */ { 0x14, 0x99030120 }, /* speaker */ { 0x15, 0x0121411f }, /* HP */ { 0x16, 0x411111f0 }, /* N/A */ { 0x17, 0x411111f0 }, /* N/A */ { 0x18, 0x01a19950 }, /* mic-in */ { 0x19, 0x411111f0 }, /* N/A */ { 0x1a, 0x01813031 }, /* line-in */ { 0x1b, 0x411111f0 }, /* N/A */ { 0x1c, 0x411111f0 }, /* N/A */ { 0x1d, 0x411111f0 }, /* N/A */ { 0x1e, 0x0144111e }, /* SPDIF */ { } } }, [ALC880_FIXUP_ASUS_W5A] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { /* set up the whole pins as BIOS is utterly broken */ { 0x14, 0x0121411f }, /* HP */ { 0x15, 0x411111f0 }, /* N/A */ { 0x16, 0x411111f0 }, /* N/A */ { 0x17, 0x411111f0 }, /* N/A */ { 0x18, 0x90a60160 }, /* mic */ { 0x19, 0x411111f0 }, /* N/A */ { 0x1a, 0x411111f0 }, /* N/A */ { 0x1b, 0x411111f0 }, /* N/A */ { 0x1c, 0x411111f0 }, /* N/A */ { 0x1d, 0x411111f0 }, /* N/A */ { 0x1e, 0xb743111e }, /* SPDIF out */ { } }, .chained = true, .chain_id = ALC880_FIXUP_GPIO1, }, [ALC880_FIXUP_3ST_BASE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x01014010 }, /* line-out */ { 0x15, 0x411111f0 }, /* N/A */ { 0x16, 0x411111f0 }, /* N/A */ { 0x17, 0x411111f0 }, /* N/A */ { 0x18, 0x01a19c30 }, /* mic-in */ { 0x19, 0x0121411f }, /* HP */ { 0x1a, 0x01813031 }, /* line-in */ { 0x1b, 0x02a19c40 }, /* front-mic */ { 0x1c, 0x411111f0 
}, /* N/A */ { 0x1d, 0x411111f0 }, /* N/A */ /* 0x1e is filled in below */ { 0x1f, 0x411111f0 }, /* N/A */ { } } }, [ALC880_FIXUP_3ST] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1e, 0x411111f0 }, /* N/A */ { } }, .chained = true, .chain_id = ALC880_FIXUP_3ST_BASE, }, [ALC880_FIXUP_3ST_DIG] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1e, 0x0144111e }, /* SPDIF */ { } }, .chained = true, .chain_id = ALC880_FIXUP_3ST_BASE, }, [ALC880_FIXUP_5ST_BASE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x01014010 }, /* front */ { 0x15, 0x411111f0 }, /* N/A */ { 0x16, 0x01011411 }, /* CLFE */ { 0x17, 0x01016412 }, /* surr */ { 0x18, 0x01a19c30 }, /* mic-in */ { 0x19, 0x0121411f }, /* HP */ { 0x1a, 0x01813031 }, /* line-in */ { 0x1b, 0x02a19c40 }, /* front-mic */ { 0x1c, 0x411111f0 }, /* N/A */ { 0x1d, 0x411111f0 }, /* N/A */ /* 0x1e is filled in below */ { 0x1f, 0x411111f0 }, /* N/A */ { } } }, [ALC880_FIXUP_5ST] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1e, 0x411111f0 }, /* N/A */ { } }, .chained = true, .chain_id = ALC880_FIXUP_5ST_BASE, }, [ALC880_FIXUP_5ST_DIG] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1e, 0x0144111e }, /* SPDIF */ { } }, .chained = true, .chain_id = ALC880_FIXUP_5ST_BASE, }, [ALC880_FIXUP_6ST_BASE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x01014010 }, /* front */ { 0x15, 0x01016412 }, /* surr */ { 0x16, 0x01011411 }, /* CLFE */ { 0x17, 0x01012414 }, /* side */ { 0x18, 0x01a19c30 }, /* mic-in */ { 0x19, 0x02a19c40 }, /* front-mic */ { 0x1a, 0x01813031 }, /* line-in */ { 0x1b, 0x0121411f }, /* HP */ { 0x1c, 0x411111f0 }, /* N/A */ { 0x1d, 0x411111f0 }, /* N/A */ /* 0x1e is filled in below */ { 0x1f, 0x411111f0 }, /* N/A */ { } } }, [ALC880_FIXUP_6ST] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1e, 0x411111f0 }, /* N/A */ { } }, .chained = true, 
.chain_id = ALC880_FIXUP_6ST_BASE, }, [ALC880_FIXUP_6ST_DIG] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1e, 0x0144111e }, /* SPDIF */ { } }, .chained = true, .chain_id = ALC880_FIXUP_6ST_BASE, }, [ALC880_FIXUP_6ST_AUTOMUTE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x0121401f }, /* HP with jack detect */ { } }, .chained_before = true, .chain_id = ALC880_FIXUP_6ST_BASE, }, }; static const struct hda_quirk alc880_fixup_tbl[] = { SND_PCI_QUIRK(0x1019, 0x0f69, "Coeus G610P", ALC880_FIXUP_W810), SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS W5A", ALC880_FIXUP_ASUS_W5A), SND_PCI_QUIRK(0x1043, 0x1964, "ASUS Z71V", ALC880_FIXUP_Z71V), SND_PCI_QUIRK_VENDOR(0x1043, "ASUS", ALC880_FIXUP_GPIO1), SND_PCI_QUIRK(0x147b, 0x1045, "ABit AA8XE", ALC880_FIXUP_6ST_AUTOMUTE), SND_PCI_QUIRK(0x1558, 0x5401, "Clevo GPIO2", ALC880_FIXUP_GPIO2), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", ALC880_FIXUP_EAPD_COEF), SND_PCI_QUIRK(0x1584, 0x9050, "Uniwill", ALC880_FIXUP_UNIWILL_DIG), SND_PCI_QUIRK(0x1584, 0x9054, "Uniwill", ALC880_FIXUP_F1734), SND_PCI_QUIRK(0x1584, 0x9070, "Uniwill", ALC880_FIXUP_UNIWILL), SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB), SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810), SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM), SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE), SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU), SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU), SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734), SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU), SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25), SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", 
ALC880_FIXUP_TCL_S700), /* Below is the copied entries from alc880_quirks.c. * It's not quite sure whether BIOS sets the correct pin-config table * on these machines, thus they are kept to be compatible with * the old static quirks. Once when it's confirmed to work without * these overrides, it'd be better to remove. */ SND_PCI_QUIRK(0x1019, 0xa880, "ECS", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x1019, 0xa884, "Acer APFV", ALC880_FIXUP_6ST), SND_PCI_QUIRK(0x1025, 0x0070, "ULI", ALC880_FIXUP_3ST_DIG), SND_PCI_QUIRK(0x1025, 0x0077, "ULI", ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x1025, 0x0078, "ULI", ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x1025, 0x0087, "ULI", ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_FIXUP_3ST_DIG), SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_FIXUP_3ST), SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x104d, 0x81a0, "Sony", ALC880_FIXUP_3ST), SND_PCI_QUIRK(0x104d, 0x81d6, "Sony", ALC880_FIXUP_3ST), SND_PCI_QUIRK(0x107b, 0x3032, "Gateway", ALC880_FIXUP_5ST), SND_PCI_QUIRK(0x107b, 0x3033, "Gateway", ALC880_FIXUP_5ST), SND_PCI_QUIRK(0x107b, 0x4039, "Gateway", ALC880_FIXUP_5ST), SND_PCI_QUIRK(0x1297, 0xc790, "Shuttle ST20G5", ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x1458, 0xa102, "Gigabyte K8", ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x1462, 0x1150, "MSI", ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x1509, 0x925d, "FIC P4M", ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x1565, 0x8202, "Biostar", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x1695, 0x400d, "EPoX", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x2668, 0x8086, NULL, ALC880_FIXUP_6ST_DIG), /* broken BIOS */ SND_PCI_QUIRK(0x8086, 0x2668, NULL, ALC880_FIXUP_6ST_DIG), SND_PCI_QUIRK(0x8086, 0xa100, "Intel mobo", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x8086, 0xd400, "Intel mobo", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x8086, 0xd401, "Intel mobo", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x8086, 0xd402, "Intel mobo", 
ALC880_FIXUP_3ST_DIG), SND_PCI_QUIRK(0x8086, 0xe224, "Intel mobo", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x8086, 0xe305, "Intel mobo", ALC880_FIXUP_3ST_DIG), SND_PCI_QUIRK(0x8086, 0xe308, "Intel mobo", ALC880_FIXUP_3ST_DIG), SND_PCI_QUIRK(0x8086, 0xe400, "Intel mobo", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x8086, 0xe401, "Intel mobo", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0x8086, 0xe402, "Intel mobo", ALC880_FIXUP_5ST_DIG), /* default Intel */ SND_PCI_QUIRK_VENDOR(0x8086, "Intel mobo", ALC880_FIXUP_3ST), SND_PCI_QUIRK(0xa0a0, 0x0560, "AOpen i915GMm-HFS", ALC880_FIXUP_5ST_DIG), SND_PCI_QUIRK(0xe803, 0x1019, NULL, ALC880_FIXUP_6ST_DIG), {} }; static const struct hda_model_fixup alc880_fixup_models[] = { {.id = ALC880_FIXUP_3ST, .name = "3stack"}, {.id = ALC880_FIXUP_3ST_DIG, .name = "3stack-digout"}, {.id = ALC880_FIXUP_5ST, .name = "5stack"}, {.id = ALC880_FIXUP_5ST_DIG, .name = "5stack-digout"}, {.id = ALC880_FIXUP_6ST, .name = "6stack"}, {.id = ALC880_FIXUP_6ST_DIG, .name = "6stack-digout"}, {.id = ALC880_FIXUP_6ST_AUTOMUTE, .name = "6stack-automute"}, {} }; /* * OK, here we have finally the patch for ALC880 */ static int patch_alc880(struct hda_codec *codec) { struct alc_spec *spec; int err; err = alc_alloc_spec(codec, 0x0b); if (err < 0) return err; spec = codec->spec; spec->gen.need_dac_fix = 1; spec->gen.beep_nid = 0x01; codec->patch_ops.unsol_event = alc880_unsol_event; alc_pre_init(codec); snd_hda_pick_fixup(codec, alc880_fixup_models, alc880_fixup_tbl, alc880_fixups); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); /* automatic parse from the BIOS config */ err = alc880_parse_auto_config(codec); if (err < 0) goto error; if (!spec->gen.no_analog) { err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); if (err < 0) goto error; } snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE); return 0; error: alc_free(codec); return err; } /* * ALC260 support */ static int alc260_parse_auto_config(struct hda_codec *codec) { static const hda_nid_t alc260_ignore[] = { 0x17, 0 }; 
static const hda_nid_t alc260_ssids[] = { 0x10, 0x15, 0x0f, 0 };
	return alc_parse_auto_config(codec, alc260_ignore, alc260_ssids);
}

/*
 * Pin config fixes
 */
enum {
	ALC260_FIXUP_HP_DC5750,
	ALC260_FIXUP_HP_PIN_0F,
	ALC260_FIXUP_COEF,
	ALC260_FIXUP_GPIO1,
	ALC260_FIXUP_GPIO1_TOGGLE,
	ALC260_FIXUP_REPLACER,
	ALC260_FIXUP_HP_B1900,
	ALC260_FIXUP_KN1,
	ALC260_FIXUP_FSC_S7020,
	ALC260_FIXUP_FSC_S7020_JWSE,
	ALC260_FIXUP_VAIO_PINS,
};

/* automute hook: drive GPIO1 to mirror the headphone-jack presence state */
static void alc260_gpio1_automute(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	alc_update_gpio_data(codec, 0x01, spec->gen.hp_jack_present);
}

/* install jack-detect on pin 0x0f and toggle GPIO1 from its state;
 * runs at PROBE time so the generic parser has already been set up
 */
static void alc260_fixup_gpio1_toggle(struct hda_codec *codec,
				      const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PROBE) {
		/* although the machine has only one output pin, we need to
		 * toggle GPIO1 according to the jack state
		 */
		spec->gen.automute_hook = alc260_gpio1_automute;
		spec->gen.detect_hp = 1;
		spec->gen.automute_speaker = 1;
		spec->gen.autocfg.hp_pins[0] = 0x0f; /* copy it for automute */
		snd_hda_jack_detect_enable_callback(codec, 0x0f,
						    snd_hda_gen_hp_automute);
		alc_setup_gpio(codec, 0x01);
	}
}

/* Quanta KN1: override the BIOS pin defaults (real pins plus a batch of
 * bogus 0x411111f0 "not connected" entries) and skip the amp init
 */
static void alc260_fixup_kn1(struct hda_codec *codec,
			     const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;
	static const struct hda_pintbl pincfgs[] = {
		{ 0x0f, 0x02214000 }, /* HP/speaker */
		{ 0x12, 0x90a60160 }, /* int mic */
		{ 0x13, 0x02a19000 }, /* ext mic */
		{ 0x18, 0x01446000 }, /* SPDIF out */
		/* disable bogus I/O pins */
		{ 0x10, 0x411111f0 },
		{ 0x11, 0x411111f0 },
		{ 0x14, 0x411111f0 },
		{ 0x15, 0x411111f0 },
		{ 0x16, 0x411111f0 },
		{ 0x17, 0x411111f0 },
		{ 0x19, 0x411111f0 },
		{ }
	};

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		snd_hda_apply_pincfgs(codec, pincfgs);
		spec->init_amp = ALC_INIT_NONE;
		break;
	}
}

/* FSC LifeBook S7020: no amp initialization needed */
static void alc260_fixup_fsc_s7020(struct hda_codec *codec,
				   const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		spec->init_amp =
ALC_INIT_NONE; } static void alc260_fixup_fsc_s7020_jwse(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->gen.add_jack_modes = 1; spec->gen.hp_mic = 1; } } static const struct hda_fixup alc260_fixups[] = { [ALC260_FIXUP_HP_DC5750] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x11, 0x90130110 }, /* speaker */ { } } }, [ALC260_FIXUP_HP_PIN_0F] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x0f, 0x01214000 }, /* HP */ { } } }, [ALC260_FIXUP_COEF] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x1a, AC_VERB_SET_PROC_COEF, 0x3040 }, { } }, }, [ALC260_FIXUP_GPIO1] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_gpio1, }, [ALC260_FIXUP_GPIO1_TOGGLE] = { .type = HDA_FIXUP_FUNC, .v.func = alc260_fixup_gpio1_toggle, .chained = true, .chain_id = ALC260_FIXUP_HP_PIN_0F, }, [ALC260_FIXUP_REPLACER] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x1a, AC_VERB_SET_PROC_COEF, 0x3050 }, { } }, .chained = true, .chain_id = ALC260_FIXUP_GPIO1_TOGGLE, }, [ALC260_FIXUP_HP_B1900] = { .type = HDA_FIXUP_FUNC, .v.func = alc260_fixup_gpio1_toggle, .chained = true, .chain_id = ALC260_FIXUP_COEF, }, [ALC260_FIXUP_KN1] = { .type = HDA_FIXUP_FUNC, .v.func = alc260_fixup_kn1, }, [ALC260_FIXUP_FSC_S7020] = { .type = HDA_FIXUP_FUNC, .v.func = alc260_fixup_fsc_s7020, }, [ALC260_FIXUP_FSC_S7020_JWSE] = { .type = HDA_FIXUP_FUNC, .v.func = alc260_fixup_fsc_s7020_jwse, .chained = true, .chain_id = ALC260_FIXUP_FSC_S7020, }, [ALC260_FIXUP_VAIO_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { /* Pin configs are missing completely on some VAIOs */ { 0x0f, 0x01211020 }, { 0x10, 0x0001003f }, { 0x11, 0x411111f0 }, { 0x12, 0x01a15930 }, { 0x13, 0x411111f0 }, { 0x14, 0x411111f0 }, { 0x15, 0x411111f0 }, { 0x16, 
0x411111f0 }, { 0x17, 0x411111f0 }, { 0x18, 0x411111f0 }, { 0x19, 0x411111f0 }, { } } }, }; static const struct hda_quirk alc260_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_FIXUP_GPIO1), SND_PCI_QUIRK(0x1025, 0x007f, "Acer Aspire 9500", ALC260_FIXUP_COEF), SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_FIXUP_GPIO1), SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750), SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900), SND_PCI_QUIRK(0x104d, 0x81bb, "Sony VAIO", ALC260_FIXUP_VAIO_PINS), SND_PCI_QUIRK(0x104d, 0x81e2, "Sony VAIO TX", ALC260_FIXUP_HP_PIN_0F), SND_PCI_QUIRK(0x10cf, 0x1326, "FSC LifeBook S7020", ALC260_FIXUP_FSC_S7020), SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1), SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1), SND_PCI_QUIRK(0x161f, 0x2057, "Replacer 672V", ALC260_FIXUP_REPLACER), SND_PCI_QUIRK(0x1631, 0xc017, "PB V7900", ALC260_FIXUP_COEF), {} }; static const struct hda_model_fixup alc260_fixup_models[] = { {.id = ALC260_FIXUP_GPIO1, .name = "gpio1"}, {.id = ALC260_FIXUP_COEF, .name = "coef"}, {.id = ALC260_FIXUP_FSC_S7020, .name = "fujitsu"}, {.id = ALC260_FIXUP_FSC_S7020_JWSE, .name = "fujitsu-jwse"}, {} }; /* */ static int patch_alc260(struct hda_codec *codec) { struct alc_spec *spec; int err; err = alc_alloc_spec(codec, 0x07); if (err < 0) return err; spec = codec->spec; /* as quite a few machines require HP amp for speaker outputs, * it's easier to enable it unconditionally; even if it's unneeded, * it's almost harmless. 
*/
	spec->gen.prefer_hp_amp = 1;
	spec->gen.beep_nid = 0x01;

	spec->shutup = alc_eapd_shutup;

	alc_pre_init(codec);

	/* apply board-specific fixups picked by PCI SSID / model option */
	snd_hda_pick_fixup(codec, alc260_fixup_models, alc260_fixup_tbl,
			   alc260_fixups);
	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);

	/* automatic parse from the BIOS config */
	err = alc260_parse_auto_config(codec);
	if (err < 0)
		goto error;

	if (!spec->gen.no_analog) {
		err = set_beep_amp(spec, 0x07, 0x05, HDA_INPUT);
		if (err < 0)
			goto error;
	}

	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);

	return 0;

 error:
	alc_free(codec);
	return err;
}

/*
 * ALC882/883/885/888/889 support
 *
 * ALC882 is almost identical with ALC880 but has cleaner and more flexible
 * configuration. Each pin widget can choose any input DACs and a mixer.
 * Each ADC is connected from a mixer of all inputs. This makes possible
 * 6-channel independent captures.
 *
 * In addition, an independent DAC for the multi-playback (not used in this
 * driver yet).
 */

/*
 * Pin config fixes
 */
enum {
	ALC882_FIXUP_ABIT_AW9D_MAX,
	ALC882_FIXUP_LENOVO_Y530,
	ALC882_FIXUP_PB_M5210,
	ALC882_FIXUP_ACER_ASPIRE_7736,
	ALC882_FIXUP_ASUS_W90V,
	ALC889_FIXUP_CD,
	ALC889_FIXUP_FRONT_HP_NO_PRESENCE,
	ALC889_FIXUP_VAIO_TT,
	ALC888_FIXUP_EEE1601,
	ALC886_FIXUP_EAPD,
	ALC882_FIXUP_EAPD,
	ALC883_FIXUP_EAPD,
	ALC883_FIXUP_ACER_EAPD,
	ALC882_FIXUP_GPIO1,
	ALC882_FIXUP_GPIO2,
	ALC882_FIXUP_GPIO3,
	ALC889_FIXUP_COEF,
	ALC882_FIXUP_ASUS_W2JC,
	ALC882_FIXUP_ACER_ASPIRE_4930G,
	ALC882_FIXUP_ACER_ASPIRE_8930G,
	ALC882_FIXUP_ASPIRE_8930G_VERBS,
	ALC885_FIXUP_MACPRO_GPIO,
	ALC889_FIXUP_DAC_ROUTE,
	ALC889_FIXUP_MBP_VREF,
	ALC889_FIXUP_IMAC91_VREF,
	ALC889_FIXUP_MBA11_VREF,
	ALC889_FIXUP_MBA21_VREF,
	ALC889_FIXUP_MP11_VREF,
	ALC889_FIXUP_MP41_VREF,
	ALC882_FIXUP_INV_DMIC,
	ALC882_FIXUP_NO_PRIMARY_HP,
	ALC887_FIXUP_ASUS_BASS,
	ALC887_FIXUP_BASS_CHMAP,
	ALC1220_FIXUP_GB_DUAL_CODECS,
	ALC1220_FIXUP_GB_X570,
	ALC1220_FIXUP_CLEVO_P950,
	ALC1220_FIXUP_CLEVO_PB51ED,
	ALC1220_FIXUP_CLEVO_PB51ED_PINS,
	ALC887_FIXUP_ASUS_AUDIO,
	ALC887_FIXUP_ASUS_HMIC,
	ALCS1200A_FIXUP_MIC_VREF,
ALC888VD_FIXUP_MIC_100VREF,
};

/* set bits 0x2030 in COEF index 7 at INIT time
 * (COEF meaning is undocumented by Realtek -- value taken as-is)
 */
static void alc889_fixup_coef(struct hda_codec *codec,
			      const struct hda_fixup *fix, int action)
{
	if (action != HDA_FIXUP_ACT_INIT)
		return;
	alc_update_coef_idx(codec, 7, 0, 0x2030);
}

/* set up GPIO at initialization */
static void alc885_fixup_macpro_gpio(struct hda_codec *codec,
				     const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	/* the delayed GPIO write is needed on these machines; then reuse
	 * the common GPIO3 setup
	 */
	spec->gpio_write_delay = true;
	alc_fixup_gpio3(codec, fix, action);
}

/* Fix the connection of some pins for ALC889:
 * At least, Acer Aspire 5935 shows the connections to DAC3/4 don't
 * work correctly (bko#42740)
 */
static void alc889_fixup_dac_route(struct hda_codec *codec,
				   const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		/* fake the connections during parsing the tree */
		static const hda_nid_t conn1[] = { 0x0c, 0x0d };
		static const hda_nid_t conn2[] = { 0x0e, 0x0f };

		snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1);
		snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn1), conn1);
		snd_hda_override_conn_list(codec, 0x18, ARRAY_SIZE(conn2), conn2);
		snd_hda_override_conn_list(codec, 0x1a, ARRAY_SIZE(conn2), conn2);
	} else if (action == HDA_FIXUP_ACT_PROBE) {
		/* restore the connections */
		static const hda_nid_t conn[] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 };

		snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn);
		snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn), conn);
		snd_hda_override_conn_list(codec, 0x18, ARRAY_SIZE(conn), conn);
		snd_hda_override_conn_list(codec, 0x1a, ARRAY_SIZE(conn), conn);
	}
}

/* Set VREF on HP pin */
static void alc889_fixup_mbp_vref(struct hda_codec *codec,
				  const struct hda_fixup *fix, int action)
{
	static const hda_nid_t nids[] = { 0x14, 0x15, 0x19 };
	struct alc_spec *spec = codec->spec;
	int i;

	if (action != HDA_FIXUP_ACT_INIT)
		return;
	/* apply VREF-80 to the first pin among the candidates that is
	 * configured as a headphone output
	 */
	for (i = 0; i < ARRAY_SIZE(nids); i++) {
		unsigned int val = snd_hda_codec_get_pincfg(codec, nids[i]);

		if (get_defcfg_device(val) != AC_JACK_HP_OUT)
			continue;
val = snd_hda_codec_get_pin_target(codec, nids[i]);
		val |= AC_PINCTL_VREF_80;
		snd_hda_set_pin_ctl(codec, nids[i], val);
		spec->gen.keep_vref_in_automute = 1;
		break;
	}
}

/* helper: force VREF-50 onto the given pins and keep VREF set across
 * automute transitions
 */
static void alc889_fixup_mac_pins(struct hda_codec *codec,
				  const hda_nid_t *nids, int num_nids)
{
	struct alc_spec *spec = codec->spec;
	int i;

	for (i = 0; i < num_nids; i++) {
		unsigned int val;

		val = snd_hda_codec_get_pin_target(codec, nids[i]);
		val |= AC_PINCTL_VREF_50;
		snd_hda_set_pin_ctl(codec, nids[i], val);
	}
	spec->gen.keep_vref_in_automute = 1;
}

/* Set VREF on speaker pins on imac91 */
static void alc889_fixup_imac91_vref(struct hda_codec *codec,
				     const struct hda_fixup *fix, int action)
{
	static const hda_nid_t nids[] = { 0x18, 0x1a };

	if (action == HDA_FIXUP_ACT_INIT)
		alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
}

/* Set VREF on speaker pins on mba11 */
static void alc889_fixup_mba11_vref(struct hda_codec *codec,
				    const struct hda_fixup *fix, int action)
{
	static const hda_nid_t nids[] = { 0x18 };

	if (action == HDA_FIXUP_ACT_INIT)
		alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
}

/* Set VREF on speaker pins on mba21 */
static void alc889_fixup_mba21_vref(struct hda_codec *codec,
				    const struct hda_fixup *fix, int action)
{
	static const hda_nid_t nids[] = { 0x18, 0x19 };

	if (action == HDA_FIXUP_ACT_INIT)
		alc889_fixup_mac_pins(codec, nids, ARRAY_SIZE(nids));
}

/* Don't take HP output as primary
 * Strangely, the speaker output doesn't work on Vaio Z and some Vaio
 * all-in-one desktop PCs (for example VGC-LN51JGB) through DAC 0x05
 */
static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
				       const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->gen.no_primary_hp = 1;
		spec->gen.no_multi_io = 1;
	}
}

static void alc_fixup_bass_chmap(struct hda_codec *codec,
				 const struct hda_fixup *fix, int action);

/* For dual-codec configuration, we need to disable some features to avoid
 * conflicts of kctls and PCM streams
 */
static void alc_fixup_dual_codecs(struct hda_codec *codec,
				  const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action != HDA_FIXUP_ACT_PRE_PROBE)
		return;
	/* disable vmaster */
	spec->gen.suppress_vmaster = 1;
	/* auto-mute and auto-mic switch don't work with multiple codecs */
	spec->gen.suppress_auto_mute = 1;
	spec->gen.suppress_auto_mic = 1;
	/* disable aamix as well */
	spec->gen.mixer_nid = 0;
	/* add location prefix to avoid conflicts */
	codec->force_pin_prefix = 1;
}

/* rename a mixer control if it exists; silently ignores missing controls */
static void rename_ctl(struct hda_codec *codec, const char *oldname,
		       const char *newname)
{
	struct snd_kcontrol *kctl;

	kctl = snd_hda_find_mixer_ctl(codec, oldname);
	if (kctl)
		snd_ctl_rename(codec->card, kctl, newname);
}

/* Gigabyte boards with two ALC1220 codecs: apply the common dual-codec
 * restrictions, then distinguish the two codec instances by card longname
 * and per-codec-address Capture control names
 */
static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec,
					 const struct hda_fixup *fix,
					 int action)
{
	alc_fixup_dual_codecs(codec, fix, action);
	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		/* override card longname to provide a unique UCM profile */
		strcpy(codec->card->longname,
		       "HDAudio-Gigabyte-ALC1220DualCodecs");
		break;
	case HDA_FIXUP_ACT_BUILD:
		/* rename Capture controls depending on the codec */
		rename_ctl(codec, "Capture Volume",
			   codec->addr == 0 ?
			   "Rear-Panel Capture Volume" :
			   "Front-Panel Capture Volume");
		rename_ctl(codec, "Capture Switch",
			   codec->addr == 0 ?
"Rear-Panel Capture Switch" :
			   "Front-Panel Capture Switch");
		break;
	}
}

/* Gigabyte X570 Aorus boards: route pins 0x14/0x1b to mixer 0x0c at
 * PRE_PROBE and program a board-specific COEF sequence at INIT
 */
static void alc1220_fixup_gb_x570(struct hda_codec *codec,
				  const struct hda_fixup *fix,
				  int action)
{
	static const hda_nid_t conn1[] = { 0x0c };
	static const struct coef_fw gb_x570_coefs[] = {
		WRITE_COEF(0x07, 0x03c0),
		WRITE_COEF(0x1a, 0x01c1),
		WRITE_COEF(0x1b, 0x0202),
		WRITE_COEF(0x43, 0x3005),
		{}
	};

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1);
		snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1);
		break;
	case HDA_FIXUP_ACT_INIT:
		alc_process_coef_fw(codec, gb_x570_coefs);
		break;
	}
}

static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
				     const struct hda_fixup *fix,
				     int action)
{
	static const hda_nid_t conn1[] = { 0x0c };

	if (action != HDA_FIXUP_ACT_PRE_PROBE)
		return;

	alc_update_coef_idx(codec, 0x7, 0, 0x3c3);
	/* We therefore want to make sure 0x14 (front headphone) and
	 * 0x1b (speakers) use the stereo DAC 0x02
	 */
	snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1);
	snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1);
}

static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
					     const struct hda_fixup *fix,
					     int action);

/* Clevo PB51ED: P950 routing plus the headset-mode (no HP mic) handling */
static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
				       const struct hda_fixup *fix,
				       int action)
{
	alc1220_fixup_clevo_p950(codec, fix, action);
	alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
}

/* automute hook: after the generic HP automute, switch pin 0x19 VREF
 * between 80% (jack present) and Hi-Z (jack absent)
 */
static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
					 struct hda_jack_callback *jack)
{
	struct alc_spec *spec = codec->spec;
	unsigned int vref;

	snd_hda_gen_hp_automute(codec, jack);
	if (spec->gen.hp_jack_present)
		vref = AC_PINCTL_VREF_80;
	else
		vref = AC_PINCTL_VREF_HIZ;
	snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
}

static void alc887_fixup_asus_jack(struct hda_codec *codec,
				   const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action != HDA_FIXUP_ACT_PROBE)
		return;
	snd_hda_set_pin_ctl_cache(codec, 0x1b,
PIN_HP); spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook; } static const struct hda_fixup alc882_fixups[] = { [ALC882_FIXUP_ABIT_AW9D_MAX] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x15, 0x01080104 }, /* side */ { 0x16, 0x01011012 }, /* rear */ { 0x17, 0x01016011 }, /* clfe */ { } } }, [ALC882_FIXUP_LENOVO_Y530] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x15, 0x99130112 }, /* rear int speakers */ { 0x16, 0x99130111 }, /* subwoofer */ { } } }, [ALC882_FIXUP_PB_M5210] = { .type = HDA_FIXUP_PINCTLS, .v.pins = (const struct hda_pintbl[]) { { 0x19, PIN_VREF50 }, {} } }, [ALC882_FIXUP_ACER_ASPIRE_7736] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_sku_ignore, }, [ALC882_FIXUP_ASUS_W90V] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x99130110 }, /* fix sequence for CLFE */ { } } }, [ALC889_FIXUP_CD] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1c, 0x993301f0 }, /* CD */ { } } }, [ALC889_FIXUP_FRONT_HP_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x02214120 }, /* Front HP jack is flaky, disable jack detect */ { } }, .chained = true, .chain_id = ALC889_FIXUP_CD, }, [ALC889_FIXUP_VAIO_TT] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x17, 0x90170111 }, /* hidden surround speaker */ { } } }, [ALC888_FIXUP_EEE1601] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x20, AC_VERB_SET_COEF_INDEX, 0x0b }, { 0x20, AC_VERB_SET_PROC_COEF, 0x0838 }, { } } }, [ALC886_FIXUP_EAPD] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* change to EAPD mode */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x0068 }, { } } }, [ALC882_FIXUP_EAPD] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* change to EAPD mode */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3060 }, { } } }, 
[ALC883_FIXUP_EAPD] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* change to EAPD mode */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3070 }, { } } }, [ALC883_FIXUP_ACER_EAPD] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* eanable EAPD on Acer laptops */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3050 }, { } } }, [ALC882_FIXUP_GPIO1] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_gpio1, }, [ALC882_FIXUP_GPIO2] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_gpio2, }, [ALC882_FIXUP_GPIO3] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_gpio3, }, [ALC882_FIXUP_ASUS_W2JC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_gpio1, .chained = true, .chain_id = ALC882_FIXUP_EAPD, }, [ALC889_FIXUP_COEF] = { .type = HDA_FIXUP_FUNC, .v.func = alc889_fixup_coef, }, [ALC882_FIXUP_ACER_ASPIRE_4930G] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x99130111 }, /* CLFE speaker */ { 0x17, 0x99130112 }, /* surround speaker */ { } }, .chained = true, .chain_id = ALC882_FIXUP_GPIO1, }, [ALC882_FIXUP_ACER_ASPIRE_8930G] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x99130111 }, /* CLFE speaker */ { 0x1b, 0x99130112 }, /* surround speaker */ { } }, .chained = true, .chain_id = ALC882_FIXUP_ASPIRE_8930G_VERBS, }, [ALC882_FIXUP_ASPIRE_8930G_VERBS] = { /* additional init verbs for Acer Aspire 8930G */ .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* Enable all DACs */ /* DAC DISABLE/MUTE 1? */ /* setting bits 1-5 disables DAC nids 0x02-0x06 * apparently. Init=0x38 */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x03 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x0000 }, /* DAC DISABLE/MUTE 2? */ /* some bit here disables the other DACs. * Init=0x4900 */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x08 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x0000 }, /* DMIC fix * This laptop has a stereo digital microphone. 
* The mics are only 1cm apart which makes the stereo * useless. However, either the mic or the ALC889 * makes the signal become a difference/sum signal * instead of standard stereo, which is annoying. * So instead we flip this bit which makes the * codec replicate the sum signal to both channels, * turning it into a normal mono mic. */ /* DMIC_CONTROL? Init value = 0x0001 */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x0b }, { 0x20, AC_VERB_SET_PROC_COEF, 0x0003 }, { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3050 }, { } }, .chained = true, .chain_id = ALC882_FIXUP_GPIO1, }, [ALC885_FIXUP_MACPRO_GPIO] = { .type = HDA_FIXUP_FUNC, .v.func = alc885_fixup_macpro_gpio, }, [ALC889_FIXUP_DAC_ROUTE] = { .type = HDA_FIXUP_FUNC, .v.func = alc889_fixup_dac_route, }, [ALC889_FIXUP_MBP_VREF] = { .type = HDA_FIXUP_FUNC, .v.func = alc889_fixup_mbp_vref, .chained = true, .chain_id = ALC882_FIXUP_GPIO1, }, [ALC889_FIXUP_IMAC91_VREF] = { .type = HDA_FIXUP_FUNC, .v.func = alc889_fixup_imac91_vref, .chained = true, .chain_id = ALC882_FIXUP_GPIO1, }, [ALC889_FIXUP_MBA11_VREF] = { .type = HDA_FIXUP_FUNC, .v.func = alc889_fixup_mba11_vref, .chained = true, .chain_id = ALC889_FIXUP_MBP_VREF, }, [ALC889_FIXUP_MBA21_VREF] = { .type = HDA_FIXUP_FUNC, .v.func = alc889_fixup_mba21_vref, .chained = true, .chain_id = ALC889_FIXUP_MBP_VREF, }, [ALC889_FIXUP_MP11_VREF] = { .type = HDA_FIXUP_FUNC, .v.func = alc889_fixup_mba11_vref, .chained = true, .chain_id = ALC885_FIXUP_MACPRO_GPIO, }, [ALC889_FIXUP_MP41_VREF] = { .type = HDA_FIXUP_FUNC, .v.func = alc889_fixup_mbp_vref, .chained = true, .chain_id = ALC885_FIXUP_MACPRO_GPIO, }, [ALC882_FIXUP_INV_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, }, [ALC882_FIXUP_NO_PRIMARY_HP] = { .type = HDA_FIXUP_FUNC, .v.func = alc882_fixup_no_primary_hp, }, [ALC887_FIXUP_ASUS_BASS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { {0x16, 0x99130130}, /* bass speaker */ {} }, .chained = true, .chain_id = 
ALC887_FIXUP_BASS_CHMAP, }, [ALC887_FIXUP_BASS_CHMAP] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_bass_chmap, }, [ALC1220_FIXUP_GB_DUAL_CODECS] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_gb_dual_codecs, }, [ALC1220_FIXUP_GB_X570] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_gb_x570, }, [ALC1220_FIXUP_CLEVO_P950] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_clevo_p950, }, [ALC1220_FIXUP_CLEVO_PB51ED] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_clevo_pb51ed, }, [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ {} }, .chained = true, .chain_id = ALC1220_FIXUP_CLEVO_PB51ED, }, [ALC887_FIXUP_ASUS_AUDIO] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */ { 0x19, 0x22219420 }, {} }, }, [ALC887_FIXUP_ASUS_HMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc887_fixup_asus_jack, .chained = true, .chain_id = ALC887_FIXUP_ASUS_AUDIO, }, [ALCS1200A_FIXUP_MIC_VREF] = { .type = HDA_FIXUP_PINCTLS, .v.pins = (const struct hda_pintbl[]) { { 0x18, PIN_VREF50 }, /* rear mic */ { 0x19, PIN_VREF50 }, /* front mic */ {} } }, [ALC888VD_FIXUP_MIC_100VREF] = { .type = HDA_FIXUP_PINCTLS, .v.pins = (const struct hda_pintbl[]) { { 0x18, PIN_VREF100 }, /* headset mic */ {} } }, }; static const struct hda_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0121, "Acer Aspire 5920G", ALC883_FIXUP_ACER_EAPD), 
SND_PCI_QUIRK(0x1025, 0x013e, "Acer Aspire 4930G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x013f, "Acer Aspire 5930G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0145, "Acer Aspire 8930G", ALC882_FIXUP_ACER_ASPIRE_8930G), SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G", ALC882_FIXUP_ACER_ASPIRE_8930G), SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G), SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736), SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC), SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x1043, 0x8797, "ASUS TUF B550M-PLUS", ALCS1200A_FIXUP_MIC_VREF), SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), /* All Apple entries are in codec SSIDs */ 
SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF), SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF), SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF), SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO), SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO), SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF), SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889_FIXUP_MBP_VREF), SND_PCI_QUIRK(0x106b, 0x3200, "iMac 7,1 Aluminum", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x106b, 0x3400, "MacBookAir 1,1", ALC889_FIXUP_MBA11_VREF), SND_PCI_QUIRK(0x106b, 0x3500, "MacBookAir 2,1", ALC889_FIXUP_MBA21_VREF), SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889_FIXUP_MBP_VREF), SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF), SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_FIXUP_MACPRO_GPIO), SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF), SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF), SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 
0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), SND_PCI_QUIRK(0x1462, 0xcc34, "MSI Godlike X570", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), SND_PCI_QUIRK(0x1558, 0x3702, "Clevo X370SN[VW]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x66a2, "Clevo PE60RNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x66a6, "Clevo PE60SN[CDE]-[GS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", 
ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67f1, "Clevo PC70H[PRS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67f5, "Clevo PD70PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED), SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e4, "Clevo P955ER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e5, "Clevo P955EE6", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e6, "Clevo P950R[CDF]", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0xd502, "Clevo PD50SNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_FIXUP_COEF), {} }; static const struct hda_model_fixup alc882_fixup_models[] = { {.id = ALC882_FIXUP_ABIT_AW9D_MAX, .name = "abit-aw9d"}, {.id = ALC882_FIXUP_LENOVO_Y530, .name = "lenovo-y530"}, {.id = 
ALC882_FIXUP_ACER_ASPIRE_7736, .name = "acer-aspire-7736"}, {.id = ALC882_FIXUP_ASUS_W90V, .name = "asus-w90v"}, {.id = ALC889_FIXUP_CD, .name = "cd"}, {.id = ALC889_FIXUP_FRONT_HP_NO_PRESENCE, .name = "no-front-hp"}, {.id = ALC889_FIXUP_VAIO_TT, .name = "vaio-tt"}, {.id = ALC888_FIXUP_EEE1601, .name = "eee1601"}, {.id = ALC882_FIXUP_EAPD, .name = "alc882-eapd"}, {.id = ALC883_FIXUP_EAPD, .name = "alc883-eapd"}, {.id = ALC882_FIXUP_GPIO1, .name = "gpio1"}, {.id = ALC882_FIXUP_GPIO2, .name = "gpio2"}, {.id = ALC882_FIXUP_GPIO3, .name = "gpio3"}, {.id = ALC889_FIXUP_COEF, .name = "alc889-coef"}, {.id = ALC882_FIXUP_ASUS_W2JC, .name = "asus-w2jc"}, {.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"}, {.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"}, {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"}, {.id = ALC885_FIXUP_MACPRO_GPIO, .name = "macpro-gpio"}, {.id = ALC889_FIXUP_DAC_ROUTE, .name = "dac-route"}, {.id = ALC889_FIXUP_MBP_VREF, .name = "mbp-vref"}, {.id = ALC889_FIXUP_IMAC91_VREF, .name = "imac91-vref"}, {.id = ALC889_FIXUP_MBA11_VREF, .name = "mba11-vref"}, {.id = ALC889_FIXUP_MBA21_VREF, .name = "mba21-vref"}, {.id = ALC889_FIXUP_MP11_VREF, .name = "mp11-vref"}, {.id = ALC889_FIXUP_MP41_VREF, .name = "mp41-vref"}, {.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"}, {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, {.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"}, {.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"}, {.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"}, {.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"}, {} }; static const struct snd_hda_pin_quirk alc882_pin_fixup_tbl[] = { SND_HDA_PIN_QUIRK(0x10ec1220, 0x1043, "ASUS", ALC1220_FIXUP_CLEVO_P950, {0x14, 0x01014010}, {0x15, 0x01011012}, {0x16, 0x01016011}, {0x18, 0x01a19040}, {0x19, 0x02a19050}, {0x1a, 0x0181304f}, {0x1b, 0x0221401f}, {0x1e, 0x01456130}), SND_HDA_PIN_QUIRK(0x10ec1220, 0x1462, "MS-7C35", ALC1220_FIXUP_CLEVO_P950, {0x14, 
0x01015010}, {0x15, 0x01011012}, {0x16, 0x01011011}, {0x18, 0x01a11040}, {0x19, 0x02a19050}, {0x1a, 0x0181104f}, {0x1b, 0x0221401f}, {0x1e, 0x01451130}), {} }; /* * BIOS auto configuration */ /* almost identical with ALC880 parser... */ static int alc882_parse_auto_config(struct hda_codec *codec) { static const hda_nid_t alc882_ignore[] = { 0x1d, 0 }; static const hda_nid_t alc882_ssids[] = { 0x15, 0x1b, 0x14, 0 }; return alc_parse_auto_config(codec, alc882_ignore, alc882_ssids); } /* */ static int patch_alc882(struct hda_codec *codec) { struct alc_spec *spec; int err; err = alc_alloc_spec(codec, 0x0b); if (err < 0) return err; spec = codec->spec; switch (codec->core.vendor_id) { case 0x10ec0882: case 0x10ec0885: case 0x10ec0900: case 0x10ec0b00: case 0x10ec1220: break; default: /* ALC883 and variants */ alc_fix_pll_init(codec, 0x20, 0x0a, 10); break; } alc_pre_init(codec); snd_hda_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl, alc882_fixups); snd_hda_pick_pin_fixup(codec, alc882_pin_fixup_tbl, alc882_fixups, true); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); alc_auto_parse_customize_define(codec); if (has_cdefine_beep(codec)) spec->gen.beep_nid = 0x01; /* automatic parse from the BIOS config */ err = alc882_parse_auto_config(codec); if (err < 0) goto error; if (!spec->gen.no_analog && spec->gen.beep_nid) { err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); if (err < 0) goto error; } snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE); return 0; error: alc_free(codec); return err; } /* * ALC262 support */ static int alc262_parse_auto_config(struct hda_codec *codec) { static const hda_nid_t alc262_ignore[] = { 0x1d, 0 }; static const hda_nid_t alc262_ssids[] = { 0x15, 0x1b, 0x14, 0 }; return alc_parse_auto_config(codec, alc262_ignore, alc262_ssids); } /* * Pin config fixes */ enum { ALC262_FIXUP_FSC_H270, ALC262_FIXUP_FSC_S7110, ALC262_FIXUP_HP_Z200, ALC262_FIXUP_TYAN, ALC262_FIXUP_LENOVO_3000, ALC262_FIXUP_BENQ, ALC262_FIXUP_BENQ_T31, 
ALC262_FIXUP_INV_DMIC, ALC262_FIXUP_INTEL_BAYLEYBAY, }; static const struct hda_fixup alc262_fixups[] = { [ALC262_FIXUP_FSC_H270] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x15, 0x0221142f }, /* front HP */ { 0x1b, 0x0121141f }, /* rear HP */ { } } }, [ALC262_FIXUP_FSC_S7110] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x15, 0x90170110 }, /* speaker */ { } }, .chained = true, .chain_id = ALC262_FIXUP_BENQ, }, [ALC262_FIXUP_HP_Z200] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x99130120 }, /* internal speaker */ { } } }, [ALC262_FIXUP_TYAN] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x1993e1f0 }, /* int AUX */ { } } }, [ALC262_FIXUP_LENOVO_3000] = { .type = HDA_FIXUP_PINCTLS, .v.pins = (const struct hda_pintbl[]) { { 0x19, PIN_VREF50 }, {} }, .chained = true, .chain_id = ALC262_FIXUP_BENQ, }, [ALC262_FIXUP_BENQ] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3070 }, {} } }, [ALC262_FIXUP_BENQ_T31] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x3050 }, {} } }, [ALC262_FIXUP_INV_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, }, [ALC262_FIXUP_INTEL_BAYLEYBAY] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_no_depop_delay, }, }; static const struct hda_quirk alc262_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", ALC262_FIXUP_HP_Z200), SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110), SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ), SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN), SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270), SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270), 
SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000), SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ), SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_FIXUP_BENQ_T31), SND_PCI_QUIRK(0x8086, 0x7270, "BayleyBay", ALC262_FIXUP_INTEL_BAYLEYBAY), {} }; static const struct hda_model_fixup alc262_fixup_models[] = { {.id = ALC262_FIXUP_INV_DMIC, .name = "inv-dmic"}, {.id = ALC262_FIXUP_FSC_H270, .name = "fsc-h270"}, {.id = ALC262_FIXUP_FSC_S7110, .name = "fsc-s7110"}, {.id = ALC262_FIXUP_HP_Z200, .name = "hp-z200"}, {.id = ALC262_FIXUP_TYAN, .name = "tyan"}, {.id = ALC262_FIXUP_LENOVO_3000, .name = "lenovo-3000"}, {.id = ALC262_FIXUP_BENQ, .name = "benq"}, {.id = ALC262_FIXUP_BENQ_T31, .name = "benq-t31"}, {.id = ALC262_FIXUP_INTEL_BAYLEYBAY, .name = "bayleybay"}, {} }; /* */ static int patch_alc262(struct hda_codec *codec) { struct alc_spec *spec; int err; err = alc_alloc_spec(codec, 0x0b); if (err < 0) return err; spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; spec->shutup = alc_eapd_shutup; #if 0 /* pshou 07/11/05 set a zero PCM sample to DAC when FIFO is * under-run */ alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x80); #endif alc_fix_pll_init(codec, 0x20, 0x0a, 10); alc_pre_init(codec); snd_hda_pick_fixup(codec, alc262_fixup_models, alc262_fixup_tbl, alc262_fixups); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); alc_auto_parse_customize_define(codec); if (has_cdefine_beep(codec)) spec->gen.beep_nid = 0x01; /* automatic parse from the BIOS config */ err = alc262_parse_auto_config(codec); if (err < 0) goto error; if (!spec->gen.no_analog && spec->gen.beep_nid) { err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); if (err < 0) goto error; } snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE); return 0; error: alc_free(codec); return err; } /* * ALC268 */ /* bind Beep switches of both NID 0x0f and 0x10 */ static int alc268_beep_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec 
= snd_kcontrol_chip(kcontrol); unsigned long pval; int err; mutex_lock(&codec->control_mutex); pval = kcontrol->private_value; kcontrol->private_value = (pval & ~0xff) | 0x0f; err = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol); if (err >= 0) { kcontrol->private_value = (pval & ~0xff) | 0x10; err = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol); } kcontrol->private_value = pval; mutex_unlock(&codec->control_mutex); return err; } static const struct snd_kcontrol_new alc268_beep_mixer[] = { HDA_CODEC_VOLUME("Beep Playback Volume", 0x1d, 0x0, HDA_INPUT), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Beep Playback Switch", .subdevice = HDA_SUBDEV_AMP_FLAG, .info = snd_hda_mixer_amp_switch_info, .get = snd_hda_mixer_amp_switch_get, .put = alc268_beep_switch_put, .private_value = HDA_COMPOSE_AMP_VAL(0x0f, 3, 1, HDA_INPUT) }, }; /* set PCBEEP vol = 0, mute connections */ static const struct hda_verb alc268_beep_init_verbs[] = { {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, { } }; enum { ALC268_FIXUP_INV_DMIC, ALC268_FIXUP_HP_EAPD, ALC268_FIXUP_SPDIF, }; static const struct hda_fixup alc268_fixups[] = { [ALC268_FIXUP_INV_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, }, [ALC268_FIXUP_HP_EAPD] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { {0x15, AC_VERB_SET_EAPD_BTLENABLE, 0}, {} } }, [ALC268_FIXUP_SPDIF] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1e, 0x014b1180 }, /* enable SPDIF out */ {} } }, }; static const struct hda_model_fixup alc268_fixup_models[] = { {.id = ALC268_FIXUP_INV_DMIC, .name = "inv-dmic"}, {.id = ALC268_FIXUP_HP_EAPD, .name = "hp-eapd"}, {.id = ALC268_FIXUP_SPDIF, .name = "spdif"}, {} }; static const struct hda_quirk alc268_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0139, "Acer TravelMate 6293", ALC268_FIXUP_SPDIF), SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", 
ALC268_FIXUP_INV_DMIC), /* below is codec SSID since multiple Toshiba laptops have the * same PCI SSID 1179:ff00 */ SND_PCI_QUIRK(0x1179, 0xff06, "Toshiba P200", ALC268_FIXUP_HP_EAPD), {} }; /* * BIOS auto configuration */ static int alc268_parse_auto_config(struct hda_codec *codec) { static const hda_nid_t alc268_ssids[] = { 0x15, 0x1b, 0x14, 0 }; return alc_parse_auto_config(codec, NULL, alc268_ssids); } /* */ static int patch_alc268(struct hda_codec *codec) { struct alc_spec *spec; int i, err; /* ALC268 has no aa-loopback mixer */ err = alc_alloc_spec(codec, 0); if (err < 0) return err; spec = codec->spec; if (has_cdefine_beep(codec)) spec->gen.beep_nid = 0x01; spec->shutup = alc_eapd_shutup; alc_pre_init(codec); snd_hda_pick_fixup(codec, alc268_fixup_models, alc268_fixup_tbl, alc268_fixups); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); /* automatic parse from the BIOS config */ err = alc268_parse_auto_config(codec); if (err < 0) goto error; if (err > 0 && !spec->gen.no_analog && spec->gen.autocfg.speaker_pins[0] != 0x1d) { for (i = 0; i < ARRAY_SIZE(alc268_beep_mixer); i++) { if (!snd_hda_gen_add_kctl(&spec->gen, NULL, &alc268_beep_mixer[i])) { err = -ENOMEM; goto error; } } snd_hda_add_verbs(codec, alc268_beep_init_verbs); if (!query_amp_caps(codec, 0x1d, HDA_INPUT)) /* override the amp caps for beep generator */ snd_hda_override_amp_caps(codec, 0x1d, HDA_INPUT, (0x0c << AC_AMPCAP_OFFSET_SHIFT) | (0x0c << AC_AMPCAP_NUM_STEPS_SHIFT) | (0x07 << AC_AMPCAP_STEP_SIZE_SHIFT) | (0 << AC_AMPCAP_MUTE_SHIFT)); } snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE); return 0; error: alc_free(codec); return err; } /* * ALC269 */ static const struct hda_pcm_stream alc269_44k_pcm_analog_playback = { .rates = SNDRV_PCM_RATE_44100, /* fixed rate */ }; static const struct hda_pcm_stream alc269_44k_pcm_analog_capture = { .rates = SNDRV_PCM_RATE_44100, /* fixed rate */ }; /* different alc269-variants */ enum { ALC269_TYPE_ALC269VA, ALC269_TYPE_ALC269VB, ALC269_TYPE_ALC269VC, 
ALC269_TYPE_ALC269VD, ALC269_TYPE_ALC280, ALC269_TYPE_ALC282, ALC269_TYPE_ALC283, ALC269_TYPE_ALC284, ALC269_TYPE_ALC293, ALC269_TYPE_ALC286, ALC269_TYPE_ALC298, ALC269_TYPE_ALC255, ALC269_TYPE_ALC256, ALC269_TYPE_ALC257, ALC269_TYPE_ALC215, ALC269_TYPE_ALC225, ALC269_TYPE_ALC245, ALC269_TYPE_ALC287, ALC269_TYPE_ALC294, ALC269_TYPE_ALC300, ALC269_TYPE_ALC623, ALC269_TYPE_ALC700, }; /* * BIOS auto configuration */ static int alc269_parse_auto_config(struct hda_codec *codec) { static const hda_nid_t alc269_ignore[] = { 0x1d, 0 }; static const hda_nid_t alc269_ssids[] = { 0, 0x1b, 0x14, 0x21 }; static const hda_nid_t alc269va_ssids[] = { 0x15, 0x1b, 0x14, 0 }; struct alc_spec *spec = codec->spec; const hda_nid_t *ssids; switch (spec->codec_variant) { case ALC269_TYPE_ALC269VA: case ALC269_TYPE_ALC269VC: case ALC269_TYPE_ALC280: case ALC269_TYPE_ALC284: case ALC269_TYPE_ALC293: ssids = alc269va_ssids; break; case ALC269_TYPE_ALC269VB: case ALC269_TYPE_ALC269VD: case ALC269_TYPE_ALC282: case ALC269_TYPE_ALC283: case ALC269_TYPE_ALC286: case ALC269_TYPE_ALC298: case ALC269_TYPE_ALC255: case ALC269_TYPE_ALC256: case ALC269_TYPE_ALC257: case ALC269_TYPE_ALC215: case ALC269_TYPE_ALC225: case ALC269_TYPE_ALC245: case ALC269_TYPE_ALC287: case ALC269_TYPE_ALC294: case ALC269_TYPE_ALC300: case ALC269_TYPE_ALC623: case ALC269_TYPE_ALC700: ssids = alc269_ssids; break; default: ssids = alc269_ssids; break; } return alc_parse_auto_config(codec, alc269_ignore, ssids); } static const struct hda_jack_keymap alc_headset_btn_keymap[] = { { SND_JACK_BTN_0, KEY_PLAYPAUSE }, { SND_JACK_BTN_1, KEY_VOICECOMMAND }, { SND_JACK_BTN_2, KEY_VOLUMEUP }, { SND_JACK_BTN_3, KEY_VOLUMEDOWN }, {} }; static void alc_headset_btn_callback(struct hda_codec *codec, struct hda_jack_callback *jack) { int report = 0; if (jack->unsol_res & (7 << 13)) report |= SND_JACK_BTN_0; if (jack->unsol_res & (1 << 16 | 3 << 8)) report |= SND_JACK_BTN_1; /* Volume up key */ if (jack->unsol_res & (7 << 23)) report |= 
SND_JACK_BTN_2; /* Volume down key */ if (jack->unsol_res & (7 << 10)) report |= SND_JACK_BTN_3; snd_hda_jack_set_button_state(codec, jack->nid, report); } static void alc_disable_headset_jack_key(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (!spec->has_hs_key) return; switch (codec->core.vendor_id) { case 0x10ec0215: case 0x10ec0225: case 0x10ec0285: case 0x10ec0287: case 0x10ec0295: case 0x10ec0289: case 0x10ec0299: alc_write_coef_idx(codec, 0x48, 0x0); alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0); break; case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x10ec0257: case 0x19e58326: alc_write_coef_idx(codec, 0x48, 0x0); alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); break; } } static void alc_enable_headset_jack_key(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (!spec->has_hs_key) return; switch (codec->core.vendor_id) { case 0x10ec0215: case 0x10ec0225: case 0x10ec0285: case 0x10ec0287: case 0x10ec0295: case 0x10ec0289: case 0x10ec0299: alc_write_coef_idx(codec, 0x48, 0xd011); alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8); break; case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x10ec0257: case 0x19e58326: alc_write_coef_idx(codec, 0x48, 0xd011); alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); break; } } static void alc_fixup_headset_jack(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: spec->has_hs_key = 1; snd_hda_jack_detect_enable_callback(codec, 0x55, alc_headset_btn_callback); break; case HDA_FIXUP_ACT_BUILD: hp_pin = alc_get_hp_pin(spec); if (!hp_pin || snd_hda_jack_bind_keymap(codec, 0x55, alc_headset_btn_keymap, hp_pin)) snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false, SND_JACK_HEADSET, alc_headset_btn_keymap); alc_enable_headset_jack_key(codec); break; 
} } static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up) { alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0); } static void alc269_shutup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; if (spec->codec_variant == ALC269_TYPE_ALC269VB) alc269vb_toggle_power_output(codec, 0); if (spec->codec_variant == ALC269_TYPE_ALC269VB && (alc_get_coef0(codec) & 0x00ff) == 0x018) { msleep(150); } alc_shutup_pins(codec); } static const struct coef_fw alc282_coefs[] = { WRITE_COEF(0x03, 0x0002), /* Power Down Control */ UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */ WRITE_COEF(0x07, 0x0200), /* DMIC control */ UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */ UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */ WRITE_COEF(0x0a, 0xcccc), /* JD offset1 */ WRITE_COEF(0x0b, 0xcccc), /* JD offset2 */ WRITE_COEF(0x0e, 0x6e00), /* LDO1/2/3, DAC/ADC */ UPDATE_COEF(0x0f, 0xf800, 0x1000), /* JD */ UPDATE_COEF(0x10, 0xfc00, 0x0c00), /* Capless */ WRITE_COEF(0x6f, 0x0), /* Class D test 4 */ UPDATE_COEF(0x0c, 0xfe00, 0), /* IO power down directly */ WRITE_COEF(0x34, 0xa0c0), /* ANC */ UPDATE_COEF(0x16, 0x0008, 0), /* AGC MUX */ UPDATE_COEF(0x1d, 0x00e0, 0), /* DAC simple content protection */ UPDATE_COEF(0x1f, 0x00e0, 0), /* ADC simple content protection */ WRITE_COEF(0x21, 0x8804), /* DAC ADC Zero Detection */ WRITE_COEF(0x63, 0x2902), /* PLL */ WRITE_COEF(0x68, 0xa080), /* capless control 2 */ WRITE_COEF(0x69, 0x3400), /* capless control 3 */ WRITE_COEF(0x6a, 0x2f3e), /* capless control 4 */ WRITE_COEF(0x6b, 0x0), /* capless control 5 */ UPDATE_COEF(0x6d, 0x0fff, 0x0900), /* class D test 2 */ WRITE_COEF(0x6e, 0x110a), /* class D test 3 */ UPDATE_COEF(0x70, 0x00f8, 0x00d8), /* class D test 5 */ WRITE_COEF(0x71, 0x0014), /* class D test 6 */ WRITE_COEF(0x72, 0xc2ba), /* classD OCP */ UPDATE_COEF(0x77, 0x0f80, 0), /* classD pure DC test */ WRITE_COEF(0x6c, 0xfc06), /* Class D amp control */ {} }; static void 
alc282_restore_default_value(struct hda_codec *codec) { alc_process_coef_fw(codec, alc282_coefs); } static void alc282_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; int coef78; alc282_restore_default_value(codec); if (!hp_pin) return; hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); coef78 = alc_read_coef_idx(codec, 0x78); /* Index 0x78 Direct Drive HP AMP LPM Control 1 */ /* Headphone capless set to high power mode */ alc_write_coef_idx(codec, 0x78, 0x9004); if (hp_pin_sense) msleep(2); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); if (hp_pin_sense) msleep(85); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); if (hp_pin_sense) msleep(100); /* Headphone capless set to normal mode */ alc_write_coef_idx(codec, 0x78, coef78); } static void alc282_shutup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; int coef78; if (!hp_pin) { alc269_shutup(codec); return; } hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); coef78 = alc_read_coef_idx(codec, 0x78); alc_write_coef_idx(codec, 0x78, 0x9004); if (hp_pin_sense) msleep(2); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); if (hp_pin_sense) msleep(85); if (!spec->no_shutup_pins) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); if (hp_pin_sense) msleep(100); alc_auto_setup_eapd(codec, false); alc_shutup_pins(codec); alc_write_coef_idx(codec, 0x78, coef78); } static const struct coef_fw alc283_coefs[] = { WRITE_COEF(0x03, 0x0002), /* Power Down Control */ UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */ WRITE_COEF(0x07, 0x0200), /* DMIC control */ UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */ UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */ WRITE_COEF(0x0a, 0xcccc), /* JD offset1 */ WRITE_COEF(0x0b, 0xcccc), /* JD offset2 */ 
WRITE_COEF(0x0e, 0x6fc0), /* LDO1/2/3, DAC/ADC */ UPDATE_COEF(0x0f, 0xf800, 0x1000), /* JD */ UPDATE_COEF(0x10, 0xfc00, 0x0c00), /* Capless */ WRITE_COEF(0x3a, 0x0), /* Class D test 4 */ UPDATE_COEF(0x0c, 0xfe00, 0x0), /* IO power down directly */ WRITE_COEF(0x22, 0xa0c0), /* ANC */ UPDATE_COEFEX(0x53, 0x01, 0x000f, 0x0008), /* AGC MUX */ UPDATE_COEF(0x1d, 0x00e0, 0), /* DAC simple content protection */ UPDATE_COEF(0x1f, 0x00e0, 0), /* ADC simple content protection */ WRITE_COEF(0x21, 0x8804), /* DAC ADC Zero Detection */ WRITE_COEF(0x2e, 0x2902), /* PLL */ WRITE_COEF(0x33, 0xa080), /* capless control 2 */ WRITE_COEF(0x34, 0x3400), /* capless control 3 */ WRITE_COEF(0x35, 0x2f3e), /* capless control 4 */ WRITE_COEF(0x36, 0x0), /* capless control 5 */ UPDATE_COEF(0x38, 0x0fff, 0x0900), /* class D test 2 */ WRITE_COEF(0x39, 0x110a), /* class D test 3 */ UPDATE_COEF(0x3b, 0x00f8, 0x00d8), /* class D test 5 */ WRITE_COEF(0x3c, 0x0014), /* class D test 6 */ WRITE_COEF(0x3d, 0xc2ba), /* classD OCP */ UPDATE_COEF(0x42, 0x0f80, 0x0), /* classD pure DC test */ WRITE_COEF(0x49, 0x0), /* test mode */ UPDATE_COEF(0x40, 0xf800, 0x9800), /* Class D DC enable */ UPDATE_COEF(0x42, 0xf000, 0x2000), /* DC offset */ WRITE_COEF(0x37, 0xfc06), /* Class D amp control */ UPDATE_COEF(0x1b, 0x8000, 0), /* HP JD control */ {} }; static void alc283_restore_default_value(struct hda_codec *codec) { alc_process_coef_fw(codec, alc283_coefs); } static void alc283_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; alc283_restore_default_value(codec); if (!hp_pin) return; msleep(30); hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); /* Index 0x43 Direct Drive HP AMP LPM Control 1 */ /* Headphone capless set to high power mode */ alc_write_coef_idx(codec, 0x43, 0x9004); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); if (hp_pin_sense) msleep(85); snd_hda_codec_write(codec, hp_pin, 0, 
AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); if (hp_pin_sense) msleep(85); /* Index 0x46 Combo jack auto switch control 2 */ /* 3k pull low control for Headset jack. */ alc_update_coef_idx(codec, 0x46, 3 << 12, 0); /* Headphone capless set to normal mode */ alc_write_coef_idx(codec, 0x43, 0x9614); } static void alc283_shutup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; if (!hp_pin) { alc269_shutup(codec); return; } hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); alc_write_coef_idx(codec, 0x43, 0x9004); /*depop hp during suspend*/ alc_write_coef_idx(codec, 0x06, 0x2100); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); if (hp_pin_sense) msleep(100); if (!spec->no_shutup_pins) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); alc_update_coef_idx(codec, 0x46, 0, 3 << 12); if (hp_pin_sense) msleep(100); alc_auto_setup_eapd(codec, false); alc_shutup_pins(codec); alc_write_coef_idx(codec, 0x43, 0x9614); } static void alc256_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; if (spec->ultra_low_power) { alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1); alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2); alc_update_coef_idx(codec, 0x08, 7<<4, 0); alc_update_coef_idx(codec, 0x3b, 1<<15, 0); alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); msleep(30); } if (!hp_pin) hp_pin = 0x21; msleep(30); hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); if (hp_pin_sense) { msleep(2); alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); msleep(75); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); msleep(75); alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ } alc_update_coef_idx(codec, 0x46, 3 << 12, 0); alc_update_coefex_idx(codec, 0x53, 
0x02, 0x8000, 1 << 15); /* Clear bit */ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15); /* * Expose headphone mic (or possibly Line In on some machines) instead * of PC Beep on 1Ah, and disable 1Ah loopback for all outputs. See * Documentation/sound/hd-audio/realtek-pc-beep.rst for details of * this register. */ alc_write_coef_idx(codec, 0x36, 0x5757); } static void alc256_shutup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; if (!hp_pin) hp_pin = 0x21; alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); if (hp_pin_sense) { msleep(2); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); msleep(75); /* 3k pull low control for Headset jack. */ /* NOTE: call this before clearing the pin, otherwise codec stalls */ /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly * when booting with headset plugged. 
So skip setting it for the codec alc257 */ if (spec->en_3kpull_low) alc_update_coef_idx(codec, 0x46, 0, 3 << 12); if (!spec->no_shutup_pins) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); msleep(75); } alc_auto_setup_eapd(codec, false); alc_shutup_pins(codec); if (spec->ultra_low_power) { msleep(50); alc_update_coef_idx(codec, 0x03, 1<<1, 0); alc_update_coef_idx(codec, 0x08, 7<<4, 7<<4); alc_update_coef_idx(codec, 0x08, 3<<2, 0); alc_update_coef_idx(codec, 0x3b, 1<<15, 1<<15); alc_update_coef_idx(codec, 0x0e, 7<<6, 0); msleep(30); } } static void alc285_hp_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); int i, val; int coef38, coef0d, coef36; alc_write_coefex_idx(codec, 0x58, 0x00, 0x1888); /* write default value */ alc_update_coef_idx(codec, 0x4a, 1<<15, 1<<15); /* Reset HP JD */ coef38 = alc_read_coef_idx(codec, 0x38); /* Amp control */ coef0d = alc_read_coef_idx(codec, 0x0d); /* Digital Misc control */ coef36 = alc_read_coef_idx(codec, 0x36); /* Passthrough Control */ alc_update_coef_idx(codec, 0x38, 1<<4, 0x0); alc_update_coef_idx(codec, 0x0d, 0x110, 0x0); alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000); if (hp_pin) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); msleep(130); alc_update_coef_idx(codec, 0x36, 1<<14, 1<<14); alc_update_coef_idx(codec, 0x36, 1<<13, 0x0); if (hp_pin) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); msleep(10); alc_write_coef_idx(codec, 0x67, 0x0); /* Set HP depop to manual mode */ alc_write_coefex_idx(codec, 0x58, 0x00, 0x7880); alc_write_coefex_idx(codec, 0x58, 0x0f, 0xf049); alc_update_coefex_idx(codec, 0x58, 0x03, 0x00f0, 0x00c0); alc_write_coefex_idx(codec, 0x58, 0x00, 0xf888); /* HP depop procedure start */ val = alc_read_coefex_idx(codec, 0x58, 0x00); for (i = 0; i < 20 && val & 0x8000; i++) { msleep(50); val = alc_read_coefex_idx(codec, 0x58, 0x00); } /* Wait for depop 
procedure finish */ alc_write_coefex_idx(codec, 0x58, 0x00, val); /* write back the result */ alc_update_coef_idx(codec, 0x38, 1<<4, coef38); alc_update_coef_idx(codec, 0x0d, 0x110, coef0d); alc_update_coef_idx(codec, 0x36, 3<<13, coef36); msleep(50); alc_update_coef_idx(codec, 0x4a, 1<<15, 0); } static void alc225_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp1_pin_sense, hp2_pin_sense; if (spec->ultra_low_power) { alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2); alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6); alc_update_coef_idx(codec, 0x33, 1<<11, 0); msleep(30); } if (spec->codec_variant != ALC269_TYPE_ALC287 && spec->codec_variant != ALC269_TYPE_ALC245) /* required only at boot or S3 and S4 resume time */ if (!spec->done_hp_init || is_s3_resume(codec) || is_s4_resume(codec)) { alc285_hp_init(codec); spec->done_hp_init = true; } if (!hp_pin) hp_pin = 0x21; msleep(30); hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); hp2_pin_sense = snd_hda_jack_detect(codec, 0x16); if (hp1_pin_sense || hp2_pin_sense) { msleep(2); alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ if (hp1_pin_sense) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); if (hp2_pin_sense) snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); msleep(75); if (hp1_pin_sense) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); if (hp2_pin_sense) snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); msleep(75); alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ } } static void alc225_shutup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp1_pin_sense, hp2_pin_sense; if (!hp_pin) hp_pin = 0x21; hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); hp2_pin_sense = snd_hda_jack_detect(codec, 0x16); if (hp1_pin_sense || 
hp2_pin_sense) { alc_disable_headset_jack_key(codec); /* 3k pull low control for Headset jack. */ alc_update_coef_idx(codec, 0x4a, 0, 3 << 10); msleep(2); if (hp1_pin_sense) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); if (hp2_pin_sense) snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); msleep(75); if (hp1_pin_sense) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); if (hp2_pin_sense) snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); msleep(75); alc_update_coef_idx(codec, 0x4a, 3 << 10, 0); alc_enable_headset_jack_key(codec); } alc_auto_setup_eapd(codec, false); alc_shutup_pins(codec); if (spec->ultra_low_power) { msleep(50); alc_update_coef_idx(codec, 0x08, 0x0f << 2, 0x0c << 2); alc_update_coef_idx(codec, 0x0e, 7<<6, 0); alc_update_coef_idx(codec, 0x33, 1<<11, 1<<11); alc_update_coef_idx(codec, 0x4a, 3<<4, 2<<4); msleep(30); } } static void alc_default_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; if (!hp_pin) return; msleep(30); hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); if (hp_pin_sense) { msleep(2); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); msleep(75); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); msleep(75); } } static void alc_default_shutup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t hp_pin = alc_get_hp_pin(spec); bool hp_pin_sense; if (!hp_pin) { alc269_shutup(codec); return; } hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); if (hp_pin_sense) { msleep(2); snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); msleep(75); if (!spec->no_shutup_pins) snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); msleep(75); } alc_auto_setup_eapd(codec, false); alc_shutup_pins(codec); } static void 
alc294_hp_init(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;
	hda_nid_t hp_pin = alc_get_hp_pin(spec);
	int i, val;

	if (!hp_pin)
		return;

	snd_hda_codec_write(codec, hp_pin, 0,
			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);

	msleep(100);

	if (!spec->no_shutup_pins)
		snd_hda_codec_write(codec, hp_pin, 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);

	alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */

	/* Wait for depop procedure finish; poll bit 7 of coef 0x58/0x01,
	 * bounded to 20 * 50ms so a stuck bit cannot hang resume
	 */
	val = alc_read_coefex_idx(codec, 0x58, 0x01);
	for (i = 0; i < 20 && val & 0x0080; i++) {
		msleep(50);
		val = alc_read_coefex_idx(codec, 0x58, 0x01);
	}
	/* Set HP depop to auto mode */
	alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
	msleep(50);
}

static void alc294_init(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	/* required only at boot or S4 resume time */
	if (!spec->done_hp_init ||
	    codec->core.dev.power.power_state.event == PM_EVENT_RESTORE) {
		alc294_hp_init(codec);
		spec->done_hp_init = true;
	}
	alc_default_init(codec);
}

/* Write a 32-bit ALC5505 DSP coefficient: the index is pre-shifted and the
 * value is sent as two 16-bit PROC_COEF writes (LSB first) to NID 0x51
 */
static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
			     unsigned int val)
{
	snd_hda_codec_write(codec, 0x51, 0, AC_VERB_SET_COEF_INDEX, index_reg >> 1);
	snd_hda_codec_write(codec, 0x51, 0, AC_VERB_SET_PROC_COEF, val & 0xffff); /* LSB */
	snd_hda_codec_write(codec, 0x51, 0, AC_VERB_SET_PROC_COEF, val >> 16); /* MSB */
}

/* Read back a 32-bit ALC5505 DSP coefficient (two 16-bit reads, LSB first) */
static int alc5505_coef_get(struct hda_codec *codec, unsigned int index_reg)
{
	unsigned int val;

	snd_hda_codec_write(codec, 0x51, 0, AC_VERB_SET_COEF_INDEX, index_reg >> 1);
	val = snd_hda_codec_read(codec, 0x51, 0, AC_VERB_GET_PROC_COEF, 0) & 0xffff;
	val |= snd_hda_codec_read(codec, 0x51, 0, AC_VERB_GET_PROC_COEF, 0) << 16;
	return val;
}

/* Stop the ALC5505 DSP: halt the CPU, park the DRAM and shut down the PLLs.
 * The sequence order is hardware-mandated; do not reorder.
 */
static void alc5505_dsp_halt(struct hda_codec *codec)
{
	unsigned int val;

	alc5505_coef_set(codec, 0x3000, 0x000c); /* DSP CPU stop */
	alc5505_coef_set(codec, 0x880c, 0x0008); /* DDR enter self refresh */
	alc5505_coef_set(codec, 0x61c0, 0x11110080); /* Clock control for PLL and CPU */
	alc5505_coef_set(codec, 0x6230, 0xfc0d4011); /* Disable Input OP */
	alc5505_coef_set(codec, 0x61b4, 0x040a2b03); /* Stop PLL2 */
	alc5505_coef_set(codec, 0x61b0, 0x00005b17); /* Stop PLL1 */
	alc5505_coef_set(codec, 0x61b8, 0x04133303); /* Stop PLL3 */
	val = alc5505_coef_get(codec, 0x6220);
	alc5505_coef_set(codec, 0x6220, (val | 0x3000)); /* switch Ringbuffer clock to DBUS clock */
}

/* Reverse of alc5505_dsp_halt(): restart PLLs, clocks and DRAM */
static void alc5505_dsp_back_from_halt(struct hda_codec *codec)
{
	alc5505_coef_set(codec, 0x61b8, 0x04133302);
	alc5505_coef_set(codec, 0x61b0, 0x00005b16);
	alc5505_coef_set(codec, 0x61b4, 0x040a2b02);
	alc5505_coef_set(codec, 0x6230, 0xf80d4011);
	alc5505_coef_set(codec, 0x6220, 0x2002010f);
	alc5505_coef_set(codec, 0x880c, 0x00000004);
}

/* Full boot-time initialization of the ALC5505 DSP (PLLs, reset, DRAM) */
static void alc5505_dsp_init(struct hda_codec *codec)
{
	unsigned int val;

	alc5505_dsp_halt(codec);
	alc5505_dsp_back_from_halt(codec);
	alc5505_coef_set(codec, 0x61b0, 0x5b14); /* PLL1 control */
	alc5505_coef_set(codec, 0x61b0, 0x5b16);
	alc5505_coef_set(codec, 0x61b4, 0x04132b00); /* PLL2 control */
	alc5505_coef_set(codec, 0x61b4, 0x04132b02);
	alc5505_coef_set(codec, 0x61b8, 0x041f3300); /* PLL3 control*/
	alc5505_coef_set(codec, 0x61b8, 0x041f3302);
	snd_hda_codec_write(codec, 0x51, 0, AC_VERB_SET_CODEC_RESET, 0); /* Function reset */
	alc5505_coef_set(codec, 0x61b8, 0x041b3302);
	alc5505_coef_set(codec, 0x61b8, 0x04173302);
	alc5505_coef_set(codec, 0x61b8, 0x04163302);
	alc5505_coef_set(codec, 0x8800, 0x348b328b); /* DRAM control */
	alc5505_coef_set(codec, 0x8808, 0x00020022); /* DRAM control */
	alc5505_coef_set(codec, 0x8818, 0x00000400); /* DRAM control */

	val = alc5505_coef_get(codec, 0x6200) >> 16; /* Read revision ID */
	if (val <= 3)
		alc5505_coef_set(codec, 0x6220, 0x2002010f); /* I/O PAD Configuration */
	else
		alc5505_coef_set(codec, 0x6220, 0x6002018f);

	alc5505_coef_set(codec, 0x61ac, 0x055525f0); /**/
	alc5505_coef_set(codec, 0x61c0, 0x12230080); /* Clock control */
	alc5505_coef_set(codec, 0x61b4, 0x040e2b02); /* PLL2 control */
	alc5505_coef_set(codec, 0x61bc, 0x010234f8); /* OSC Control */
	alc5505_coef_set(codec, 0x880c, 0x00000004); /* DRAM Function control */
	alc5505_coef_set(codec, 0x880c, 0x00000003);
	alc5505_coef_set(codec, 0x880c, 0x00000010);

#ifdef HALT_REALTEK_ALC5505
	alc5505_dsp_halt(codec);
#endif
}

/* With HALT_REALTEK_ALC5505 the DSP is kept halted permanently, so the
 * suspend/resume hooks collapse to no-ops
 */
#ifdef HALT_REALTEK_ALC5505
#define alc5505_dsp_suspend(codec)	do { } while (0) /* NOP */
#define alc5505_dsp_resume(codec)	do { } while (0) /* NOP */
#else
#define alc5505_dsp_suspend(codec)	alc5505_dsp_halt(codec)
#define alc5505_dsp_resume(codec)	alc5505_dsp_back_from_halt(codec)
#endif

static int alc269_suspend(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	if (spec->has_alc5505_dsp)
		alc5505_dsp_suspend(codec);

	return alc_suspend(codec);
}

static int alc269_resume(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	/* ALC269VB needs its power output toggled off across init and the
	 * coef0-dependent delays below; values 0x018/0x017 select the delay
	 */
	if (spec->codec_variant == ALC269_TYPE_ALC269VB)
		alc269vb_toggle_power_output(codec, 0);
	if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
			(alc_get_coef0(codec) & 0x00ff) == 0x018) {
		msleep(150);
	}

	codec->patch_ops.init(codec);

	if (spec->codec_variant == ALC269_TYPE_ALC269VB)
		alc269vb_toggle_power_output(codec, 1);
	if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
			(alc_get_coef0(codec) & 0x00ff) == 0x017) {
		msleep(200);
	}

	snd_hda_regmap_sync(codec);
	hda_call_check_power_status(codec, 0x01);

	/* on some machine, the BIOS will clear the codec gpio data when enter
	 * suspend, and won't restore the data after resume, so we restore it
	 * in the driver.
	 */
	if (spec->gpio_data)
		alc_write_gpio_data(codec);

	if (spec->has_alc5505_dsp)
		alc5505_dsp_resume(codec);

	return 0;
}

/* Skip the generic headphone->lineout pin retasking during parsing */
static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
						 const struct hda_fixup *fix,
						 int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
}

/* If pin 0x19 carries the BIOS "not connected" default (0x411111f0) while a
 * headphone pin config exists, clone the headphone config onto 0x19 and
 * retask it as a mic jack
 */
static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
						 const struct hda_fixup *fix,
						 int action)
{
	unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
	unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);

	if (cfg_headphone && cfg_headset_mic == 0x411111f0)
		snd_hda_codec_set_pincfg(codec, 0x19,
			(cfg_headphone & ~AC_DEFCFG_DEVICE) |
			(AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
}

/* Enable the hardware EQ bit in coef 0x1e at each init */
static void alc269_fixup_hweq(struct hda_codec *codec,
			       const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_INIT)
		alc_update_coef_idx(codec, 0x1e, 0, 0x80);
}

/* Let the parser treat the mic pin as a headset mic */
static void alc269_fixup_headset_mic(struct hda_codec *codec,
				   const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
}

/* Apply a dmic coef tweak on ALC271X/ALC269VB when pin 0x12 is a fixed jack */
static void alc271_fixup_dmic(struct hda_codec *codec,
			      const struct hda_fixup *fix, int action)
{
	static const struct hda_verb verbs[] = {
		{0x20, AC_VERB_SET_COEF_INDEX, 0x0d},
		{0x20, AC_VERB_SET_PROC_COEF, 0x4000},
		{}
	};
	unsigned int cfg;

	if (strcmp(codec->core.chip_name, "ALC271X") &&
	    strcmp(codec->core.chip_name, "ALC269VB"))
		return;
	cfg = snd_hda_codec_get_pincfg(codec, 0x12);
	if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED)
		snd_hda_sequence_write(codec, verbs);
}

/* Fix the speaker amp after resume, etc */
static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec,
					  const struct hda_fixup *fix,
					  int action)
{
	if (action == HDA_FIXUP_ACT_INIT)
		alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000);
}

static void alc269_fixup_pcm_44k(struct hda_codec *codec,
				 const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action != HDA_FIXUP_ACT_PROBE)
		return;

	/* Due to a hardware problem on Lenovo IdeaPad, we need to
	 * fix the sample rate of analog I/O to 44.1kHz
	 */
	spec->gen.stream_analog_playback = &alc269_44k_pcm_analog_playback;
	spec->gen.stream_analog_capture = &alc269_44k_pcm_analog_capture;
}

static void alc269_fixup_stereo_dmic(struct hda_codec *codec,
				     const struct hda_fixup *fix, int action)
{
	/* The digital-mic unit sends PDM (differential signal) instead of
	 * the standard PCM, thus you can't record a valid mono stream as is.
	 * Below is a workaround specific to ALC269 to control the dmic
	 * signal source as mono.
	 */
	if (action == HDA_FIXUP_ACT_INIT)
		alc_update_coef_idx(codec, 0x07, 0, 0x80);
}

/* automute hook that additionally kicks coef 0x0c after output update */
static void alc269_quanta_automute(struct hda_codec *codec)
{
	snd_hda_gen_update_outputs(codec);

	alc_write_coef_idx(codec, 0x0c, 0x680);
	alc_write_coef_idx(codec, 0x0c, 0x480);
}

static void alc269_fixup_quanta_mute(struct hda_codec *codec,
				     const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action != HDA_FIXUP_ACT_PROBE)
		return;
	spec->gen.automute_hook = alc269_quanta_automute;
}

/* jack callback for ASUS x101: delay the automute and switch the mic VREF on
 * pin 0x18 depending on headphone presence
 */
static void alc269_x101_hp_automute_hook(struct hda_codec *codec,
					 struct hda_jack_callback *jack)
{
	struct alc_spec *spec = codec->spec;
	int vref;

	msleep(200);
	snd_hda_gen_hp_automute(codec, jack);

	vref = spec->gen.hp_jack_present ?
		PIN_VREF80 : 0;
	/* the VREF write is issued twice with delays; presumably required by
	 * the hardware for a stable switch -- keep the duplication
	 */
	msleep(100);
	snd_hda_codec_write(codec, 0x18, 0,
			    AC_VERB_SET_PIN_WIDGET_CONTROL, vref);
	msleep(500);
	snd_hda_codec_write(codec, 0x18, 0,
			    AC_VERB_SET_PIN_WIDGET_CONTROL, vref);
}

/*
 * Magic sequence to make Huawei Matebook X right speaker working (bko#197801)
 */
struct hda_alc298_mbxinit {
	unsigned char value_0x23;	/* written to coef 0x23 */
	unsigned char value_0x25;	/* written to coef 0x25 unless 0x23 == 0x1e */
};

/* Emit one step of the Matebook X init sequence; @first additionally issues
 * a pin-sense read on the headphone pin
 */
static void alc298_huawei_mbx_stereo_seq(struct hda_codec *codec,
					 const struct hda_alc298_mbxinit *initval,
					 bool first)
{
	snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x0);
	alc_write_coef_idx(codec, 0x26, 0xb000);

	if (first)
		snd_hda_codec_write(codec, 0x21, 0, AC_VERB_GET_PIN_SENSE, 0x0);

	snd_hda_codec_write(codec, 0x6, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x80);
	alc_write_coef_idx(codec, 0x26, 0xf000);
	alc_write_coef_idx(codec, 0x23, initval->value_0x23);

	if (initval->value_0x23 != 0x1e)
		alc_write_coef_idx(codec, 0x25, initval->value_0x25);

	snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x26);
	snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0xb010);
}

static void alc298_fixup_huawei_mbx_stereo(struct hda_codec *codec,
					   const struct hda_fixup *fix,
					   int action)
{
	/* Initialization magic */
	static const struct hda_alc298_mbxinit dac_init[] = {
		{0x0c, 0x00}, {0x0d, 0x00}, {0x0e, 0x00}, {0x0f, 0x00},
		{0x10, 0x00}, {0x1a, 0x40}, {0x1b, 0x82}, {0x1c, 0x00},
		{0x1d, 0x00}, {0x1e, 0x00}, {0x1f, 0x00},
		{0x20, 0xc2}, {0x21, 0xc8}, {0x22, 0x26}, {0x23, 0x24},
		{0x27, 0xff}, {0x28, 0xff}, {0x29, 0xff}, {0x2a, 0x8f},
		{0x2b, 0x02}, {0x2c, 0x48}, {0x2d, 0x34}, {0x2e, 0x00},
		{0x2f, 0x00},
		{0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00},
		{0x34, 0x00}, {0x35, 0x01}, {0x36, 0x93}, {0x37, 0x0c},
		{0x38, 0x00}, {0x39, 0x00}, {0x3a, 0xf8}, {0x38, 0x80},
		{}
	};
	const struct hda_alc298_mbxinit *seq;

	if (action != HDA_FIXUP_ACT_INIT)
		return;

	/* Start */
	snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x00);
	snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x80);
	alc_write_coef_idx(codec, 0x26, 0xf000);
	alc_write_coef_idx(codec, 0x22, 0x31);
	alc_write_coef_idx(codec, 0x23, 0x0b);
	alc_write_coef_idx(codec, 0x25, 0x00);
	snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x26);
	snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0xb010);

	for (seq = dac_init; seq->value_0x23; seq++)
		alc298_huawei_mbx_stereo_seq(codec, seq, seq == dac_init);
}

static void alc269_fixup_x101_headset_mic(struct hda_codec *codec,
				     const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
		spec->gen.hp_automute_hook = alc269_x101_hp_automute_hook;
	}
}

/* Drive a LED that is wired to a mic pin's VREF: VREF80 = on, HIZ = off
 * (inverted when @polarity is set)
 */
static void alc_update_vref_led(struct hda_codec *codec, hda_nid_t pin,
				bool polarity, bool on)
{
	unsigned int pinval;

	if (!pin)
		return;
	if (polarity)
		on = !on;
	pinval = snd_hda_codec_get_pin_target(codec, pin);
	pinval &= ~AC_PINCTL_VREFEN;
	pinval |= on ? AC_PINCTL_VREF_80 : AC_PINCTL_VREF_HIZ;
	/* temporarily power up/down for setting VREF */
	snd_hda_power_up_pm(codec);
	snd_hda_set_pin_ctl_cache(codec, pin, pinval);
	snd_hda_power_down_pm(codec);
}

/* update mute-LED according to the speaker mute state via mic VREF pin */
static int vref_mute_led_set(struct led_classdev *led_cdev,
			     enum led_brightness brightness)
{
	struct hda_codec *codec = dev_to_hda_codec(led_cdev->dev->parent);
	struct alc_spec *spec = codec->spec;

	alc_update_vref_led(codec, spec->mute_led_nid,
			    spec->mute_led_polarity, brightness);
	return 0;
}

/* Make sure the led works even in runtime suspend */
static unsigned int led_power_filter(struct hda_codec *codec,
				     hda_nid_t nid,
				     unsigned int power_state)
{
	struct alc_spec *spec = codec->spec;

	if (power_state != AC_PWRST_D3 || nid == 0 ||
	    (nid != spec->mute_led_nid && nid != spec->cap_mute_led_nid))
		return power_state;

	/* Set pin ctl again, it might have just been set to 0 */
	snd_hda_set_pin_ctl(codec, nid,
			    snd_hda_codec_get_pin_target(codec, nid));

	return
		snd_hda_gen_path_power_filter(codec, nid, power_state);
}

/* Detect HP mute LED wiring from the DMI OEM string
 * "HP_Mute_LED_<polarity>_<pin>"; the pin code 0x0a..0x0f maps onto
 * mic pins 0x18..0x1d
 */
static void alc269_fixup_hp_mute_led(struct hda_codec *codec,
				     const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;
	const struct dmi_device *dev = NULL;

	if (action != HDA_FIXUP_ACT_PRE_PROBE)
		return;

	while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) {
		int pol, pin;

		if (sscanf(dev->name, "HP_Mute_LED_%d_%x", &pol, &pin) != 2)
			continue;
		if (pin < 0x0a || pin >= 0x10)
			break;
		spec->mute_led_polarity = pol;
		spec->mute_led_nid = pin - 0x0a + 0x18;
		snd_hda_gen_add_mute_led_cdev(codec, vref_mute_led_set);
		codec->power_filter = led_power_filter;
		codec_dbg(codec,
			  "Detected mute LED for %x:%d\n", spec->mute_led_nid,
			   spec->mute_led_polarity);
		break;
	}
}

/* common helper: mute LED on the VREF of mic pin @pin, active-high */
static void alc269_fixup_hp_mute_led_micx(struct hda_codec *codec,
					  const struct hda_fixup *fix,
					  int action, hda_nid_t pin)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->mute_led_polarity = 0;
		spec->mute_led_nid = pin;
		snd_hda_gen_add_mute_led_cdev(codec, vref_mute_led_set);
		codec->power_filter = led_power_filter;
	}
}

static void alc269_fixup_hp_mute_led_mic1(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc269_fixup_hp_mute_led_micx(codec, fix, action, 0x18);
}

static void alc269_fixup_hp_mute_led_mic2(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc269_fixup_hp_mute_led_micx(codec, fix, action, 0x19);
}

static void alc269_fixup_hp_mute_led_mic3(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc269_fixup_hp_mute_led_micx(codec, fix, action, 0x1b);
}

/* update LED status via GPIO */
static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask,
				int polarity, bool enabled)
{
	if (polarity)
		enabled = !enabled;
	alc_update_gpio_data(codec, mask, !enabled); /* muted -> LED on */
}

/* turn on/off mute LED via GPIO per vmaster hook */
static int gpio_mute_led_set(struct led_classdev *led_cdev,
			     enum led_brightness brightness)
{
	struct hda_codec *codec = dev_to_hda_codec(led_cdev->dev->parent);
	struct alc_spec *spec = codec->spec;

	alc_update_gpio_led(codec, spec->gpio_mute_led_mask,
			    spec->mute_led_polarity, !brightness);
	return 0;
}

/* turn on/off mic-mute LED via GPIO per capture hook */
static int micmute_led_set(struct led_classdev *led_cdev,
			   enum led_brightness brightness)
{
	struct hda_codec *codec = dev_to_hda_codec(led_cdev->dev->parent);
	struct alc_spec *spec = codec->spec;

	alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
			    spec->micmute_led_polarity, !brightness);
	return 0;
}

/* setup mute and mic-mute GPIO bits, add hooks appropriately */
static void alc_fixup_hp_gpio_led(struct hda_codec *codec,
				  int action,
				  unsigned int mute_mask,
				  unsigned int micmute_mask)
{
	struct alc_spec *spec = codec->spec;

	alc_fixup_gpio(codec, action, mute_mask | micmute_mask);

	if (action != HDA_FIXUP_ACT_PRE_PROBE)
		return;
	if (mute_mask) {
		spec->gpio_mute_led_mask = mute_mask;
		snd_hda_gen_add_mute_led_cdev(codec, gpio_mute_led_set);
	}
	if (micmute_mask) {
		spec->gpio_mic_led_mask = micmute_mask;
		snd_hda_gen_add_micmute_led_cdev(codec, micmute_led_set);
	}
}

/* per-model GPIO bit assignments for mute / mic-mute LEDs */
static void alc236_fixup_hp_gpio_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc_fixup_hp_gpio_led(codec, action, 0x02, 0x01);
}

static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10);
}

static void alc285_fixup_hp_gpio_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc_fixup_hp_gpio_led(codec, action, 0x04, 0x01);
}

static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc_fixup_hp_gpio_led(codec, action, 0x02, 0x20);
}

static void alc287_fixup_hp_gpio_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc_fixup_hp_gpio_led(codec, action, 0x10, 0);
}

static void alc245_fixup_hp_gpio_led(struct hda_codec *codec, const
		struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		spec->micmute_led_polarity = 1;	/* mic-mute LED is active-low here */
	alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
}

/* turn on/off mic-mute LED per capture hook via VREF change */
static int vref_micmute_led_set(struct led_classdev *led_cdev,
				enum led_brightness brightness)
{
	struct hda_codec *codec = dev_to_hda_codec(led_cdev->dev->parent);
	struct alc_spec *spec = codec->spec;

	alc_update_vref_led(codec, spec->cap_mute_led_nid,
			    spec->micmute_led_polarity, brightness);
	return 0;
}

static void alc269_fixup_hp_gpio_mic1_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	alc_fixup_hp_gpio_led(codec, action, 0x08, 0);
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		/* Like hp_gpio_mic1_led, but also needs GPIO4 low to
		 * enable headphone amp
		 */
		spec->gpio_mask |= 0x10;
		spec->gpio_dir |= 0x10;
		spec->cap_mute_led_nid = 0x18;
		snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
		codec->power_filter = led_power_filter;
	}
}

/* GPIO3 mute LED plus mic-mute LED on the VREF of pin 0x18 */
static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
				  const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	alc_fixup_hp_gpio_led(codec, action, 0x08, 0);
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->cap_mute_led_nid = 0x18;
		snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
		codec->power_filter = led_power_filter;
	}
}

/* HP Spectre x360 14 model needs a unique workaround for enabling the amp;
 * it needs to toggle the GPIO0 once on and off at each time (bko#210633)
 */
static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
				     const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		spec->gpio_mask |= 0x01;
		spec->gpio_dir |= 0x01;
		break;
	case HDA_FIXUP_ACT_INIT:
		/* need to toggle GPIO to enable the amp */
		alc_update_gpio_data(codec, 0x01, true);
		msleep(100);
		alc_update_gpio_data(codec, 0x01, false);
		break;
	}
}

/* toggle GPIO2 at each time stream is started; we use PREPARE state instead */
static void alc274_hp_envy_pcm_hook(struct hda_pcm_stream *hinfo,
				    struct hda_codec *codec,
				    struct snd_pcm_substream *substream,
				    int action)
{
	switch (action) {
	case HDA_GEN_PCM_ACT_PREPARE:
		alc_update_gpio_data(codec, 0x04, true);
		break;
	case HDA_GEN_PCM_ACT_CLEANUP:
		alc_update_gpio_data(codec, 0x04, false);
		break;
	}
}

static void alc274_fixup_hp_envy_gpio(struct hda_codec *codec,
				      const struct hda_fixup *fix,
				      int action)
{
	struct alc_spec *spec = codec->spec;

	/* note: PROBE stage here, not PRE_PROBE like most fixups */
	if (action == HDA_FIXUP_ACT_PROBE) {
		spec->gpio_mask |= 0x04;
		spec->gpio_dir |= 0x04;
		spec->gen.pcm_playback_hook = alc274_hp_envy_pcm_hook;
	}
}

/* Drive an LED wired to a codec COEF bit described by @led */
static void alc_update_coef_led(struct hda_codec *codec,
				struct alc_coef_led *led,
				bool polarity, bool on)
{
	if (polarity)
		on = !on;
	/* temporarily power up/down for setting COEF bit */
	alc_update_coef_idx(codec, led->idx, led->mask,
			    on ? led->on : led->off);
}

/* update mute-LED according to the speaker mute state via COEF bit */
static int coef_mute_led_set(struct led_classdev *led_cdev,
			     enum led_brightness brightness)
{
	struct hda_codec *codec = dev_to_hda_codec(led_cdev->dev->parent);
	struct alc_spec *spec = codec->spec;

	alc_update_coef_led(codec, &spec->mute_led_coef,
			    spec->mute_led_polarity, brightness);
	return 0;
}

static void alc285_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
					  const struct hda_fixup *fix,
					  int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->mute_led_polarity = 0;
		spec->mute_led_coef.idx = 0x0b;
		spec->mute_led_coef.mask = 1 << 3;
		spec->mute_led_coef.on = 1 << 3;
		spec->mute_led_coef.off = 0;
		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
	}
}

static void alc236_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
					  const struct hda_fixup *fix,
					  int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->mute_led_polarity = 0;
		spec->mute_led_coef.idx = 0x34;
		spec->mute_led_coef.mask = 1 << 5;
		/* note: inverted encoding vs. alc285 -- on clears the bit */
		spec->mute_led_coef.on = 0;
		spec->mute_led_coef.off = 1 << 5;
		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
	}
}

static void alc236_fixup_hp_mute_led_coefbit2(struct hda_codec *codec,
					  const struct hda_fixup *fix,
					  int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->mute_led_polarity = 0;
		spec->mute_led_coef.idx = 0x07;
		spec->mute_led_coef.mask = 1;
		spec->mute_led_coef.on = 1;
		spec->mute_led_coef.off = 0;
		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
	}
}

static void alc245_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
					  const struct hda_fixup *fix,
					  int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->mute_led_polarity = 0;
		spec->mute_led_coef.idx = 0x0b;
		/* two-bit field: 2 = LED on, 1 = LED off */
		spec->mute_led_coef.mask = 3 << 2;
		spec->mute_led_coef.on = 2 << 2;
		spec->mute_led_coef.off = 1 << 2;
		snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
	}
}

/* turn on/off mic-mute LED per capture hook by coef bit */
static int coef_micmute_led_set(struct led_classdev *led_cdev,
				enum led_brightness brightness)
{
	struct hda_codec *codec = dev_to_hda_codec(led_cdev->dev->parent);
	struct alc_spec *spec = codec->spec;

	alc_update_coef_led(codec, &spec->mic_led_coef,
			    spec->micmute_led_polarity, brightness);
	return 0;
}

static void alc285_fixup_hp_coef_micmute_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->mic_led_coef.idx = 0x19;
		spec->mic_led_coef.mask = 1 << 13;
		spec->mic_led_coef.on = 1 << 13;
		spec->mic_led_coef.off = 0;
		snd_hda_gen_add_micmute_led_cdev(codec, coef_micmute_led_set);
	}
}

static void alc285_fixup_hp_gpio_micmute_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		spec->micmute_led_polarity = 1;
	alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
}

static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->mic_led_coef.idx = 0x35;
		spec->mic_led_coef.mask = 3 << 2;
		spec->mic_led_coef.on = 2 << 2;
		spec->mic_led_coef.off = 1 << 2;
		snd_hda_gen_add_micmute_led_cdev(codec, coef_micmute_led_set);
	}
}

/* combined mute + mic-mute LED fixups, composed from the helpers above */
static void alc285_fixup_hp_mute_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc285_fixup_hp_mute_led_coefbit(codec, fix, action);
	alc285_fixup_hp_coef_micmute_led(codec, fix, action);
}

static void alc285_fixup_hp_spectre_x360_mute_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc285_fixup_hp_mute_led_coefbit(codec, fix, action);
	alc285_fixup_hp_gpio_micmute_led(codec, fix, action);
}

static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
	alc236_fixup_hp_coef_micmute_led(codec, fix, action);
}

static void alc236_fixup_hp_micmute_led_vref(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->cap_mute_led_nid = 0x1a;
		snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
		codec->power_filter = led_power_filter;
	}
}

static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
	alc236_fixup_hp_micmute_led_vref(codec, fix, action);
}

/* Write one {index, value} pair to the Samsung external amp via coefs
 * 0x23/0x25, committed by writing 0xb011 to coef 0x26
 */
static inline void alc298_samsung_write_coef_pack(struct hda_codec *codec,
						  const unsigned short coefs[2])
{
	alc_write_coef_idx(codec, 0x23, coefs[0]);
	alc_write_coef_idx(codec, 0x25, coefs[1]);
	alc_write_coef_idx(codec, 0x26, 0xb011);
}

struct alc298_samsung_amp_desc {
	unsigned char nid;		/* amp NID selected via coef 0x22 */
	unsigned short init_seq[2][2];	/* per-amp {index, value} pre-init pairs */
};

static void alc298_fixup_samsung_amp(struct hda_codec *codec, const struct
				     hda_fixup *fix, int action)
{
	int i, j;
	/* shared init sequence applied to every amp after its per-amp pairs */
	static const unsigned short init_seq[][2] = {
		{ 0x19, 0x00 }, { 0x20, 0xc0 }, { 0x22, 0x44 }, { 0x23, 0x08 },
		{ 0x24, 0x85 }, { 0x25, 0x41 }, { 0x35, 0x40 }, { 0x36, 0x01 },
		{ 0x38, 0x81 }, { 0x3a, 0x03 }, { 0x3b, 0x81 }, { 0x40, 0x3e },
		{ 0x41, 0x07 }, { 0x400, 0x1 }
	};
	static const struct alc298_samsung_amp_desc amps[] = {
		{ 0x3a, { { 0x18, 0x1 }, { 0x26, 0x0 } } },
		{ 0x39, { { 0x18, 0x2 }, { 0x26, 0x1 } } }
	};

	if (action != HDA_FIXUP_ACT_INIT)
		return;

	for (i = 0; i < ARRAY_SIZE(amps); i++) {
		/* select the amp by NID, then stream both sequences to it */
		alc_write_coef_idx(codec, 0x22, amps[i].nid);

		for (j = 0; j < ARRAY_SIZE(amps[i].init_seq); j++)
			alc298_samsung_write_coef_pack(codec, amps[i].init_seq[j]);

		for (j = 0; j < ARRAY_SIZE(init_seq); j++)
			alc298_samsung_write_coef_pack(codec, init_seq[j]);
	}
}

struct alc298_samsung_v2_amp_desc {
	unsigned short nid;		/* amp NID selected via coef 0x22 */
	int init_seq_size;		/* number of valid entries in init_seq */
	unsigned short init_seq[18][2];	/* {index, value} init pairs */
};

static const struct alc298_samsung_v2_amp_desc
alc298_samsung_v2_amp_desc_tbl[] = {
	{ 0x38, 18, {
		{ 0x23e1, 0x0000 }, { 0x2012, 0x006f }, { 0x2014, 0x0000 },
		{ 0x201b, 0x0001 }, { 0x201d, 0x0001 }, { 0x201f, 0x00fe },
		{ 0x2021, 0x0000 }, { 0x2022, 0x0010 }, { 0x203d, 0x0005 },
		{ 0x203f, 0x0003 }, { 0x2050, 0x002c }, { 0x2076, 0x000e },
		{ 0x207c, 0x004a }, { 0x2081, 0x0003 }, { 0x2399, 0x0003 },
		{ 0x23a4, 0x00b5 }, { 0x23a5, 0x0001 }, { 0x23ba, 0x0094 }
	}},
	{ 0x39, 18, {
		{ 0x23e1, 0x0000 }, { 0x2012, 0x006f }, { 0x2014, 0x0000 },
		{ 0x201b, 0x0002 }, { 0x201d, 0x0002 }, { 0x201f, 0x00fd },
		{ 0x2021, 0x0001 }, { 0x2022, 0x0010 }, { 0x203d, 0x0005 },
		{ 0x203f, 0x0003 }, { 0x2050, 0x002c }, { 0x2076, 0x000e },
		{ 0x207c, 0x004a }, { 0x2081, 0x0003 }, { 0x2399, 0x0003 },
		{ 0x23a4, 0x00b5 }, { 0x23a5, 0x0001 }, { 0x23ba, 0x0094 }
	}},
	{ 0x3c, 15, {
		{ 0x23e1, 0x0000 }, { 0x2012, 0x006f }, { 0x2014, 0x0000 },
		{ 0x201b, 0x0001 }, { 0x201d, 0x0001 }, { 0x201f, 0x00fe },
		{ 0x2021, 0x0000 }, { 0x2022, 0x0010 }, { 0x203d, 0x0005 },
		{ 0x203f, 0x0003 }, { 0x2050, 0x002c }, { 0x2076, 0x000e },
		{ 0x207c, 0x004a }, { 0x2081, 0x0003 }, { 0x23ba, 0x008d }
	}},
	{ 0x3d, 15, {
		{ 0x23e1, 0x0000 }, { 0x2012, 0x006f }, { 0x2014, 0x0000 },
		{ 0x201b, 0x0002 }, { 0x201d, 0x0002 }, { 0x201f, 0x00fd },
		{ 0x2021, 0x0001 }, { 0x2022, 0x0010 }, { 0x203d, 0x0005 },
		{ 0x203f, 0x0003 }, { 0x2050, 0x002c }, { 0x2076, 0x000e },
		{ 0x207c, 0x004a }, { 0x2081, 0x0003 }, { 0x23ba, 0x008d }
	}}
};

/* Unmute/enable each registered speaker amp */
static void alc298_samsung_v2_enable_amps(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;
	static const unsigned short enable_seq[][2] = {
		{ 0x203a, 0x0081 }, { 0x23ff, 0x0001 },
	};
	int i, j;

	for (i = 0; i < spec->num_speaker_amps; i++) {
		alc_write_coef_idx(codec, 0x22, alc298_samsung_v2_amp_desc_tbl[i].nid);

		for (j = 0; j < ARRAY_SIZE(enable_seq); j++)
			alc298_samsung_write_coef_pack(codec, enable_seq[j]);

		codec_dbg(codec, "alc298_samsung_v2: Enabled speaker amp 0x%02x\n",
			  alc298_samsung_v2_amp_desc_tbl[i].nid);
	}
}

/* Mute/disable each registered speaker amp (reverse order of enable_seq) */
static void alc298_samsung_v2_disable_amps(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;
	static const unsigned short disable_seq[][2] = {
		{ 0x23ff, 0x0000 }, { 0x203a, 0x0080 },
	};
	int i, j;

	for (i = 0; i < spec->num_speaker_amps; i++) {
		alc_write_coef_idx(codec, 0x22, alc298_samsung_v2_amp_desc_tbl[i].nid);

		for (j = 0; j < ARRAY_SIZE(disable_seq); j++)
			alc298_samsung_write_coef_pack(codec, disable_seq[j]);

		codec_dbg(codec, "alc298_samsung_v2: Disabled speaker amp 0x%02x\n",
			  alc298_samsung_v2_amp_desc_tbl[i].nid);
	}
}

static void alc298_samsung_v2_playback_hook(struct hda_pcm_stream *hinfo,
				struct hda_codec *codec,
				struct snd_pcm_substream *substream,
				int action)
{
	/* Dynamically enable/disable speaker amps before and after playback */
	if (action == HDA_GEN_PCM_ACT_OPEN)
		alc298_samsung_v2_enable_amps(codec);
	if (action == HDA_GEN_PCM_ACT_CLOSE)
		alc298_samsung_v2_disable_amps(codec);
}

static void alc298_samsung_v2_init_amps(struct hda_codec *codec,
				int num_speaker_amps)
{
	struct alc_spec *spec = codec->spec;
	int i, j;

	/* Set spec's num_speaker_amps
	   before doing anything else */
	spec->num_speaker_amps = num_speaker_amps;

	/* Disable speaker amps before init to prevent any physical damage */
	alc298_samsung_v2_disable_amps(codec);

	/* Initialize the speaker amps */
	for (i = 0; i < spec->num_speaker_amps; i++) {
		alc_write_coef_idx(codec, 0x22, alc298_samsung_v2_amp_desc_tbl[i].nid);

		for (j = 0; j < alc298_samsung_v2_amp_desc_tbl[i].init_seq_size; j++) {
			alc298_samsung_write_coef_pack(codec,
					alc298_samsung_v2_amp_desc_tbl[i].init_seq[j]);
		}

		alc_write_coef_idx(codec, 0x89, 0x0);
		codec_dbg(codec, "alc298_samsung_v2: Initialized speaker amp 0x%02x\n",
			  alc298_samsung_v2_amp_desc_tbl[i].nid);
	}

	/* register hook to enable speaker amps only when they are needed */
	spec->gen.pcm_playback_hook = alc298_samsung_v2_playback_hook;
}

static void alc298_fixup_samsung_amp_v2_2_amps(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PROBE)
		alc298_samsung_v2_init_amps(codec, 2);
}

static void alc298_fixup_samsung_amp_v2_4_amps(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PROBE)
		alc298_samsung_v2_init_amps(codec, 4);
}

#if IS_REACHABLE(CONFIG_INPUT)
static void gpio2_mic_hotkey_event(struct hda_codec *codec,
				   struct hda_jack_callback *event)
{
	struct alc_spec *spec = codec->spec;

	/* GPIO2 just toggles on a keypress/keyrelease cycle. Therefore
	   send both key on and key off event for every interrupt.
   mute hotkey
 * GPIO2 = mic mute LED
 */
static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
					     const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->init_amp = ALC_INIT_DEFAULT;
		if (alc_register_micmute_input_device(codec) != 0)
			return;

		/* the hotkey arrives as a jack event on the Line2 pin 0x1b */
		snd_hda_jack_detect_enable_callback(codec, 0x1b,
						    gpio2_mic_hotkey_event);
		return;
	}

	if (!spec->kb_dev)
		return;

	switch (action) {
	case HDA_FIXUP_ACT_FREE:
		input_unregister_device(spec->kb_dev);
		spec->kb_dev = NULL;
	}
}
#else /* INPUT */
#define alc280_fixup_hp_gpio2_mic_hotkey	NULL
#define alc233_fixup_lenovo_line2_mic_hotkey	NULL
#endif /* INPUT */

/* mute LED on Line1 pin 0x1a VREF, mic-mute LED on mic1 pin 0x18 VREF */
static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	alc269_fixup_hp_mute_led_micx(codec, fix, action, 0x1a);
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->cap_mute_led_nid = 0x18;
		snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
	}
}

/* Mute and power down the headphone pin 0x21 with @delay ms between steps.
 * NOTE(review): @delay is unsigned, so "<= 0" only catches 0 -- confirm
 * intent; callers currently pass 75.
 */
static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay)
{
	if (delay <= 0)
		delay = 75;
	snd_hda_codec_write(codec, 0x21, 0,
		    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
	msleep(delay);
	snd_hda_codec_write(codec, 0x21, 0,
		    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
	msleep(delay);
}

/* Re-enable and unmute the headphone pin 0x21 (reverse of the above).
 * NOTE(review): same unsigned "<= 0" comparison as alc_hp_mute_disable().
 */
static void alc_hp_enable_unmute(struct hda_codec *codec, unsigned int delay)
{
	if (delay <= 0)
		delay = 75;
	snd_hda_codec_write(codec, 0x21, 0,
		    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
	msleep(delay);
	snd_hda_codec_write(codec, 0x21, 0,
		    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
	msleep(delay);
}

/* common pre-headset-mode COEF sequence for the ALC225 family */
static const struct coef_fw alc225_pre_hsmode[] = {
	UPDATE_COEF(0x4a, 1<<8, 0),
	UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
	UPDATE_COEF(0x63, 3<<14, 3<<14),
	UPDATE_COEF(0x4a, 3<<4, 2<<4),
	UPDATE_COEF(0x4a, 3<<10, 3<<10),
	UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
	UPDATE_COEF(0x4a, 3<<10, 0),
	{}
};

/* Put the combo jack circuitry into "unplugged" state; the COEF sequence is
 * selected per codec variant
 */
static void alc_headset_mode_unplugged(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;
	static const struct coef_fw coef0255[] = {
		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
		WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
		WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
		{}
	};
	static const struct coef_fw coef0256[] = {
		WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
		WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
		WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */
		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
		{}
	};
	static const struct coef_fw coef0233[] = {
		WRITE_COEF(0x1b, 0x0c0b),
		WRITE_COEF(0x45, 0xc429),
		UPDATE_COEF(0x35, 0x4000, 0),
		WRITE_COEF(0x06, 0x2104),
		WRITE_COEF(0x1a, 0x0001),
		WRITE_COEF(0x26, 0x0004),
		WRITE_COEF(0x32, 0x42a3),
		{}
	};
	static const struct coef_fw coef0288[] = {
		UPDATE_COEF(0x4f, 0xfcc0, 0xc400),
		UPDATE_COEF(0x50, 0x2000, 0x2000),
		UPDATE_COEF(0x56, 0x0006, 0x0006),
		UPDATE_COEF(0x66, 0x0008, 0),
		UPDATE_COEF(0x67, 0x2000, 0),
		{}
	};
	static const struct coef_fw coef0298[] = {
		UPDATE_COEF(0x19, 0x1300, 0x0300),
		{}
	};
	static const struct coef_fw coef0292[] = {
		WRITE_COEF(0x76, 0x000e),
		WRITE_COEF(0x6c, 0x2400),
		WRITE_COEF(0x18, 0x7308),
		WRITE_COEF(0x6b, 0xc429),
		{}
	};
	static const struct coef_fw coef0293[] = {
		UPDATE_COEF(0x10, 7<<8, 6<<8), /* SET Line1 JD to 0 */
		UPDATE_COEFEX(0x57, 0x05, 1<<15|1<<13, 0x0), /* SET charge pump by verb */
		UPDATE_COEFEX(0x57, 0x03, 1<<10, 1<<10), /* SET EN_OSW to 1 */
		UPDATE_COEF(0x1a, 1<<3, 1<<3), /* Combo JD gating with LINE1-VREFO */
		WRITE_COEF(0x45, 0xc429), /* Set to TRS type */
		UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */
		{}
	};
	static const struct coef_fw coef0668[] = {
		WRITE_COEF(0x15, 0x0d40),
		WRITE_COEF(0xb7, 0x802b),
		{}
	};
	static const struct coef_fw coef0225[] = {
		UPDATE_COEF(0x63, 3<<14, 0),
		{}
	};
	static const struct coef_fw coef0274[] = {
		UPDATE_COEF(0x4a, 0x0100, 0),
		UPDATE_COEFEX(0x57, 0x05, 0x4000, 0),
		UPDATE_COEF(0x6b, 0xf000, 0x5000),
		UPDATE_COEF(0x4a, 0x0010, 0),
		UPDATE_COEF(0x4a, 0x0c00, 0x0c00),
		WRITE_COEF(0x45, 0x5289),
		UPDATE_COEF(0x4a, 0x0c00, 0),
		{}
	};

	if (spec->no_internal_mic_pin) {
		alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
		return;
	}

	switch (codec->core.vendor_id) {
	case 0x10ec0255:
		alc_process_coef_fw(codec, coef0255);
		break;
	case 0x10ec0230:
	case 0x10ec0236:
	case 0x10ec0256:
	case 0x19e58326:
		alc_hp_mute_disable(codec, 75);
		alc_process_coef_fw(codec, coef0256);
		break;
	case 0x10ec0234:
	case 0x10ec0274:
	case 0x10ec0294:
		alc_process_coef_fw(codec, coef0274);
		break;
	case 0x10ec0233:
	case 0x10ec0283:
		alc_process_coef_fw(codec, coef0233);
		break;
	case 0x10ec0286:
	case 0x10ec0288:
		alc_process_coef_fw(codec, coef0288);
		break;
	case 0x10ec0298:
		alc_process_coef_fw(codec, coef0298);
		alc_process_coef_fw(codec, coef0288);
		break;
	case 0x10ec0292:
		alc_process_coef_fw(codec, coef0292);
		break;
	case 0x10ec0293:
		alc_process_coef_fw(codec, coef0293);
		break;
	case 0x10ec0668:
		alc_process_coef_fw(codec, coef0668);
		break;
	case 0x10ec0215:
	case 0x10ec0225:
	case 0x10ec0285:
	case 0x10ec0295:
	case 0x10ec0289:
	case 0x10ec0299:
		alc_hp_mute_disable(codec, 75);
		alc_process_coef_fw(codec, alc225_pre_hsmode);
		alc_process_coef_fw(codec, coef0225);
		break;
	case 0x10ec0867:
		alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
		break;
	}
	codec_dbg(codec, "Headset jack set to unplugged mode.\n");
}

static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
				    hda_nid_t mic_pin)
{
	static const struct coef_fw coef0255[] = {
		WRITE_COEFEX(0x57, 0x03, 0x8aa6),
		WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
		{}
	};
	static const struct coef_fw coef0256[] = {
		UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set
to verb control)*/ WRITE_COEFEX(0x57, 0x03, 0x09a3), WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */ {} }; static const struct coef_fw coef0233[] = { UPDATE_COEF(0x35, 0, 1<<14), WRITE_COEF(0x06, 0x2100), WRITE_COEF(0x1a, 0x0021), WRITE_COEF(0x26, 0x008c), {} }; static const struct coef_fw coef0288[] = { UPDATE_COEF(0x4f, 0x00c0, 0), UPDATE_COEF(0x50, 0x2000, 0), UPDATE_COEF(0x56, 0x0006, 0), UPDATE_COEF(0x4f, 0xfcc0, 0xc400), UPDATE_COEF(0x66, 0x0008, 0x0008), UPDATE_COEF(0x67, 0x2000, 0x2000), {} }; static const struct coef_fw coef0292[] = { WRITE_COEF(0x19, 0xa208), WRITE_COEF(0x2e, 0xacf0), {} }; static const struct coef_fw coef0293[] = { UPDATE_COEFEX(0x57, 0x05, 0, 1<<15|1<<13), /* SET charge pump by verb */ UPDATE_COEFEX(0x57, 0x03, 1<<10, 0), /* SET EN_OSW to 0 */ UPDATE_COEF(0x1a, 1<<3, 0), /* Combo JD gating without LINE1-VREFO */ {} }; static const struct coef_fw coef0688[] = { WRITE_COEF(0xb7, 0x802b), WRITE_COEF(0xb5, 0x1040), UPDATE_COEF(0xc3, 0, 1<<12), {} }; static const struct coef_fw coef0225[] = { UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), UPDATE_COEF(0x4a, 3<<4, 2<<4), UPDATE_COEF(0x63, 3<<14, 0), {} }; static const struct coef_fw coef0274[] = { UPDATE_COEFEX(0x57, 0x05, 0x4000, 0x4000), UPDATE_COEF(0x4a, 0x0010, 0), UPDATE_COEF(0x6b, 0xf000, 0), {} }; switch (codec->core.vendor_id) { case 0x10ec0255: alc_write_coef_idx(codec, 0x45, 0xc489); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0255); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x19e58326: alc_write_coef_idx(codec, 0x45, 0xc489); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0256); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0234: case 0x10ec0274: case 0x10ec0294: alc_write_coef_idx(codec, 0x45, 0x4689); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0274); 
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0233: case 0x10ec0283: alc_write_coef_idx(codec, 0x45, 0xc429); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0233); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0286: case 0x10ec0288: case 0x10ec0298: snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0288); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0292: snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0292); break; case 0x10ec0293: /* Set to TRS mode */ alc_write_coef_idx(codec, 0x45, 0xc429); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0293); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0867: alc_update_coefex_idx(codec, 0x57, 0x5, 0, 1<<14); fallthrough; case 0x10ec0221: case 0x10ec0662: snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0668: alc_write_coef_idx(codec, 0x11, 0x0001); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0688); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; case 0x10ec0215: case 0x10ec0225: case 0x10ec0285: case 0x10ec0295: case 0x10ec0289: case 0x10ec0299: alc_process_coef_fw(codec, alc225_pre_hsmode); alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0225); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; } codec_dbg(codec, "Headset jack set to mic-in mode.\n"); } static void alc_headset_mode_default(struct hda_codec *codec) { static const struct coef_fw coef0225[] = { UPDATE_COEF(0x45, 0x3f<<10, 0x30<<10), UPDATE_COEF(0x45, 0x3f<<10, 0x31<<10), UPDATE_COEF(0x49, 3<<8, 0<<8), UPDATE_COEF(0x4a, 3<<4, 3<<4), UPDATE_COEF(0x63, 3<<14, 0), UPDATE_COEF(0x67, 0xf000, 0x3000), {} }; static const struct coef_fw coef0255[] 
= { WRITE_COEF(0x45, 0xc089), WRITE_COEF(0x45, 0xc489), WRITE_COEFEX(0x57, 0x03, 0x8ea6), WRITE_COEF(0x49, 0x0049), {} }; static const struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xc489), WRITE_COEFEX(0x57, 0x03, 0x0da3), WRITE_COEF(0x49, 0x0049), UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ WRITE_COEF(0x06, 0x6100), {} }; static const struct coef_fw coef0233[] = { WRITE_COEF(0x06, 0x2100), WRITE_COEF(0x32, 0x4ea3), {} }; static const struct coef_fw coef0288[] = { UPDATE_COEF(0x4f, 0xfcc0, 0xc400), /* Set to TRS type */ UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 0x0006), UPDATE_COEF(0x66, 0x0008, 0), UPDATE_COEF(0x67, 0x2000, 0), {} }; static const struct coef_fw coef0292[] = { WRITE_COEF(0x76, 0x000e), WRITE_COEF(0x6c, 0x2400), WRITE_COEF(0x6b, 0xc429), WRITE_COEF(0x18, 0x7308), {} }; static const struct coef_fw coef0293[] = { UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */ WRITE_COEF(0x45, 0xC429), /* Set to TRS type */ UPDATE_COEF(0x1a, 1<<3, 0), /* Combo JD gating without LINE1-VREFO */ {} }; static const struct coef_fw coef0688[] = { WRITE_COEF(0x11, 0x0041), WRITE_COEF(0x15, 0x0d40), WRITE_COEF(0xb7, 0x802b), {} }; static const struct coef_fw coef0274[] = { WRITE_COEF(0x45, 0x4289), UPDATE_COEF(0x4a, 0x0010, 0x0010), UPDATE_COEF(0x6b, 0x0f00, 0), UPDATE_COEF(0x49, 0x0300, 0x0300), {} }; switch (codec->core.vendor_id) { case 0x10ec0215: case 0x10ec0225: case 0x10ec0285: case 0x10ec0295: case 0x10ec0289: case 0x10ec0299: alc_process_coef_fw(codec, alc225_pre_hsmode); alc_process_coef_fw(codec, coef0225); alc_hp_enable_unmute(codec, 75); break; case 0x10ec0255: alc_process_coef_fw(codec, coef0255); break; case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x19e58326: alc_write_coef_idx(codec, 0x1b, 0x0e4b); alc_write_coef_idx(codec, 0x45, 0xc089); msleep(50); alc_process_coef_fw(codec, coef0256); alc_hp_enable_unmute(codec, 75); break; case 0x10ec0234: case 0x10ec0274: case 
0x10ec0294: alc_process_coef_fw(codec, coef0274); break; case 0x10ec0233: case 0x10ec0283: alc_process_coef_fw(codec, coef0233); break; case 0x10ec0286: case 0x10ec0288: case 0x10ec0298: alc_process_coef_fw(codec, coef0288); break; case 0x10ec0292: alc_process_coef_fw(codec, coef0292); break; case 0x10ec0293: alc_process_coef_fw(codec, coef0293); break; case 0x10ec0668: alc_process_coef_fw(codec, coef0688); break; case 0x10ec0867: alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); break; } codec_dbg(codec, "Headset jack set to headphone (default) mode.\n"); } /* Iphone type */ static void alc_headset_mode_ctia(struct hda_codec *codec) { int val; static const struct coef_fw coef0255[] = { WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ WRITE_COEF(0x1b, 0x0c2b), WRITE_COEFEX(0x57, 0x03, 0x8ea6), {} }; static const struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ WRITE_COEF(0x1b, 0x0e6b), {} }; static const struct coef_fw coef0233[] = { WRITE_COEF(0x45, 0xd429), WRITE_COEF(0x1b, 0x0c2b), WRITE_COEF(0x32, 0x4ea3), {} }; static const struct coef_fw coef0288[] = { UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 0x0006), UPDATE_COEF(0x66, 0x0008, 0), UPDATE_COEF(0x67, 0x2000, 0), {} }; static const struct coef_fw coef0292[] = { WRITE_COEF(0x6b, 0xd429), WRITE_COEF(0x76, 0x0008), WRITE_COEF(0x18, 0x7388), {} }; static const struct coef_fw coef0293[] = { WRITE_COEF(0x45, 0xd429), /* Set to ctia type */ UPDATE_COEF(0x10, 7<<8, 7<<8), /* SET Line1 JD to 1 */ {} }; static const struct coef_fw coef0688[] = { WRITE_COEF(0x11, 0x0001), WRITE_COEF(0x15, 0x0d60), WRITE_COEF(0xc3, 0x0000), {} }; static const struct coef_fw coef0225_1[] = { UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10), UPDATE_COEF(0x63, 3<<14, 2<<14), {} }; static const struct coef_fw coef0225_2[] = { UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10), UPDATE_COEF(0x63, 3<<14, 1<<14), {} }; switch (codec->core.vendor_id) { case 0x10ec0255: alc_process_coef_fw(codec, coef0255); break; case 
0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x19e58326: alc_process_coef_fw(codec, coef0256); alc_hp_enable_unmute(codec, 75); break; case 0x10ec0234: case 0x10ec0274: case 0x10ec0294: alc_write_coef_idx(codec, 0x45, 0xd689); break; case 0x10ec0233: case 0x10ec0283: alc_process_coef_fw(codec, coef0233); break; case 0x10ec0298: val = alc_read_coef_idx(codec, 0x50); if (val & (1 << 12)) { alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0020); alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xd400); msleep(300); } else { alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0010); alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xd400); msleep(300); } break; case 0x10ec0286: case 0x10ec0288: alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xd400); msleep(300); alc_process_coef_fw(codec, coef0288); break; case 0x10ec0292: alc_process_coef_fw(codec, coef0292); break; case 0x10ec0293: alc_process_coef_fw(codec, coef0293); break; case 0x10ec0668: alc_process_coef_fw(codec, coef0688); break; case 0x10ec0215: case 0x10ec0225: case 0x10ec0285: case 0x10ec0295: case 0x10ec0289: case 0x10ec0299: val = alc_read_coef_idx(codec, 0x45); if (val & (1 << 9)) alc_process_coef_fw(codec, coef0225_2); else alc_process_coef_fw(codec, coef0225_1); alc_hp_enable_unmute(codec, 75); break; case 0x10ec0867: alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); break; } codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n"); } /* Nokia type */ static void alc_headset_mode_omtp(struct hda_codec *codec) { static const struct coef_fw coef0255[] = { WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ WRITE_COEF(0x1b, 0x0c2b), WRITE_COEFEX(0x57, 0x03, 0x8ea6), {} }; static const struct coef_fw coef0256[] = { WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ WRITE_COEF(0x1b, 0x0e6b), {} }; static const struct coef_fw coef0233[] = { WRITE_COEF(0x45, 0xe429), WRITE_COEF(0x1b, 0x0c2b), WRITE_COEF(0x32, 0x4ea3), {} }; static const struct coef_fw coef0288[] = { UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 
0x0006), UPDATE_COEF(0x66, 0x0008, 0), UPDATE_COEF(0x67, 0x2000, 0), {} }; static const struct coef_fw coef0292[] = { WRITE_COEF(0x6b, 0xe429), WRITE_COEF(0x76, 0x0008), WRITE_COEF(0x18, 0x7388), {} }; static const struct coef_fw coef0293[] = { WRITE_COEF(0x45, 0xe429), /* Set to omtp type */ UPDATE_COEF(0x10, 7<<8, 7<<8), /* SET Line1 JD to 1 */ {} }; static const struct coef_fw coef0688[] = { WRITE_COEF(0x11, 0x0001), WRITE_COEF(0x15, 0x0d50), WRITE_COEF(0xc3, 0x0000), {} }; static const struct coef_fw coef0225[] = { UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10), UPDATE_COEF(0x63, 3<<14, 2<<14), {} }; switch (codec->core.vendor_id) { case 0x10ec0255: alc_process_coef_fw(codec, coef0255); break; case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x19e58326: alc_process_coef_fw(codec, coef0256); alc_hp_enable_unmute(codec, 75); break; case 0x10ec0234: case 0x10ec0274: case 0x10ec0294: alc_write_coef_idx(codec, 0x45, 0xe689); break; case 0x10ec0233: case 0x10ec0283: alc_process_coef_fw(codec, coef0233); break; case 0x10ec0298: alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0010);/* Headset output enable */ alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xe400); msleep(300); break; case 0x10ec0286: case 0x10ec0288: alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xe400); msleep(300); alc_process_coef_fw(codec, coef0288); break; case 0x10ec0292: alc_process_coef_fw(codec, coef0292); break; case 0x10ec0293: alc_process_coef_fw(codec, coef0293); break; case 0x10ec0668: alc_process_coef_fw(codec, coef0688); break; case 0x10ec0215: case 0x10ec0225: case 0x10ec0285: case 0x10ec0295: case 0x10ec0289: case 0x10ec0299: alc_process_coef_fw(codec, coef0225); alc_hp_enable_unmute(codec, 75); break; } codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n"); } static void alc_determine_headset_type(struct hda_codec *codec) { int val; bool is_ctia = false; struct alc_spec *spec = codec->spec; static const struct coef_fw coef0255[] = { WRITE_COEF(0x45, 0xd089), /* combo jack auto switch 
control(Check type)*/ WRITE_COEF(0x49, 0x0149), /* combo jack auto switch control(Vref conteol) */ {} }; static const struct coef_fw coef0288[] = { UPDATE_COEF(0x4f, 0xfcc0, 0xd400), /* Check Type */ {} }; static const struct coef_fw coef0298[] = { UPDATE_COEF(0x50, 0x2000, 0x2000), UPDATE_COEF(0x56, 0x0006, 0x0006), UPDATE_COEF(0x66, 0x0008, 0), UPDATE_COEF(0x67, 0x2000, 0), UPDATE_COEF(0x19, 0x1300, 0x1300), {} }; static const struct coef_fw coef0293[] = { UPDATE_COEF(0x4a, 0x000f, 0x0008), /* Combo Jack auto detect */ WRITE_COEF(0x45, 0xD429), /* Set to ctia type */ {} }; static const struct coef_fw coef0688[] = { WRITE_COEF(0x11, 0x0001), WRITE_COEF(0xb7, 0x802b), WRITE_COEF(0x15, 0x0d60), WRITE_COEF(0xc3, 0x0c00), {} }; static const struct coef_fw coef0274[] = { UPDATE_COEF(0x4a, 0x0010, 0), UPDATE_COEF(0x4a, 0x8000, 0), WRITE_COEF(0x45, 0xd289), UPDATE_COEF(0x49, 0x0300, 0x0300), {} }; if (spec->no_internal_mic_pin) { alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12); return; } switch (codec->core.vendor_id) { case 0x10ec0255: alc_process_coef_fw(codec, coef0255); msleep(300); val = alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x0070) == 0x0070; break; case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x19e58326: alc_write_coef_idx(codec, 0x1b, 0x0e4b); alc_write_coef_idx(codec, 0x06, 0x6104); alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3); alc_process_coef_fw(codec, coef0255); msleep(300); val = alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x0070) == 0x0070; if (!is_ctia) { alc_write_coef_idx(codec, 0x45, 0xe089); msleep(100); val = alc_read_coef_idx(codec, 0x46); if ((val & 0x0070) == 0x0070) is_ctia = false; else is_ctia = true; } alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3); alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); break; case 0x10ec0234: case 0x10ec0274: case 0x10ec0294: alc_process_coef_fw(codec, coef0274); msleep(850); val = alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x00f0) == 0x00f0; break; case 0x10ec0233: case 
0x10ec0283: alc_write_coef_idx(codec, 0x45, 0xd029); msleep(300); val = alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x0070) == 0x0070; break; case 0x10ec0298: snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); msleep(100); snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); msleep(200); val = alc_read_coef_idx(codec, 0x50); if (val & (1 << 12)) { alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0020); alc_process_coef_fw(codec, coef0288); msleep(350); val = alc_read_coef_idx(codec, 0x50); is_ctia = (val & 0x0070) == 0x0070; } else { alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0010); alc_process_coef_fw(codec, coef0288); msleep(350); val = alc_read_coef_idx(codec, 0x50); is_ctia = (val & 0x0070) == 0x0070; } alc_process_coef_fw(codec, coef0298); snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP); msleep(75); snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); break; case 0x10ec0286: case 0x10ec0288: alc_process_coef_fw(codec, coef0288); msleep(350); val = alc_read_coef_idx(codec, 0x50); is_ctia = (val & 0x0070) == 0x0070; break; case 0x10ec0292: alc_write_coef_idx(codec, 0x6b, 0xd429); msleep(300); val = alc_read_coef_idx(codec, 0x6c); is_ctia = (val & 0x001c) == 0x001c; break; case 0x10ec0293: alc_process_coef_fw(codec, coef0293); msleep(300); val = alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x0070) == 0x0070; break; case 0x10ec0668: alc_process_coef_fw(codec, coef0688); msleep(300); val = alc_read_coef_idx(codec, 0xbe); is_ctia = (val & 0x1c02) == 0x1c02; break; case 0x10ec0215: case 0x10ec0225: case 0x10ec0285: case 0x10ec0295: case 0x10ec0289: case 0x10ec0299: alc_process_coef_fw(codec, alc225_pre_hsmode); alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000); val = alc_read_coef_idx(codec, 0x45); if (val & (1 << 9)) { alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x34<<10); alc_update_coef_idx(codec, 0x49, 3<<8, 2<<8); msleep(800); val = alc_read_coef_idx(codec, 
0x46); is_ctia = (val & 0x00f0) == 0x00f0; } else { alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x34<<10); alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8); msleep(800); val = alc_read_coef_idx(codec, 0x46); is_ctia = (val & 0x00f0) == 0x00f0; } if (!is_ctia) { alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x38<<10); alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8); msleep(100); val = alc_read_coef_idx(codec, 0x46); if ((val & 0x00f0) == 0x00f0) is_ctia = false; else is_ctia = true; } alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6); alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4); alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000); break; case 0x10ec0867: is_ctia = true; break; } codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n", is_ctia ? "yes" : "no"); spec->current_headset_type = is_ctia ? ALC_HEADSET_TYPE_CTIA : ALC_HEADSET_TYPE_OMTP; } static void alc_update_headset_mode(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; hda_nid_t hp_pin = alc_get_hp_pin(spec); int new_headset_mode; if (!snd_hda_jack_detect(codec, hp_pin)) new_headset_mode = ALC_HEADSET_MODE_UNPLUGGED; else if (mux_pin == spec->headset_mic_pin) new_headset_mode = ALC_HEADSET_MODE_HEADSET; else if (mux_pin == spec->headphone_mic_pin) new_headset_mode = ALC_HEADSET_MODE_MIC; else new_headset_mode = ALC_HEADSET_MODE_HEADPHONE; if (new_headset_mode == spec->current_headset_mode) { snd_hda_gen_update_outputs(codec); return; } switch (new_headset_mode) { case ALC_HEADSET_MODE_UNPLUGGED: alc_headset_mode_unplugged(codec); spec->current_headset_mode = ALC_HEADSET_MODE_UNKNOWN; spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN; spec->gen.hp_jack_present = false; break; case ALC_HEADSET_MODE_HEADSET: if (spec->current_headset_type == ALC_HEADSET_TYPE_UNKNOWN) alc_determine_headset_type(codec); if (spec->current_headset_type == ALC_HEADSET_TYPE_CTIA) alc_headset_mode_ctia(codec); else if (spec->current_headset_type == 
ALC_HEADSET_TYPE_OMTP) alc_headset_mode_omtp(codec); spec->gen.hp_jack_present = true; break; case ALC_HEADSET_MODE_MIC: alc_headset_mode_mic_in(codec, hp_pin, spec->headphone_mic_pin); spec->gen.hp_jack_present = false; break; case ALC_HEADSET_MODE_HEADPHONE: alc_headset_mode_default(codec); spec->gen.hp_jack_present = true; break; } if (new_headset_mode != ALC_HEADSET_MODE_MIC) { snd_hda_set_pin_ctl_cache(codec, hp_pin, AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN); if (spec->headphone_mic_pin && spec->headphone_mic_pin != hp_pin) snd_hda_set_pin_ctl_cache(codec, spec->headphone_mic_pin, PIN_VREFHIZ); } spec->current_headset_mode = new_headset_mode; snd_hda_gen_update_outputs(codec); } static void alc_update_headset_mode_hook(struct hda_codec *codec, struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { alc_update_headset_mode(codec); } static void alc_update_headset_jack_cb(struct hda_codec *codec, struct hda_jack_callback *jack) { snd_hda_gen_hp_automute(codec, jack); alc_update_headset_mode(codec); } static void alc_probe_headset_mode(struct hda_codec *codec) { int i; struct alc_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->gen.autocfg; /* Find mic pins */ for (i = 0; i < cfg->num_inputs; i++) { if (cfg->inputs[i].is_headset_mic && !spec->headset_mic_pin) spec->headset_mic_pin = cfg->inputs[i].pin; if (cfg->inputs[i].is_headphone_mic && !spec->headphone_mic_pin) spec->headphone_mic_pin = cfg->inputs[i].pin; } WARN_ON(spec->gen.cap_sync_hook); spec->gen.cap_sync_hook = alc_update_headset_mode_hook; spec->gen.automute_hook = alc_update_headset_mode; spec->gen.hp_automute_hook = alc_update_headset_jack_cb; } static void alc_fixup_headset_mode(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: spec->parse_flags |= HDA_PINCFG_HEADSET_MIC | HDA_PINCFG_HEADPHONE_MIC; break; case HDA_FIXUP_ACT_PROBE: alc_probe_headset_mode(codec); break; case 
HDA_FIXUP_ACT_INIT: if (is_s3_resume(codec) || is_s4_resume(codec)) { spec->current_headset_mode = ALC_HEADSET_MODE_UNKNOWN; spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN; } alc_update_headset_mode(codec); break; } } static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { struct alc_spec *spec = codec->spec; spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; } else alc_fixup_headset_mode(codec, fix, action); } static void alc255_set_default_jack_type(struct hda_codec *codec) { /* Set to iphone type */ static const struct coef_fw alc255fw[] = { WRITE_COEF(0x1b, 0x880b), WRITE_COEF(0x45, 0xd089), WRITE_COEF(0x1b, 0x080b), WRITE_COEF(0x46, 0x0004), WRITE_COEF(0x1b, 0x0c0b), {} }; static const struct coef_fw alc256fw[] = { WRITE_COEF(0x1b, 0x884b), WRITE_COEF(0x45, 0xd089), WRITE_COEF(0x1b, 0x084b), WRITE_COEF(0x46, 0x0004), WRITE_COEF(0x1b, 0x0c4b), {} }; switch (codec->core.vendor_id) { case 0x10ec0255: alc_process_coef_fw(codec, alc255fw); break; case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x19e58326: alc_process_coef_fw(codec, alc256fw); break; } msleep(30); } static void alc_fixup_headset_mode_alc255(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { alc255_set_default_jack_type(codec); } alc_fixup_headset_mode(codec, fix, action); } static void alc_fixup_headset_mode_alc255_no_hp_mic(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { struct alc_spec *spec = codec->spec; spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; alc255_set_default_jack_type(codec); } else alc_fixup_headset_mode(codec, fix, action); } static void alc288_update_headset_jack_cb(struct hda_codec *codec, struct hda_jack_callback *jack) { struct alc_spec *spec = codec->spec; alc_update_headset_jack_cb(codec, jack); /* Headset Mic enable or disable, only for Dell Dino */ 
alc_update_gpio_data(codec, 0x40, spec->gen.hp_jack_present);
}

/* Dell Dino: standard headset-mode fixup plus GPIO 0x40, which is toggled
 * by alc288_update_headset_jack_cb() from the hp_automute hook.
 */
static void alc_fixup_headset_mode_dell_alc288(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	alc_fixup_headset_mode(codec, fix, action);
	if (action == HDA_FIXUP_ACT_PROBE) {
		struct alc_spec *spec = codec->spec;
		/* toggled via hp_automute_hook */
		spec->gpio_mask |= 0x40;
		spec->gpio_dir |= 0x40;
		spec->gen.hp_automute_hook = alc288_update_headset_jack_cb;
	}
}

/* let the generic parser auto-mute via amps rather than pin controls */
static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
					const struct hda_fixup *fix,
					int action)
{
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		struct alc_spec *spec = codec->spec;
		spec->gen.auto_mute_via_amp = 1;
	}
}

/* skip the pin shutdown sequence (see spec->no_shutup_pins users) */
static void alc_fixup_no_shutup(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		struct alc_spec *spec = codec->spec;
		spec->no_shutup_pins = 1;
	}
}

static void alc_fixup_disable_aamix(struct hda_codec *codec,
				    const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		struct alc_spec *spec = codec->spec;
		/* Disable AA-loopback as it causes white noise */
		spec->gen.mixer_nid = 0;
	}
}

/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */
static void alc_fixup_tpt440_dock(struct hda_codec *codec,
				  const struct hda_fixup *fix, int action)
{
	static const struct hda_pintbl pincfgs[] = {
		{ 0x16, 0x21211010 }, /* dock headphone */
		{ 0x19, 0x21a11010 }, /* dock mic */
		{ }
	};
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
		codec->power_save_node = 0; /* avoid click noises */
		snd_hda_apply_pincfgs(codec, pincfgs);
	}
}

/* Thinkpad *70-series dock: dock headphone on pin 0x17, dock mic on 0x19 */
static void alc_fixup_tpt470_dock(struct hda_codec *codec,
				  const struct hda_fixup *fix, int action)
{
	static const struct hda_pintbl pincfgs[] = {
		{ 0x17, 0x21211010 }, /* dock headphone */
		{ 0x19, 0x21a11010 }, /* dock mic */
		{ }
	};
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->parse_flags =
HDA_PINCFG_NO_HP_FIXUP;
		snd_hda_apply_pincfgs(codec, pincfgs);
	} else if (action == HDA_FIXUP_ACT_INIT) {
		/* Enable DOCK device */
		snd_hda_codec_write(codec, 0x17, 0,
			    AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
		/* Enable DOCK device */
		snd_hda_codec_write(codec, 0x19, 0,
			    AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
	}
}

static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
				  const struct hda_fixup *fix, int action)
{
	/* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
	 * the speaker output becomes too low by some reason on Thinkpads with
	 * ALC298 codec
	 */
	static const hda_nid_t preferred_pairs[] = {
		0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
		0
	};
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		spec->gen.preferred_dacs = preferred_pairs;
}

/* pin-to-DAC pairing override for ASUS models (0x17->DAC2, 0x21->DAC3) */
static void alc295_fixup_asus_dacs(struct hda_codec *codec,
				   const struct hda_fixup *fix, int action)
{
	static const hda_nid_t preferred_pairs[] = {
		0x17, 0x02, 0x21, 0x03,
		0
	};
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		spec->gen.preferred_dacs = preferred_pairs;
}

/* custom shutup hook for Dell XPS 13: mute the HP amp before powering down */
static void alc_shutup_dell_xps13(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;
	int hp_pin = alc_get_hp_pin(spec);

	/* Prevent pop noises when headphones are plugged in */
	snd_hda_codec_write(codec, hp_pin, 0,
			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
	msleep(20);
}

static void alc_fixup_dell_xps13(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;
	struct hda_input_mux *imux = &spec->gen.input_mux;
	int i;

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		/* mic pin 0x19 must be initialized with Vref Hi-Z, otherwise
		 * it causes a click noise at start up
		 */
		snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
		spec->shutup = alc_shutup_dell_xps13;
		break;
	case HDA_FIXUP_ACT_PROBE:
		/* Make the internal mic (pin 0x12) the default input source */
		for (i = 0; i < imux->num_items; i++) {
			if (spec->gen.imux_pins[i] == 0x12) {
				spec->gen.cur_mux[0] = i;
				break;
			}
		}
		break;
	}
}

static void alc_fixup_headset_mode_alc662(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
		spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */

		/* Disable boost for mic-in permanently. (This code is only called
		 * from quirks that guarantee that the headphone is at NID 0x1b.)
		 */
		snd_hda_codec_write(codec, 0x1b, 0,
				    AC_VERB_SET_AMP_GAIN_MUTE, 0x7000);
		snd_hda_override_wcaps(codec, 0x1b,
				       get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP);
	} else
		alc_fixup_headset_mode(codec, fix, action);
}

/* ALC668 variant: extra COEF setup before the common headset-mode fixup */
static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
				const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		alc_write_coef_idx(codec, 0xc4, 0x8000);
		alc_update_coef_idx(codec, 0xc2, ~0xfe, 0);
		snd_hda_set_pin_ctl_cache(codec, 0x18, 0);
	}
	alc_fixup_headset_mode(codec, fix, action);
}

/* Returns the nid of the external mic input pin, or 0 if it cannot be found.
 */
static int find_ext_mic_pin(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->gen.autocfg;
	hda_nid_t nid;
	unsigned int defcfg;
	int i;

	/* scan parsed inputs for the first mic pin that is not internal */
	for (i = 0; i < cfg->num_inputs; i++) {
		if (cfg->inputs[i].type != AUTO_PIN_MIC)
			continue;
		nid = cfg->inputs[i].pin;
		defcfg = snd_hda_codec_get_pincfg(codec, nid);
		if (snd_hda_get_input_pin_attr(defcfg) == INPUT_PIN_ATTR_INT)
			continue;
		return nid;
	}

	return 0;
}

/* gate the external mic jack by the headphone jack state */
static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
				    const struct hda_fixup *fix,
				    int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PROBE) {
		int mic_pin = find_ext_mic_pin(codec);
		int hp_pin = alc_get_hp_pin(spec);

		if (snd_BUG_ON(!mic_pin || !hp_pin))
			return;
		snd_hda_jack_set_gating_jack(codec, mic_pin, hp_pin);
	}
}

static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
					     const struct hda_fixup *fix,
					     int action)
{
	struct alc_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->gen.autocfg;
	int i;

	/* The mic boosts on level 2 and 3 are too noisy
	   on the internal mic input.
	   Therefore limit the boost to 0 or 1. */

	if (action != HDA_FIXUP_ACT_PROBE)
		return;

	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t nid = cfg->inputs[i].pin;
		unsigned int defcfg;
		if (cfg->inputs[i].type != AUTO_PIN_MIC)
			continue;
		defcfg = snd_hda_codec_get_pincfg(codec, nid);
		if (snd_hda_get_input_pin_attr(defcfg) != INPUT_PIN_ATTR_INT)
			continue;

		/* cap the input amp at 2 steps (0 dB offset, no mute bit) */
		snd_hda_override_amp_caps(codec, nid, HDA_INPUT,
					  (0x00 << AC_AMPCAP_OFFSET_SHIFT) |
					  (0x01 << AC_AMPCAP_NUM_STEPS_SHIFT) |
					  (0x2f << AC_AMPCAP_STEP_SIZE_SHIFT) |
					  (0 << AC_AMPCAP_MUTE_SHIFT));
	}
}

/* automute hook that also switches the VREF of pin 0x19 with the HP state */
static void alc283_hp_automute_hook(struct hda_codec *codec,
				    struct hda_jack_callback *jack)
{
	struct alc_spec *spec = codec->spec;
	int vref;

	msleep(200);
	snd_hda_gen_hp_automute(codec, jack);

	vref = spec->gen.hp_jack_present ?
PIN_VREF80 : 0;

	msleep(600);
	snd_hda_codec_write(codec, 0x19, 0,
			    AC_VERB_SET_PIN_WIDGET_CONTROL, vref);
}

static void alc283_fixup_chromebook(struct hda_codec *codec,
				    const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		/* hide unused widget 0x03 from the parser */
		snd_hda_override_wcaps(codec, 0x03, 0);
		/* Disable AA-loopback as it causes white noise */
		spec->gen.mixer_nid = 0;
		break;
	case HDA_FIXUP_ACT_INIT:
		/* MIC2-VREF control */
		/* Set to manual mode */
		alc_update_coef_idx(codec, 0x06, 0x000c, 0);
		/* Enable Line1 input control by verb */
		alc_update_coef_idx(codec, 0x1a, 0, 1 << 4);
		break;
	}
}

static void alc283_fixup_sense_combo_jack(struct hda_codec *codec,
				    const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		spec->gen.hp_automute_hook = alc283_hp_automute_hook;
		break;
	case HDA_FIXUP_ACT_INIT:
		/* MIC2-VREF control */
		/* Set to manual mode */
		alc_update_coef_idx(codec, 0x06, 0x000c, 0);
		break;
	}
}

/* mute tablet speaker pin (0x14) via dock plugging in addition */
static void asus_tx300_automute(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	snd_hda_gen_update_outputs(codec);
	if (snd_hda_jack_detect(codec, 0x1b))
		spec->gen.mute_bits |= (1ULL << 0x14);
}

static void alc282_fixup_asus_tx300(struct hda_codec *codec,
				    const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;
	static const struct hda_pintbl dock_pins[] = {
		{ 0x1b, 0x21114000 }, /* dock speaker pin */
		{}
	};

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		spec->init_amp = ALC_INIT_DEFAULT;
		/* TX300 needs to set up GPIO2 for the speaker amp */
		alc_setup_gpio(codec, 0x04);
		snd_hda_apply_pincfgs(codec, dock_pins);
		spec->gen.auto_mute_via_amp = 1;
		spec->gen.automute_hook = asus_tx300_automute;
		snd_hda_jack_detect_enable_callback(codec, 0x1b,
						    snd_hda_gen_hp_automute);
		break;
	case HDA_FIXUP_ACT_PROBE:
		spec->init_amp = ALC_INIT_DEFAULT;
		break;
	case HDA_FIXUP_ACT_BUILD:
/* this is a bit tricky; give more sane names for the main * (tablet) speaker and the dock speaker, respectively */ rename_ctl(codec, "Speaker Playback Switch", "Dock Speaker Playback Switch"); rename_ctl(codec, "Bass Speaker Playback Switch", "Speaker Playback Switch"); break; } } static void alc290_fixup_mono_speakers(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { /* DAC node 0x03 is giving mono output. We therefore want to make sure 0x14 (front speaker) and 0x15 (headphones) use the stereo DAC, while leaving 0x17 (bass speaker) for node 0x03. */ static const hda_nid_t conn1[] = { 0x0c }; snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn1), conn1); } } static void alc298_fixup_speaker_volume(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { /* The speaker is routed to the Node 0x06 by a mistake, as a result we can't adjust the speaker's volume since this node does not has Amp-out capability. we change the speaker's route to: Node 0x02 (Audio Output) -> Node 0x0c (Audio Mixer) -> Node 0x17 ( Pin Complex), since Node 0x02 has Amp-out caps, we can adjust speaker's volume now. 
*/ static const hda_nid_t conn1[] = { 0x0c }; snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn1), conn1); } } /* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */ static void alc295_fixup_disable_dac3(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { static const hda_nid_t conn[] = { 0x02, 0x03 }; snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); } } /* force NID 0x17 (Bass Speaker) to DAC1 to share it with the main speaker */ static void alc285_fixup_speaker2_to_dac1(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { static const hda_nid_t conn[] = { 0x02 }; snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); } } /* disable DAC3 (0x06) selection on NID 0x15 - share Speaker/Bass Speaker DAC 0x03 */ static void alc294_fixup_bass_speaker_15(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) { static const hda_nid_t conn[] = { 0x02, 0x03 }; snd_hda_override_conn_list(codec, 0x15, ARRAY_SIZE(conn), conn); } } /* Hook to update amp GPIO4 for automute */ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, struct hda_jack_callback *jack) { struct alc_spec *spec = codec->spec; snd_hda_gen_hp_automute(codec, jack); /* mute_led_polarity is set to 0, so we pass inverted value here */ alc_update_gpio_led(codec, 0x10, spec->mute_led_polarity, !spec->gen.hp_jack_present); } /* Manage GPIOs for HP EliteBook Folio 9480m. 
* * GPIO4 is the headphone amplifier power control * GPIO3 is the audio output mute indicator LED */ static void alc280_fixup_hp_9480m(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; alc_fixup_hp_gpio_led(codec, action, 0x08, 0); if (action == HDA_FIXUP_ACT_PRE_PROBE) { /* amp at GPIO4; toggled via alc280_hp_gpio4_automute_hook() */ spec->gpio_mask |= 0x10; spec->gpio_dir |= 0x10; spec->gen.hp_automute_hook = alc280_hp_gpio4_automute_hook; } } static void alc275_fixup_gpio4_off(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->gpio_mask |= 0x04; spec->gpio_dir |= 0x04; /* set data bit low */ } } /* Quirk for Thinkpad X1 7th and 8th Gen * The following fixed routing needed * DAC1 (NID 0x02) -> Speaker (NID 0x14); some eq applied secretly * DAC2 (NID 0x03) -> Bass (NID 0x17) & Headphone (NID 0x21); sharing a DAC * DAC3 (NID 0x06) -> Unused, due to the lack of volume amp */ static void alc285_fixup_thinkpad_x1_gen7(struct hda_codec *codec, const struct hda_fixup *fix, int action) { static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */ static const hda_nid_t preferred_pairs[] = { 0x14, 0x02, 0x17, 0x03, 0x21, 0x03, 0 }; struct alc_spec *spec = codec->spec; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); spec->gen.preferred_dacs = preferred_pairs; break; case HDA_FIXUP_ACT_BUILD: /* The generic parser creates somewhat unintuitive volume ctls * with the fixed routing above, and the shared DAC2 may be * confusing for PA. * Rename those to unique names so that PA doesn't touch them * and use only Master volume. 
*/ rename_ctl(codec, "Front Playback Volume", "DAC1 Playback Volume"); rename_ctl(codec, "Bass Speaker Playback Volume", "DAC2 Playback Volume"); break; } } static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec, const struct hda_fixup *fix, int action) { alc_fixup_dual_codecs(codec, fix, action); switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: /* override card longname to provide a unique UCM profile */ strcpy(codec->card->longname, "HDAudio-Lenovo-DualCodecs"); break; case HDA_FIXUP_ACT_BUILD: /* rename Capture controls depending on the codec */ rename_ctl(codec, "Capture Volume", codec->addr == 0 ? "Rear-Panel Capture Volume" : "Front-Panel Capture Volume"); rename_ctl(codec, "Capture Switch", codec->addr == 0 ? "Rear-Panel Capture Switch" : "Front-Panel Capture Switch"); break; } } static void alc225_fixup_s3_pop_noise(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action != HDA_FIXUP_ACT_PRE_PROBE) return; codec->power_save_node = 1; } /* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */ static void alc274_fixup_bind_dacs(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; static const hda_nid_t preferred_pairs[] = { 0x21, 0x03, 0x1b, 0x03, 0x16, 0x02, 0 }; if (action != HDA_FIXUP_ACT_PRE_PROBE) return; spec->gen.preferred_dacs = preferred_pairs; spec->gen.auto_mute_via_amp = 1; codec->power_save_node = 0; } /* avoid DAC 0x06 for bass speaker 0x17; it has no volume control */ static void alc289_fixup_asus_ga401(struct hda_codec *codec, const struct hda_fixup *fix, int action) { static const hda_nid_t preferred_pairs[] = { 0x14, 0x02, 0x17, 0x02, 0x21, 0x03, 0 }; struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) spec->gen.preferred_dacs = preferred_pairs; } /* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec, const struct 
hda_fixup *fix, int action) { if (action != HDA_FIXUP_ACT_PRE_PROBE) return; snd_hda_override_wcaps(codec, 0x03, 0); } static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec) { switch (codec->core.vendor_id) { case 0x10ec0274: case 0x10ec0294: case 0x10ec0225: case 0x10ec0295: case 0x10ec0299: alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */ alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15); break; case 0x10ec0230: case 0x10ec0235: case 0x10ec0236: case 0x10ec0255: case 0x10ec0256: case 0x10ec0257: case 0x19e58326: alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); break; } } static void alc295_fixup_chromebook(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: spec->ultra_low_power = true; break; case HDA_FIXUP_ACT_INIT: alc_combo_jack_hp_jd_restart(codec); break; } } static void alc256_fixup_chromebook(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: spec->gen.suppress_auto_mute = 1; spec->gen.suppress_auto_mic = 1; spec->en_3kpull_low = false; break; } } static void alc_fixup_disable_mic_vref(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action == HDA_FIXUP_ACT_PRE_PROBE) snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); } static void alc294_gx502_toggle_output(struct hda_codec *codec, struct hda_jack_callback *cb) { /* The Windows driver sets the codec up in a very different way where * it appears to leave 0x10 = 0x8a20 set. 
For Linux we need to toggle it */ if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT) alc_write_coef_idx(codec, 0x10, 0x8a20); else alc_write_coef_idx(codec, 0x10, 0x0a20); } static void alc294_fixup_gx502_hp(struct hda_codec *codec, const struct hda_fixup *fix, int action) { /* Pin 0x21: headphones/headset mic */ if (!is_jack_detectable(codec, 0x21)) return; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_jack_detect_enable_callback(codec, 0x21, alc294_gx502_toggle_output); break; case HDA_FIXUP_ACT_INIT: /* Make sure to start in a correct state, i.e. if * headphones have been plugged in before powering up the system */ alc294_gx502_toggle_output(codec, NULL); break; } } static void alc294_gu502_toggle_output(struct hda_codec *codec, struct hda_jack_callback *cb) { /* Windows sets 0x10 to 0x8420 for Node 0x20 which is * responsible from changes between speakers and headphones */ if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT) alc_write_coef_idx(codec, 0x10, 0x8420); else alc_write_coef_idx(codec, 0x10, 0x0a20); } static void alc294_fixup_gu502_hp(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (!is_jack_detectable(codec, 0x21)) return; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_jack_detect_enable_callback(codec, 0x21, alc294_gu502_toggle_output); break; case HDA_FIXUP_ACT_INIT: alc294_gu502_toggle_output(codec, NULL); break; } } static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec, const struct hda_fixup *fix, int action) { if (action != HDA_FIXUP_ACT_INIT) return; msleep(100); alc_write_coef_idx(codec, 0x65, 0x0); } static void alc274_fixup_hp_headset_mic(struct hda_codec *codec, const struct hda_fixup *fix, int action) { switch (action) { case HDA_FIXUP_ACT_INIT: alc_combo_jack_hp_jd_restart(codec); break; } } static void alc_fixup_no_int_mic(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; switch (action) { case 
HDA_FIXUP_ACT_PRE_PROBE: /* Mic RING SLEEVE swap for combo jack */ alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12); spec->no_internal_mic_pin = true; break; case HDA_FIXUP_ACT_INIT: alc_combo_jack_hp_jd_restart(codec); break; } } /* GPIO1 = amplifier on/off * GPIO3 = mic mute LED */ static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec, const struct hda_fixup *fix, int action) { static const hda_nid_t conn[] = { 0x02 }; struct alc_spec *spec = codec->spec; static const struct hda_pintbl pincfgs[] = { { 0x14, 0x90170110 }, /* front/high speakers */ { 0x17, 0x90170130 }, /* back/bass speakers */ { } }; //enable micmute led alc_fixup_hp_gpio_led(codec, action, 0x00, 0x04); switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: spec->micmute_led_polarity = 1; /* needed for amp of back speakers */ spec->gpio_mask |= 0x01; spec->gpio_dir |= 0x01; snd_hda_apply_pincfgs(codec, pincfgs); /* share DAC to have unified volume control */ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn); snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); break; case HDA_FIXUP_ACT_INIT: /* need to toggle GPIO to enable the amp of back speakers */ alc_update_gpio_data(codec, 0x01, true); msleep(100); alc_update_gpio_data(codec, 0x01, false); break; } } static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec, const struct hda_fixup *fix, int action) { static const hda_nid_t conn[] = { 0x02 }; static const struct hda_pintbl pincfgs[] = { { 0x14, 0x90170110 }, /* rear speaker */ { } }; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_apply_pincfgs(codec, pincfgs); /* force front speaker to DAC1 */ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); break; } } static void alc285_fixup_hp_envy_x360(struct hda_codec *codec, const struct hda_fixup *fix, int action) { static const struct coef_fw coefs[] = { WRITE_COEF(0x08, 0x6a0c), WRITE_COEF(0x0d, 0xa023), WRITE_COEF(0x10, 0x0320), WRITE_COEF(0x1a, 0x8c03), WRITE_COEF(0x25, 
0x1800), WRITE_COEF(0x26, 0x003a), WRITE_COEF(0x28, 0x1dfe), WRITE_COEF(0x29, 0xb014), WRITE_COEF(0x2b, 0x1dfe), WRITE_COEF(0x37, 0xfe15), WRITE_COEF(0x38, 0x7909), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x00f4), WRITE_COEF(0x4a, 0x21e0), WRITE_COEF(0x66, 0x03f0), WRITE_COEF(0x67, 0x1000), WRITE_COEF(0x6e, 0x1005), { } }; static const struct hda_pintbl pincfgs[] = { { 0x12, 0xb7a60130 }, /* Internal microphone*/ { 0x14, 0x90170150 }, /* B&O soundbar speakers */ { 0x17, 0x90170153 }, /* Side speakers */ { 0x19, 0x03a11040 }, /* Headset microphone */ { } }; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_apply_pincfgs(codec, pincfgs); /* Fixes volume control problem for side speakers */ alc295_fixup_disable_dac3(codec, fix, action); /* Fixes no sound from headset speaker */ snd_hda_codec_amp_stereo(codec, 0x21, HDA_OUTPUT, 0, -1, 0); /* Auto-enable headset mic when plugged */ snd_hda_jack_set_gating_jack(codec, 0x19, 0x21); /* Headset mic volume enhancement */ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREF50); break; case HDA_FIXUP_ACT_INIT: alc_process_coef_fw(codec, coefs); break; case HDA_FIXUP_ACT_BUILD: rename_ctl(codec, "Bass Speaker Playback Volume", "B&O-Tuned Playback Volume"); rename_ctl(codec, "Front Playback Switch", "B&O Soundbar Playback Switch"); rename_ctl(codec, "Bass Speaker Playback Switch", "Side Speaker Playback Switch"); break; } } /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" static void alc_fixup_thinkpad_acpi(struct hda_codec *codec, const struct hda_fixup *fix, int action) { alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */ hda_fixup_thinkpad_acpi(codec, fix, action); } /* Fixup for Lenovo Legion 15IMHg05 speaker output on headset removal. 
*/ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: spec->gen.suppress_auto_mute = 1; break; } } static void comp_acpi_device_notify(acpi_handle handle, u32 event, void *data) { struct hda_codec *cdc = data; struct alc_spec *spec = cdc->spec; codec_info(cdc, "ACPI Notification %d\n", event); hda_component_acpi_device_notify(&spec->comps, handle, event, data); } static int comp_bind(struct device *dev) { struct hda_codec *cdc = dev_to_hda_codec(dev); struct alc_spec *spec = cdc->spec; int ret; ret = hda_component_manager_bind(cdc, &spec->comps); if (ret) return ret; return hda_component_manager_bind_acpi_notifications(cdc, &spec->comps, comp_acpi_device_notify, cdc); } static void comp_unbind(struct device *dev) { struct hda_codec *cdc = dev_to_hda_codec(dev); struct alc_spec *spec = cdc->spec; hda_component_manager_unbind_acpi_notifications(cdc, &spec->comps, comp_acpi_device_notify); hda_component_manager_unbind(cdc, &spec->comps); } static const struct component_master_ops comp_master_ops = { .bind = comp_bind, .unbind = comp_unbind, }; static void comp_generic_playback_hook(struct hda_pcm_stream *hinfo, struct hda_codec *cdc, struct snd_pcm_substream *sub, int action) { struct alc_spec *spec = cdc->spec; hda_component_manager_playback_hook(&spec->comps, action); } static void comp_generic_fixup(struct hda_codec *cdc, int action, const char *bus, const char *hid, const char *match_str, int count) { struct alc_spec *spec = cdc->spec; int ret; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: ret = hda_component_manager_init(cdc, &spec->comps, count, bus, hid, match_str, &comp_master_ops); if (ret) return; spec->gen.pcm_playback_hook = comp_generic_playback_hook; break; case HDA_FIXUP_ACT_FREE: hda_component_manager_free(&spec->comps, &comp_master_ops); break; } } static void find_cirrus_companion_amps(struct 
hda_codec *cdc) { struct device *dev = hda_codec_dev(cdc); struct acpi_device *adev; struct fwnode_handle *fwnode __free(fwnode_handle) = NULL; const char *bus = NULL; static const struct { const char *hid; const char *name; } acpi_ids[] = {{ "CSC3554", "cs35l54-hda" }, { "CSC3556", "cs35l56-hda" }, { "CSC3557", "cs35l57-hda" }}; char *match; int i, count = 0, count_devindex = 0; for (i = 0; i < ARRAY_SIZE(acpi_ids); ++i) { adev = acpi_dev_get_first_match_dev(acpi_ids[i].hid, NULL, -1); if (adev) break; } if (!adev) { codec_dbg(cdc, "Did not find ACPI entry for a Cirrus Amp\n"); return; } count = i2c_acpi_client_count(adev); if (count > 0) { bus = "i2c"; } else { count = acpi_spi_count_resources(adev); if (count > 0) bus = "spi"; } fwnode = fwnode_handle_get(acpi_fwnode_handle(adev)); acpi_dev_put(adev); if (!bus) { codec_err(cdc, "Did not find any buses for %s\n", acpi_ids[i].hid); return; } if (!fwnode) { codec_err(cdc, "Could not get fwnode for %s\n", acpi_ids[i].hid); return; } /* * When available the cirrus,dev-index property is an accurate * count of the amps in a system and is used in preference to * the count of bus devices that can contain additional address * alias entries. 
*/ count_devindex = fwnode_property_count_u32(fwnode, "cirrus,dev-index"); if (count_devindex > 0) count = count_devindex; match = devm_kasprintf(dev, GFP_KERNEL, "-%%s:00-%s.%%d", acpi_ids[i].name); if (!match) return; codec_info(cdc, "Found %d %s on %s (%s)\n", count, acpi_ids[i].hid, bus, match); comp_generic_fixup(cdc, HDA_FIXUP_ACT_PRE_PROBE, bus, acpi_ids[i].hid, match, count); } static void cs35l41_fixup_i2c_two(struct hda_codec *cdc, const struct hda_fixup *fix, int action) { comp_generic_fixup(cdc, action, "i2c", "CSC3551", "-%s:00-cs35l41-hda.%d", 2); } static void cs35l41_fixup_i2c_four(struct hda_codec *cdc, const struct hda_fixup *fix, int action) { comp_generic_fixup(cdc, action, "i2c", "CSC3551", "-%s:00-cs35l41-hda.%d", 4); } static void cs35l41_fixup_spi_two(struct hda_codec *codec, const struct hda_fixup *fix, int action) { comp_generic_fixup(codec, action, "spi", "CSC3551", "-%s:00-cs35l41-hda.%d", 2); } static void cs35l41_fixup_spi_four(struct hda_codec *codec, const struct hda_fixup *fix, int action) { comp_generic_fixup(codec, action, "spi", "CSC3551", "-%s:00-cs35l41-hda.%d", 4); } static void alc287_fixup_legion_16achg6_speakers(struct hda_codec *cdc, const struct hda_fixup *fix, int action) { comp_generic_fixup(cdc, action, "i2c", "CLSA0100", "-%s:00-cs35l41-hda.%d", 2); } static void alc287_fixup_legion_16ithg6_speakers(struct hda_codec *cdc, const struct hda_fixup *fix, int action) { comp_generic_fixup(cdc, action, "i2c", "CLSA0101", "-%s:00-cs35l41-hda.%d", 2); } static void alc285_fixup_asus_ga403u(struct hda_codec *cdc, const struct hda_fixup *fix, int action) { /* * The same SSID has been re-used in different hardware, they have * different codecs and the newer GA403U has a ALC285. 
*/ if (cdc->core.vendor_id != 0x10ec0285) alc_fixup_inv_dmic(cdc, fix, action); } static void tas2781_fixup_i2c(struct hda_codec *cdc, const struct hda_fixup *fix, int action) { comp_generic_fixup(cdc, action, "i2c", "TIAS2781", "-%s:00", 1); } static void yoga7_14arb7_fixup_i2c(struct hda_codec *cdc, const struct hda_fixup *fix, int action) { comp_generic_fixup(cdc, action, "i2c", "INT8866", "-%s:00", 1); } static void alc256_fixup_acer_sfg16_micmute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { alc_fixup_hp_gpio_led(codec, action, 0, 0x04); } /* for alc295_fixup_hp_top_speakers */ #include "hp_x360_helper.c" /* for alc285_fixup_ideapad_s740_coef() */ #include "ideapad_s740_helper.c" static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = { WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000), WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000), WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089), {} }; static void alc256_fixup_set_coef_defaults(struct hda_codec *codec, const struct hda_fixup *fix, int action) { /* * A certain other OS sets these coeffs to different values. On at least * one TongFang barebone these settings might survive even a cold * reboot. So to restore a clean slate the values are explicitly reset * to default here. Without this, the external microphone is always in a * plugged-in state, while the internal microphone is always in an * unplugged state, breaking the ability to use the internal microphone. 
*/ alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs); } static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = { WRITE_COEF(0x1a, 0x9003), WRITE_COEF(0x1b, 0x0e2b), WRITE_COEF(0x37, 0xfe06), WRITE_COEF(0x38, 0x4981), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x0074), WRITE_COEF(0x49, 0x0149), {} }; static void alc233_fixup_no_audio_jack(struct hda_codec *codec, const struct hda_fixup *fix, int action) { /* * The audio jack input and output is not detected on the ASRock NUC Box * 1100 series when cold booting without this fix. Warm rebooting from a * certain other OS makes the audio functional, as COEF settings are * preserved in this case. This fix sets these altered COEF values as * the default. */ alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs); } static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec, const struct hda_fixup *fix, int action) { /* * The Clevo NJ51CU comes either with the ALC293 or the ALC256 codec, * but uses the 0x8686 subproduct id in both cases. The ALC256 codec * needs an additional quirk for sound working after suspend and resume. 
*/ if (codec->core.vendor_id == 0x10ec0256) { alc_update_coef_idx(codec, 0x10, 1<<9, 0); snd_hda_codec_set_pincfg(codec, 0x19, 0x04a11120); } else { snd_hda_codec_set_pincfg(codec, 0x1a, 0x04a1113c); } } static void alc256_decrease_headphone_amp_val(struct hda_codec *codec, const struct hda_fixup *fix, int action) { u32 caps; u8 nsteps, offs; if (action != HDA_FIXUP_ACT_PRE_PROBE) return; caps = query_amp_caps(codec, 0x3, HDA_OUTPUT); nsteps = ((caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT) - 10; offs = ((caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT) - 10; caps &= ~AC_AMPCAP_NUM_STEPS & ~AC_AMPCAP_OFFSET; caps |= (nsteps << AC_AMPCAP_NUM_STEPS_SHIFT) | (offs << AC_AMPCAP_OFFSET_SHIFT); if (snd_hda_override_amp_caps(codec, 0x3, HDA_OUTPUT, caps)) codec_warn(codec, "failed to override amp caps for NID 0x3\n"); } static void alc_fixup_dell4_mic_no_presence_quiet(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; struct hda_input_mux *imux = &spec->gen.input_mux; int i; alc269_fixup_limit_int_mic_boost(codec, fix, action); switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: /** * Set the vref of pin 0x19 (Headset Mic) and pin 0x1b (Headphone Mic) * to Hi-Z to avoid pop noises at startup and when plugging and * unplugging headphones. */ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); snd_hda_codec_set_pin_target(codec, 0x1b, PIN_VREFHIZ); break; case HDA_FIXUP_ACT_PROBE: /** * Make the internal mic (0x12) the default input source to * prevent pop noises on cold boot. */ for (i = 0; i < imux->num_items; i++) { if (spec->gen.imux_pins[i] == 0x12) { spec->gen.cur_mux[0] = i; break; } } break; } } static void alc287_fixup_yoga9_14iap7_bass_spk_pin(struct hda_codec *codec, const struct hda_fixup *fix, int action) { /* * The Pin Complex 0x17 for the bass speakers is wrongly reported as * unconnected. 
*/ static const struct hda_pintbl pincfgs[] = { { 0x17, 0x90170121 }, { } }; /* * Avoid DAC 0x06 and 0x08, as they have no volume controls. * DAC 0x02 and 0x03 would be fine. */ static const hda_nid_t conn[] = { 0x02, 0x03 }; /* * Prefer both speakerbar (0x14) and bass speakers (0x17) connected to DAC 0x02. * Headphones (0x21) are connected to DAC 0x03. */ static const hda_nid_t preferred_pairs[] = { 0x14, 0x02, 0x17, 0x02, 0x21, 0x03, 0 }; struct alc_spec *spec = codec->spec; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_apply_pincfgs(codec, pincfgs); snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); spec->gen.preferred_dacs = preferred_pairs; break; } } static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec, const struct hda_fixup *fix, int action) { static const struct hda_pintbl pincfgs[] = { { 0x14, 0x90170151 }, { 0x17, 0x90170150 }, { } }; static const hda_nid_t conn[] = { 0x02, 0x03 }; static const hda_nid_t preferred_pairs[] = { 0x14, 0x02, 0x17, 0x03, 0x21, 0x02, 0 }; struct alc_spec *spec = codec->spec; alc_fixup_no_shutup(codec, fix, action); switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_apply_pincfgs(codec, pincfgs); snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); spec->gen.preferred_dacs = preferred_pairs; break; } } /* Forcibly assign NID 0x03 to HP while NID 0x02 to SPK */ static void alc287_fixup_bind_dacs(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */ static const hda_nid_t preferred_pairs[] = { 0x17, 0x02, 0x21, 0x03, 0 }; if (action != HDA_FIXUP_ACT_PRE_PROBE) return; snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); spec->gen.preferred_dacs = preferred_pairs; spec->gen.auto_mute_via_amp = 1; if (spec->gen.autocfg.speaker_pins[0] != 0x14) { snd_hda_codec_write_cache(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); /* Make sure 0x14 
was disable */ } } /* Fix none verb table of Headset Mic pin */ static void alc_fixup_headset_mic(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; static const struct hda_pintbl pincfgs[] = { { 0x19, 0x03a1103c }, { } }; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_apply_pincfgs(codec, pincfgs); alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12); spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; break; } } static void alc245_fixup_hp_spectre_x360_eu0xxx(struct hda_codec *codec, const struct hda_fixup *fix, int action) { /* * The Pin Complex 0x14 for the treble speakers is wrongly reported as * unconnected. * The Pin Complex 0x17 for the bass speakers has the lowest association * and sequence values so shift it up a bit to squeeze 0x14 in. */ static const struct hda_pintbl pincfgs[] = { { 0x14, 0x90170110 }, // top/treble { 0x17, 0x90170111 }, // bottom/bass { } }; /* * Force DAC 0x02 for the bass speakers 0x17. */ static const hda_nid_t conn[] = { 0x02 }; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: snd_hda_apply_pincfgs(codec, pincfgs); snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); break; } cs35l41_fixup_i2c_two(codec, fix, action); alc245_fixup_hp_mute_led_coefbit(codec, fix, action); alc245_fixup_hp_gpio_led(codec, fix, action); } /* some changes for Spectre x360 16, 2024 model */ static void alc245_fixup_hp_spectre_x360_16_aa0xxx(struct hda_codec *codec, const struct hda_fixup *fix, int action) { /* * The Pin Complex 0x14 for the treble speakers is wrongly reported as * unconnected. * The Pin Complex 0x17 for the bass speakers has the lowest association * and sequence values so shift it up a bit to squeeze 0x14 in. */ struct alc_spec *spec = codec->spec; static const struct hda_pintbl pincfgs[] = { { 0x14, 0x90170110 }, // top/treble { 0x17, 0x90170111 }, // bottom/bass { } }; /* * Force DAC 0x02 for the bass speakers 0x17. 
*/ static const hda_nid_t conn[] = { 0x02 }; switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: /* needed for amp of back speakers */ spec->gpio_mask |= 0x01; spec->gpio_dir |= 0x01; snd_hda_apply_pincfgs(codec, pincfgs); snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); break; case HDA_FIXUP_ACT_INIT: /* need to toggle GPIO to enable the amp of back speakers */ alc_update_gpio_data(codec, 0x01, true); msleep(100); alc_update_gpio_data(codec, 0x01, false); break; } cs35l41_fixup_i2c_two(codec, fix, action); alc245_fixup_hp_mute_led_coefbit(codec, fix, action); alc245_fixup_hp_gpio_led(codec, fix, action); } /* * ALC287 PCM hooks */ static void alc287_alc1318_playback_pcm_hook(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream, int action) { switch (action) { case HDA_GEN_PCM_ACT_OPEN: alc_write_coefex_idx(codec, 0x5a, 0x00, 0x954f); /* write gpio3 to high */ break; case HDA_GEN_PCM_ACT_CLOSE: alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */ break; } } static void alc287_s4_power_gpio3_default(struct hda_codec *codec) { if (is_s4_suspend(codec)) { alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */ } } static void alc287_fixup_lenovo_thinkpad_with_alc1318(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; static const struct coef_fw coefs[] = { WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC300), WRITE_COEF(0x28, 0x0001), WRITE_COEF(0x29, 0xb023), WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC301), WRITE_COEF(0x28, 0x0001), WRITE_COEF(0x29, 0xb023), }; if (action != HDA_FIXUP_ACT_PRE_PROBE) return; alc_update_coef_idx(codec, 0x10, 1<<11, 1<<11); alc_process_coef_fw(codec, coefs); spec->power_hook = alc287_s4_power_gpio3_default; spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook; } enum { ALC269_FIXUP_GPIO2, ALC269_FIXUP_SONY_VAIO, 
ALC275_FIXUP_SONY_VAIO_GPIO2, ALC269_FIXUP_DELL_M101Z, ALC269_FIXUP_SKU_IGNORE, ALC269_FIXUP_ASUS_G73JW, ALC269_FIXUP_ASUS_N7601ZM_PINS, ALC269_FIXUP_ASUS_N7601ZM, ALC269_FIXUP_LENOVO_EAPD, ALC275_FIXUP_SONY_HWEQ, ALC275_FIXUP_SONY_DISABLE_AAMIX, ALC271_FIXUP_DMIC, ALC269_FIXUP_PCM_44K, ALC269_FIXUP_STEREO_DMIC, ALC269_FIXUP_HEADSET_MIC, ALC269_FIXUP_QUANTA_MUTE, ALC269_FIXUP_LIFEBOOK, ALC269_FIXUP_LIFEBOOK_EXTMIC, ALC269_FIXUP_LIFEBOOK_HP_PIN, ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC, ALC269_FIXUP_AMIC, ALC269_FIXUP_DMIC, ALC269VB_FIXUP_AMIC, ALC269VB_FIXUP_DMIC, ALC269_FIXUP_HP_MUTE_LED, ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC269_FIXUP_HP_MUTE_LED_MIC2, ALC269_FIXUP_HP_MUTE_LED_MIC3, ALC269_FIXUP_HP_GPIO_LED, ALC269_FIXUP_HP_GPIO_MIC1_LED, ALC269_FIXUP_HP_LINE1_MIC1_LED, ALC269_FIXUP_INV_DMIC, ALC269_FIXUP_LENOVO_DOCK, ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, ALC269_FIXUP_NO_SHUTUP, ALC286_FIXUP_SONY_MIC_NO_PRESENCE, ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, ALC269_FIXUP_DELL1_LIMIT_INT_MIC_BOOST, ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET, ALC269_FIXUP_HEADSET_MODE, ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, ALC269_FIXUP_ASPIRE_HEADSET_MIC, ALC269_FIXUP_ASUS_X101_FUNC, ALC269_FIXUP_ASUS_X101_VERB, ALC269_FIXUP_ASUS_X101, ALC271_FIXUP_AMIC_MIC2, ALC271_FIXUP_HP_GATE_MIC_JACK, ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572, ALC269_FIXUP_ACER_AC700, ALC269_FIXUP_LIMIT_INT_MIC_BOOST, ALC269VB_FIXUP_ASUS_ZENBOOK, ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A, ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE, ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED, ALC269VB_FIXUP_ORDISSIMO_EVE2, ALC283_FIXUP_CHROME_BOOK, ALC283_FIXUP_SENSE_COMBO_JACK, ALC282_FIXUP_ASUS_TX300, ALC283_FIXUP_INT_MIC, ALC290_FIXUP_MONO_SPEAKERS, ALC290_FIXUP_MONO_SPEAKERS_HSJACK, ALC290_FIXUP_SUBWOOFER, ALC290_FIXUP_SUBWOOFER_HSJACK, ALC269_FIXUP_THINKPAD_ACPI, 
ALC269_FIXUP_DMIC_THINKPAD_ACPI, ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13, ALC269VC_FIXUP_INFINIX_Y4_MAX, ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO, ALC255_FIXUP_ACER_MIC_NO_PRESENCE, ALC255_FIXUP_ASUS_MIC_NO_PRESENCE, ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC255_FIXUP_DELL1_LIMIT_INT_MIC_BOOST, ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, ALC255_FIXUP_HEADSET_MODE, ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC, ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_FIXUP_TPT440_DOCK, ALC292_FIXUP_TPT440, ALC283_FIXUP_HEADSET_MIC, ALC255_FIXUP_MIC_MUTE_LED, ALC282_FIXUP_ASPIRE_V5_PINS, ALC269VB_FIXUP_ASPIRE_E1_COEF, ALC280_FIXUP_HP_GPIO4, ALC286_FIXUP_HP_GPIO_LED, ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, ALC280_FIXUP_HP_DOCK_PINS, ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, ALC280_FIXUP_HP_9480M, ALC245_FIXUP_HP_X360_AMP, ALC285_FIXUP_HP_SPECTRE_X360_EB1, ALC285_FIXUP_HP_ENVY_X360, ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, ALC288_FIXUP_DELL_XPS_13, ALC288_FIXUP_DISABLE_AAMIX, ALC292_FIXUP_DELL_E7X_AAMIX, ALC292_FIXUP_DELL_E7X, ALC292_FIXUP_DISABLE_AAMIX, ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE, ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, ALC275_FIXUP_DELL_XPS, ALC293_FIXUP_LENOVO_SPK_NOISE, ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, ALC255_FIXUP_DELL_SPK_NOISE, ALC225_FIXUP_DISABLE_MIC_VREF, ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC295_FIXUP_DISABLE_DAC3, ALC285_FIXUP_SPEAKER2_TO_DAC1, ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1, ALC285_FIXUP_ASUS_HEADSET_MIC, ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS, ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1, ALC285_FIXUP_ASUS_I2C_HEADSET_MIC, ALC280_FIXUP_HP_HEADSET_MIC, ALC221_FIXUP_HP_FRONT_MIC, ALC292_FIXUP_TPT460, ALC298_FIXUP_SPK_VOLUME, ALC298_FIXUP_LENOVO_SPK_VOLUME, ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER, ALC269_FIXUP_ATIV_BOOK_8, ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE, ALC221_FIXUP_HP_MIC_NO_PRESENCE, ALC256_FIXUP_ASUS_HEADSET_MODE, ALC256_FIXUP_ASUS_MIC, ALC256_FIXUP_ASUS_AIO_GPIO2, 
ALC233_FIXUP_ASUS_MIC_NO_PRESENCE, ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, ALC233_FIXUP_LENOVO_MULTI_CODECS, ALC233_FIXUP_ACER_HEADSET_MIC, ALC294_FIXUP_LENOVO_MIC_LOCATION, ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, ALC225_FIXUP_S3_POP_NOISE, ALC700_FIXUP_INTEL_REFERENCE, ALC274_FIXUP_DELL_BIND_DACS, ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, ALC298_FIXUP_TPT470_DOCK_FIX, ALC298_FIXUP_TPT470_DOCK, ALC255_FIXUP_DUMMY_LINEOUT_VERB, ALC255_FIXUP_DELL_HEADSET_MIC, ALC256_FIXUP_HUAWEI_MACH_WX9_PINS, ALC298_FIXUP_HUAWEI_MBX_STEREO, ALC295_FIXUP_HP_X360, ALC221_FIXUP_HP_HEADSET_MIC, ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, ALC295_FIXUP_HP_AUTO_MUTE, ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE, ALC294_FIXUP_ASUS_MIC, ALC294_FIXUP_ASUS_HEADSET_MIC, ALC294_FIXUP_ASUS_SPK, ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, ALC255_FIXUP_ACER_HEADSET_MIC, ALC295_FIXUP_CHROME_BOOK, ALC225_FIXUP_HEADSET_JACK, ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE, ALC225_FIXUP_WYSE_AUTO_MUTE, ALC225_FIXUP_WYSE_DISABLE_MIC_VREF, ALC286_FIXUP_ACER_AIO_HEADSET_MIC, ALC256_FIXUP_ASUS_HEADSET_MIC, ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, ALC255_FIXUP_PREDATOR_SUBWOOFER, ALC299_FIXUP_PREDATOR_SPK, ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, ALC289_FIXUP_DELL_SPK1, ALC289_FIXUP_DELL_SPK2, ALC289_FIXUP_DUAL_SPK, ALC289_FIXUP_RTK_AMP_DUAL_SPK, ALC294_FIXUP_SPK2_TO_DAC1, ALC294_FIXUP_ASUS_DUAL_SPK, ALC285_FIXUP_THINKPAD_X1_GEN7, ALC285_FIXUP_THINKPAD_HEADSET_JACK, ALC294_FIXUP_ASUS_ALLY, ALC294_FIXUP_ASUS_ALLY_X, ALC294_FIXUP_ASUS_ALLY_PINS, ALC294_FIXUP_ASUS_ALLY_VERBS, ALC294_FIXUP_ASUS_ALLY_SPEAKER, ALC294_FIXUP_ASUS_HPE, ALC294_FIXUP_ASUS_COEF_1B, ALC294_FIXUP_ASUS_GX502_HP, ALC294_FIXUP_ASUS_GX502_PINS, ALC294_FIXUP_ASUS_GX502_VERBS, ALC294_FIXUP_ASUS_GU502_HP, ALC294_FIXUP_ASUS_GU502_PINS, ALC294_FIXUP_ASUS_GU502_VERBS, ALC294_FIXUP_ASUS_G513_PINS, ALC285_FIXUP_ASUS_G533Z_PINS, ALC285_FIXUP_HP_GPIO_LED, ALC285_FIXUP_HP_MUTE_LED, ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED, 
ALC236_FIXUP_HP_MUTE_LED_COEFBIT2, ALC236_FIXUP_HP_GPIO_LED, ALC236_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, ALC236_FIXUP_LENOVO_INV_DMIC, ALC298_FIXUP_SAMSUNG_AMP, ALC298_FIXUP_SAMSUNG_AMP_V2_2_AMPS, ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS, ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, ALC269VC_FIXUP_ACER_HEADSET_MIC, ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE, ALC289_FIXUP_ASUS_GA401, ALC289_FIXUP_ASUS_GA502, ALC256_FIXUP_ACER_MIC_NO_PRESENCE, ALC285_FIXUP_HP_GPIO_AMP_INIT, ALC269_FIXUP_CZC_B20, ALC269_FIXUP_CZC_TMI, ALC269_FIXUP_CZC_L101, ALC269_FIXUP_LEMOTE_A1802, ALC269_FIXUP_LEMOTE_A190X, ALC256_FIXUP_INTEL_NUC8_RUGGED, ALC233_FIXUP_INTEL_NUC8_DMIC, ALC233_FIXUP_INTEL_NUC8_BOOST, ALC256_FIXUP_INTEL_NUC10, ALC255_FIXUP_XIAOMI_HEADSET_MIC, ALC274_FIXUP_HP_MIC, ALC274_FIXUP_HP_HEADSET_MIC, ALC274_FIXUP_HP_ENVY_GPIO, ALC256_FIXUP_ASUS_HPE, ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK, ALC287_FIXUP_HP_GPIO_LED, ALC256_FIXUP_HP_HEADSET_MIC, ALC245_FIXUP_HP_GPIO_LED, ALC236_FIXUP_DELL_AIO_HEADSET_MIC, ALC282_FIXUP_ACER_DISABLE_LINEOUT, ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST, ALC256_FIXUP_ACER_HEADSET_MIC, ALC285_FIXUP_IDEAPAD_S740_COEF, ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST, ALC295_FIXUP_ASUS_DACS, ALC295_FIXUP_HP_OMEN, ALC285_FIXUP_HP_SPECTRE_X360, ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, ALC623_FIXUP_LENOVO_THINKSTATION_P340, ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST, ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS, ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE, ALC287_FIXUP_YOGA7_14ITL_SPEAKERS, ALC298_FIXUP_LENOVO_C940_DUET7, ALC287_FIXUP_13S_GEN2_SPEAKERS, ALC256_FIXUP_SET_COEF_DEFAULTS, ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, ALC233_FIXUP_NO_AUDIO_JACK, ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME, ALC285_FIXUP_LEGION_Y9000X_SPEAKERS, ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, ALC287_FIXUP_LEGION_16ACHG6, ALC287_FIXUP_CS35L41_I2C_2, 
ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED,
	ALC287_FIXUP_CS35L41_I2C_4,
	ALC245_FIXUP_CS35L41_SPI_2,
	ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
	ALC245_FIXUP_CS35L41_SPI_4,
	ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED,
	ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED,
	ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE,
	ALC287_FIXUP_LEGION_16ITHG6,
	ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
	ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN,
	ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN,
	ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS,
	ALC236_FIXUP_DELL_DUAL_CODECS,
	ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
	ALC287_FIXUP_TAS2781_I2C,
	ALC287_FIXUP_YOGA7_14ARB7_I2C,
	ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
	ALC245_FIXUP_HP_X360_MUTE_LEDS,
	ALC287_FIXUP_THINKPAD_I2S_SPK,
	ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
	ALC2XX_FIXUP_HEADSET_MIC,
	ALC289_FIXUP_DELL_CS35L41_SPI_2,
	ALC294_FIXUP_CS35L41_I2C_2,
	ALC256_FIXUP_ACER_SFG16_MICMUTE_LED,
	ALC256_FIXUP_HEADPHONE_AMP_VOL,
	ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX,
	ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX,
	ALC285_FIXUP_ASUS_GA403U,
	ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC,
	ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1,
	ALC285_FIXUP_ASUS_GU605_SPI_2_HEADSET_MIC,
	ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1,
	ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318,
	ALC256_FIXUP_CHROME_BOOK,
	ALC245_FIXUP_CLEVO_NOISY_MIC,
	ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE,
	ALC233_FIXUP_MEDION_MTL_SPK,
	ALC294_FIXUP_BASS_SPEAKER_15,
};

/*
 * A special fixup for Lenovo C940 and Yoga Duet 7:
 * both have the very same PCI SSID, and we need to apply different fixups
 * depending on the codec ID.
 *
 * Registered as a fixup function itself; at apply time it selects the
 * real fixup ID from the codec's vendor ID and delegates to
 * __snd_hda_apply_fixup() so the whole chosen fixup chain runs.
 */
static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec,
					   const struct hda_fixup *fix,
					   int action)
{
	int id;

	/* ALC298 codec means the C940 model; anything else is the Duet 7 */
	if (codec->core.vendor_id == 0x10ec0298)
		id = ALC298_FIXUP_LENOVO_SPK_VOLUME;	/* C940 */
	else
		id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS;	/* Duet 7 */
	/* re-dispatch with the resolved fixup ID (depth starts at 0) */
	__snd_hda_apply_fixup(codec, id, action, 0);
}

/* Fixup table indexed by the ALC269_FIXUP_* / ALC2xx_FIXUP_* enum above */
static const struct hda_fixup alc269_fixups[] = {
	[ALC269_FIXUP_GPIO2] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc_fixup_gpio2,
	},
[ALC269_FIXUP_SONY_VAIO] = { .type = HDA_FIXUP_PINCTLS, .v.pins = (const struct hda_pintbl[]) { {0x19, PIN_VREFGRD}, {} } }, [ALC275_FIXUP_SONY_VAIO_GPIO2] = { .type = HDA_FIXUP_FUNC, .v.func = alc275_fixup_gpio4_off, .chained = true, .chain_id = ALC269_FIXUP_SONY_VAIO }, [ALC269_FIXUP_DELL_M101Z] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* Enables internal speaker */ {0x20, AC_VERB_SET_COEF_INDEX, 13}, {0x20, AC_VERB_SET_PROC_COEF, 0x4040}, {} } }, [ALC269_FIXUP_SKU_IGNORE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_sku_ignore, }, [ALC269_FIXUP_ASUS_G73JW] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x17, 0x99130111 }, /* subwoofer */ { } } }, [ALC269_FIXUP_ASUS_N7601ZM_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03A11050 }, { 0x1a, 0x03A11C30 }, { 0x21, 0x03211420 }, { } } }, [ALC269_FIXUP_ASUS_N7601ZM] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { {0x20, AC_VERB_SET_COEF_INDEX, 0x62}, {0x20, AC_VERB_SET_PROC_COEF, 0xa007}, {0x20, AC_VERB_SET_COEF_INDEX, 0x10}, {0x20, AC_VERB_SET_PROC_COEF, 0x8420}, {0x20, AC_VERB_SET_COEF_INDEX, 0x0f}, {0x20, AC_VERB_SET_PROC_COEF, 0x7774}, { } }, .chained = true, .chain_id = ALC269_FIXUP_ASUS_N7601ZM_PINS, }, [ALC269_FIXUP_LENOVO_EAPD] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { {0x14, AC_VERB_SET_EAPD_BTLENABLE, 0}, {} } }, [ALC275_FIXUP_SONY_HWEQ] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_hweq, .chained = true, .chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2 }, [ALC275_FIXUP_SONY_DISABLE_AAMIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = ALC269_FIXUP_SONY_VAIO }, [ALC271_FIXUP_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc271_fixup_dmic, }, [ALC269_FIXUP_PCM_44K] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_pcm_44k, .chained = true, .chain_id = ALC269_FIXUP_QUANTA_MUTE }, [ALC269_FIXUP_STEREO_DMIC] = { .type = HDA_FIXUP_FUNC, 
.v.func = alc269_fixup_stereo_dmic, }, [ALC269_FIXUP_HEADSET_MIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_headset_mic, }, [ALC269_FIXUP_QUANTA_MUTE] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_quanta_mute, }, [ALC269_FIXUP_LIFEBOOK] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1a, 0x2101103f }, /* dock line-out */ { 0x1b, 0x23a11040 }, /* dock mic-in */ { } }, .chained = true, .chain_id = ALC269_FIXUP_QUANTA_MUTE }, [ALC269_FIXUP_LIFEBOOK_EXTMIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1903c }, /* headset mic, with jack detect */ { } }, }, [ALC269_FIXUP_LIFEBOOK_HP_PIN] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x21, 0x0221102f }, /* HP out */ { } }, }, [ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_pincfg_no_hp_to_lineout, }, [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_pincfg_U7x7_headset_mic, }, [ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x90170151 }, /* use as internal speaker (LFE) */ { 0x1b, 0x90170152 }, /* use as internal speaker (back) */ { } }, .chained = true, .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST }, [ALC269VC_FIXUP_INFINIX_Y4_MAX] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x90170150 }, /* use as internal speaker */ { } }, .chained = true, .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST }, [ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x03a19020 }, /* headset mic */ { 0x1b, 0x90170150 }, /* speaker */ { } }, }, [ALC269_FIXUP_AMIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x15, 0x0121401f }, /* HP out */ { 0x18, 0x01a19c20 }, /* mic */ { 0x19, 0x99a3092f }, /* int-mic */ { } }, }, [ALC269_FIXUP_DMIC] = { .type = 
HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x12, 0x99a3092f }, /* int-mic */ { 0x14, 0x99130110 }, /* speaker */ { 0x15, 0x0121401f }, /* HP out */ { 0x18, 0x01a19c20 }, /* mic */ { } }, }, [ALC269VB_FIXUP_AMIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x18, 0x01a19c20 }, /* mic */ { 0x19, 0x99a3092f }, /* int-mic */ { 0x21, 0x0121401f }, /* HP out */ { } }, }, [ALC269VB_FIXUP_DMIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x12, 0x99a3092f }, /* int-mic */ { 0x14, 0x99130110 }, /* speaker */ { 0x18, 0x01a19c20 }, /* mic */ { 0x21, 0x0121401f }, /* HP out */ { } }, }, [ALC269_FIXUP_HP_MUTE_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_hp_mute_led, }, [ALC269_FIXUP_HP_MUTE_LED_MIC1] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_hp_mute_led_mic1, }, [ALC269_FIXUP_HP_MUTE_LED_MIC2] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_hp_mute_led_mic2, }, [ALC269_FIXUP_HP_MUTE_LED_MIC3] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_hp_mute_led_mic3, .chained = true, .chain_id = ALC295_FIXUP_HP_AUTO_MUTE }, [ALC269_FIXUP_HP_GPIO_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_hp_gpio_led, }, [ALC269_FIXUP_HP_GPIO_MIC1_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_hp_gpio_mic1_led, }, [ALC269_FIXUP_HP_LINE1_MIC1_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_hp_line1_mic1_led, }, [ALC269_FIXUP_INV_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, }, [ALC269_FIXUP_NO_SHUTUP] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_no_shutup, }, [ALC269_FIXUP_LENOVO_DOCK] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x23a11040 }, /* dock mic */ { 0x1b, 0x2121103f }, /* dock headphone */ { } }, .chained = true, .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT }, [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost, .chained = true, 
.chain_id = ALC269_FIXUP_LENOVO_DOCK, }, [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_pincfg_no_hp_to_lineout, .chained = true, .chain_id = ALC269_FIXUP_THINKPAD_ACPI, }, [ALC269_FIXUP_DELL1_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, [ALC269_FIXUP_DELL1_LIMIT_INT_MIC_BOOST] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost, .chained = true, .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC269_FIXUP_DELL2_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x21014020 }, /* dock line out */ { 0x19, 0x21a19030 }, /* dock mic */ { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC269_FIXUP_DELL4_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { 0x1b, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, [ALC269_FIXUP_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode, .chained = true, .chain_id = ALC255_FIXUP_MIC_MUTE_LED }, [ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_no_hp_mic, }, [ALC269_FIXUP_ASPIRE_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 
0x19, 0x01a1913c }, /* headset mic w/o jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE, }, [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC256_FIXUP_HUAWEI_MACH_WX9_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { {0x12, 0x90a60130}, {0x13, 0x40000000}, {0x14, 0x90170110}, {0x18, 0x411111f0}, {0x19, 0x04a11040}, {0x1a, 0x411111f0}, {0x1b, 0x90170112}, {0x1d, 0x40759a05}, {0x1e, 0x411111f0}, {0x21, 0x04211020}, { } }, .chained = true, .chain_id = ALC255_FIXUP_MIC_MUTE_LED }, [ALC298_FIXUP_HUAWEI_MBX_STEREO] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_huawei_mbx_stereo, .chained = true, .chain_id = ALC255_FIXUP_MIC_MUTE_LED }, [ALC269_FIXUP_ASUS_X101_FUNC] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_x101_headset_mic, }, [ALC269_FIXUP_ASUS_X101_VERB] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0}, {0x20, AC_VERB_SET_COEF_INDEX, 0x08}, {0x20, AC_VERB_SET_PROC_COEF, 0x0310}, { } }, .chained = true, .chain_id = ALC269_FIXUP_ASUS_X101_FUNC }, [ALC269_FIXUP_ASUS_X101] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x04a1182c }, /* Headset mic */ { } }, .chained = true, .chain_id = ALC269_FIXUP_ASUS_X101_VERB }, [ALC271_FIXUP_AMIC_MIC2] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x19, 0x01a19c20 }, /* mic */ { 0x1b, 0x99a7012f }, /* int-mic */ { 0x21, 0x0121401f }, /* HP out */ { } }, }, [ALC271_FIXUP_HP_GATE_MIC_JACK] = { .type = HDA_FIXUP_FUNC, .v.func = alc271_hp_gate_mic_jack, .chained = true, .chain_id = ALC271_FIXUP_AMIC_MIC2, }, [ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost, .chained = 
true, .chain_id = ALC271_FIXUP_HP_GATE_MIC_JACK, }, [ALC269_FIXUP_ACER_AC700] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x12, 0x99a3092f }, /* int-mic */ { 0x14, 0x99130110 }, /* speaker */ { 0x18, 0x03a11c20 }, /* mic */ { 0x1e, 0x0346101e }, /* SPDIF1 */ { 0x21, 0x0321101f }, /* HP out */ { } }, .chained = true, .chain_id = ALC271_FIXUP_DMIC, }, [ALC269_FIXUP_LIMIT_INT_MIC_BOOST] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost, .chained = true, .chain_id = ALC269_FIXUP_THINKPAD_ACPI, }, [ALC269VB_FIXUP_ASUS_ZENBOOK] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost, .chained = true, .chain_id = ALC269VB_FIXUP_DMIC, }, [ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* class-D output amp +5dB */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x12 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x2800 }, {} }, .chained = true, .chain_id = ALC269VB_FIXUP_ASUS_ZENBOOK, }, [ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a110f0 }, /* use as headset mic */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost, .chained = true, .chain_id = ALC269_FIXUP_HP_MUTE_LED_MIC1, }, [ALC269VB_FIXUP_ORDISSIMO_EVE2] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x12, 0x99a3092f }, /* int-mic */ { 0x18, 0x03a11d20 }, /* mic */ { 0x19, 0x411111f0 }, /* Unused bogus pin */ { } }, }, [ALC283_FIXUP_CHROME_BOOK] = { .type = HDA_FIXUP_FUNC, .v.func = alc283_fixup_chromebook, }, [ALC283_FIXUP_SENSE_COMBO_JACK] = { .type = HDA_FIXUP_FUNC, .v.func = alc283_fixup_sense_combo_jack, .chained = true, .chain_id = ALC283_FIXUP_CHROME_BOOK, }, [ALC282_FIXUP_ASUS_TX300] = { .type = HDA_FIXUP_FUNC, .v.func = alc282_fixup_asus_tx300, }, [ALC283_FIXUP_INT_MIC] = { .type = HDA_FIXUP_VERBS, 
.v.verbs = (const struct hda_verb[]) { {0x20, AC_VERB_SET_COEF_INDEX, 0x1a}, {0x20, AC_VERB_SET_PROC_COEF, 0x0011}, { } }, .chained = true, .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST }, [ALC290_FIXUP_SUBWOOFER_HSJACK] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x17, 0x90170112 }, /* subwoofer */ { } }, .chained = true, .chain_id = ALC290_FIXUP_MONO_SPEAKERS_HSJACK, }, [ALC290_FIXUP_SUBWOOFER] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x17, 0x90170112 }, /* subwoofer */ { } }, .chained = true, .chain_id = ALC290_FIXUP_MONO_SPEAKERS, }, [ALC290_FIXUP_MONO_SPEAKERS] = { .type = HDA_FIXUP_FUNC, .v.func = alc290_fixup_mono_speakers, }, [ALC290_FIXUP_MONO_SPEAKERS_HSJACK] = { .type = HDA_FIXUP_FUNC, .v.func = alc290_fixup_mono_speakers, .chained = true, .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, }, [ALC269_FIXUP_THINKPAD_ACPI] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_thinkpad_acpi, .chained = true, .chain_id = ALC269_FIXUP_SKU_IGNORE, }, [ALC269_FIXUP_DMIC_THINKPAD_ACPI] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, .chained = true, .chain_id = ALC269_FIXUP_THINKPAD_ACPI, }, [ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC255_FIXUP_HEADSET_MODE }, [ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC255_FIXUP_HEADSET_MODE }, [ALC255_FIXUP_DELL1_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC255_FIXUP_HEADSET_MODE }, 
[ALC255_FIXUP_DELL1_LIMIT_INT_MIC_BOOST] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost, .chained = true, .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC255_FIXUP_DELL2_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC255_FIXUP_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_alc255, .chained = true, .chain_id = ALC255_FIXUP_MIC_MUTE_LED }, [ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_alc255_no_hp_mic, }, [ALC293_FIXUP_DELL1_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, [ALC292_FIXUP_TPT440_DOCK] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_tpt440_dock, .chained = true, .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST }, [ALC292_FIXUP_TPT440] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = ALC292_FIXUP_TPT440_DOCK, }, [ALC283_FIXUP_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x04a110f0 }, { }, }, }, [ALC255_FIXUP_MIC_MUTE_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_micmute_led, }, [ALC282_FIXUP_ASPIRE_V5_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x12, 0x90a60130 }, { 0x14, 0x90170110 }, { 0x17, 0x40000008 }, { 0x18, 0x411111f0 }, { 0x19, 0x01a1913c }, { 0x1a, 0x411111f0 }, { 0x1b, 0x411111f0 }, { 0x1d, 0x40f89b2d }, { 0x1e, 0x411111f0 }, { 0x21, 0x0321101f }, { }, }, }, [ALC269VB_FIXUP_ASPIRE_E1_COEF] = { .type = HDA_FIXUP_FUNC, .v.func = alc269vb_fixup_aspire_e1_coef, }, 
[ALC280_FIXUP_HP_GPIO4] = { .type = HDA_FIXUP_FUNC, .v.func = alc280_fixup_hp_gpio4, }, [ALC286_FIXUP_HP_GPIO_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc286_fixup_hp_gpio_led, }, [ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY] = { .type = HDA_FIXUP_FUNC, .v.func = alc280_fixup_hp_gpio2_mic_hotkey, }, [ALC280_FIXUP_HP_DOCK_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x21011020 }, /* line-out */ { 0x1a, 0x01a1903c }, /* headset mic */ { 0x18, 0x2181103f }, /* line-in */ { }, }, .chained = true, .chain_id = ALC280_FIXUP_HP_GPIO4 }, [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x21011020 }, /* line-out */ { 0x18, 0x2181103f }, /* line-in */ { }, }, .chained = true, .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED }, [ALC280_FIXUP_HP_9480M] = { .type = HDA_FIXUP_FUNC, .v.func = alc280_fixup_hp_9480m, }, [ALC245_FIXUP_HP_X360_AMP] = { .type = HDA_FIXUP_FUNC, .v.func = alc245_fixup_hp_x360_amp, .chained = true, .chain_id = ALC245_FIXUP_HP_GPIO_LED }, [ALC288_FIXUP_DELL_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_dell_alc288, .chained = true, .chain_id = ALC255_FIXUP_MIC_MUTE_LED }, [ALC288_FIXUP_DELL1_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC288_FIXUP_DELL_HEADSET_MODE }, [ALC288_FIXUP_DISABLE_AAMIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC288_FIXUP_DELL_XPS_13] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_dell_xps13, .chained = true, .chain_id = ALC288_FIXUP_DISABLE_AAMIX }, [ALC292_FIXUP_DISABLE_AAMIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = 
ALC269_FIXUP_DELL2_MIC_NO_PRESENCE }, [ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC292_FIXUP_DELL_E7X_AAMIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_dell_xps13, .chained = true, .chain_id = ALC292_FIXUP_DISABLE_AAMIX }, [ALC292_FIXUP_DELL_E7X] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_micmute_led, /* micmute fixup must be applied at last */ .chained_before = true, .chain_id = ALC292_FIXUP_DELL_E7X_AAMIX, }, [ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a1913c }, /* headset mic w/o jack detect */ { } }, .chained_before = true, .chain_id = ALC269_FIXUP_HEADSET_MODE, }, [ALC298_FIXUP_DELL1_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, [ALC275_FIXUP_DELL_XPS] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* Enables internal speaker */ {0x20, AC_VERB_SET_COEF_INDEX, 0x1f}, {0x20, AC_VERB_SET_PROC_COEF, 0x00c0}, {0x20, AC_VERB_SET_COEF_INDEX, 0x30}, {0x20, AC_VERB_SET_PROC_COEF, 0x00b1}, {} } }, [ALC293_FIXUP_LENOVO_SPK_NOISE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = ALC269_FIXUP_THINKPAD_ACPI }, [ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_fixup_lenovo_line2_mic_hotkey, }, [ALC233_FIXUP_INTEL_NUC8_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = 
alc_fixup_inv_dmic, .chained = true, .chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST, }, [ALC233_FIXUP_INTEL_NUC8_BOOST] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost }, [ALC255_FIXUP_DELL_SPK_NOISE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC225_FIXUP_DISABLE_MIC_VREF] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_mic_vref, .chained = true, .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* Disable pass-through path for FRONT 14h */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 }, {} }, .chained = true, .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF }, [ALC280_FIXUP_HP_HEADSET_MIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC, }, [ALC221_FIXUP_HP_FRONT_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x02a19020 }, /* Front Mic */ { } }, }, [ALC292_FIXUP_TPT460] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_tpt440_dock, .chained = true, .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE, }, [ALC298_FIXUP_SPK_VOLUME] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_speaker_volume, .chained = true, .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, }, [ALC298_FIXUP_LENOVO_SPK_VOLUME] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_speaker_volume, }, [ALC295_FIXUP_DISABLE_DAC3] = { .type = HDA_FIXUP_FUNC, .v.func = alc295_fixup_disable_dac3, }, [ALC285_FIXUP_SPEAKER2_TO_DAC1] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_speaker2_to_dac1, .chained = true, .chain_id = ALC269_FIXUP_THINKPAD_ACPI }, [ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_speaker2_to_dac1, .chained = true, .chain_id = ALC245_FIXUP_CS35L41_SPI_2 }, [ALC285_FIXUP_ASUS_HEADSET_MIC] = { .type = 
HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a11050 }, { 0x1b, 0x03a11c30 }, { } }, .chained = true, .chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1 }, [ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x90170120 }, { } }, .chained = true, .chain_id = ALC285_FIXUP_ASUS_HEADSET_MIC }, [ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_speaker2_to_dac1, .chained = true, .chain_id = ALC287_FIXUP_CS35L41_I2C_2 }, [ALC285_FIXUP_ASUS_I2C_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a11050 }, { 0x1b, 0x03a11c30 }, { } }, .chained = true, .chain_id = ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1 }, [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x90170151 }, { } }, .chained = true, .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC269_FIXUP_ATIV_BOOK_8] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_auto_mute_via_amp, .chained = true, .chain_id = ALC269_FIXUP_NO_SHUTUP }, [ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { 0x1a, 0x01813030 }, /* use as headphone mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, [ALC221_FIXUP_HP_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, [ALC256_FIXUP_ASUS_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode, }, [ALC256_FIXUP_ASUS_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x13, 0x90a60160 }, /* use as 
internal mic */ { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE }, [ALC256_FIXUP_ASUS_AIO_GPIO2] = { .type = HDA_FIXUP_FUNC, /* Set up GPIO2 for the speaker amp */ .v.func = alc_fixup_gpio4, }, [ALC233_FIXUP_ASUS_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* Enables internal speaker */ {0x20, AC_VERB_SET_COEF_INDEX, 0x40}, {0x20, AC_VERB_SET_PROC_COEF, 0x8800}, {} }, .chained = true, .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE }, [ALC233_FIXUP_LENOVO_MULTI_CODECS] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_alc662_fixup_lenovo_dual_codecs, .chained = true, .chain_id = ALC269_FIXUP_GPIO2 }, [ALC233_FIXUP_ACER_HEADSET_MIC] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 }, { } }, .chained = true, .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE }, [ALC294_FIXUP_LENOVO_MIC_LOCATION] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { /* Change the mic location from front to right, otherwise there are two front mics with the same name, pulseaudio can't handle them. This is just a temporary workaround, after applying this fixup, there will be one "Front Mic" and one "Mic" in this machine. 
*/ { 0x1a, 0x04a19040 }, { } }, }, [ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x0101102f }, /* Rear Headset HP */ { 0x19, 0x02a1913c }, /* use as Front headset mic, without its own jack detect */ { 0x1a, 0x01a19030 }, /* Rear Headset MIC */ { 0x1b, 0x02011020 }, { } }, .chained = true, .chain_id = ALC225_FIXUP_S3_POP_NOISE }, [ALC225_FIXUP_S3_POP_NOISE] = { .type = HDA_FIXUP_FUNC, .v.func = alc225_fixup_s3_pop_noise, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC700_FIXUP_INTEL_REFERENCE] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* Enables internal speaker */ {0x20, AC_VERB_SET_COEF_INDEX, 0x45}, {0x20, AC_VERB_SET_PROC_COEF, 0x5289}, {0x20, AC_VERB_SET_COEF_INDEX, 0x4A}, {0x20, AC_VERB_SET_PROC_COEF, 0x001b}, {0x58, AC_VERB_SET_COEF_INDEX, 0x00}, {0x58, AC_VERB_SET_PROC_COEF, 0x3888}, {0x20, AC_VERB_SET_COEF_INDEX, 0x6f}, {0x20, AC_VERB_SET_PROC_COEF, 0x2c0b}, {} } }, [ALC274_FIXUP_DELL_BIND_DACS] = { .type = HDA_FIXUP_FUNC, .v.func = alc274_fixup_bind_dacs, .chained = true, .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC274_FIXUP_DELL_AIO_LINEOUT_VERB] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x0401102f }, { } }, .chained = true, .chain_id = ALC274_FIXUP_DELL_BIND_DACS }, [ALC298_FIXUP_TPT470_DOCK_FIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_tpt470_dock, .chained = true, .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE }, [ALC298_FIXUP_TPT470_DOCK] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_tpt470_dacs, .chained = true, .chain_id = ALC298_FIXUP_TPT470_DOCK_FIX }, [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x0201101f }, { } }, .chained = true, .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC255_FIXUP_DELL_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as 
headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC295_FIXUP_HP_X360] = { .type = HDA_FIXUP_FUNC, .v.func = alc295_fixup_hp_top_speakers, .chained = true, .chain_id = ALC269_FIXUP_HP_MUTE_LED_MIC3 }, [ALC221_FIXUP_HP_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x0181313f}, { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_invalidate_dacs, .chained = true, .chain_id = ALC269_FIXUP_THINKPAD_ACPI }, [ALC295_FIXUP_HP_AUTO_MUTE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_auto_mute_via_amp, }, [ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x13, 0x90a60160 }, /* use as internal mic */ { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1103c }, /* use as headset mic */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_SPK] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* Set EAPD high */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 }, { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f }, { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 }, { } }, .chained = true, .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC }, [ALC295_FIXUP_CHROME_BOOK] = { .type = HDA_FIXUP_FUNC, .v.func = alc295_fixup_chromebook, .chained = true, .chain_id = ALC225_FIXUP_HEADSET_JACK }, [ALC225_FIXUP_HEADSET_JACK] = { .type = HDA_FIXUP_FUNC, 
.v.func = alc_fixup_headset_jack, }, [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { /* Disable PCBEEP-IN passthrough */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 }, { } }, .chained = true, .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE }, [ALC255_FIXUP_ACER_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a11130 }, { 0x1a, 0x90a60140 }, /* use as internal mic */ { } }, .chained = true, .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x01011020 }, /* Rear Line out */ { 0x19, 0x01a1913c }, /* use as Front headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC225_FIXUP_WYSE_AUTO_MUTE }, [ALC225_FIXUP_WYSE_AUTO_MUTE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_auto_mute_via_amp, .chained = true, .chain_id = ALC225_FIXUP_WYSE_DISABLE_MIC_VREF }, [ALC225_FIXUP_WYSE_DISABLE_MIC_VREF] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_mic_vref, .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f }, { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 }, { } }, .chained = true, .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE }, [ALC256_FIXUP_ASUS_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a11020 }, /* headset mic with jack detect */ { } }, .chained = true, .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE }, 
/*
 * alc269_fixups[] (continued).  Each entry either overrides BIOS pin
 * default configs (HDA_FIXUP_PINS), sends raw codec verbs
 * (HDA_FIXUP_VERBS), or runs a callback (HDA_FIXUP_FUNC).  Entries with
 * .chained = true continue with the fixup named by .chain_id.
 */
[ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
		{ }
	},
	.chained = true,
	.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
},
[ALC255_FIXUP_PREDATOR_SUBWOOFER] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x17, 0x90170151 }, /* use as internal speaker (LFE) */
		{ 0x1b, 0x90170152 }, /* use as internal speaker (back) */
		/*
		 * FIX: the zero terminator was missing here.  hda_pintbl
		 * lists are sentinel-terminated (every other .v.pins table
		 * in this file ends with "{ }"), so without it the pin
		 * parser would read past the end of this array.
		 */
		{ }
	}
},
[ALC299_FIXUP_PREDATOR_SPK] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		/*
		 * 0x9017xxxx is a speaker pincfg (cf. the other pins
		 * commented "speaker" in this table); the previous
		 * "headset mic" comment was a copy-paste error.
		 */
		{ 0x21, 0x90170150 }, /* use as internal speaker */
		{ }
	}
},
[ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x04a11040 },
		{ 0x21, 0x04211020 },
		{ }
	},
	.chained = true,
	.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
},
[ALC289_FIXUP_DELL_SPK1] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x14, 0x90170140 },
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
},
[ALC289_FIXUP_DELL_SPK2] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x17, 0x90170130 }, /* bass spk */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
},
[ALC289_FIXUP_DUAL_SPK] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_speaker2_to_dac1,
	.chained = true,
	.chain_id = ALC289_FIXUP_DELL_SPK2
},
[ALC289_FIXUP_RTK_AMP_DUAL_SPK] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_speaker2_to_dac1,
	.chained = true,
	.chain_id = ALC289_FIXUP_DELL_SPK1
},
[ALC294_FIXUP_SPK2_TO_DAC1] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_speaker2_to_dac1,
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
},
[ALC294_FIXUP_ASUS_DUAL_SPK] = {
	.type = HDA_FIXUP_FUNC,
	/* The GPIO must be pulled to initialize the AMP */
	.v.func = alc_fixup_gpio4,
	.chained = true,
	.chain_id = ALC294_FIXUP_SPK2_TO_DAC1
},
[ALC294_FIXUP_ASUS_ALLY] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_i2c_two,
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_ALLY_PINS
},
[ALC294_FIXUP_ASUS_ALLY_X] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = tas2781_fixup_i2c,
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_ALLY_PINS
},
[ALC294_FIXUP_ASUS_ALLY_PINS] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x03a11050 },
		{ 0x1a, 0x03a11c30 },
		{ 0x21, 0x03211420 },
		{ }
	},
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_ALLY_VERBS
},
[ALC294_FIXUP_ASUS_ALLY_VERBS] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x46 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0004 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x47 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xa47a },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x49 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0049 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x4a },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x201b },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x6b },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x4278 },
		{ }
	},
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_ALLY_SPEAKER
},
[ALC294_FIXUP_ASUS_ALLY_SPEAKER] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_speaker2_to_dac1,
},
[ALC285_FIXUP_THINKPAD_X1_GEN7] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_thinkpad_x1_gen7,
	.chained = true,
	.chain_id = ALC269_FIXUP_THINKPAD_ACPI
},
[ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc_fixup_headset_jack,
	.chained = true,
	.chain_id = ALC285_FIXUP_THINKPAD_X1_GEN7
},
[ALC294_FIXUP_ASUS_HPE] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		/* Set EAPD high */
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
		{ }
	},
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
},
[ALC294_FIXUP_ASUS_GX502_PINS] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x03a11050 }, /* front HP mic */
		{ 0x1a, 0x01a11830 }, /* rear external mic */
		{ 0x21, 0x03211020 }, /* front HP out */
		{ }
	},
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_GX502_VERBS
},
[ALC294_FIXUP_ASUS_GX502_VERBS] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		/* set 0x15 to HP-OUT ctrl */
		{ 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
		/* unmute the 0x15 amp */
		{ 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
		{ }
	},
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_GX502_HP
},
[ALC294_FIXUP_ASUS_GX502_HP] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc294_fixup_gx502_hp,
},
[ALC294_FIXUP_ASUS_GU502_PINS] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x01a11050 }, /* rear HP mic */
		{ 0x1a, 0x01a11830 }, /* rear external mic */
		{ 0x21, 0x012110f0 }, /* rear HP out */
		{ }
	},
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_GU502_VERBS
},
[ALC294_FIXUP_ASUS_GU502_VERBS] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		/* set 0x15 to HP-OUT ctrl */
		{ 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
		/* unmute the 0x15 amp */
		{ 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
		/* set 0x1b to HP-OUT */
		{ 0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
		{ }
	},
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_GU502_HP
},
[ALC294_FIXUP_ASUS_GU502_HP] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc294_fixup_gu502_hp,
},
[ALC294_FIXUP_ASUS_G513_PINS] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x03a11050 }, /* front HP mic */
		{ 0x1a, 0x03a11c30 }, /* rear external mic */
		{ 0x21, 0x03211420 }, /* front HP out */
		{ }
	},
},
[ALC285_FIXUP_ASUS_G533Z_PINS] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x14, 0x90170152 }, /* Speaker Surround Playback Switch */
		{ 0x19, 0x03a19020 }, /* Mic Boost Volume */
		{ 0x1a, 0x03a11c30 }, /* Mic Boost Volume */
		{ 0x1e, 0x90170151 }, /* Rear jack, IN OUT EAPD Detect */
		{ 0x21, 0x03211420 },
		{ }
	},
},
[ALC294_FIXUP_ASUS_COEF_1B] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		/* Set bit 10 to correct noisy output after reboot from
		 * Windows 10 (due to pop noise reduction?) */
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x1b },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b },
		{ }
	},
	.chained = true,
	.chain_id = ALC289_FIXUP_ASUS_GA401,
},
[ALC285_FIXUP_HP_GPIO_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_hp_gpio_led,
},
[ALC285_FIXUP_HP_MUTE_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_hp_mute_led,
},
[ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_hp_spectre_x360_mute_led,
},
[ALC236_FIXUP_HP_MUTE_LED_COEFBIT2] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc236_fixup_hp_mute_led_coefbit2,
},
[ALC236_FIXUP_HP_GPIO_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc236_fixup_hp_gpio_led,
},
[ALC236_FIXUP_HP_MUTE_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc236_fixup_hp_mute_led,
},
[ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc236_fixup_hp_mute_led_micmute_vref,
},
[ALC236_FIXUP_LENOVO_INV_DMIC] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc_fixup_inv_dmic,
	.chained = true,
	.chain_id = ALC283_FIXUP_INT_MIC,
},
[ALC298_FIXUP_SAMSUNG_AMP] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc298_fixup_samsung_amp,
	.chained = true,
	.chain_id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET
},
[ALC298_FIXUP_SAMSUNG_AMP_V2_2_AMPS] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc298_fixup_samsung_amp_v2_2_amps
},
[ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc298_fixup_samsung_amp_v2_4_amps
},
[ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		{ 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc5 },
		{ }
	},
},
[ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x08 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2fcf },
		{ }
	},
},
[ALC295_FIXUP_ASUS_MIC_NO_PRESENCE] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE
},
[ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x14, 0x90100120 }, /* use as internal speaker */
		{ 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */
		{ 0x1a, 0x01011020 }, /* use as line out */
		{ },
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MIC
},
[ALC269VC_FIXUP_ACER_HEADSET_MIC] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x18, 0x02a11030 }, /* use as headset mic */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MIC
},
[ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MIC
},
[ALC289_FIXUP_ASUS_GA401] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc289_fixup_asus_ga401,
	.chained = true,
	.chain_id = ALC289_FIXUP_ASUS_GA502,
},
[ALC289_FIXUP_ASUS_GA502] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x03a11020 }, /* headset mic with jack detect */
		{ }
	},
},
[ALC256_FIXUP_ACER_MIC_NO_PRESENCE] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x02a11120 }, /* use as headset mic, without its own jack detect */
		{ }
	},
	.chained = true,
	.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
},
[ALC285_FIXUP_HP_GPIO_AMP_INIT] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_hp_gpio_amp_init,
	.chained = true,
	.chain_id = ALC285_FIXUP_HP_GPIO_LED
},
[ALC269_FIXUP_CZC_B20] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x12, 0x411111f0 },
		{ 0x14, 0x90170110 }, /* speaker */
		{ 0x15, 0x032f1020 }, /* HP out */
		{ 0x17, 0x411111f0 },
		{ 0x18, 0x03ab1040 }, /* mic */
		{ 0x19, 0xb7a7013f },
		{ 0x1a, 0x0181305f },
		{ 0x1b, 0x411111f0 },
		{ 0x1d, 0x411111f0 },
		{ 0x1e, 0x411111f0 },
		{ }
	},
	.chain_id = ALC269_FIXUP_DMIC,
},
/*
 * alc269_fixups[] (continued): board-specific pin-config tables and
 * COEF/verb sequences.  PINS entries override BIOS pin defaults, VERBS
 * entries send raw codec verbs, FUNC entries run a C callback.
 */
[ALC269_FIXUP_CZC_TMI] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x12, 0x4000c000 },
		{ 0x14, 0x90170110 }, /* speaker */
		{ 0x15, 0x0421401f }, /* HP out */
		{ 0x17, 0x411111f0 },
		{ 0x18, 0x04a19020 }, /* mic */
		{ 0x19, 0x411111f0 },
		{ 0x1a, 0x411111f0 },
		{ 0x1b, 0x411111f0 },
		{ 0x1d, 0x40448505 },
		{ 0x1e, 0x411111f0 },
		{ 0x20, 0x8000ffff },
		{ }
	},
	/* NOTE(review): the CZC/Lemote entries set .chain_id without
	 * .chained = true, unlike every other chained entry in this
	 * table — confirm this is intentional. */
	.chain_id = ALC269_FIXUP_DMIC,
},
[ALC269_FIXUP_CZC_L101] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x12, 0x40000000 },
		{ 0x14, 0x01014010 }, /* speaker */
		{ 0x15, 0x411111f0 }, /* HP out */
		{ 0x16, 0x411111f0 },
		{ 0x18, 0x01a19020 }, /* mic */
		{ 0x19, 0x02a19021 },
		{ 0x1a, 0x0181302f },
		{ 0x1b, 0x0221401f },
		{ 0x1c, 0x411111f0 },
		{ 0x1d, 0x4044c601 },
		{ 0x1e, 0x411111f0 },
		{ }
	},
	.chain_id = ALC269_FIXUP_DMIC,
},
[ALC269_FIXUP_LEMOTE_A1802] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x12, 0x40000000 },
		{ 0x14, 0x90170110 }, /* speaker */
		{ 0x17, 0x411111f0 },
		{ 0x18, 0x03a19040 }, /* mic1 */
		{ 0x19, 0x90a70130 }, /* mic2 */
		{ 0x1a, 0x411111f0 },
		{ 0x1b, 0x411111f0 },
		{ 0x1d, 0x40489d2d },
		{ 0x1e, 0x411111f0 },
		{ 0x20, 0x0003ffff },
		{ 0x21, 0x03214020 },
		{ }
	},
	.chain_id = ALC269_FIXUP_DMIC,
},
[ALC269_FIXUP_LEMOTE_A190X] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x14, 0x99130110 }, /* speaker */
		{ 0x15, 0x0121401f }, /* HP out */
		{ 0x18, 0x01a19c20 }, /* rear mic */
		{ 0x19, 0x99a3092f }, /* front mic */
		{ 0x1b, 0x0201401f }, /* front lineout */
		{ }
	},
	.chain_id = ALC269_FIXUP_DMIC,
},
[ALC256_FIXUP_INTEL_NUC8_RUGGED] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x1b, 0x01a1913c }, /* use as headset mic, without its own jack detect */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE
},
[ALC256_FIXUP_INTEL_NUC10] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE
},
[ALC255_FIXUP_XIAOMI_HEADSET_MIC] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
		{ }
	},
	.chained = true,
	.chain_id = ALC289_FIXUP_ASUS_GA502
},
[ALC274_FIXUP_HP_MIC] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
		{ }
	},
},
[ALC274_FIXUP_HP_HEADSET_MIC] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc274_fixup_hp_headset_mic,
	.chained = true,
	.chain_id = ALC274_FIXUP_HP_MIC
},
[ALC274_FIXUP_HP_ENVY_GPIO] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc274_fixup_hp_envy_gpio,
},
[ALC256_FIXUP_ASUS_HPE] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		/* Set EAPD high */
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x7778 },
		{ }
	},
	.chained = true,
	.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
},
[ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc_fixup_headset_jack,
	.chained = true,
	.chain_id = ALC269_FIXUP_THINKPAD_ACPI
},
[ALC287_FIXUP_HP_GPIO_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_hp_gpio_led,
},
[ALC256_FIXUP_HP_HEADSET_MIC] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc274_fixup_hp_headset_mic,
},
[ALC236_FIXUP_DELL_AIO_HEADSET_MIC] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc_fixup_no_int_mic,
	.chained = true,
	.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
},
[ALC282_FIXUP_ACER_DISABLE_LINEOUT] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x1b, 0x411111f0 },
		{ 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
		{ },
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE
},
[ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc269_fixup_limit_int_mic_boost,
	.chained = true,
	.chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
},
[ALC256_FIXUP_ACER_HEADSET_MIC] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x02a1113c }, /* use as headset mic, without its own jack detect */
		{ 0x1a, 0x90a1092f }, /* use as internal mic */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
[ALC285_FIXUP_IDEAPAD_S740_COEF] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_ideapad_s740_coef,
	.chained = true,
	.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
},
[ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc269_fixup_limit_int_mic_boost,
	.chained = true,
	.chain_id = ALC285_FIXUP_HP_MUTE_LED,
},
[ALC295_FIXUP_ASUS_DACS] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc295_fixup_asus_dacs,
},
[ALC295_FIXUP_HP_OMEN] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x12, 0xb7a60130 },
		{ 0x13, 0x40000000 },
		{ 0x14, 0x411111f0 },
		{ 0x16, 0x411111f0 },
		{ 0x17, 0x90170110 },
		{ 0x18, 0x411111f0 },
		{ 0x19, 0x02a11030 },
		{ 0x1a, 0x411111f0 },
		{ 0x1b, 0x04a19030 },
		{ 0x1d, 0x40600001 },
		{ 0x1e, 0x411111f0 },
		{ 0x21, 0x03211020 },
		{}
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
},
[ALC285_FIXUP_HP_SPECTRE_X360] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_hp_spectre_x360,
},
[ALC285_FIXUP_HP_SPECTRE_X360_EB1] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_hp_spectre_x360_eb1
},
[ALC285_FIXUP_HP_ENVY_X360] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_hp_envy_x360,
	.chained = true,
	.chain_id = ALC285_FIXUP_HP_GPIO_AMP_INIT,
},
[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_ideapad_s740_coef,
	.chained = true,
	.chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
},
[ALC623_FIXUP_LENOVO_THINKSTATION_P340] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc_fixup_no_shutup,
	.chained = true,
	.chain_id = ALC283_FIXUP_HEADSET_MIC,
},
[ALC255_FIXUP_ACER_HEADPHONE_AND_MIC] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x21, 0x03211030 }, /* Change the Headphone location to Left */
		{ }
	},
	.chained = true,
	.chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
},
[ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc269_fixup_limit_int_mic_boost,
	.chained = true,
	.chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
},
[ALC285_FIXUP_LEGION_Y9000X_SPEAKERS] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_ideapad_s740_coef,
	.chained = true,
	.chain_id = ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
},
[ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_legion_15imhg05_speakers,
	.chained = true,
	.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
},
[ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = {
	.type = HDA_FIXUP_VERBS,
	//.v.verbs = legion_15imhg05_coefs,
	.v.verbs = (const struct hda_verb[]) {
		// set left speaker Legion 7i.
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xc },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		// set right speaker Legion 7i.
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xc },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{}
	},
	.chained = true,
	.chain_id = ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
},
[ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_legion_15imhg05_speakers,
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE,
},
[ALC287_FIXUP_YOGA7_14ITL_SPEAKERS] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		// set left speaker Yoga 7i.
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xc },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		// set right speaker Yoga 7i.
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xc },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{}
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE,
},
[ALC298_FIXUP_LENOVO_C940_DUET7] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc298_fixup_lenovo_c940_duet7,
},
[ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{}
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE,
},
[ALC256_FIXUP_SET_COEF_DEFAULTS] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc256_fixup_set_coef_defaults,
},
[ALC245_FIXUP_HP_GPIO_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc245_fixup_hp_gpio_led,
},
[ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x03a11120 }, /* use as headset mic, without its own jack detect */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
},
[ALC233_FIXUP_NO_AUDIO_JACK] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc233_fixup_no_audio_jack,
},
[ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc256_fixup_mic_no_presence_and_resume,
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
[ALC287_FIXUP_LEGION_16ACHG6] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_legion_16achg6_speakers,
},
[ALC287_FIXUP_CS35L41_I2C_2] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_i2c_two,
},
[ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_i2c_two,
	.chained = true,
	.chain_id = ALC285_FIXUP_HP_MUTE_LED,
},
[ALC287_FIXUP_CS35L41_I2C_4] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_i2c_four,
},
[ALC245_FIXUP_CS35L41_SPI_2] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_spi_two,
},
[ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_spi_two,
	.chained = true,
	.chain_id = ALC285_FIXUP_HP_GPIO_LED,
},
[ALC245_FIXUP_CS35L41_SPI_4] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_spi_four,
},
[ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_spi_four,
	.chained = true,
	.chain_id = ALC285_FIXUP_HP_GPIO_LED,
},
[ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x19 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x8e11 },
		{ }
	},
	.chained = true,
	.chain_id = ALC285_FIXUP_HP_MUTE_LED,
},
[ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc_fixup_dell4_mic_no_presence_quiet,
	.chained = true,
	.chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
},
[ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x02a1112c }, /* use as headset mic, without its own jack detect */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
[ALC287_FIXUP_LEGION_16ITHG6] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_legion_16ithg6_speakers,
},
[ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK] = {
	.type = HDA_FIXUP_VERBS,
	.v.verbs = (const struct hda_verb[]) {
		// enable left speaker
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xc },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xf },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x10 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x40 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		// enable right speaker
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xc },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xf },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x10 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x44 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
		{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
		{ },
	},
},
[ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_yoga9_14iap7_bass_spk_pin,
	.chained = true,
	.chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
},
[ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_yoga9_14iap7_bass_spk_pin,
	.chained = true,
	.chain_id = ALC287_FIXUP_CS35L41_I2C_2,
},
[ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc295_fixup_dell_inspiron_top_speakers,
	.chained = true,
	.chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
},
[ALC236_FIXUP_DELL_DUAL_CODECS] = {
	/*
	 * FIX: this entry supplies a callback through .v.func, so its type
	 * must be HDA_FIXUP_FUNC.  It was declared HDA_FIXUP_PINS, which
	 * would make the fixup core read the union's function pointer as a
	 * zero-terminated hda_pintbl and apply garbage pin configs.  Every
	 * other .v.func entry in this table (e.g.
	 * ALC233_FIXUP_LENOVO_MULTI_CODECS, the other dual-codec fixup)
	 * pairs .v.func with HDA_FIXUP_FUNC.
	 */
	.type = HDA_FIXUP_FUNC,
	.v.func = alc1220_fixup_gb_dual_codecs,
	.chained = true,
	.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
},
[ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_i2c_two,
	.chained = true,
	.chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
},
[ALC287_FIXUP_TAS2781_I2C] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = tas2781_fixup_i2c,
	.chained = true,
	.chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
},
[ALC287_FIXUP_YOGA7_14ARB7_I2C] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = yoga7_14arb7_fixup_i2c,
	.chained = true,
	.chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
},
[ALC245_FIXUP_HP_MUTE_LED_COEFBIT] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc245_fixup_hp_mute_led_coefbit,
},
[ALC245_FIXUP_HP_X360_MUTE_LEDS] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc245_fixup_hp_mute_led_coefbit,
	.chained = true,
	.chain_id = ALC245_FIXUP_HP_GPIO_LED
},
[ALC287_FIXUP_THINKPAD_I2S_SPK] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_bind_dacs,
	.chained = true,
	.chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
},
[ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_bind_dacs,
	.chained = true,
	.chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
},
[ALC2XX_FIXUP_HEADSET_MIC] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc_fixup_headset_mic,
},
[ALC289_FIXUP_DELL_CS35L41_SPI_2] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_spi_two,
	.chained = true,
	.chain_id = ALC289_FIXUP_DUAL_SPK
},
[ALC294_FIXUP_CS35L41_I2C_2] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = cs35l41_fixup_i2c_two,
},
[ALC256_FIXUP_ACER_SFG16_MICMUTE_LED] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc256_fixup_acer_sfg16_micmute_led,
},
[ALC256_FIXUP_HEADPHONE_AMP_VOL] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc256_decrease_headphone_amp_val,
},
[ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc245_fixup_hp_spectre_x360_eu0xxx,
},
[ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc245_fixup_hp_spectre_x360_16_aa0xxx,
},
[ALC285_FIXUP_ASUS_GA403U] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_asus_ga403u,
},
[ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x03a11050 },
		{ 0x1b, 0x03a11c30 },
		{ }
	},
	.chained = true,
	.chain_id = ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1
},
[ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_speaker2_to_dac1,
	.chained = true,
	.chain_id = ALC285_FIXUP_ASUS_GU605_SPI_2_HEADSET_MIC,
},
[ALC285_FIXUP_ASUS_GU605_SPI_2_HEADSET_MIC] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x03a11050 },
		{ 0x1b, 0x03a11c30 },
		{ }
	},
},
[ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc285_fixup_speaker2_to_dac1,
	.chained = true,
	.chain_id = ALC285_FIXUP_ASUS_GA403U,
},
[ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc287_fixup_lenovo_thinkpad_with_alc1318,
	.chained = true,
	.chain_id = ALC269_FIXUP_THINKPAD_ACPI
},
[ALC256_FIXUP_CHROME_BOOK] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc256_fixup_chromebook,
	.chained = true,
	.chain_id = ALC225_FIXUP_HEADSET_JACK
},
[ALC245_FIXUP_CLEVO_NOISY_MIC] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc269_fixup_limit_int_mic_boost,
	.chained = true,
	.chain_id = ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
},
[ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */
		{ 0x1b, 0x20a11040 }, /* dock mic */
		{ }
	},
	.chained = true,
	.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
},
[ALC233_FIXUP_MEDION_MTL_SPK] = {
	.type = HDA_FIXUP_PINS,
	.v.pins = (const struct hda_pintbl[]) {
		{ 0x1b, 0x90170110 },
		{ }
	},
},
[ALC294_FIXUP_BASS_SPEAKER_15] = {
	.type = HDA_FIXUP_FUNC,
	.v.func = alc294_fixup_bass_speaker_15,
},
};

/*
 * PCI subsystem-ID -> fixup mapping.  SND_PCI_QUIRK(subvendor, subdevice,
 * name, fixup-index) selects an entry of alc269_fixups[] above for a
 * specific board.
 */
static const struct hda_quirk alc269_fixup_tbl[] = {
	SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
	SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
	SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
	SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
	SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
	SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
	SND_PCI_QUIRK(0x1025, 0x100c, "Acer Aspire E5-574G", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
	SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
	SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
	SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
	SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
	SND_PCI_QUIRK(0x1025, 0x1094, "Acer Aspire E5-575T", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
	SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x1025, 0x1167, "Acer Veriton N6640G", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x1025, 0x1177, "Acer Predator G9-593", ALC255_FIXUP_PREDATOR_SUBWOOFER), SND_PCI_QUIRK(0x1025, 0x1178, "Acer Predator G9-593", ALC255_FIXUP_PREDATOR_SUBWOOFER), SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK), SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1269, "Acer SWIFT SF314-54", ALC256_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x126a, "Acer Swift SF114-32", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x129d, "Acer SWIFT SF313-51", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x141f, "Acer Spin SP513-54N", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", 
ALC255_FIXUP_ACER_HEADPHONE_AND_MIC), SND_PCI_QUIRK(0x1025, 0x1534, "Acer Predator PH315-54", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x169a, "Acer Swift SFG16", ALC256_FIXUP_ACER_SFG16_MICMUTE_LED), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK), SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 
SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3), SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", 
ALC233_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0a38, "Dell Latitude 7520", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET), SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC), SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0b27, "Dell", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0b28, "Dell", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), SND_PCI_QUIRK(0x1028, 0x0beb, "Dell XPS 15 9530 (2023)", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0c0b, "Dell Oasis 14 RPL-P", ALC289_FIXUP_RTK_AMP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0c0d, "Dell Oasis", ALC289_FIXUP_RTK_AMP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0c0e, "Dell Oasis 16", ALC289_FIXUP_RTK_AMP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", 
ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c28, "Dell Inspiron 16 Plus 7630", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), SND_PCI_QUIRK(0x1028, 0x0c4d, "Dell", ALC287_FIXUP_CS35L41_I2C_4), SND_PCI_QUIRK(0x1028, 0x0c94, "Dell Polaris 3 metal", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1028, 0x0c96, "Dell Polaris 2in1", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cc0, "Dell Oasis 13", ALC289_FIXUP_RTK_AMP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis 14", ALC289_FIXUP_RTK_AMP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 
0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY), SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS), SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS), SND_PCI_QUIRK(0x103c, 0x2278, 
"HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M), SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2b5e, "HP 288 Pro G2 MT", ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC), SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC), SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC295_FIXUP_HP_X360), SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC), SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360), SND_PCI_QUIRK(0x103c, 0x827f, "HP 
x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x84a6, "HP 250 G7 Notebook PC", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN), SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360), SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360), SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", 
ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x876e, "HP ENVY x360 Convertible 13-ay0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8780, "HP ZBook Fury 17 G7 Mobile Workstation", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8786, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x87df, "HP ProBook 430 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", 
ALC245_FIXUP_HP_X360_AMP), SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP), SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x881d, "HP 250 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x888a, "HP ENVY x360 Convertible 15-eu0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 
0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED), SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x88dd, "HP Pavilion 15z-ec200", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x890e, "HP 255 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8973, "HP EliteBook 860 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8974, "HP EliteBook 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x897d, "HP mt440 Mobile Thin Client U74", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4), SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", 
ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89ac, "HP EliteBook 640 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89ae, "HP EliteBook 650 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89c0, "HP ZBook Power 15.6 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x89e7, "HP Elite x2 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8a28, "HP Envy 13", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8a29, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8a2a, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8a2b, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8a2c, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8a2d, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8a2e, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8a30, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8a31, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), 
SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4), SND_PCI_QUIRK(0x103c, 0x8a74, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ab9, "HP EliteBook 840 G8 (MB 8AB8)", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ad8, "HP 800 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b0f, "HP Elite mt645 G7 Mobile Thin Client U81", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8b3a, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8b3f, "HP mt440 Mobile Thin Client U91", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b45, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b46, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile 
Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b5f, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b70, "HP EliteBook 835 G10", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b72, "HP EliteBook 845 G10", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b74, "HP EliteBook 845W G10", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b77, "HP ElieBook 865 G10", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b87, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", 
ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be0, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be1, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be2, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be3, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be5, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be6, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be7, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be8, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8be9, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c15, "HP Spectre x360 2-in-1 Laptop 14-eu0xxx", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX), SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre x360 2-in-1 Laptop 16-aa0xxx", ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX), SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS), SND_PCI_QUIRK(0x103c, 0x8c30, "HP Victus 15-fb1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c4d, "HP Omen", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c4e, "HP Omen", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c4f, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c50, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c51, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), 
SND_PCI_QUIRK(0x103c, 0x8c52, "HP EliteBook 1040 G11", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c53, "HP Elite x360 1040 2-in-1 G11", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c66, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c67, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c68, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c6a, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c7b, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c7f, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c80, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c81, "HP EliteBook 665 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c8d, "HP ProBook 440 G11", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c8e, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c96, "HP", 
ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8caf, "HP Elite mt645 G8 Mobile Thin Client", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8cbd, "HP Pavilion Aero Laptop 13-bg0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8cde, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d91, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d92, "HP ZBook Firefly 16 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x10a4, "ASUS TP3407SA", 
ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x1154, "ASUS TP3607SH", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1204, "ASUS Strix G615JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x1214, "ASUS Strix G615LH_LM_LP", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM), SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", 
ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301VV/VQ/VU/VJ/VA/VC/VE/VVC/VQC/VUC/VJC/VEC/VCC", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZI/ZJ/ZQ/ZU/ZV", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x16d3, "ASUS UX5304VA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x16f3, "ASUS UX7602VI/BZ", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally NR2301L/X", ALC294_FIXUP_ASUS_ALLY), SND_PCI_QUIRK(0x1043, 0x1eb3, "ROG Ally X RC72LA", ALC294_FIXUP_ASUS_ALLY_X), SND_PCI_QUIRK(0x1043, 0x1863, "ASUS UX6404VI/VV", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x18d3, "ASUS UM3504DA", ALC294_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1970, "ASUS UX550VE", 
ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B), SND_PCI_QUIRK(0x1043, 0x1b13, "ASUS U41SV/GA403U", ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c03, "ASUS UM3406HA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1c33, "ASUS UX5304MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1c63, "ASUS GU605M", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1), SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1ccf, "ASUS G814JU/JV/JI", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1cdf, "ASUS G814JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1cef, "ASUS G834JY/JZ/JI/JG", ALC285_FIXUP_ASUS_HEADSET_MIC), 
SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS G713PI/PU/PV/PVN", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1df3, "ASUS UM5606WA", ALC294_FIXUP_BASS_SPEAKER_15), SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1f1f, "ASUS H7604JI/JV/J3D", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 
0x3a50, "ASUS G834JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3e30, "ASUS TP3607SA", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3ee0, "ASUS Strix G815_JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3ef0, "ASUS Strix G635LR_LW_LX", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3f00, "ASUS Strix G815LH_LM_LP", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3f10, "ASUS Strix G835LR_LW_LX", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3f20, "ASUS Strix G615LR_LW", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3f30, "ASUS Strix G815LR_LW", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", 
ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), SND_PCI_QUIRK(0x10ec, 0x119e, "Positivo SU C1400", ALC269_FIXUP_ASPIRE_HEADSET_MIC), SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x12f6, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_ASPIRE_HEADSET_MIC), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc1a3, "Samsung Galaxy Book Pro (NP935XDB-KC1SE)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc1a4, "Samsung Galaxy Book Pro 360 (NT935QBD)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc1a6, "Samsung Galaxy Book Pro 360 (NP930QBD)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion 
(NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xca06, "Samsung Galaxy Book3 360 (NP730QFG)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc870, "Samsung Galaxy Book2 Pro (NP950XED)", ALC298_FIXUP_SAMSUNG_AMP_V2_2_AMPS), SND_PCI_QUIRK(0x144d, 0xc872, "Samsung Galaxy Book2 Pro (NP950XEE)", ALC298_FIXUP_SAMSUNG_AMP_V2_2_AMPS), SND_PCI_QUIRK(0x144d, 0xc886, "Samsung Galaxy Book3 Pro (NP964XFG)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS), SND_PCI_QUIRK(0x144d, 0xc1ca, "Samsung Galaxy Book3 Pro 360 (NP960QFG)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS), SND_PCI_QUIRK(0x144d, 0xc1cc, "Samsung Galaxy Book3 Ultra (NT960XFH)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x152d, 0x1262, "Huawei NBLB-WAX9N", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x0353, "Clevo V35[05]SN[CDE]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x1325, "Clevo N15[01][CW]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x1403, "Clevo N140CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x1404, "Clevo N150CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x14a1, "Clevo L141MU", 
ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x2624, "Clevo L240TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x28c1, "Clevo V370VND", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x4041, "Clevo NV4[15]PZ", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x40a1, "Clevo NL40GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x40c1, "Clevo NL40[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x40d1, "Clevo NL41DU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x5015, "Clevo NH5[58]H[HJK]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x5017, "Clevo NH7[79]H[HJK]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50a3, "Clevo NJ51GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50b3, "Clevo NK50S[BEZ]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50b6, "Clevo NK50S5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50e1, "Clevo NH5[58]HPQ", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50e2, "Clevo NH7[79]HPQ", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x50f6, 
"Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x51b1, "Clevo NS50AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x51b3, "Clevo NS70AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x7724, "Clevo L140AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8535, "Clevo NH50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8536, "Clevo NH79D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8550, "Clevo NH[57][0-9][ER][ACDH]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), 
SND_PCI_QUIRK(0x1558, 0x8551, "Clevo NH[57][0-9][ER][ACDH]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8560, "Clevo NH[57][0-9][ER][ACDH]Q", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x8561, "Clevo NH[57][0-9][ER][ACDH]Q", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC), SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME), SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa554, "VAIO VJFH52", ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 
0xa741, "Clevo V54x_6x_TNE", ALC245_FIXUP_CLEVO_NOISY_MIC), SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC245_FIXUP_CLEVO_NOISY_MIC), SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST), SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440), SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK), 
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x2234, "Thinkpad ICE-1", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x22c2, "Thinkpad X1 Extreme Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", 
ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x231e, "Thinkpad", ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318), SND_PCI_QUIRK(0x17aa, 0x231f, "Thinkpad", ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318), SND_PCI_QUIRK(0x17aa, 0x2326, "Hera2", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3111, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), HDA_CODEC_QUIRK(0x17aa, 0x3802, "DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), 
SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7), SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), HDA_CODEC_QUIRK(0x17aa, 0x3820, "IdeaPad 330-17IKB 81DM", ALC269_FIXUP_ASPIRE_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), SND_PCI_QUIRK(0x17aa, 0x3847, "Legion 7 16ACHG6", ALC287_FIXUP_LEGION_16ACHG6), SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6), SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), HDA_CODEC_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x386e, "Yoga Pro 7 14ARP8", ALC285_FIXUP_SPEAKER2_TO_DAC1), HDA_CODEC_QUIRK(0x17aa, 0x386f, "Legion Pro 7 16ARX8H", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 
0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C), SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x3878, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x387d, "Yoga S780-16 pro Quad AAC", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x387e, "Yoga S780-16 pro Quad YC", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x387f, "Yoga S780-16 pro dual LX", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3880, "Yoga S780-16 pro dual YC", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3881, "YB9 dual power mode2 YC", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3891, "Lenovo Yoga Pro 7 14AHP9", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x38a5, "Y580P AMD dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x38b4, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38b7, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38b8, "Yoga S780-14.5 proX AMD YC Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38b9, "Yoga S780-14.5 proX AMD LX Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38ba, "Yoga 
S780-14.5 Air AMD quad YC", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38bb, "Yoga S780-14.5 Air AMD quad AAC", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38be, "Yoga S980-14.5 proX YC Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38bf, "Yoga S980-14.5 proX LX Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38c3, "Y980 DUAL", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38c7, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4), SND_PCI_QUIRK(0x17aa, 0x38c8, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4), SND_PCI_QUIRK(0x17aa, 0x38cb, "Y790 YG DUAL", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38cd, "Y790 VECO DUAL", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38d2, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x38d3, "Yoga S990-16 Pro IMH YC Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38d4, "Yoga S990-16 Pro IMH VECO Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38d5, "Yoga S990-16 Pro IMH YC Quad", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38d6, "Yoga S990-16 Pro IMH VECO Quad", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38d7, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x38df, "Yoga Y990 Intel YC Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38e0, "Yoga Y990 Intel VECO Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38f8, "Yoga Book 9i", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38df, "Y990 YG DUAL", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 Hybrid", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), SND_PCI_QUIRK(0x17aa, 
0x391f, "Yoga S990-16 pro Quad YC Quad", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3920, "Yoga S990-16 pro Quad VECO Quad", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", 
ALC233_FIXUP_NO_AUDIO_JACK), SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1854, 0x0440, "LG CQ6", ALC256_FIXUP_HEADPHONE_AMP_VOL), SND_PCI_QUIRK(0x1854, 0x0441, "LG CQ6 AIO", ALC256_FIXUP_HEADPHONE_AMP_VOL), SND_PCI_QUIRK(0x1854, 0x0488, "LG gram 16 (16Z90R)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS), SND_PCI_QUIRK(0x1854, 0x048a, "LG gram 17 (17ZD90R)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x19e5, 0x3212, "Huawei KLV-WX9 ", ALC256_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101), SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X), SND_PCI_QUIRK(0x1c6c, 0x122a, "Positivo N14AP7", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS), SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x1111, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x1119, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x1129, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x1147, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", 
ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1d05, 0x1409, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13), SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), SND_PCI_QUIRK(0x2782, 0x1701, "Infinix Y4 Max", ALC269VC_FIXUP_INFINIX_Y4_MAX), SND_PCI_QUIRK(0x2782, 0x1705, "MEDION E15433", ALC269VC_FIXUP_INFINIX_Y4_MAX), SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x2782, 0x4900, "MEDION E15443", ALC233_FIXUP_MEDION_MTL_SPK), SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC), SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10), SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), #if 0 /* Below is a quirk table taken from the old code. * Basically the device should work as is without the fixup table. 
* If BIOS doesn't give a proper info, enable the corresponding * fixup entry. */ SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1143, "ASUS B53f", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1133, "ASUS UJ20ft", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1183, "ASUS K72DR", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x11b3, "ASUS K52DR", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1383, "ASUS UJ30Jc", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x13d3, "ASUS N61JA", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1413, "ASUS UL50", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1443, "ASUS UL30", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1453, "ASUS M60Jv", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1483, "ASUS UL80", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x14f3, "ASUS F83Vf", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS UL20", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1513, "ASUS UX30", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1593, "ASUS N51Vn", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x15a3, "ASUS N60Jv", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x15b3, "ASUS N60Dp", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x15c3, "ASUS N70De", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x15e3, "ASUS F83T", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1643, "ASUS M60J", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1653, "ASUS U50", ALC269_FIXUP_AMIC), SND_PCI_QUIRK(0x1043, 0x1693, "ASUS F50N", 
ALC269_FIXUP_AMIC),
	SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS F5Q", ALC269_FIXUP_AMIC),
	SND_PCI_QUIRK(0x1043, 0x1723, "ASUS P80", ALC269_FIXUP_AMIC),
	SND_PCI_QUIRK(0x1043, 0x1743, "ASUS U80", ALC269_FIXUP_AMIC),
	SND_PCI_QUIRK(0x1043, 0x1773, "ASUS U20A", ALC269_FIXUP_AMIC),
	SND_PCI_QUIRK(0x1043, 0x1883, "ASUS F81Se", ALC269_FIXUP_AMIC),
	SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_FIXUP_DMIC),
	SND_PCI_QUIRK(0x17aa, 0x3be9, "Quanta Wistron", ALC269_FIXUP_AMIC),
	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_AMIC),
	SND_PCI_QUIRK(0x17ff, 0x059a, "Quanta EL3", ALC269_FIXUP_DMIC),
	SND_PCI_QUIRK(0x17ff, 0x059b, "Quanta JR1", ALC269_FIXUP_DMIC),
#endif
	{}
};

/* Quirks keyed on the PCI subsystem *vendor* ID only (no device ID).
 * NOTE(review): presumably consulted as a fallback when no exact
 * vendor+device entry matches in alc269_fixup_tbl — confirm against the
 * quirk lookup helper in the hda core. */
static const struct hda_quirk alc269_fixup_vendor_tbl[] = {
	SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
	SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", ALC269_FIXUP_THINKPAD_ACPI),
	SND_PCI_QUIRK_VENDOR(0x19e5, "Huawei Matebook", ALC255_FIXUP_MIC_MUTE_LED),
	{}
};

/* Pairs each fixup ID with a human-readable name string.
 * NOTE(review): these look like the values accepted by the driver's
 * "model" option for forcing a fixup by hand — confirm in the hda docs. */
static const struct hda_model_fixup alc269_fixup_models[] = {
	{.id = ALC269_FIXUP_AMIC, .name = "laptop-amic"},
	{.id = ALC269_FIXUP_DMIC, .name = "laptop-dmic"},
	{.id = ALC269_FIXUP_STEREO_DMIC, .name = "alc269-dmic"},
	{.id = ALC271_FIXUP_DMIC, .name = "alc271-dmic"},
	{.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
	{.id = ALC269_FIXUP_HEADSET_MIC, .name = "headset-mic"},
	{.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"},
	{.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
	{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
	{.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"},
	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
	{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name =
"dell-headset-dock"}, {.id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, .name = "dell-headset3"}, {.id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, .name = "dell-headset4"}, {.id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET, .name = "dell-headset4-quiet"}, {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"}, {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, {.id = ALC292_FIXUP_TPT460, .name = "tpt460"}, {.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"}, {.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"}, {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, {.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"}, {.id = ALC269_FIXUP_SONY_VAIO, .name = "vaio"}, {.id = ALC269_FIXUP_DELL_M101Z, .name = "dell-m101z"}, {.id = ALC269_FIXUP_ASUS_G73JW, .name = "asus-g73jw"}, {.id = ALC269_FIXUP_LENOVO_EAPD, .name = "lenovo-eapd"}, {.id = ALC275_FIXUP_SONY_HWEQ, .name = "sony-hweq"}, {.id = ALC269_FIXUP_PCM_44K, .name = "pcm44k"}, {.id = ALC269_FIXUP_LIFEBOOK, .name = "lifebook"}, {.id = ALC269_FIXUP_LIFEBOOK_EXTMIC, .name = "lifebook-extmic"}, {.id = ALC269_FIXUP_LIFEBOOK_HP_PIN, .name = "lifebook-hp-pin"}, {.id = ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC, .name = "lifebook-u7x7"}, {.id = ALC269VB_FIXUP_AMIC, .name = "alc269vb-amic"}, {.id = ALC269VB_FIXUP_DMIC, .name = "alc269vb-dmic"}, {.id = ALC269_FIXUP_HP_MUTE_LED_MIC1, .name = "hp-mute-led-mic1"}, {.id = ALC269_FIXUP_HP_MUTE_LED_MIC2, .name = "hp-mute-led-mic2"}, {.id = ALC269_FIXUP_HP_MUTE_LED_MIC3, .name = "hp-mute-led-mic3"}, {.id = ALC269_FIXUP_HP_GPIO_MIC1_LED, .name = "hp-gpio-mic1"}, {.id = ALC269_FIXUP_HP_LINE1_MIC1_LED, .name = "hp-line1-mic1"}, {.id = ALC269_FIXUP_NO_SHUTUP, .name = "noshutup"}, {.id = ALC286_FIXUP_SONY_MIC_NO_PRESENCE, .name = "sony-nomic"}, {.id = ALC269_FIXUP_ASPIRE_HEADSET_MIC, .name = "aspire-headset-mic"}, {.id = ALC269_FIXUP_ASUS_X101, 
.name = "asus-x101"}, {.id = ALC271_FIXUP_HP_GATE_MIC_JACK, .name = "acer-ao7xx"}, {.id = ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572, .name = "acer-aspire-e1"}, {.id = ALC269_FIXUP_ACER_AC700, .name = "acer-ac700"}, {.id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST, .name = "limit-mic-boost"}, {.id = ALC269VB_FIXUP_ASUS_ZENBOOK, .name = "asus-zenbook"}, {.id = ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A, .name = "asus-zenbook-ux31a"}, {.id = ALC269VB_FIXUP_ORDISSIMO_EVE2, .name = "ordissimo"}, {.id = ALC282_FIXUP_ASUS_TX300, .name = "asus-tx300"}, {.id = ALC283_FIXUP_INT_MIC, .name = "alc283-int-mic"}, {.id = ALC290_FIXUP_MONO_SPEAKERS_HSJACK, .name = "mono-speakers"}, {.id = ALC290_FIXUP_SUBWOOFER_HSJACK, .name = "alc290-subwoofer"}, {.id = ALC269_FIXUP_THINKPAD_ACPI, .name = "thinkpad"}, {.id = ALC269_FIXUP_DMIC_THINKPAD_ACPI, .name = "dmic-thinkpad"}, {.id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE, .name = "alc255-acer"}, {.id = ALC255_FIXUP_ASUS_MIC_NO_PRESENCE, .name = "alc255-asus"}, {.id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"}, {.id = ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "alc255-dell2"}, {.id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc293-dell1"}, {.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"}, {.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"}, {.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"}, {.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"}, {.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"}, {.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, {.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"}, {.id = ALC280_FIXUP_HP_DOCK_PINS, .name = "hp-dock-pins"}, {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic"}, {.id = ALC280_FIXUP_HP_9480M, .name = "hp-9480m"}, {.id = ALC288_FIXUP_DELL_HEADSET_MODE, .name = "alc288-dell-headset"}, {.id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc288-dell1"}, {.id = ALC288_FIXUP_DELL_XPS_13, .name = "alc288-dell-xps13"}, {.id = 
ALC292_FIXUP_DELL_E7X, .name = "dell-e7x"}, {.id = ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, .name = "alc293-dell"}, {.id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc298-dell1"}, {.id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, .name = "alc298-dell-aio"}, {.id = ALC275_FIXUP_DELL_XPS, .name = "alc275-dell-xps"}, {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"}, {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, {.id = ALC285_FIXUP_SPEAKER2_TO_DAC1, .name = "alc285-speaker2-to-dac1"}, {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, {.id = ALC298_FIXUP_SPK_VOLUME, .name = "alc298-spk-volume"}, {.id = ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER, .name = "dell-inspiron-7559"}, {.id = ALC269_FIXUP_ATIV_BOOK_8, .name = "ativ-book"}, {.id = ALC221_FIXUP_HP_MIC_NO_PRESENCE, .name = "alc221-hp-mic"}, {.id = ALC256_FIXUP_ASUS_HEADSET_MODE, .name = "alc256-asus-headset"}, {.id = ALC256_FIXUP_ASUS_MIC, .name = "alc256-asus-mic"}, {.id = ALC256_FIXUP_ASUS_AIO_GPIO2, .name = "alc256-asus-aio"}, {.id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE, .name = "alc233-asus"}, {.id = ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, .name = "alc233-eapd"}, {.id = ALC294_FIXUP_LENOVO_MIC_LOCATION, .name = "alc294-lenovo-mic"}, {.id = ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, .name = "alc225-wyse"}, {.id = ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, .name = "alc274-dell-aio"}, {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"}, {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"}, {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"}, {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"}, {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"}, {.id = 
ALC256_FIXUP_CHROME_BOOK, .name = "alc-2024y-chromebook"}, {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"}, {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"}, {.id = ALC298_FIXUP_SAMSUNG_AMP, .name = "alc298-samsung-amp"}, {.id = ALC298_FIXUP_SAMSUNG_AMP_V2_2_AMPS, .name = "alc298-samsung-amp-v2-2-amps"}, {.id = ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS, .name = "alc298-samsung-amp-v2-4-amps"}, {.id = ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc256-samsung-headphone"}, {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"}, {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"}, {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"}, {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"}, {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"}, {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, {.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"}, {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"}, {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"}, {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"}, {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"}, {} }; #define ALC225_STANDARD_PINS \ {0x21, 0x04211020} #define ALC256_STANDARD_PINS \ {0x12, 0x90a60140}, \ {0x14, 0x90170110}, \ {0x21, 0x02211020} #define ALC282_STANDARD_PINS \ {0x14, 0x90170110} #define ALC290_STANDARD_PINS \ {0x12, 0x99a30130} #define ALC292_STANDARD_PINS \ {0x14, 0x90170110}, \ {0x15, 0x0221401f} #define ALC295_STANDARD_PINS \ {0x12, 0xb7a60130}, \ {0x14, 0x90170110}, \ {0x21, 0x04211020} #define ALC298_STANDARD_PINS \ 
{0x12, 0x90a60130}, \ {0x21, 0x03211020} static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { SND_HDA_PIN_QUIRK(0x10ec0221, 0x103c, "HP Workstation", ALC221_FIXUP_HP_HEADSET_MIC, {0x14, 0x01014020}, {0x17, 0x90170110}, {0x18, 0x02a11030}, {0x19, 0x0181303F}, {0x21, 0x0221102f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE, {0x12, 0x90a601c0}, {0x14, 0x90171120}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1043, "ASUS", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x1b, 0x90a70130}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1043, "ASUS", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE, {0x1a, 0x90a70130}, {0x1b, 0x90170110}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC225_STANDARD_PINS, {0x12, 0xb7a60130}, {0x14, 0x901701a0}), SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC225_STANDARD_PINS, {0x12, 0xb7a60130}, {0x14, 0x901701b0}), SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC225_STANDARD_PINS, {0x12, 0xb7a60150}, {0x14, 0x901701a0}), SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC225_STANDARD_PINS, {0x12, 0xb7a60150}, {0x14, 0x901701b0}), SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC225_STANDARD_PINS, {0x12, 0xb7a60130}, {0x1b, 0x90170110}), SND_HDA_PIN_QUIRK(0x10ec0233, 0x8086, "Intel NUC Skull Canyon", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, {0x1b, 0x01111010}, {0x1e, 0x01451130}, {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, {0x12, 0x90a60140}, {0x14, 0x90170110}, {0x19, 0x02a11030}, {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, {0x14, 0x90170110}, {0x19, 0x02a11030}, {0x1a, 0x02a11040}, {0x1b, 0x01014020}, {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, 
"Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, {0x14, 0x90170110}, {0x19, 0x02a11030}, {0x1a, 0x02a11040}, {0x1b, 0x01011020}, {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, {0x14, 0x90170110}, {0x19, 0x02a11020}, {0x1a, 0x02a11030}, {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC, {0x21, 0x02211010}), SND_HDA_PIN_QUIRK(0x10ec0236, 0x103c, "HP", ALC256_FIXUP_HP_HEADSET_MIC, {0x14, 0x90170110}, {0x19, 0x02a11020}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170130}, {0x21, 0x02211040}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60140}, {0x14, 0x90170110}, {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170120}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x1b, 0x02011020}, {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x1b, 0x01011020}, {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170130}, {0x1b, 0x01014020}, {0x21, 0x0221103f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170130}, {0x1b, 0x01011020}, {0x21, 0x0221103f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170130}, {0x1b, 0x02011020}, {0x21, 0x0221103f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170150}, {0x1b, 0x02011020}, {0x21, 0x0221105f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", 
ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x1b, 0x01014020}, {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170120}, {0x17, 0x90170140}, {0x21, 0x0321102f}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170130}, {0x21, 0x02211040}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170140}, {0x21, 0x02211050}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60170}, {0x14, 0x90170120}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60170}, {0x14, 0x90170130}, {0x21, 0x02211040}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60170}, {0x14, 0x90171130}, {0x21, 0x02211040}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60170}, {0x14, 0x90170140}, {0x21, 0x02211050}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60180}, {0x14, 0x90170130}, {0x21, 0x02211040}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60180}, {0x14, 0x90170120}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x1b, 0x01011020}, {0x21, 0x02211010}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC, {0x14, 0x90170110}, {0x1b, 0x90a70130}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC, {0x14, 0x90170110}, {0x1b, 0x90a70130}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, {0x12, 0x90a60130}, {0x14, 0x90170110}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, 
"ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, {0x12, 0x90a60130}, {0x14, 0x90170110}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, {0x1a, 0x90a70130}, {0x1b, 0x90170110}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x103c, "HP", ALC256_FIXUP_HP_HEADSET_MIC, {0x14, 0x90170110}, {0x19, 0x02a11020}, {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0274, 0x103c, "HP", ALC274_FIXUP_HP_HEADSET_MIC, {0x17, 0x90170110}, {0x19, 0x03a11030}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, {0x12, 0x90a60130}, {0x14, 0x90170110}, {0x15, 0x0421101f}, {0x1a, 0x04a11020}), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED, {0x12, 0x90a60140}, {0x14, 0x90170110}, {0x15, 0x0421101f}, {0x18, 0x02811030}, {0x1a, 0x04a1103f}, {0x1b, 0x02011020}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP 15 Touchsmart", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC282_STANDARD_PINS, {0x12, 0x99a30130}, {0x19, 0x03a11020}, {0x21, 0x0321101f}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC282_STANDARD_PINS, {0x12, 0x99a30130}, {0x19, 0x03a11020}, {0x21, 0x03211040}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC282_STANDARD_PINS, {0x12, 0x99a30130}, {0x19, 0x03a11030}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC282_STANDARD_PINS, {0x12, 0x99a30130}, {0x19, 0x04a11020}, {0x21, 0x0421101f}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED, ALC282_STANDARD_PINS, {0x12, 0x90a60140}, {0x19, 0x04a11030}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x1025, "Acer", ALC282_FIXUP_ACER_DISABLE_LINEOUT, ALC282_STANDARD_PINS, {0x12, 0x90a609c0}, {0x18, 0x03a11830}, {0x19, 0x04a19831}, {0x1a, 0x0481303f}, {0x1b, 0x04211020}, {0x21, 0x0321101f}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x1025, "Acer", ALC282_FIXUP_ACER_DISABLE_LINEOUT, ALC282_STANDARD_PINS, {0x12, 
0x90a60940}, {0x18, 0x03a11830}, {0x19, 0x04a19831}, {0x1a, 0x0481303f}, {0x1b, 0x04211020}, {0x21, 0x0321101f}), SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, ALC282_STANDARD_PINS, {0x12, 0x90a60130}, {0x21, 0x0321101f}), SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170120}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, ALC282_STANDARD_PINS, {0x12, 0x90a60130}, {0x19, 0x03a11020}, {0x21, 0x0321101f}), SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, {0x12, 0x90a60130}, {0x14, 0x90170110}, {0x19, 0x04a11040}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, {0x14, 0x90170110}, {0x19, 0x04a11040}, {0x1d, 0x40600001}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK, {0x14, 0x90170110}, {0x19, 0x04a11040}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_HEADSET_JACK, {0x14, 0x90170110}, {0x17, 0x90170111}, {0x19, 0x03a11030}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC287_FIXUP_THINKPAD_I2S_SPK, {0x17, 0x90170110}, {0x19, 0x03a11030}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC287_FIXUP_THINKPAD_I2S_SPK, {0x17, 0x90170110}, /* 0x231f with RTK I2S AMP */ {0x19, 0x04a11040}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE, {0x12, 0x90a60130}, {0x17, 0x90170110}, {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60120}, {0x14, 0x90170110}, {0x21, 0x0321101f}), SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC290_STANDARD_PINS, {0x15, 0x04211040}, {0x18, 0x90170112}, {0x1a, 0x04a11020}), 
SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC290_STANDARD_PINS, {0x15, 0x04211040}, {0x18, 0x90170110}, {0x1a, 0x04a11020}), SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC290_STANDARD_PINS, {0x15, 0x0421101f}, {0x1a, 0x04a11020}), SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC290_STANDARD_PINS, {0x15, 0x04211020}, {0x1a, 0x04a11040}), SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC290_STANDARD_PINS, {0x14, 0x90170110}, {0x15, 0x04211020}, {0x1a, 0x04a11040}), SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC290_STANDARD_PINS, {0x14, 0x90170110}, {0x15, 0x04211020}, {0x1a, 0x04a11020}), SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC290_STANDARD_PINS, {0x14, 0x90170110}, {0x15, 0x0421101f}, {0x1a, 0x04a11020}), SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x90a60140}, {0x16, 0x01014020}, {0x19, 0x01a19030}), SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x90a60140}, {0x16, 0x01014020}, {0x18, 0x02a19031}, {0x19, 0x01a1903e}), SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x90a60140}), SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x13, 0x90a60140}, {0x16, 0x21014020}, {0x19, 0x21a19030}), SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x13, 0x90a60140}), SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_HPE, {0x17, 0x90170110}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC, {0x14, 0x90170110}, {0x1b, 0x90a70130}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK, {0x12, 
0x90a60130}, {0x17, 0x90170110}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK, {0x12, 0x90a60130}, {0x17, 0x90170110}, {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK, {0x12, 0x90a60130}, {0x17, 0x90170110}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, {0x12, 0x90a60120}, {0x17, 0x90170110}, {0x21, 0x04211030}), SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, {0x12, 0x90a60130}, {0x17, 0x90170110}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, {0x12, 0x90a60130}, {0x17, 0x90170110}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_STANDARD_PINS, {0x17, 0x90170110}), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_STANDARD_PINS, {0x17, 0x90170140}), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_STANDARD_PINS, {0x17, 0x90170150}), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_SPK_VOLUME, {0x12, 0xb7a60140}, {0x13, 0xb7a60150}, {0x17, 0x90170110}, {0x1a, 0x03011020}, {0x21, 0x03211030}), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE, {0x12, 0xb7a60140}, {0x17, 0x90170110}, {0x1a, 0x03a11030}, {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, ALC225_STANDARD_PINS, {0x12, 0xb7a60130}, {0x17, 0x90170110}), SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC, {0x14, 0x01014010}, {0x17, 0x90170120}, {0x18, 0x02a11030}, {0x19, 0x02a1103f}, {0x21, 0x0221101f}), {} }; /* This is the fallback pin_fixup_tbl for alc269 family, to make the tbl match * more machines, don't need to match all valid pins, just need to match * all the pins defined in the tbl. 
Just because of this reason, it is possible
 * that a single machine matches multiple tbls, so there is one limitation:
 * at most one tbl is allowed to define for the same vendor and same codec
 */
static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1025, "Acer", ALC2XX_FIXUP_HEADSET_MIC,
		{0x19, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
		{0x19, 0x40000000}, {0x1b, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET,
		{0x19, 0x40000000}, {0x1b, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
		{0x19, 0x40000000}, {0x1a, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_LIMIT_INT_MIC_BOOST,
		{0x19, 0x40000000}, {0x1a, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_LIMIT_INT_MIC_BOOST,
		{0x19, 0x40000000}, {0x1a, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
		{0x19, 0x40000000}),
	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1558, "Clevo", ALC2XX_FIXUP_HEADSET_MIC,
		{0x19, 0x40000000}),
	{}
};

/* Apply COEF (vendor register) workarounds for certain ALC269VB revisions.
 * No-op for any other codec variant (early return below).  The low byte of
 * the COEF-0 value selects which tweak set applies; the indices and bit
 * patterns are Realtek-specific magic. */
static void alc269_fill_coef(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;
	int val;

	/* Only the ALC269VB variant needs these workarounds. */
	if (spec->codec_variant != ALC269_TYPE_ALC269VB)
		return;

	if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
		alc_write_coef_idx(codec, 0xf, 0x960b);
		alc_write_coef_idx(codec, 0xe, 0x8817);
	}

	if ((alc_get_coef0(codec) & 0x00ff) == 0x016) {
		alc_write_coef_idx(codec, 0xf, 0x960b);
		alc_write_coef_idx(codec, 0xe, 0x8814);
	}

	if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
		/* Power up output pin */
		alc_update_coef_idx(codec, 0x04, 0, 1<<11);
	}

	if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
		val = alc_read_coef_idx(codec, 0xd);
		if (val != -1 && (val & 0x0c00) >> 10 != 0x1) {
			/* Capless ramp up clock control */
			alc_write_coef_idx(codec, 0xd, val | (1<<10));
		}
		val = alc_read_coef_idx(codec, 0x17);
		if (val != -1 && (val & 0x01c0) >> 6 != 0x4) {
			/* Class D power on reset */
			alc_write_coef_idx(codec, 0x17, val | (1<<7));
		}
	}

	/* HP: set bit 11 of COEF 0x4 unconditionally for all VB revisions */
	alc_update_coef_idx(codec, 0x4, 0, 1<<11);
}

/* Patch entry point for the ALC269 family: allocate the spec, install the
 * default suspend/resume/shutup/init hooks, then refine the variant and
 * hooks per codec vendor ID in the switch below. */
static int patch_alc269(struct hda_codec *codec)
{
	struct alc_spec *spec;
	int err;

	err = alc_alloc_spec(codec, 0x0b);
	if (err < 0)
		return err;

	spec = codec->spec;
	spec->gen.shared_mic_vref_pin = 0x18;
	codec->power_save_node = 0;
	spec->en_3kpull_low = true;

	codec->patch_ops.suspend = alc269_suspend;
	codec->patch_ops.resume = alc269_resume;
	spec->shutup = alc_default_shutup;
	spec->init_hook = alc_default_init;

	switch (codec->core.vendor_id) {
	case 0x10ec0269:
		spec->codec_variant = ALC269_TYPE_ALC269VA;
		/* COEF-0 bits 4..7 distinguish the VA/VB/VC/VD silicon. */
		switch (alc_get_coef0(codec) & 0x00f0) {
		case 0x0010:
			if (codec->bus->pci &&
			    codec->bus->pci->subsystem_vendor == 0x1025 &&
			    spec->cdefine.platform_type == 1)
				err = alc_codec_rename(codec, "ALC271X");
			spec->codec_variant = ALC269_TYPE_ALC269VB;
			break;
		case 0x0020:
			if (codec->bus->pci &&
			    codec->bus->pci->subsystem_vendor == 0x17aa &&
			    codec->bus->pci->subsystem_device == 0x21f3)
				err = alc_codec_rename(codec, "ALC3202");
			spec->codec_variant = ALC269_TYPE_ALC269VC;
			break;
		case 0x0030:
			spec->codec_variant = ALC269_TYPE_ALC269VD;
			break;
		default:
			alc_fix_pll_init(codec, 0x20, 0x04, 15);
		}
		if (err < 0)
			goto error;
		spec->shutup = alc269_shutup;
		spec->init_hook = alc269_fill_coef;
		alc269_fill_coef(codec);
		break;
	case 0x10ec0280:
	case 0x10ec0290:
		spec->codec_variant = ALC269_TYPE_ALC280;
		break;
	case 0x10ec0282:
		spec->codec_variant = ALC269_TYPE_ALC282;
		spec->shutup = alc282_shutup;
		spec->init_hook = alc282_init;
		break;
	case 0x10ec0233:
	case 0x10ec0283:
		spec->codec_variant = ALC269_TYPE_ALC283;
		spec->shutup = alc283_shutup;
		spec->init_hook = alc283_init;
		break;
	case 0x10ec0284:
	case 0x10ec0292:
		spec->codec_variant = ALC269_TYPE_ALC284;
		break;
	case 0x10ec0293:
		spec->codec_variant = ALC269_TYPE_ALC293;
		break;
	case 0x10ec0286:
	case 0x10ec0288:
		spec->codec_variant = ALC269_TYPE_ALC286;
		break;
	case 0x10ec0298:
spec->codec_variant = ALC269_TYPE_ALC298; break; case 0x10ec0235: case 0x10ec0255: spec->codec_variant = ALC269_TYPE_ALC255; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; break; case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: case 0x19e58326: spec->codec_variant = ALC269_TYPE_ALC256; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ if (codec->core.vendor_id == 0x10ec0236 && codec->bus->pci->vendor != PCI_VENDOR_ID_AMD) spec->en_3kpull_low = false; break; case 0x10ec0257: spec->codec_variant = ALC269_TYPE_ALC257; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; spec->en_3kpull_low = false; break; case 0x10ec0215: case 0x10ec0245: case 0x10ec0285: case 0x10ec0289: if (alc_get_coef0(codec) & 0x0010) spec->codec_variant = ALC269_TYPE_ALC245; else spec->codec_variant = ALC269_TYPE_ALC215; spec->shutup = alc225_shutup; spec->init_hook = alc225_init; spec->gen.mixer_nid = 0; break; case 0x10ec0225: case 0x10ec0295: case 0x10ec0299: spec->codec_variant = ALC269_TYPE_ALC225; spec->shutup = alc225_shutup; spec->init_hook = alc225_init; spec->gen.mixer_nid = 0; /* no loopback on ALC225, ALC295 and ALC299 */ break; case 0x10ec0287: spec->codec_variant = ALC269_TYPE_ALC287; spec->shutup = alc225_shutup; spec->init_hook = alc225_init; spec->gen.mixer_nid = 0; /* no loopback on ALC287 */ break; case 0x10ec0234: case 0x10ec0274: case 0x10ec0294: spec->codec_variant = ALC269_TYPE_ALC294; spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ spec->init_hook = alc294_init; break; case 0x10ec0300: spec->codec_variant = ALC269_TYPE_ALC300; spec->gen.mixer_nid = 0; /* no loopback on ALC300 */ break; case 0x10ec0623: spec->codec_variant = ALC269_TYPE_ALC623; break; case 0x10ec0700: case 0x10ec0701: case 0x10ec0703: case 0x10ec0711: 
spec->codec_variant = ALC269_TYPE_ALC700; spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ spec->init_hook = alc294_init; break; } if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { spec->has_alc5505_dsp = 1; spec->init_hook = alc5505_dsp_init; } alc_pre_init(codec); snd_hda_pick_fixup(codec, alc269_fixup_models, alc269_fixup_tbl, alc269_fixups); /* FIXME: both TX300 and ROG Strix G17 have the same SSID, and * the quirk breaks the latter (bko#214101). * Clear the wrong entry. */ if (codec->fixup_id == ALC282_FIXUP_ASUS_TX300 && codec->core.vendor_id == 0x10ec0294) { codec_dbg(codec, "Clear wrong fixup for ASUS ROG Strix G17\n"); codec->fixup_id = HDA_FIXUP_ID_NOT_SET; } snd_hda_pick_pin_fixup(codec, alc269_pin_fixup_tbl, alc269_fixups, true); snd_hda_pick_pin_fixup(codec, alc269_fallback_pin_fixup_tbl, alc269_fixups, false); snd_hda_pick_fixup(codec, NULL, alc269_fixup_vendor_tbl, alc269_fixups); /* * Check whether ACPI describes companion amplifiers that require * component binding */ find_cirrus_companion_amps(codec); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); alc_auto_parse_customize_define(codec); if (has_cdefine_beep(codec)) spec->gen.beep_nid = 0x01; /* automatic parse from the BIOS config */ err = alc269_parse_auto_config(codec); if (err < 0) goto error; if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid) { err = set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT); if (err < 0) goto error; } snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE); return 0; error: alc_free(codec); return err; } /* * ALC861 */ static int alc861_parse_auto_config(struct hda_codec *codec) { static const hda_nid_t alc861_ignore[] = { 0x1d, 0 }; static const hda_nid_t alc861_ssids[] = { 0x0e, 0x0f, 0x0b, 0 }; return alc_parse_auto_config(codec, alc861_ignore, alc861_ssids); } /* Pin config fixes */ enum { 
	ALC861_FIXUP_FSC_AMILO_PI1505,
	ALC861_FIXUP_AMP_VREF_0F,
	ALC861_FIXUP_NO_JACK_DETECT,
	ALC861_FIXUP_ASUS_A6RP,
	ALC660_FIXUP_ASUS_W7J,
};

/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
static void alc861_fixup_asus_amp_vref_0f(struct hda_codec *codec,
			const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;
	unsigned int val;

	if (action != HDA_FIXUP_ACT_INIT)
		return;
	val = snd_hda_codec_get_pin_target(codec, 0x0f);
	/* make sure the pin is enabled in at least one direction before
	 * forcing VREF50 onto it
	 */
	if (!(val & (AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN)))
		val |= AC_PINCTL_IN_EN;
	val |= AC_PINCTL_VREF_50;
	snd_hda_set_pin_ctl(codec, 0x0f, val);
	/* keep the VREF setting while auto-muting */
	spec->gen.keep_vref_in_automute = 1;
}

/* suppress the jack-detection */
static void alc_fixup_no_jack_detect(struct hda_codec *codec,
				     const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		codec->no_jack_detect = 1;
}

static const struct hda_fixup alc861_fixups[] = {
	[ALC861_FIXUP_FSC_AMILO_PI1505] = {
		.type = HDA_FIXUP_PINS,
		.v.pins = (const struct hda_pintbl[]) {
			{ 0x0b, 0x0221101f }, /* HP */
			{ 0x0f, 0x90170310 }, /* speaker */
			{ }
		}
	},
	[ALC861_FIXUP_AMP_VREF_0F] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc861_fixup_asus_amp_vref_0f,
	},
	[ALC861_FIXUP_NO_JACK_DETECT] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc_fixup_no_jack_detect,
	},
	[ALC861_FIXUP_ASUS_A6RP] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc861_fixup_asus_amp_vref_0f,
		.chained = true,
		.chain_id = ALC861_FIXUP_NO_JACK_DETECT,
	},
	[ALC660_FIXUP_ASUS_W7J] = {
		.type = HDA_FIXUP_VERBS,
		.v.verbs = (const struct hda_verb[]) {
			/* ASUS W7J needs a magic pin setup on unused NID 0x10
			 * for enabling outputs
			 */
			{0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24},
			{ }
		},
	}
};

static const struct hda_quirk alc861_fixup_tbl[] = {
	SND_PCI_QUIRK(0x1043, 0x1253, "ASUS W7J", ALC660_FIXUP_ASUS_W7J),
	SND_PCI_QUIRK(0x1043, 0x1263, "ASUS Z35HL", ALC660_FIXUP_ASUS_W7J),
	SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
	SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
	SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
	SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
	SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
	{}
};

/*
 * Patch entry for ALC861: apply SSID quirks, then the generic
 * BIOS auto-parser; beep amp lives on NID 0x23.
 */
static int patch_alc861(struct hda_codec *codec)
{
	struct alc_spec *spec;
	int err;

	err = alc_alloc_spec(codec, 0x15);
	if (err < 0)
		return err;

	spec = codec->spec;
	if (has_cdefine_beep(codec))
		spec->gen.beep_nid = 0x23;

	spec->power_hook = alc_power_eapd;

	alc_pre_init(codec);

	snd_hda_pick_fixup(codec, NULL, alc861_fixup_tbl, alc861_fixups);
	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);

	/* automatic parse from the BIOS config */
	err = alc861_parse_auto_config(codec);
	if (err < 0)
		goto error;

	if (!spec->gen.no_analog) {
		err = set_beep_amp(spec, 0x23, 0, HDA_OUTPUT);
		if (err < 0)
			goto error;
	}

	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);

	return 0;

 error:
	alc_free(codec);
	return err;
}

/*
 * ALC861-VD support
 *
 * Based on ALC882
 *
 * In addition, an independent DAC
 */
static int alc861vd_parse_auto_config(struct hda_codec *codec)
{
	static const hda_nid_t alc861vd_ignore[] = { 0x1d, 0 };
	static const hda_nid_t alc861vd_ssids[] = { 0x15, 0x1b, 0x14, 0 };
	return alc_parse_auto_config(codec, alc861vd_ignore, alc861vd_ssids);
}

enum {
	ALC660VD_FIX_ASUS_GPIO1,
	ALC861VD_FIX_DALLAS,
};

/* exclude VREF80 */
static void alc861vd_fixup_dallas(struct hda_codec *codec,
				  const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		snd_hda_override_pin_caps(codec, 0x18, 0x00000734);
		snd_hda_override_pin_caps(codec, 0x19, 0x0000073c);
	}
}

/* reset GPIO1 */
static void alc660vd_fixup_asus_gpio1(struct hda_codec *codec,
				      const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE)
		spec->gpio_mask |= 0x02;
	alc_fixup_gpio(codec, action, 0x01);
}

static const struct hda_fixup alc861vd_fixups[] = {
	[ALC660VD_FIX_ASUS_GPIO1] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc660vd_fixup_asus_gpio1,
	},
	[ALC861VD_FIX_DALLAS] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc861vd_fixup_dallas,
	},
};

static const struct hda_quirk alc861vd_fixup_tbl[] = {
	SND_PCI_QUIRK(0x103c, 0x30bf, "HP TX1000", ALC861VD_FIX_DALLAS),
	SND_PCI_QUIRK(0x1043, 0x1339, "ASUS A7-K", ALC660VD_FIX_ASUS_GPIO1),
	SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba L30-149", ALC861VD_FIX_DALLAS),
	{}
};

/*
 * Patch entry for ALC861-VD: apply SSID quirks, then the generic
 * BIOS auto-parser; beep amp sits on mixer NID 0x0b, input 0x05.
 */
static int patch_alc861vd(struct hda_codec *codec)
{
	struct alc_spec *spec;
	int err;

	err = alc_alloc_spec(codec, 0x0b);
	if (err < 0)
		return err;

	spec = codec->spec;
	if (has_cdefine_beep(codec))
		spec->gen.beep_nid = 0x23;

	spec->shutup = alc_eapd_shutup;

	alc_pre_init(codec);

	snd_hda_pick_fixup(codec, NULL, alc861vd_fixup_tbl, alc861vd_fixups);
	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);

	/* automatic parse from the BIOS config */
	err = alc861vd_parse_auto_config(codec);
	if (err < 0)
		goto error;

	if (!spec->gen.no_analog) {
		err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
		if (err < 0)
			goto error;
	}

	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);

	return 0;

 error:
	alc_free(codec);
	return err;
}

/*
 * ALC662 support
 *
 * ALC662 is almost identical with ALC880 but has cleaner and more flexible
 * configuration.  Each pin widget can choose any input DACs and a mixer.
 * Each ADC is connected from a mixer of all inputs.  This makes possible
 * 6-channel independent captures.
 *
 * In addition, an independent DAC for the multi-playback (not used in this
 * driver yet).
 */

/*
 * BIOS auto configuration
 */

static int alc662_parse_auto_config(struct hda_codec *codec)
{
	static const hda_nid_t alc662_ignore[] = { 0x1d, 0 };
	static const hda_nid_t alc663_ssids[] = { 0x15, 0x1b, 0x14, 0x21 };
	static const hda_nid_t alc662_ssids[] = { 0x15, 0x1b, 0x14, 0 };
	const hda_nid_t *ssids;

	/* ALC663-class codecs have an extra SSID pin (0x21) */
	if (codec->core.vendor_id == 0x10ec0272 || codec->core.vendor_id == 0x10ec0663 ||
	    codec->core.vendor_id == 0x10ec0665 || codec->core.vendor_id == 0x10ec0670 ||
	    codec->core.vendor_id == 0x10ec0671)
		ssids = alc663_ssids;
	else
		ssids = alc662_ssids;
	return alc_parse_auto_config(codec, alc662_ignore, ssids);
}

static void alc272_fixup_mario(struct hda_codec *codec,
			       const struct hda_fixup *fix, int action)
{
	if (action != HDA_FIXUP_ACT_PRE_PROBE)
		return;
	/* override the DAC amp capabilities; warn (not fail) on error */
	if (snd_hda_override_amp_caps(codec, 0x2, HDA_OUTPUT,
				      (0x3b << AC_AMPCAP_OFFSET_SHIFT) |
				      (0x3b << AC_AMPCAP_NUM_STEPS_SHIFT) |
				      (0x03 << AC_AMPCAP_STEP_SIZE_SHIFT) |
				      (0 << AC_AMPCAP_MUTE_SHIFT)))
		codec_warn(codec,
			   "failed to override amp caps for NID 0x2\n");
}

static const struct snd_pcm_chmap_elem asus_pcm_2_1_chmaps[] = {
	{ .channels = 2,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
	{ .channels = 4,
	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
		   SNDRV_CHMAP_NA, SNDRV_CHMAP_LFE } }, /* LFE only on right */
	{ }
};

/* override the 2.1 chmap */
static void alc_fixup_bass_chmap(struct hda_codec *codec,
				 const struct hda_fixup *fix, int action)
{
	if (action == HDA_FIXUP_ACT_BUILD) {
		struct alc_spec *spec = codec->spec;
		spec->gen.pcm_rec[0]->stream[0].chmap = asus_pcm_2_1_chmaps;
	}
}

/* avoid D3 for keeping GPIO up */
static unsigned int gpio_led_power_filter(struct hda_codec *codec,
					  hda_nid_t nid,
					  unsigned int power_state)
{
	struct alc_spec *spec = codec->spec;

	/* keep the AFG in D0 while a GPIO LED is lit */
	if (nid == codec->core.afg && power_state == AC_PWRST_D3 && spec->gpio_data)
		return AC_PWRST_D0;
	return power_state;
}

static void alc662_fixup_led_gpio1(struct hda_codec *codec,
				   const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	alc_fixup_hp_gpio_led(codec, action, 0x01, 0);
	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->mute_led_polarity = 1;
		codec->power_filter = gpio_led_power_filter;
	}
}

/* jack hook: toggle mic-pin VREF depending on headphone presence */
static void alc662_usi_automute_hook(struct hda_codec *codec,
				     struct hda_jack_callback *jack)
{
	struct alc_spec *spec = codec->spec;
	int vref;

	msleep(200);
	snd_hda_gen_hp_automute(codec, jack);

	vref = spec->gen.hp_jack_present ? PIN_VREF80 : 0;
	msleep(100);
	snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
			    vref);
}

static void alc662_fixup_usi_headset_mic(struct hda_codec *codec,
					 const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
		spec->gen.hp_automute_hook = alc662_usi_automute_hook;
	}
}

static void alc662_aspire_ethos_mute_speakers(struct hda_codec *codec,
					      struct hda_jack_callback *cb)
{
	/* surround speakers at 0x1b already get muted automatically when
	 * headphones are plugged in, but we have to mute/unmute the remaining
	 * channels manually:
	 * 0x15 - front left/front right
	 * 0x18 - front center/ LFE
	 */
	if (snd_hda_jack_detect_state(codec, 0x1b) == HDA_JACK_PRESENT) {
		snd_hda_set_pin_ctl_cache(codec, 0x15, 0);
		snd_hda_set_pin_ctl_cache(codec, 0x18, 0);
	} else {
		snd_hda_set_pin_ctl_cache(codec, 0x15, PIN_OUT);
		snd_hda_set_pin_ctl_cache(codec, 0x18, PIN_OUT);
	}
}

static void alc662_fixup_aspire_ethos_hp(struct hda_codec *codec,
					 const struct hda_fixup *fix, int action)
{
	/* Pin 0x1b: shared headphones jack and surround speakers */
	if (!is_jack_detectable(codec, 0x1b))
		return;

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		snd_hda_jack_detect_enable_callback(codec, 0x1b,
				alc662_aspire_ethos_mute_speakers);
		/* subwoofer needs an extra GPIO setting to become audible */
		alc_setup_gpio(codec, 0x02);
		break;
	case HDA_FIXUP_ACT_INIT:
		/* Make sure to start in a correct state, i.e. if
		 * headphones have been plugged in before powering up the
		 * system
		 */
		alc662_aspire_ethos_mute_speakers(codec, NULL);
		break;
	}
}

static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
					 const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	static const struct hda_pintbl pincfgs[] = {
		{ 0x19, 0x02a11040 }, /* use as headset mic, with its own jack detect */
		{ 0x1b, 0x0181304f },
		{ }
	};

	switch (action) {
	case HDA_FIXUP_ACT_PRE_PROBE:
		spec->gen.mixer_nid = 0;
		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
		snd_hda_apply_pincfgs(codec, pincfgs);
		break;
	case HDA_FIXUP_ACT_INIT:
		alc_write_coef_idx(codec, 0x19, 0xa054);
		break;
	}
}

/* jack hook: raise VREF on HP pin 0x1b while headphones are present */
static void alc897_hp_automute_hook(struct hda_codec *codec,
				    struct hda_jack_callback *jack)
{
	struct alc_spec *spec = codec->spec;
	int vref;

	snd_hda_gen_hp_automute(codec, jack);
	vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
	snd_hda_set_pin_ctl(codec, 0x1b, vref);
}

static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
					    const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->gen.hp_automute_hook = alc897_hp_automute_hook;
		spec->no_shutup_pins = 1;
	}
	if (action == HDA_FIXUP_ACT_PROBE) {
		snd_hda_set_pin_ctl_cache(codec, 0x1a, PIN_IN | AC_PINCTL_VREF_100);
	}
}

static void alc897_fixup_lenovo_headset_mode(struct hda_codec *codec,
					     const struct hda_fixup *fix, int action)
{
	struct alc_spec *spec = codec->spec;

	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
		spec->gen.hp_automute_hook = alc897_hp_automute_hook;
	}
}

/* factory-default COEF values restored on ALC668 init */
static const struct coef_fw alc668_coefs[] = {
	WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03,    0x0),
	WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06,    0x0), WRITE_COEF(0x07, 0x0f80),
	WRITE_COEF(0x08, 0x0031), WRITE_COEF(0x0a, 0x0060), WRITE_COEF(0x0b,    0x0),
	WRITE_COEF(0x0c, 0x7cf7), WRITE_COEF(0x0d, 0x1080), WRITE_COEF(0x0e, 0x7f7f),
	WRITE_COEF(0x0f, 0xcccc), WRITE_COEF(0x10, 0xddcc), WRITE_COEF(0x11, 0x0001),
	WRITE_COEF(0x13,    0x0), WRITE_COEF(0x14, 0x2aa0), WRITE_COEF(0x17, 0xa940),
	WRITE_COEF(0x19,    0x0), WRITE_COEF(0x1a,    0x0), WRITE_COEF(0x1b,    0x0),
	WRITE_COEF(0x1c,    0x0), WRITE_COEF(0x1d,    0x0), WRITE_COEF(0x1e, 0x7418),
	WRITE_COEF(0x1f, 0x0804), WRITE_COEF(0x20, 0x4200), WRITE_COEF(0x21, 0x0468),
	WRITE_COEF(0x22, 0x8ccc), WRITE_COEF(0x23, 0x0250), WRITE_COEF(0x24, 0x7418),
	WRITE_COEF(0x27,    0x0), WRITE_COEF(0x28, 0x8ccc), WRITE_COEF(0x2a, 0xff00),
	WRITE_COEF(0x2b, 0x8000), WRITE_COEF(0xa7, 0xff00), WRITE_COEF(0xa8, 0x8000),
	WRITE_COEF(0xaa, 0x2e17), WRITE_COEF(0xab, 0xa0c0), WRITE_COEF(0xac,    0x0),
	WRITE_COEF(0xad,    0x0), WRITE_COEF(0xae, 0x2ac6), WRITE_COEF(0xaf, 0xa480),
	WRITE_COEF(0xb0,    0x0), WRITE_COEF(0xb1,    0x0), WRITE_COEF(0xb2,    0x0),
	WRITE_COEF(0xb3,    0x0), WRITE_COEF(0xb4,    0x0), WRITE_COEF(0xb5, 0x1040),
	WRITE_COEF(0xb6, 0xd697), WRITE_COEF(0xb7, 0x902b), WRITE_COEF(0xb8, 0xd697),
	WRITE_COEF(0xb9, 0x902b), WRITE_COEF(0xba, 0xb8ba), WRITE_COEF(0xbb, 0xaaab),
	WRITE_COEF(0xbc, 0xaaaf), WRITE_COEF(0xbd, 0x6aaa), WRITE_COEF(0xbe, 0x1c02),
	WRITE_COEF(0xc0, 0x00ff), WRITE_COEF(0xc1, 0x0fa6),
	{}
};

/* write the whole alc668_coefs table back to the codec */
static void alc668_restore_default_value(struct hda_codec *codec)
{
	alc_process_coef_fw(codec, alc668_coefs);
}

enum {
	ALC662_FIXUP_ASPIRE,
	ALC662_FIXUP_LED_GPIO1,
	ALC662_FIXUP_IDEAPAD,
	ALC272_FIXUP_MARIO,
	ALC662_FIXUP_CZC_ET26,
	ALC662_FIXUP_CZC_P10T,
	ALC662_FIXUP_SKU_IGNORE,
	ALC662_FIXUP_HP_RP5800,
	ALC662_FIXUP_ASUS_MODE1,
	ALC662_FIXUP_ASUS_MODE2,
	ALC662_FIXUP_ASUS_MODE3,
	ALC662_FIXUP_ASUS_MODE4,
	ALC662_FIXUP_ASUS_MODE5,
	ALC662_FIXUP_ASUS_MODE6,
	ALC662_FIXUP_ASUS_MODE7,
	ALC662_FIXUP_ASUS_MODE8,
	ALC662_FIXUP_NO_JACK_DETECT,
	ALC662_FIXUP_ZOTAC_Z68,
	ALC662_FIXUP_INV_DMIC,
	ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
	ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
	ALC662_FIXUP_HEADSET_MODE,
	ALC668_FIXUP_HEADSET_MODE,
	ALC662_FIXUP_BASS_MODE4_CHMAP,
	ALC662_FIXUP_BASS_16,
	ALC662_FIXUP_BASS_1A,
	ALC662_FIXUP_BASS_CHMAP,
	ALC668_FIXUP_AUTO_MUTE,
	ALC668_FIXUP_DELL_DISABLE_AAMIX,
	ALC668_FIXUP_DELL_XPS13,
	ALC662_FIXUP_ASUS_Nx50,
	ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
	ALC668_FIXUP_ASUS_Nx51,
	ALC668_FIXUP_MIC_COEF,
	ALC668_FIXUP_ASUS_G751,
	ALC891_FIXUP_HEADSET_MODE,
	ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
	ALC662_FIXUP_ACER_VERITON,
	ALC892_FIXUP_ASROCK_MOBO,
	ALC662_FIXUP_USI_FUNC,
	ALC662_FIXUP_USI_HEADSET_MODE,
	ALC662_FIXUP_LENOVO_MULTI_CODECS,
	ALC669_FIXUP_ACER_ASPIRE_ETHOS,
	ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET,
	ALC671_FIXUP_HP_HEADSET_MIC2,
	ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
	ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
	ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
	ALC668_FIXUP_HEADSET_MIC,
	ALC668_FIXUP_MIC_DET_COEF,
	ALC897_FIXUP_LENOVO_HEADSET_MIC,
	ALC897_FIXUP_HEADSET_MIC_PIN,
	ALC897_FIXUP_HP_HSMIC_VERB,
	ALC897_FIXUP_LENOVO_HEADSET_MODE,
	ALC897_FIXUP_HEADSET_MIC_PIN2,
	ALC897_FIXUP_UNIS_H3C_X500S,
	ALC897_FIXUP_HEADSET_MIC_PIN3,
};

static const struct hda_fixup alc662_fixups[] = {
	[ALC662_FIXUP_ASPIRE] = {
		.type = HDA_FIXUP_PINS,
		.v.pins = (const struct hda_pintbl[]) {
			{ 0x15, 0x99130112 }, /* subwoofer */
			{ }
		}
	},
	[ALC662_FIXUP_LED_GPIO1] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc662_fixup_led_gpio1,
	},
	[ALC662_FIXUP_IDEAPAD] = {
		.type = HDA_FIXUP_PINS,
		.v.pins = (const struct hda_pintbl[]) {
			{ 0x17, 0x99130112 }, /* subwoofer */
			{ }
		},
		.chained = true,
		.chain_id = ALC662_FIXUP_LED_GPIO1,
	},
	[ALC272_FIXUP_MARIO] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = alc272_fixup_mario,
	},
	[ALC662_FIXUP_CZC_ET26] = {
		.type = HDA_FIXUP_PINS,
		.v.pins = (const struct hda_pintbl[]) {
			{0x12, 0x403cc000},
			{0x14, 0x90170110}, /* speaker */
			{0x15, 0x411111f0},
			{0x16, 0x411111f0},
			{0x18, 0x01a19030}, /* mic */
			{0x19, 0x90a7013f}, /* int-mic */
			{0x1a, 0x01014020},
			{0x1b, 0x0121401f},
			{0x1c, 0x411111f0},
			{0x1d, 0x411111f0},
			{0x1e, 0x40478e35},
			{}
		},
		.chained = true,
		.chain_id = ALC662_FIXUP_SKU_IGNORE
	},
	[ALC662_FIXUP_CZC_P10T] = {
		.type = HDA_FIXUP_VERBS,
		.v.verbs = (const struct hda_verb[]) {
			{0x14, AC_VERB_SET_EAPD_BTLENABLE, 0},
			{}
} }, [ALC662_FIXUP_SKU_IGNORE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_sku_ignore, }, [ALC662_FIXUP_HP_RP5800] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x0221201f }, /* HP out */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_ASUS_MODE1] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x18, 0x01a19c20 }, /* mic */ { 0x19, 0x99a3092f }, /* int-mic */ { 0x21, 0x0121401f }, /* HP out */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_ASUS_MODE2] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x18, 0x01a19820 }, /* mic */ { 0x19, 0x99a3092f }, /* int-mic */ { 0x1b, 0x0121401f }, /* HP out */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_ASUS_MODE3] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x15, 0x0121441f }, /* HP */ { 0x18, 0x01a19840 }, /* mic */ { 0x19, 0x99a3094f }, /* int-mic */ { 0x21, 0x01211420 }, /* HP2 */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_ASUS_MODE4] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x16, 0x99130111 }, /* speaker */ { 0x18, 0x01a19840 }, /* mic */ { 0x19, 0x99a3094f }, /* int-mic */ { 0x21, 0x0121441f }, /* HP */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_ASUS_MODE5] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x15, 0x0121441f }, /* HP */ { 0x16, 0x99130111 }, /* speaker */ { 0x18, 0x01a19840 }, /* mic */ { 0x19, 0x99a3094f }, /* int-mic */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_ASUS_MODE6] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x15, 
0x01211420 }, /* HP2 */ { 0x18, 0x01a19840 }, /* mic */ { 0x19, 0x99a3094f }, /* int-mic */ { 0x1b, 0x0121441f }, /* HP */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_ASUS_MODE7] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x17, 0x99130111 }, /* speaker */ { 0x18, 0x01a19840 }, /* mic */ { 0x19, 0x99a3094f }, /* int-mic */ { 0x1b, 0x01214020 }, /* HP */ { 0x21, 0x0121401f }, /* HP */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_ASUS_MODE8] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x14, 0x99130110 }, /* speaker */ { 0x12, 0x99a30970 }, /* int-mic */ { 0x15, 0x01214020 }, /* HP */ { 0x17, 0x99130111 }, /* speaker */ { 0x18, 0x01a19840 }, /* mic */ { 0x21, 0x0121401f }, /* HP */ { } }, .chained = true, .chain_id = ALC662_FIXUP_SKU_IGNORE }, [ALC662_FIXUP_NO_JACK_DETECT] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_no_jack_detect, }, [ALC662_FIXUP_ZOTAC_Z68] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x02214020 }, /* Front HP */ { } } }, [ALC662_FIXUP_INV_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, }, [ALC668_FIXUP_DELL_XPS13] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_dell_xps13, .chained = true, .chain_id = ALC668_FIXUP_DELL_DISABLE_AAMIX }, [ALC668_FIXUP_DELL_DISABLE_AAMIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, .chained = true, .chain_id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE }, [ALC668_FIXUP_AUTO_MUTE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_auto_mute_via_amp, .chained = true, .chain_id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE }, [ALC662_FIXUP_DELL_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */ /* headphone mic by setting pin control of 0x1b (headphone out) to in + vref_50 */ { } }, .chained = true, .chain_id = 
ALC662_FIXUP_HEADSET_MODE }, [ALC662_FIXUP_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_alc662, }, [ALC668_FIXUP_DELL_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a1913d }, /* use as headphone mic, without its own jack detect */ { 0x1b, 0x03a1113c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC668_FIXUP_HEADSET_MODE }, [ALC668_FIXUP_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_alc668, }, [ALC662_FIXUP_BASS_MODE4_CHMAP] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_bass_chmap, .chained = true, .chain_id = ALC662_FIXUP_ASUS_MODE4 }, [ALC662_FIXUP_BASS_16] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { {0x16, 0x80106111}, /* bass speaker */ {} }, .chained = true, .chain_id = ALC662_FIXUP_BASS_CHMAP, }, [ALC662_FIXUP_BASS_1A] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { {0x1a, 0x80106111}, /* bass speaker */ {} }, .chained = true, .chain_id = ALC662_FIXUP_BASS_CHMAP, }, [ALC662_FIXUP_BASS_CHMAP] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_bass_chmap, }, [ALC662_FIXUP_ASUS_Nx50] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_auto_mute_via_amp, .chained = true, .chain_id = ALC662_FIXUP_BASS_1A }, [ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_alc668, .chain_id = ALC662_FIXUP_BASS_CHMAP }, [ALC668_FIXUP_ASUS_Nx51] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a1913d }, /* use as headphone mic, without its own jack detect */ { 0x1a, 0x90170151 }, /* bass speaker */ { 0x1b, 0x03a1113c }, /* use as headset mic, without its own jack detect */ {} }, .chained = true, .chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE, }, [ALC668_FIXUP_MIC_COEF] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x20, AC_VERB_SET_COEF_INDEX, 0xc3 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x4000 
}, {} }, }, [ALC668_FIXUP_ASUS_G751] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x0421101f }, /* HP */ {} }, .chained = true, .chain_id = ALC668_FIXUP_MIC_COEF }, [ALC891_FIXUP_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode, }, [ALC891_FIXUP_DELL_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a1913d }, /* use as headphone mic, without its own jack detect */ { 0x1b, 0x03a1113c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC891_FIXUP_HEADSET_MODE }, [ALC662_FIXUP_ACER_VERITON] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x15, 0x50170120 }, /* no internal speaker */ { } } }, [ALC892_FIXUP_ASROCK_MOBO] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x15, 0x40f000f0 }, /* disabled */ { 0x16, 0x40f000f0 }, /* disabled */ { } } }, [ALC662_FIXUP_USI_FUNC] = { .type = HDA_FIXUP_FUNC, .v.func = alc662_fixup_usi_headset_mic, }, [ALC662_FIXUP_USI_HEADSET_MODE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x02a1913c }, /* use as headset mic, without its own jack detect */ { 0x18, 0x01a1903d }, { } }, .chained = true, .chain_id = ALC662_FIXUP_USI_FUNC }, [ALC662_FIXUP_LENOVO_MULTI_CODECS] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_alc662_fixup_lenovo_dual_codecs, }, [ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET] = { .type = HDA_FIXUP_FUNC, .v.func = alc662_fixup_aspire_ethos_hp, }, [ALC669_FIXUP_ACER_ASPIRE_ETHOS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x15, 0x92130110 }, /* front speakers */ { 0x18, 0x99130111 }, /* center/subwoofer */ { 0x1b, 0x11130012 }, /* surround plus jack for HP */ { } }, .chained = true, .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET }, [ALC671_FIXUP_HP_HEADSET_MIC2] = { .type = HDA_FIXUP_FUNC, .v.func = alc671_fixup_hp_headset_mic2, }, [ALC662_FIXUP_ACER_X2660G_HEADSET_MODE] = { .type = 
HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1a, 0x02a1113c }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC662_FIXUP_USI_FUNC }, [ALC662_FIXUP_ACER_NITRO_HEADSET_MODE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */ { 0x1b, 0x0221144f }, { } }, .chained = true, .chain_id = ALC662_FIXUP_USI_FUNC }, [ALC668_FIXUP_ASUS_NO_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1b, 0x04a1112c }, { } }, .chained = true, .chain_id = ALC668_FIXUP_HEADSET_MIC }, [ALC668_FIXUP_HEADSET_MIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_headset_mic, .chained = true, .chain_id = ALC668_FIXUP_MIC_DET_COEF }, [ALC668_FIXUP_MIC_DET_COEF] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x20, AC_VERB_SET_COEF_INDEX, 0x15 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x0d60 }, {} }, }, [ALC897_FIXUP_LENOVO_HEADSET_MIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc897_fixup_lenovo_headset_mic, }, [ALC897_FIXUP_HEADSET_MIC_PIN] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1a, 0x03a11050 }, { } }, .chained = true, .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC }, [ALC897_FIXUP_HP_HSMIC_VERB] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, }, [ALC897_FIXUP_LENOVO_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc897_fixup_lenovo_headset_mode, }, [ALC897_FIXUP_HEADSET_MIC_PIN2] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */ { } }, .chained = true, .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE }, [ALC897_FIXUP_UNIS_H3C_X500S] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 }, {} }, }, 
[ALC897_FIXUP_HEADSET_MIC_PIN3] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a11050 }, /* use as headset mic */ { } }, }, }; static const struct hda_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1019, 0x9859, "JP-IK LEAP W502", ALC897_FIXUP_HEADSET_MIC_PIN3), SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS), SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE), SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE), SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13), SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_XPS13), SND_PCI_QUIRK(0x1028, 0x060d, "Dell M3800", ALC668_FIXUP_DELL_XPS13), SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB), 
SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x8768, "HP Slim Desktop S01", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50), SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50), SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751), SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A), SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP), SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16), SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51), SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51), SND_PCI_QUIRK(0x1043, 0x185d, "ASUS G551JW", ALC668_FIXUP_ASUS_NO_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8), SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16), SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP), SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE), SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS), SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x1064, "Lenovo P3 Tower", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo 
ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x3364, "Lenovo ThinkCentre M90 Gen5", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON), SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26), SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), SND_PCI_QUIRK(0x1c6c, 0x1239, "Compaq N14JP6-V2", ALC897_FIXUP_HP_HSMIC_VERB), #if 0 /* Below is a quirk table taken from the old code. * Basically the device should work as is without the fixup table. * If BIOS doesn't give a proper info, enable the corresponding * fixup entry. 
*/ SND_PCI_QUIRK(0x1043, 0x1000, "ASUS N50Vm", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1092, "ASUS NB", ALC662_FIXUP_ASUS_MODE3), SND_PCI_QUIRK(0x1043, 0x1173, "ASUS K73Jn", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x11c3, "ASUS M70V", ALC662_FIXUP_ASUS_MODE3), SND_PCI_QUIRK(0x1043, 0x11d3, "ASUS NB", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x11f3, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1203, "ASUS NB", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1303, "ASUS G60J", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1333, "ASUS G60Jx", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1339, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x13e3, "ASUS N71JA", ALC662_FIXUP_ASUS_MODE7), SND_PCI_QUIRK(0x1043, 0x1463, "ASUS N71", ALC662_FIXUP_ASUS_MODE7), SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G72", ALC662_FIXUP_ASUS_MODE8), SND_PCI_QUIRK(0x1043, 0x1563, "ASUS N90", ALC662_FIXUP_ASUS_MODE3), SND_PCI_QUIRK(0x1043, 0x15d3, "ASUS N50SF F50SF", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x16c3, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x16f3, "ASUS K40C K50C", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1733, "ASUS N81De", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1753, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1763, "ASUS NB", ALC662_FIXUP_ASUS_MODE6), SND_PCI_QUIRK(0x1043, 0x1765, "ASUS NB", ALC662_FIXUP_ASUS_MODE6), SND_PCI_QUIRK(0x1043, 0x1783, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1793, "ASUS F50GX", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x17b3, "ASUS F70SL", ALC662_FIXUP_ASUS_MODE3), SND_PCI_QUIRK(0x1043, 0x17f3, "ASUS X58LE", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1813, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1823, "ASUS NB", ALC662_FIXUP_ASUS_MODE5), SND_PCI_QUIRK(0x1043, 0x1833, "ASUS NB", ALC662_FIXUP_ASUS_MODE6), SND_PCI_QUIRK(0x1043, 0x1843, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), 
SND_PCI_QUIRK(0x1043, 0x1853, "ASUS F50Z", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1864, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1876, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1893, "ASUS M50Vm", ALC662_FIXUP_ASUS_MODE3), SND_PCI_QUIRK(0x1043, 0x1894, "ASUS X55", ALC662_FIXUP_ASUS_MODE3), SND_PCI_QUIRK(0x1043, 0x18b3, "ASUS N80Vc", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x18c3, "ASUS VX5", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x18d3, "ASUS N81Te", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x18f3, "ASUS N505Tp", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1903, "ASUS F5GL", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1913, "ASUS NB", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1933, "ASUS F80Q", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x1943, "ASUS Vx3V", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1953, "ASUS NB", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71C", ALC662_FIXUP_ASUS_MODE3), SND_PCI_QUIRK(0x1043, 0x1983, "ASUS N5051A", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x1993, "ASUS N20", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x19b3, "ASUS F7Z", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x19c3, "ASUS F5Z/F6x", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x1043, 0x19e3, "ASUS NB", ALC662_FIXUP_ASUS_MODE1), SND_PCI_QUIRK(0x1043, 0x19f3, "ASUS NB", ALC662_FIXUP_ASUS_MODE4), #endif {} }; static const struct hda_model_fixup alc662_fixup_models[] = { {.id = ALC662_FIXUP_ASPIRE, .name = "aspire"}, {.id = ALC662_FIXUP_IDEAPAD, .name = "ideapad"}, {.id = ALC272_FIXUP_MARIO, .name = "mario"}, {.id = ALC662_FIXUP_HP_RP5800, .name = "hp-rp5800"}, {.id = ALC662_FIXUP_ASUS_MODE1, .name = "asus-mode1"}, {.id = ALC662_FIXUP_ASUS_MODE2, .name = "asus-mode2"}, {.id = ALC662_FIXUP_ASUS_MODE3, .name = "asus-mode3"}, {.id = ALC662_FIXUP_ASUS_MODE4, .name = "asus-mode4"}, {.id = ALC662_FIXUP_ASUS_MODE5, .name = "asus-mode5"}, {.id = 
ALC662_FIXUP_ASUS_MODE6, .name = "asus-mode6"},
	{.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"},
	{.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
	{.id = ALC662_FIXUP_ZOTAC_Z68, .name = "zotac-z68"},
	{.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
	{.id = ALC662_FIXUP_DELL_MIC_NO_PRESENCE, .name = "alc662-headset-multi"},
	{.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
	{.id = ALC662_FIXUP_HEADSET_MODE, .name = "alc662-headset"},
	{.id = ALC668_FIXUP_HEADSET_MODE, .name = "alc668-headset"},
	{.id = ALC662_FIXUP_BASS_16, .name = "bass16"},
	{.id = ALC662_FIXUP_BASS_1A, .name = "bass1a"},
	{.id = ALC668_FIXUP_AUTO_MUTE, .name = "automute"},
	{.id = ALC668_FIXUP_DELL_XPS13, .name = "dell-xps13"},
	{.id = ALC662_FIXUP_ASUS_Nx50, .name = "asus-nx50"},
	{.id = ALC668_FIXUP_ASUS_Nx51, .name = "asus-nx51"},
	{.id = ALC668_FIXUP_ASUS_G751, .name = "asus-g751"},
	{.id = ALC891_FIXUP_HEADSET_MODE, .name = "alc891-headset"},
	{.id = ALC891_FIXUP_DELL_MIC_NO_PRESENCE, .name = "alc891-headset-multi"},
	{.id = ALC662_FIXUP_ACER_VERITON, .name = "acer-veriton"},
	{.id = ALC892_FIXUP_ASROCK_MOBO, .name = "asrock-mobo"},
	{.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"},
	{.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
	{.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"},
	{.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"},
	{}
};

/*
 * BIOS pin-default based quirk matching: each entry matches on codec ID,
 * subsystem vendor and the listed {pin NID, default-config} pairs.
 */
static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
	SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
		{0x17, 0x02211010},
		{0x18, 0x01a19030},
		{0x1a, 0x01813040},
		{0x21, 0x01014020}),
	SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
		{0x16, 0x01813030},
		{0x17, 0x02211010},
		{0x18, 0x01a19040},
		{0x21, 0x01014020}),
	SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
		{0x14, 0x01014010},
		{0x18, 0x01a19020},
		{0x1a, 0x0181302f},
		{0x1b, 0x0221401f}),
	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
		{0x12, 0x99a30130},
		{0x14, 0x90170110},
		{0x15, 0x0321101f},
		{0x16, 0x03011020}),
	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
		{0x12, 0x99a30140},
		{0x14, 0x90170110},
		{0x15, 0x0321101f},
		{0x16, 0x03011020}),
	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
		{0x12, 0x99a30150},
		{0x14, 0x90170110},
		{0x15, 0x0321101f},
		{0x16, 0x03011020}),
	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
		{0x14, 0x90170110},
		{0x15, 0x0321101f},
		{0x16, 0x03011020}),
	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell XPS 15", ALC668_FIXUP_AUTO_MUTE,
		{0x12, 0x90a60130},
		{0x14, 0x90170110},
		{0x15, 0x0321101f}),
	SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
		{0x14, 0x01014010},
		{0x17, 0x90170150},
		{0x19, 0x02a11060},
		{0x1b, 0x01813030},
		{0x21, 0x02211020}),
	SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
		{0x14, 0x01014010},
		{0x18, 0x01a19040},
		{0x1b, 0x01813030},
		{0x21, 0x02211020}),
	SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
		{0x14, 0x01014020},
		{0x17, 0x90170110},
		{0x18, 0x01a19050},
		{0x1b, 0x01813040},
		{0x21, 0x02211030}),
	{}
};

/*
 * Probe callback for the ALC662 family (ALC662/663/665/668/671/891/897 etc.):
 * allocates the spec, applies quirk/pin fixups, parses the BIOS pin config
 * and optionally attaches the beep generator.  Returns 0 or a negative errno.
 */
static int patch_alc662(struct hda_codec *codec)
{
	struct alc_spec *spec;
	int err;

	/* 0x0b is the analog-loopback mixer NID on this family */
	err = alc_alloc_spec(codec, 0x0b);
	if (err < 0)
		return err;

	spec = codec->spec;

	spec->shutup = alc_eapd_shutup;

	/* handle multiple HPs as is */
	spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;

	alc_fix_pll_init(codec, 0x20, 0x04, 15);

	switch (codec->core.vendor_id) {
	case 0x10ec0668:
		/* ALC668 needs its default COEFs restored after resume */
		spec->init_hook = alc668_restore_default_value;
		break;
	}

	alc_pre_init(codec);

	/* pick and apply board-specific fixups before the generic parser runs */
	snd_hda_pick_fixup(codec, alc662_fixup_models,
		       alc662_fixup_tbl, alc662_fixups);
	snd_hda_pick_pin_fixup(codec, alc662_pin_fixup_tbl, alc662_fixups, true);
	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);

	alc_auto_parse_customize_define(codec);

	if (has_cdefine_beep(codec))
		spec->gen.beep_nid = 0x01;

	/*
	 * Some Acer boards ship an ALC662 that is really an ALC272X;
	 * rename the codec when COEF bit 14 and the platform type say so.
	 */
	if ((alc_get_coef0(codec) & (1 << 14)) &&
	    codec->bus->pci && codec->bus->pci->subsystem_vendor == 0x1025 &&
	    spec->cdefine.platform_type == 1) {
		err = alc_codec_rename(codec, "ALC272X");
		if (err < 0)
			goto error;
	}

	/* automatic parse from the BIOS config */
	err = alc662_parse_auto_config(codec);
	if (err < 0)
		goto error;

	if (!spec->gen.no_analog && spec->gen.beep_nid) {
		/* the beep mixer input index differs per chip variant */
		switch (codec->core.vendor_id) {
		case 0x10ec0662:
			err = set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
			break;
		case 0x10ec0272:
		case 0x10ec0663:
		case 0x10ec0665:
		case 0x10ec0668:
			err = set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
			break;
		case 0x10ec0273:
			err = set_beep_amp(spec, 0x0b, 0x03, HDA_INPUT);
			break;
		}
		if (err < 0)
			goto error;
	}

	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);

	return 0;

 error:
	alc_free(codec);
	return err;
}

/*
 * ALC680 support
 */
static int alc680_parse_auto_config(struct hda_codec *codec)
{
	/* no fixed DAC/pin lists; rely entirely on BIOS pin defaults */
	return alc_parse_auto_config(codec, NULL, NULL);
}

/* probe callback for the ALC680 */
static int patch_alc680(struct hda_codec *codec)
{
	int err;

	/* ALC680 has no aa-loopback mixer */
	err = alc_alloc_spec(codec, 0);
	if (err < 0)
		return err;

	/* automatic parse from the BIOS config */
	err = alc680_parse_auto_config(codec);
	if (err < 0) {
		alc_free(codec);
		return err;
	}

	return 0;
}

/*
 * patch entries: codec vendor/device ID -> chip name and probe function
 */
static const struct hda_device_id snd_hda_id_realtek[] = {
	HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0230, "ALC236", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0245, "ALC245", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
	HDA_CODEC_ENTRY(0x10ec0257,
"ALC257", patch_alc269), HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260), HDA_CODEC_ENTRY(0x10ec0262, "ALC262", patch_alc262), HDA_CODEC_ENTRY(0x10ec0267, "ALC267", patch_alc268), HDA_CODEC_ENTRY(0x10ec0268, "ALC268", patch_alc268), HDA_CODEC_ENTRY(0x10ec0269, "ALC269", patch_alc269), HDA_CODEC_ENTRY(0x10ec0270, "ALC270", patch_alc269), HDA_CODEC_ENTRY(0x10ec0272, "ALC272", patch_alc662), HDA_CODEC_ENTRY(0x10ec0274, "ALC274", patch_alc269), HDA_CODEC_ENTRY(0x10ec0275, "ALC275", patch_alc269), HDA_CODEC_ENTRY(0x10ec0276, "ALC276", patch_alc269), HDA_CODEC_ENTRY(0x10ec0280, "ALC280", patch_alc269), HDA_CODEC_ENTRY(0x10ec0282, "ALC282", patch_alc269), HDA_CODEC_ENTRY(0x10ec0283, "ALC283", patch_alc269), HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269), HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269), HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269), HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269), HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269), HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269), HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269), HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269), HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269), HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269), HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269), HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269), HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269), HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269), HDA_CODEC_ENTRY(0x10ec0623, "ALC623", patch_alc269), HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861), HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd), HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861), HDA_CODEC_ENTRY(0x10ec0862, "ALC861-VD", patch_alc861vd), HDA_CODEC_REV_ENTRY(0x10ec0662, 0x100002, "ALC662 rev2", patch_alc882), HDA_CODEC_REV_ENTRY(0x10ec0662, 0x100101, "ALC662 rev1", patch_alc662), HDA_CODEC_REV_ENTRY(0x10ec0662, 0x100300, "ALC662 rev3", patch_alc662), HDA_CODEC_ENTRY(0x10ec0663, "ALC663", 
patch_alc662), HDA_CODEC_ENTRY(0x10ec0665, "ALC665", patch_alc662), HDA_CODEC_ENTRY(0x10ec0667, "ALC667", patch_alc662), HDA_CODEC_ENTRY(0x10ec0668, "ALC668", patch_alc662), HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662), HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662), HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680), HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269), HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269), HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269), HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269), HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662), HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880), HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882), HDA_CODEC_ENTRY(0x10ec0883, "ALC883", patch_alc882), HDA_CODEC_REV_ENTRY(0x10ec0885, 0x100101, "ALC889A", patch_alc882), HDA_CODEC_REV_ENTRY(0x10ec0885, 0x100103, "ALC889A", patch_alc882), HDA_CODEC_ENTRY(0x10ec0885, "ALC885", patch_alc882), HDA_CODEC_ENTRY(0x10ec0887, "ALC887", patch_alc882), HDA_CODEC_REV_ENTRY(0x10ec0888, 0x100101, "ALC1200", patch_alc882), HDA_CODEC_ENTRY(0x10ec0888, "ALC888", patch_alc882), HDA_CODEC_ENTRY(0x10ec0889, "ALC889", patch_alc882), HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662), HDA_CODEC_ENTRY(0x10ec0897, "ALC897", patch_alc662), HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882), HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882), HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882), HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882), HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882), HDA_CODEC_ENTRY(0x19e58326, "HW8326", patch_alc269), {} /* terminator */ }; MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_realtek); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek HD-audio codec"); MODULE_IMPORT_NS("SND_HDA_SCODEC_COMPONENT"); static struct hda_codec_driver realtek_driver = { .id = snd_hda_id_realtek, }; module_hda_codec_driver(realtek_driver);
// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments' K3 Interrupt Aggregator MSI bus
 *
 * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/soc/ti/ti_sci_protocol.h>

/*
 * The INTA hardware has no MSI message registers; the parent domain does all
 * the routing, so both MSI message callbacks are intentionally empty stubs.
 */
static void ti_sci_inta_msi_write_msg(struct irq_data *data,
				      struct msi_msg *msg)
{
	/* Nothing to do */
}

static void ti_sci_inta_msi_compose_msi_msg(struct irq_data *data,
					    struct msi_msg *msg)
{
	/* Nothing to do */
}

/*
 * Fill in the irq_chip callbacks of the caller-supplied domain info:
 * MSI-message ops become the local stubs above, everything else is
 * forwarded to the parent irq chip.
 */
static void ti_sci_inta_msi_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	if (WARN_ON(!chip))
		return;

	chip->irq_request_resources = irq_chip_request_resources_parent;
	chip->irq_release_resources = irq_chip_release_resources_parent;
	chip->irq_compose_msi_msg = ti_sci_inta_msi_compose_msi_msg;
	chip->irq_write_msi_msg = ti_sci_inta_msi_write_msg;
	chip->irq_set_type = irq_chip_set_type_parent;
	chip->irq_unmask = irq_chip_unmask_parent;
	chip->irq_mask = irq_chip_mask_parent;
	chip->irq_ack = irq_chip_ack_parent;
}

/**
 * ti_sci_inta_msi_create_irq_domain() - create an INTA MSI irq domain
 * @fwnode:	fwnode of the interrupt controller
 * @info:	MSI domain info; its chip ops are updated in place
 * @parent:	parent irq domain the MSIs are stacked on
 *
 * Returns the new domain, or NULL when msi_create_irq_domain() fails.
 */
struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode,
						     struct msi_domain_info *info,
						     struct irq_domain *parent)
{
	struct irq_domain *domain;

	ti_sci_inta_msi_update_chip_ops(info);
	/* descriptors are freed automatically when the irqs are released */
	info->flags |= MSI_FLAG_FREE_MSI_DESCS;

	domain = msi_create_irq_domain(fwnode, info, parent);
	if (domain)
		irq_domain_update_bus_token(domain, DOMAIN_BUS_TI_SCI_INTA_MSI);

	return domain;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain);

/*
 * Insert one MSI descriptor per event in both the primary (start/num) and
 * secondary (start_sec/num_sec) ranges of every resource set.  The msi_index
 * of each descriptor is the TISCI resource index itself, so the index space
 * may contain gaps.  Returns the number of descriptors inserted, or -ENOMEM
 * after freeing everything inserted so far.  Caller must hold the MSI
 * descriptor lock (see ti_sci_inta_msi_domain_alloc_irqs()).
 */
static int ti_sci_inta_msi_alloc_descs(struct device *dev,
				       struct ti_sci_resource *res)
{
	struct msi_desc msi_desc;
	int set, i, count = 0;

	/* template descriptor, re-used for every insertion */
	memset(&msi_desc, 0, sizeof(msi_desc));
	msi_desc.nvec_used = 1;

	for (set = 0; set < res->sets; set++) {
		for (i = 0; i < res->desc[set].num; i++, count++) {
			msi_desc.msi_index = res->desc[set].start + i;
			if (msi_insert_msi_desc(dev, &msi_desc))
				goto fail;
		}

		for (i = 0; i < res->desc[set].num_sec; i++, count++) {
			msi_desc.msi_index = res->desc[set].start_sec + i;
			if (msi_insert_msi_desc(dev, &msi_desc))
				goto fail;
		}
	}
	return count;
fail:
	msi_free_msi_descs(dev);
	return -ENOMEM;
}

/**
 * ti_sci_inta_msi_domain_alloc_irqs() - allocate irqs for a TISCI resource
 * @dev:	device whose MSI domain the irqs are allocated from; must be a
 *		platform device with a valid id (used as the TISCI dev id)
 * @res:	TISCI resource ranges describing the events to allocate
 *
 * Returns 0 on success or a negative errno.
 */
int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
				      struct ti_sci_resource *res)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret, nvec;

	if (pdev->id < 0)
		return -ENODEV;

	ret = msi_setup_device_data(dev);
	if (ret)
		return ret;

	msi_lock_descs(dev);
	nvec = ti_sci_inta_msi_alloc_descs(dev, res);
	if (nvec <= 0) {
		ret = nvec;
		goto unlock;
	}

	/* Use alloc ALL as it's unclear whether there are gaps in the indices */
	ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec);
	if (ret)
		dev_err(dev, "Failed to allocate IRQs %d\n", ret);
unlock:
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * wm8988.c -- WM8988 ALSA SoC audio driver
 *
 * Copyright 2009 Wolfson Microelectronics plc
 * Copyright 2005 Openedhand Ltd.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include <sound/soc.h>
#include <sound/initval.h>

#include "wm8988.h"

/*
 * wm8988 register cache
 * We can't read the WM8988 register space when we
 * are using 2 wire for device control, so we cache them instead.
 * Values below are the chip's power-on defaults.
 */
static const struct reg_default wm8988_reg_defaults[] = {
	{ 0, 0x0097 },
	{ 1, 0x0097 },
	{ 2, 0x0079 },
	{ 3, 0x0079 },
	{ 5, 0x0008 },
	{ 7, 0x000a },
	{ 8, 0x0000 },
	{ 10, 0x00ff },
	{ 11, 0x00ff },
	{ 12, 0x000f },
	{ 13, 0x000f },
	{ 16, 0x0000 },
	{ 17, 0x007b },
	{ 18, 0x0000 },
	{ 19, 0x0032 },
	{ 20, 0x0000 },
	{ 21, 0x00c3 },
	{ 22, 0x00c3 },
	{ 23, 0x00c0 },
	{ 24, 0x0000 },
	{ 25, 0x0000 },
	{ 26, 0x0000 },
	{ 27, 0x0000 },
	{ 31, 0x0000 },
	{ 32, 0x0000 },
	{ 33, 0x0000 },
	{ 34, 0x0050 },
	{ 35, 0x0050 },
	{ 36, 0x0050 },
	{ 37, 0x0050 },
	{ 40, 0x0079 },
	{ 41, 0x0079 },
	{ 42, 0x0079 },
};

/* regmap callback: whitelist of writeable WM8988 registers */
static bool wm8988_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case WM8988_LINVOL:
	case WM8988_RINVOL:
	case WM8988_LOUT1V:
	case WM8988_ROUT1V:
	case WM8988_ADCDAC:
	case WM8988_IFACE:
	case WM8988_SRATE:
	case WM8988_LDAC:
	case WM8988_RDAC:
	case WM8988_BASS:
	case WM8988_TREBLE:
	case WM8988_RESET:
	case WM8988_3D:
	case WM8988_ALC1:
	case WM8988_ALC2:
	case WM8988_ALC3:
	case WM8988_NGATE:
	case WM8988_LADC:
	case WM8988_RADC:
	case WM8988_ADCTL1:
	case WM8988_ADCTL2:
	case WM8988_PWR1:
	case WM8988_PWR2:
	case WM8988_ADCTL3:
	case WM8988_ADCIN:
	case WM8988_LADCIN:
	case WM8988_RADCIN:
	case WM8988_LOUTM1:
	case WM8988_LOUTM2:
	case WM8988_ROUTM1:
	case WM8988_ROUTM2:
	case WM8988_LOUT2V:
	case WM8988_ROUT2V:
	case WM8988_LPPB:
		return true;
	default:
		return false;
	}
}

/* codec private data */
struct wm8988_priv {
	struct regmap *regmap;
	unsigned int sysclk;	/* current MCLK rate set via set_dai_sysclk */
	const struct snd_pcm_hw_constraint_list *sysclk_constraints;
};

/* writing 0 to the reset register restores chip defaults */
#define wm8988_reset(c)	snd_soc_component_write(c, WM8988_RESET, 0)

/*
 * WM8988 Controls
 */

static const char *bass_boost_txt[] = {"Linear Control", "Adaptive Boost"};
static SOC_ENUM_SINGLE_DECL(bass_boost, WM8988_BASS, 7, bass_boost_txt);

static const char *bass_filter_txt[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" };
static SOC_ENUM_SINGLE_DECL(bass_filter, WM8988_BASS, 6, bass_filter_txt);

static const char *treble_txt[] = {"8kHz", "4kHz"};
static SOC_ENUM_SINGLE_DECL(treble, WM8988_TREBLE, 6, treble_txt);

static const char *stereo_3d_lc_txt[] = {"200Hz", "500Hz"};
static SOC_ENUM_SINGLE_DECL(stereo_3d_lc, WM8988_3D, 5, stereo_3d_lc_txt);

static const char *stereo_3d_uc_txt[] = {"2.2kHz", "1.5kHz"};
static SOC_ENUM_SINGLE_DECL(stereo_3d_uc, WM8988_3D, 6, stereo_3d_uc_txt);

static const char *stereo_3d_func_txt[] = {"Capture", "Playback"};
static SOC_ENUM_SINGLE_DECL(stereo_3d_func, WM8988_3D, 7, stereo_3d_func_txt);

static const char *alc_func_txt[] = {"Off", "Right", "Left", "Stereo"};
static SOC_ENUM_SINGLE_DECL(alc_func, WM8988_ALC1, 7, alc_func_txt);

static const char *ng_type_txt[] = {"Constant PGA Gain", "Mute ADC Output"};
static SOC_ENUM_SINGLE_DECL(ng_type, WM8988_NGATE, 1, ng_type_txt);

static const char *deemph_txt[] = {"None", "32Khz", "44.1Khz", "48Khz"};
static SOC_ENUM_SINGLE_DECL(deemph, WM8988_ADCDAC, 1, deemph_txt);

static const char *adcpol_txt[] = {"Normal", "L Invert", "R Invert",
				   "L + R Invert"};
static SOC_ENUM_SINGLE_DECL(adcpol, WM8988_ADCDAC, 5, adcpol_txt);

/* dB scales: min (0.01dB units), step, mute-at-minimum flag */
static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static
const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);

/* ALSA mixer controls exposed to userspace */
static const struct snd_kcontrol_new wm8988_snd_controls[] = {
	SOC_ENUM("Bass Boost", bass_boost),
	SOC_ENUM("Bass Filter", bass_filter),
	SOC_SINGLE("Bass Volume", WM8988_BASS, 0, 15, 1),

	SOC_SINGLE("Treble Volume", WM8988_TREBLE, 0, 15, 0),
	SOC_ENUM("Treble Cut-off", treble),

	SOC_SINGLE("3D Switch", WM8988_3D, 0, 1, 0),
	SOC_SINGLE("3D Volume", WM8988_3D, 1, 15, 0),
	SOC_ENUM("3D Lower Cut-off", stereo_3d_lc),
	SOC_ENUM("3D Upper Cut-off", stereo_3d_uc),
	SOC_ENUM("3D Mode", stereo_3d_func),

	SOC_SINGLE("ALC Capture Target Volume", WM8988_ALC1, 0, 7, 0),
	SOC_SINGLE("ALC Capture Max Volume", WM8988_ALC1, 4, 7, 0),
	SOC_ENUM("ALC Capture Function", alc_func),
	SOC_SINGLE("ALC Capture ZC Switch", WM8988_ALC2, 7, 1, 0),
	SOC_SINGLE("ALC Capture Hold Time", WM8988_ALC2, 0, 15, 0),
	SOC_SINGLE("ALC Capture Decay Time", WM8988_ALC3, 4, 15, 0),
	SOC_SINGLE("ALC Capture Attack Time", WM8988_ALC3, 0, 15, 0),
	SOC_SINGLE("ALC Capture NG Threshold", WM8988_NGATE, 3, 31, 0),
	SOC_ENUM("ALC Capture NG Type", ng_type),
	SOC_SINGLE("ALC Capture NG Switch", WM8988_NGATE, 0, 1, 0),

	SOC_SINGLE("ZC Timeout Switch", WM8988_ADCTL1, 0, 1, 0),

	SOC_DOUBLE_R_TLV("Capture Digital Volume", WM8988_LADC, WM8988_RADC,
			 0, 255, 0, adc_tlv),
	SOC_DOUBLE_R_TLV("Capture Volume", WM8988_LINVOL, WM8988_RINVOL,
			 0, 63, 0, pga_tlv),
	SOC_DOUBLE_R("Capture ZC Switch", WM8988_LINVOL, WM8988_RINVOL, 6, 1, 0),
	SOC_DOUBLE_R("Capture Switch", WM8988_LINVOL, WM8988_RINVOL, 7, 1, 1),

	SOC_ENUM("Playback De-emphasis", deemph),

	SOC_ENUM("Capture Polarity", adcpol),
	SOC_SINGLE("Playback 6dB Attenuate", WM8988_ADCDAC, 7, 1, 0),
	SOC_SINGLE("Capture 6dB Attenuate", WM8988_ADCDAC, 8, 1, 0),

	SOC_DOUBLE_R_TLV("PCM Volume", WM8988_LDAC, WM8988_RDAC,
			 0, 255, 0, dac_tlv),

	SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", WM8988_LOUTM1,
		       4, 7, 1, bypass_tlv),
	SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", WM8988_LOUTM2,
		       4, 7, 1, bypass_tlv),
	SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", WM8988_ROUTM1,
		       4, 7, 1, bypass_tlv),
	SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", WM8988_ROUTM2,
		       4, 7, 1, bypass_tlv),

	SOC_DOUBLE_R("Output 1 Playback ZC Switch", WM8988_LOUT1V,
		     WM8988_ROUT1V, 7, 1, 0),
	SOC_DOUBLE_R_TLV("Output 1 Playback Volume", WM8988_LOUT1V,
			 WM8988_ROUT1V, 0, 127, 0, out_tlv),

	SOC_DOUBLE_R("Output 2 Playback ZC Switch", WM8988_LOUT2V,
		     WM8988_ROUT2V, 7, 1, 0),
	SOC_DOUBLE_R_TLV("Output 2 Playback Volume", WM8988_LOUT2V,
			 WM8988_ROUT2V, 0, 127, 0, out_tlv),
};

/*
 * DAPM Controls
 */

/*
 * DAPM post-sequence event: choose whether LRC clocking is gated by the DAC
 * or the ADC, depending on which side is currently powered up.
 */
static int wm8988_lrc_control(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	u16 adctl2 = snd_soc_component_read(component, WM8988_ADCTL2);

	/* Use the DAC to gate LRC if active, otherwise use ADC */
	if (snd_soc_component_read(component, WM8988_PWR2) & 0x180)
		adctl2 &= ~0x4;
	else
		adctl2 |= 0x4;
	return snd_soc_component_write(component, WM8988_ADCTL2, adctl2);
}

static const char *wm8988_line_texts[] = {
	"Line 1", "Line 2", "PGA", "Differential"};

/* register values corresponding to the texts above (sparse mapping) */
static const unsigned int wm8988_line_values[] = {
	0, 1, 3, 4};

static const struct soc_enum wm8988_lline_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_LOUTM1, 0, 7,
			      ARRAY_SIZE(wm8988_line_texts),
			      wm8988_line_texts,
			      wm8988_line_values);
static const struct snd_kcontrol_new wm8988_left_line_controls =
	SOC_DAPM_ENUM("Route", wm8988_lline_enum);

static const struct soc_enum wm8988_rline_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_ROUTM1, 0, 7,
			      ARRAY_SIZE(wm8988_line_texts),
			      wm8988_line_texts,
			      wm8988_line_values);
static const struct snd_kcontrol_new wm8988_right_line_controls =
	SOC_DAPM_ENUM("Route", wm8988_rline_enum);

/* Left Mixer */
static const struct snd_kcontrol_new wm8988_left_mixer_controls[] = {
	SOC_DAPM_SINGLE("Playback Switch", WM8988_LOUTM1, 8, 1, 0),
	SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_LOUTM1, 7, 1, 0),
	SOC_DAPM_SINGLE("Right Playback Switch", WM8988_LOUTM2, 8, 1, 0),
	SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_LOUTM2, 7, 1, 0),
};

/* Right Mixer */
static const struct snd_kcontrol_new wm8988_right_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left Playback Switch", WM8988_ROUTM1, 8, 1, 0),
	SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_ROUTM1, 7, 1, 0),
	SOC_DAPM_SINGLE("Playback Switch", WM8988_ROUTM2, 8, 1, 0),
	SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_ROUTM2, 7, 1, 0),
};

static const char *wm8988_pga_sel[] = {"Line 1", "Line 2", "Differential"};
static const unsigned int wm8988_pga_val[] = { 0, 1, 3 };

/* Left PGA Mux */
static const struct soc_enum wm8988_lpga_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_LADCIN, 6, 3,
			      ARRAY_SIZE(wm8988_pga_sel),
			      wm8988_pga_sel,
			      wm8988_pga_val);
static const struct snd_kcontrol_new wm8988_left_pga_controls =
	SOC_DAPM_ENUM("Route", wm8988_lpga_enum);

/* Right PGA Mux */
static const struct soc_enum wm8988_rpga_enum =
	SOC_VALUE_ENUM_SINGLE(WM8988_RADCIN, 6, 3,
			      ARRAY_SIZE(wm8988_pga_sel),
			      wm8988_pga_sel,
			      wm8988_pga_val);
static const struct snd_kcontrol_new wm8988_right_pga_controls =
	SOC_DAPM_ENUM("Route", wm8988_rpga_enum);

/* Differential Mux */
static const char *wm8988_diff_sel[] = {"Line 1", "Line 2"};
static SOC_ENUM_SINGLE_DECL(diffmux, WM8988_ADCIN, 8, wm8988_diff_sel);
static const struct snd_kcontrol_new wm8988_diffmux_controls =
	SOC_DAPM_ENUM("Route", diffmux);

/* Mono ADC Mux */
static const char *wm8988_mono_mux[] = {"Stereo", "Mono (Left)",
	"Mono (Right)", "Digital Mono"};
static SOC_ENUM_SINGLE_DECL(monomux, WM8988_ADCIN, 6, wm8988_mono_mux);
static const struct snd_kcontrol_new wm8988_monomux_controls =
	SOC_DAPM_ENUM("Route", monomux);

static const struct snd_soc_dapm_widget wm8988_dapm_widgets[] = {
	SND_SOC_DAPM_SUPPLY("Mic Bias", WM8988_PWR1, 1, 0, NULL, 0),

	SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_diffmux_controls),
	SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_monomux_controls),
	SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_monomux_controls),

	SND_SOC_DAPM_MUX("Left PGA Mux", WM8988_PWR1, 5, 0,
		&wm8988_left_pga_controls),
	SND_SOC_DAPM_MUX("Right PGA Mux", WM8988_PWR1, 4, 0,
		&wm8988_right_pga_controls),
	SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_left_line_controls),
	SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0,
		&wm8988_right_line_controls),

	SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8988_PWR1, 2, 0),
	SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8988_PWR1, 3, 0),

	SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8988_PWR2, 7, 0),
	SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8988_PWR2, 8, 0),

	SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
		&wm8988_left_mixer_controls[0],
		ARRAY_SIZE(wm8988_left_mixer_controls)),
	SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
		&wm8988_right_mixer_controls[0],
		ARRAY_SIZE(wm8988_right_mixer_controls)),

	SND_SOC_DAPM_PGA("Right Out 2", WM8988_PWR2, 3, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Left Out 2", WM8988_PWR2, 4, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Right Out 1", WM8988_PWR2, 5, 0, NULL, 0),
	SND_SOC_DAPM_PGA("Left Out 1", WM8988_PWR2, 6, 0, NULL, 0),

	/* runs after every DAPM sequence to fix up LRC gating */
	SND_SOC_DAPM_POST("LRC control", wm8988_lrc_control),

	SND_SOC_DAPM_OUTPUT("LOUT1"),
	SND_SOC_DAPM_OUTPUT("ROUT1"),
	SND_SOC_DAPM_OUTPUT("LOUT2"),
	SND_SOC_DAPM_OUTPUT("ROUT2"),
	SND_SOC_DAPM_OUTPUT("VREF"),

	SND_SOC_DAPM_INPUT("LINPUT1"),
	SND_SOC_DAPM_INPUT("LINPUT2"),
	SND_SOC_DAPM_INPUT("RINPUT1"),
	SND_SOC_DAPM_INPUT("RINPUT2"),
};

static const struct snd_soc_dapm_route wm8988_dapm_routes[] = {
	{ "Left Line Mux", "Line 1", "LINPUT1" },
	{ "Left Line Mux", "Line 2", "LINPUT2" },
	{ "Left Line Mux", "PGA", "Left PGA Mux" },
	{ "Left Line Mux", "Differential", "Differential Mux" },

	{ "Right Line Mux", "Line 1", "RINPUT1" },
	{ "Right Line Mux", "Line 2", "RINPUT2" },
	{ "Right Line Mux", "PGA", "Right PGA Mux" },
	{ "Right Line Mux", "Differential", "Differential Mux" },

	{ "Left PGA Mux", "Line 1", "LINPUT1" },
	{ "Left PGA Mux", "Line 2", "LINPUT2" },
	{ "Left PGA Mux", "Differential", "Differential Mux" },

	{ "Right PGA Mux", "Line 1", "RINPUT1" },
	{ "Right PGA Mux", "Line 2", "RINPUT2" },
	{ "Right PGA Mux", "Differential", "Differential Mux" },

	{ "Differential Mux", "Line 1", "LINPUT1" },
	{ "Differential Mux", "Line 1", "RINPUT1" },
	{ "Differential Mux", "Line 2", "LINPUT2" },
	{ "Differential Mux", "Line 2", "RINPUT2" },

	{ "Left ADC Mux", "Stereo", "Left PGA Mux" },
	{ "Left ADC Mux", "Mono (Left)", "Left PGA Mux" },
	{ "Left ADC Mux", "Digital Mono", "Left PGA Mux" },

	{ "Right ADC Mux", "Stereo", "Right PGA Mux" },
	{ "Right ADC Mux", "Mono (Right)", "Right PGA Mux" },
	{ "Right ADC Mux", "Digital Mono", "Right PGA Mux" },

	{ "Left ADC", NULL, "Left ADC Mux" },
	{ "Right ADC", NULL, "Right ADC Mux" },

	/*
	 * NOTE(review): the Line Mux routes below repeat the entries at the
	 * top of this table verbatim — looks like accidental duplication;
	 * confirm before removing (duplicates are harmless to DAPM).
	 */
	{ "Left Line Mux", "Line 1", "LINPUT1" },
	{ "Left Line Mux", "Line 2", "LINPUT2" },
	{ "Left Line Mux", "PGA", "Left PGA Mux" },
	{ "Left Line Mux", "Differential", "Differential Mux" },

	{ "Right Line Mux", "Line 1", "RINPUT1" },
	{ "Right Line Mux", "Line 2", "RINPUT2" },
	{ "Right Line Mux", "PGA", "Right PGA Mux" },
	{ "Right Line Mux", "Differential", "Differential Mux" },

	{ "Left Mixer", "Playback Switch", "Left DAC" },
	{ "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
	{ "Left Mixer", "Right Playback Switch", "Right DAC" },
	{ "Left Mixer", "Right Bypass Switch", "Right Line Mux" },

	{ "Right Mixer", "Left Playback Switch", "Left DAC" },
	{ "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
	{ "Right Mixer", "Playback Switch", "Right DAC" },
	{ "Right Mixer", "Right Bypass Switch", "Right Line Mux" },

	{ "Left Out 1", NULL, "Left Mixer" },
	{ "LOUT1", NULL, "Left Out 1" },
	{ "Right Out 1", NULL, "Right Mixer" },
	{ "ROUT1", NULL, "Right Out 1" },

	{ "Left Out 2", NULL, "Left Mixer" },
	{ "LOUT2", NULL, "Left Out 2" },
	{ "Right Out 2", NULL, "Right Mixer" },
	{ "ROUT2", NULL, "Right Out 2" },
};

/* one row of the MCLK/sample-rate divider table below */
struct _coeff_div {
	u32 mclk;	/* master clock rate in Hz */
	u32 rate;	/* audio sample rate in Hz */
	u16 fs;		/* mclk/rate ratio */
	u8 sr:5;	/* SRATE register field */
	u8 usb:1;	/* USB-mode clocking flag */
};

/* codec hifi mclk clock divider coefficients */
static const struct _coeff_div coeff_div[] = {
	/* 8k */
	{12288000, 8000, 1536, 0x6,
0x0}, {11289600, 8000, 1408, 0x16, 0x0}, {18432000, 8000, 2304, 0x7, 0x0}, {16934400, 8000, 2112, 0x17, 0x0}, {12000000, 8000, 1500, 0x6, 0x1}, /* 11.025k */ {11289600, 11025, 1024, 0x18, 0x0}, {16934400, 11025, 1536, 0x19, 0x0}, {12000000, 11025, 1088, 0x19, 0x1}, /* 16k */ {12288000, 16000, 768, 0xa, 0x0}, {18432000, 16000, 1152, 0xb, 0x0}, {12000000, 16000, 750, 0xa, 0x1}, /* 22.05k */ {11289600, 22050, 512, 0x1a, 0x0}, {16934400, 22050, 768, 0x1b, 0x0}, {12000000, 22050, 544, 0x1b, 0x1}, /* 32k */ {12288000, 32000, 384, 0xc, 0x0}, {18432000, 32000, 576, 0xd, 0x0}, {12000000, 32000, 375, 0xa, 0x1}, /* 44.1k */ {11289600, 44100, 256, 0x10, 0x0}, {16934400, 44100, 384, 0x11, 0x0}, {12000000, 44100, 272, 0x11, 0x1}, /* 48k */ {12288000, 48000, 256, 0x0, 0x0}, {18432000, 48000, 384, 0x1, 0x0}, {12000000, 48000, 250, 0x0, 0x1}, /* 88.2k */ {11289600, 88200, 128, 0x1e, 0x0}, {16934400, 88200, 192, 0x1f, 0x0}, {12000000, 88200, 136, 0x1f, 0x1}, /* 96k */ {12288000, 96000, 128, 0xe, 0x0}, {18432000, 96000, 192, 0xf, 0x0}, {12000000, 96000, 125, 0xe, 0x1}, }; static inline int get_coeff(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; } return -EINVAL; } /* The set of rates we can generate from the above for each SYSCLK */ static const unsigned int rates_12288[] = { 8000, 12000, 16000, 24000, 32000, 48000, 96000, }; static const struct snd_pcm_hw_constraint_list constraints_12288 = { .count = ARRAY_SIZE(rates_12288), .list = rates_12288, }; static const unsigned int rates_112896[] = { 8000, 11025, 22050, 44100, }; static const struct snd_pcm_hw_constraint_list constraints_112896 = { .count = ARRAY_SIZE(rates_112896), .list = rates_112896, }; static const unsigned int rates_12[] = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 41100, 48000, 48000, 88235, 96000, }; static const struct snd_pcm_hw_constraint_list constraints_12 = { .count = ARRAY_SIZE(rates_12), .list = 
rates_12,
};

/*
 * wm8988_set_dai_sysclk - record the MCLK frequency and pick the matching
 * rate-constraint list for later enforcement in startup().
 *
 * Note that this should be called from init rather than from hw_params.
 * Returns 0 on a recognised frequency, -EINVAL otherwise.
 */
static int wm8988_set_dai_sysclk(struct snd_soc_dai *codec_dai,
				 int clk_id, unsigned int freq, int dir)
{
	struct snd_soc_component *component = codec_dai->component;
	struct wm8988_priv *wm8988 = snd_soc_component_get_drvdata(component);

	switch (freq) {
	/* 11.2896 MHz family (and multiples): 44.1kHz-derived rates */
	case 11289600:
	case 18432000:
	case 22579200:
	case 36864000:
		wm8988->sysclk_constraints = &constraints_112896;
		wm8988->sysclk = freq;
		return 0;

	/* 12.288 MHz family (and multiples): 48kHz-derived rates */
	case 12288000:
	case 16934400:
	case 24576000:
	case 33868800:
		wm8988->sysclk_constraints = &constraints_12288;
		wm8988->sysclk = freq;
		return 0;

	/* 12 MHz USB-mode clocking */
	case 12000000:
	case 24000000:
		wm8988->sysclk_constraints = &constraints_12;
		wm8988->sysclk = freq;
		return 0;
	}
	return -EINVAL;
}

/*
 * wm8988_set_dai_fmt - program the audio interface register (WM8988_IFACE)
 * from the DAI format flags: master/slave, data format and clock inversion.
 *
 * Returns 0 on success, -EINVAL for an unsupported combination.
 */
static int wm8988_set_dai_fmt(struct snd_soc_dai *codec_dai,
			      unsigned int fmt)
{
	struct snd_soc_component *component = codec_dai->component;
	u16 iface = 0;

	/* set master/slave audio interface */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		iface = 0x0040;	/* codec is clock/frame master */
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		break;		/* codec is clock/frame slave (reset value) */
	default:
		return -EINVAL;
	}

	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		iface |= 0x0002;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iface |= 0x0001;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		iface |= 0x0003;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		iface |= 0x0013;	/* DSP mode with LRC inverted */
		break;
	default:
		return -EINVAL;
	}

	/* clock inversion */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_IF:
		iface |= 0x0090;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		iface |= 0x0080;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		iface |= 0x0010;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_component_write(component, WM8988_IFACE, iface);
	return 0;
}

/*
 * wm8988_pcm_startup - constrain the stream's rates to what the configured
 * MCLK can generate; fails if set_sysclk() was never called.
 */
static int wm8988_pcm_startup(struct snd_pcm_substream *substream,
			      struct snd_soc_dai *dai)
{
	struct snd_soc_component *component = dai->component;
	struct wm8988_priv *wm8988 =
snd_soc_component_get_drvdata(component);

	/* The set of sample rates that can be supported depends on the
	 * MCLK supplied to the CODEC - enforce this.
	 */
	if (!wm8988->sysclk) {
		dev_err(component->dev,
			"No MCLK configured, call set_sysclk() on init\n");
		return -EINVAL;
	}

	snd_pcm_hw_constraint_list(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_RATE,
				   wm8988->sysclk_constraints);

	return 0;
}

/*
 * wm8988_pcm_hw_params - program word length (IFACE) and the sample-rate
 * divider (SRATE) from the negotiated hw_params and the stored MCLK.
 *
 * If no coeff_div[] entry matches the full MCLK, retry with MCLK/2 and set
 * the clock-divide bit (0x40) in SRATE.
 */
static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	struct snd_soc_component *component = dai->component;
	struct wm8988_priv *wm8988 = snd_soc_component_get_drvdata(component);
	/* preserve everything except the word-length / rate fields */
	u16 iface = snd_soc_component_read(component, WM8988_IFACE) & 0x1f3;
	u16 srate = snd_soc_component_read(component, WM8988_SRATE) & 0x180;
	int coeff;

	coeff = get_coeff(wm8988->sysclk, params_rate(params));
	if (coeff < 0) {
		/* retry with the on-chip MCLK/2 divider engaged */
		coeff = get_coeff(wm8988->sysclk / 2, params_rate(params));
		srate |= 0x40;
	}
	if (coeff < 0) {
		dev_err(component->dev,
			"Unable to configure sample rate %dHz with %dHz MCLK\n",
			params_rate(params), wm8988->sysclk);
		return coeff;
	}

	/* bit size */
	switch (params_width(params)) {
	case 16:
		break;
	case 20:
		iface |= 0x0004;
		break;
	case 24:
		iface |= 0x0008;
		break;
	case 32:
		iface |= 0x000c;
		break;
	}

	/* set iface & srate */
	snd_soc_component_write(component, WM8988_IFACE, iface);
	/* NOTE(review): coeff >= 0 is always true here -- negative coeff
	 * already returned above; guard kept for byte-identical behavior. */
	if (coeff >= 0)
		snd_soc_component_write(component, WM8988_SRATE,
				srate | (coeff_div[coeff].sr << 1) |
				coeff_div[coeff].usb);

	return 0;
}

/*
 * wm8988_mute - toggle the DAC soft-mute bit (bit 3) in WM8988_ADCDAC.
 * Registered with .no_capture_mute, so only the playback path is affected.
 */
static int wm8988_mute(struct snd_soc_dai *dai, int mute, int direction)
{
	struct snd_soc_component *component = dai->component;
	u16 mute_reg = snd_soc_component_read(component, WM8988_ADCDAC) & 0xfff7;

	if (mute)
		snd_soc_component_write(component, WM8988_ADCDAC, mute_reg | 0x8);
	else
		snd_soc_component_write(component, WM8988_ADCDAC, mute_reg);
	return 0;
}

/*
 * wm8988_set_bias_level - manage VREF/VMID power in WM8988_PWR1 across the
 * ASoC bias states; resyncs the register cache when leaving BIAS_OFF.
 */
static int wm8988_set_bias_level(struct snd_soc_component *component,
				 enum snd_soc_bias_level level)
{
	struct wm8988_priv *wm8988 =
snd_soc_component_get_drvdata(component);
	/* mask out the VREF/VMID control bits we manage below */
	u16 pwr_reg = snd_soc_component_read(component, WM8988_PWR1) & ~0x1c1;

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;

	case SND_SOC_BIAS_PREPARE:
		/* VREF, VMID=2x50k, digital enabled */
		snd_soc_component_write(component, WM8988_PWR1, pwr_reg | 0x00c0);
		break;

	case SND_SOC_BIAS_STANDBY:
		if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) {
			/* restore register contents lost while powered off */
			regcache_sync(wm8988->regmap);

			/* VREF, VMID=2x5k */
			snd_soc_component_write(component, WM8988_PWR1,
						pwr_reg | 0x1c1);

			/* Charge caps */
			msleep(100);
		}

		/* VREF, VMID=2*500k, digital stopped */
		snd_soc_component_write(component, WM8988_PWR1, pwr_reg | 0x0141);
		break;

	case SND_SOC_BIAS_OFF:
		snd_soc_component_write(component, WM8988_PWR1, 0x0000);
		break;
	}
	return 0;
}

#define WM8988_RATES SNDRV_PCM_RATE_8000_96000

#define WM8988_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
	SNDRV_PCM_FMTBIT_S24_LE)

static const struct snd_soc_dai_ops wm8988_ops = {
	.startup	= wm8988_pcm_startup,
	.hw_params	= wm8988_pcm_hw_params,
	.set_fmt	= wm8988_set_dai_fmt,
	.set_sysclk	= wm8988_set_dai_sysclk,
	.mute_stream	= wm8988_mute,
	.no_capture_mute = 1,
};

/* Single full-duplex DAI; symmetric_rate because one SRATE register serves
 * both directions. */
static struct snd_soc_dai_driver wm8988_dai = {
	.name = "wm8988-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8988_RATES,
		.formats = WM8988_FORMATS,
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8988_RATES,
		.formats = WM8988_FORMATS,
	},
	.ops = &wm8988_ops,
	.symmetric_rate = 1,
};

/*
 * wm8988_probe - component probe: reset the codec, then latch left/right
 * volume-update bits so paired volume writes take effect together.
 */
static int wm8988_probe(struct snd_soc_component *component)
{
	int ret = 0;

	ret = wm8988_reset(component);
	if (ret < 0) {
		dev_err(component->dev, "Failed to issue reset\n");
		return ret;
	}

	/* set the update bits (we always update left then right) */
	snd_soc_component_update_bits(component, WM8988_RADC, 0x0100, 0x0100);
	snd_soc_component_update_bits(component, WM8988_RDAC, 0x0100, 0x0100);
	snd_soc_component_update_bits(component, WM8988_ROUT1V, 0x0100, 0x0100);
snd_soc_component_update_bits(component, WM8988_ROUT2V, 0x0100, 0x0100);
	snd_soc_component_update_bits(component, WM8988_RINVOL, 0x0100, 0x0100);

	return 0;
}

static const struct snd_soc_component_driver soc_component_dev_wm8988 = {
	.probe			= wm8988_probe,
	.set_bias_level		= wm8988_set_bias_level,
	.controls		= wm8988_snd_controls,
	.num_controls		= ARRAY_SIZE(wm8988_snd_controls),
	.dapm_widgets		= wm8988_dapm_widgets,
	.num_dapm_widgets	= ARRAY_SIZE(wm8988_dapm_widgets),
	.dapm_routes		= wm8988_dapm_routes,
	.num_dapm_routes	= ARRAY_SIZE(wm8988_dapm_routes),
	.suspend_bias_off	= 1,
	.idle_bias_on		= 1,
	.use_pmdown_time	= 1,
	.endianness		= 1,
};

/* 7-bit register address, 9-bit values; cached so set_bias_level can
 * regcache_sync() after power-off. */
static const struct regmap_config wm8988_regmap = {
	.reg_bits = 7,
	.val_bits = 9,

	.max_register = WM8988_LPPB,
	.writeable_reg = wm8988_writeable,

	.cache_type = REGCACHE_MAPLE,
	.reg_defaults = wm8988_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(wm8988_reg_defaults),
};

#if defined(CONFIG_SPI_MASTER)
/* SPI control-bus glue: allocate driver data, init regmap, register the
 * ASoC component. */
static int wm8988_spi_probe(struct spi_device *spi)
{
	struct wm8988_priv *wm8988;
	int ret;

	wm8988 = devm_kzalloc(&spi->dev, sizeof(struct wm8988_priv),
			      GFP_KERNEL);
	if (wm8988 == NULL)
		return -ENOMEM;

	wm8988->regmap = devm_regmap_init_spi(spi, &wm8988_regmap);
	if (IS_ERR(wm8988->regmap)) {
		ret = PTR_ERR(wm8988->regmap);
		dev_err(&spi->dev, "Failed to init regmap: %d\n", ret);
		return ret;
	}

	spi_set_drvdata(spi, wm8988);

	ret = devm_snd_soc_register_component(&spi->dev,
			&soc_component_dev_wm8988, &wm8988_dai, 1);
	return ret;
}

static struct spi_driver wm8988_spi_driver = {
	.driver = {
		.name	= "wm8988",
	},
	.probe		= wm8988_spi_probe,
};
#endif /* CONFIG_SPI_MASTER */

#if IS_ENABLED(CONFIG_I2C)
/* I2C control-bus glue; mirrors the SPI probe above. */
static int wm8988_i2c_probe(struct i2c_client *i2c)
{
	struct wm8988_priv *wm8988;
	int ret;

	wm8988 = devm_kzalloc(&i2c->dev, sizeof(struct wm8988_priv),
			      GFP_KERNEL);
	if (wm8988 == NULL)
		return -ENOMEM;

	i2c_set_clientdata(i2c, wm8988);

	wm8988->regmap = devm_regmap_init_i2c(i2c, &wm8988_regmap);
	if (IS_ERR(wm8988->regmap)) {
		ret = PTR_ERR(wm8988->regmap);
		dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
		return ret;
	}

	ret = devm_snd_soc_register_component(&i2c->dev,
			&soc_component_dev_wm8988, &wm8988_dai, 1);
	return ret;
}

static const struct i2c_device_id wm8988_i2c_id[] = {
	{ "wm8988" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8988_i2c_id);

static struct i2c_driver wm8988_i2c_driver = {
	.driver = {
		.name = "wm8988",
	},
	.probe = wm8988_i2c_probe,
	.id_table = wm8988_i2c_id,
};
#endif

/*
 * NOTE(review): if I2C registration fails but SPI registration succeeds,
 * ret is overwritten and the I2C failure is only logged, not reported.
 */
static int __init wm8988_modinit(void)
{
	int ret = 0;
#if IS_ENABLED(CONFIG_I2C)
	ret = i2c_add_driver(&wm8988_i2c_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8988 I2C driver: %d\n",
		       ret);
	}
#endif
#if defined(CONFIG_SPI_MASTER)
	ret = spi_register_driver(&wm8988_spi_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8988 SPI driver: %d\n",
		       ret);
	}
#endif
	return ret;
}
module_init(wm8988_modinit);

static void __exit wm8988_exit(void)
{
#if IS_ENABLED(CONFIG_I2C)
	i2c_del_driver(&wm8988_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8988_spi_driver);
#endif
}
module_exit(wm8988_exit);

MODULE_DESCRIPTION("ASoC WM8988 driver");
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CLOSURE_H #define _LINUX_CLOSURE_H #include <linux/llist.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/workqueue.h> /* * Closure is perhaps the most overused and abused term in computer science, but * since I've been unable to come up with anything better you're stuck with it * again. * * What are closures? * * They embed a refcount. The basic idea is they count "things that are in * progress" - in flight bios, some other thread that's doing something else - * anything you might want to wait on. * * The refcount may be manipulated with closure_get() and closure_put(). * closure_put() is where many of the interesting things happen, when it causes * the refcount to go to 0. * * Closures can be used to wait on things both synchronously and asynchronously, * and synchronous and asynchronous use can be mixed without restriction. To * wait synchronously, use closure_sync() - you will sleep until your closure's * refcount hits 1. * * To wait asynchronously, use * continue_at(cl, next_function, workqueue); * * passing it, as you might expect, the function to run when nothing is pending * and the workqueue to run that function out of. * * continue_at() also, critically, requires a 'return' immediately following the * location where this macro is referenced, to return to the calling function. * There's good reason for this. * * To use safely closures asynchronously, they must always have a refcount while * they are running owned by the thread that is running them. 
Otherwise, suppose * you submit some bios and wish to have a function run when they all complete: * * foo_endio(struct bio *bio) * { * closure_put(cl); * } * * closure_init(cl); * * do_stuff(); * closure_get(cl); * bio1->bi_endio = foo_endio; * bio_submit(bio1); * * do_more_stuff(); * closure_get(cl); * bio2->bi_endio = foo_endio; * bio_submit(bio2); * * continue_at(cl, complete_some_read, system_wq); * * If closure's refcount started at 0, complete_some_read() could run before the * second bio was submitted - which is almost always not what you want! More * importantly, it wouldn't be possible to say whether the original thread or * complete_some_read()'s thread owned the closure - and whatever state it was * associated with! * * So, closure_init() initializes a closure's refcount to 1 - and when a * closure_fn is run, the refcount will be reset to 1 first. * * Then, the rule is - if you got the refcount with closure_get(), release it * with closure_put() (i.e, in a bio->bi_endio function). If you have a refcount * on a closure because you called closure_init() or you were run out of a * closure - _always_ use continue_at(). Doing so consistently will help * eliminate an entire class of particularly pernicious races. * * Lastly, you might have a wait list dedicated to a specific event, and have no * need for specifying the condition - you just want to wait until someone runs * closure_wake_up() on the appropriate wait list. In that case, just use * closure_wait(). It will return either true or false, depending on whether the * closure was already on a wait list or not - a closure can only be on one wait * list at a time. * * Parents: * * closure_init() takes two arguments - it takes the closure to initialize, and * a (possibly null) parent. * * If parent is non null, the new closure will have a refcount for its lifetime; * a closure is considered to be "finished" when its refcount hits 0 and the * function to run is null. 
Hence * * continue_at(cl, NULL, NULL); * * returns up the (spaghetti) stack of closures, precisely like normal return * returns up the C stack. continue_at() with non null fn is better thought of * as doing a tail call. * * All this implies that a closure should typically be embedded in a particular * struct (which its refcount will normally control the lifetime of), and that * struct can very much be thought of as a stack frame. */ struct closure; struct closure_syncer; typedef void (closure_fn) (struct work_struct *); extern struct dentry *bcache_debug; struct closure_waitlist { struct llist_head list; }; enum closure_state { /* * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by * the thread that owns the closure, and cleared by the thread that's * waking up the closure. * * The rest are for debugging and don't affect behaviour: * * CLOSURE_RUNNING: Set when a closure is running (i.e. by * closure_init() and when closure_put() runs then next function), and * must be cleared before remaining hits 0. Primarily to help guard * against incorrect usage and accidentally transferring references. * continue_at() and closure_return() clear it for you, if you're doing * something unusual you can use closure_set_dead() which also helps * annotate where references are being transferred. 
*/
	CLOSURE_BITS_START	= (1U << 26),
	CLOSURE_DESTRUCTOR	= (1U << 26),
	CLOSURE_WAITING		= (1U << 28),
	CLOSURE_RUNNING		= (1U << 30),
};

/* One bit above each flag: catches over/underflow of the flag bits */
#define CLOSURE_GUARD_MASK					\
	((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)

/* Low 26 bits of ->remaining hold the refcount proper */
#define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)

struct closure {
	union {
		struct {
			struct workqueue_struct *wq;
			struct closure_syncer	*s;
			struct llist_node	list;
			closure_fn		*fn;
		};
		/* overlays fn: closure_queue() relies on fn and work.func
		 * sharing an offset (BUILD_BUG_ON checked there) */
		struct work_struct	work;
	};

	struct closure *parent;

	/* refcount plus the CLOSURE_* state flags above */
	atomic_t	remaining;
	bool		closure_get_happened;

#ifdef CONFIG_DEBUG_CLOSURES
#define CLOSURE_MAGIC_DEAD	0xc054dead
#define CLOSURE_MAGIC_ALIVE	0xc054a11e
#define CLOSURE_MAGIC_STACK	0xc05451cc

	unsigned int		magic;
	struct list_head	all;
	unsigned long		ip;
	unsigned long		waiting_on;
#endif
};

void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void __closure_sync(struct closure *cl);

/* Current refcount, with the state flags masked off */
static inline unsigned closure_nr_remaining(struct closure *cl)
{
	return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
}

/**
 * closure_sync - sleep until a closure has nothing left to wait on
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
static inline void closure_sync(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
	BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
#endif

	/* if no closure_get() ever happened there is nothing to wait for */
	if (cl->closure_get_happened)
		__closure_sync(cl);
}

int __closure_sync_timeout(struct closure *cl, unsigned long timeout);

/* As closure_sync(), but gives up after @timeout jiffies; see
 * __closure_sync_timeout() for the return convention. */
static inline int closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
#ifdef CONFIG_DEBUG_CLOSURES
	BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
#endif
	return cl->closure_get_happened
		? __closure_sync_timeout(cl, timeout)
		: 0;
}

#ifdef CONFIG_DEBUG_CLOSURES

void closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);

#else

static inline void closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}

#endif

/* Record the caller's IP for debugging; no-ops unless CONFIG_DEBUG_CLOSURES */
static inline void closure_set_ip(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
	cl->ip = _THIS_IP_;
#endif
}

static inline void closure_set_ret_ip(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
	cl->ip = _RET_IP_;
#endif
}

static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_DEBUG_CLOSURES
	cl->waiting_on = f;
#endif
}

/* Drop the RUNNING flag; annotates a manual transfer of ownership */
static inline void closure_set_stopped(struct closure *cl)
{
	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}

static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
				  struct workqueue_struct *wq)
{
	closure_set_ip(cl);
	cl->fn = fn;
	cl->wq = wq;
}

/* Run cl->fn, via cl->wq if one was set, else directly in this context */
static inline void closure_queue(struct closure *cl)
{
	struct workqueue_struct *wq = cl->wq;
	/**
	 * Changes made to closure, work_struct, or a couple of other structs
	 * may cause work.func not pointing to the right location.
*/
	BUILD_BUG_ON(offsetof(struct closure, fn)
		     != offsetof(struct work_struct, func));

	if (wq) {
		INIT_WORK(&cl->work, cl->work.func);
		BUG_ON(!queue_work(wq, &cl->work));
	} else
		cl->fn(&cl->work);
}

/**
 * closure_get - increment a closure's refcount
 */
static inline void closure_get(struct closure *cl)
{
	cl->closure_get_happened = true;

#ifdef CONFIG_DEBUG_CLOSURES
	/* refcount must already be held by someone - taking the first ref
	 * with closure_get() indicates a use-after-free style bug */
	BUG_ON((atomic_inc_return(&cl->remaining) &
		CLOSURE_REMAINING_MASK) <= 1);
#else
	atomic_inc(&cl->remaining);
#endif
}

/**
 * closure_get_not_zero - take a ref only if the refcount is nonzero;
 * returns false if the closure already finished.
 */
static inline bool closure_get_not_zero(struct closure *cl)
{
	unsigned old = atomic_read(&cl->remaining);
	do {
		if (!(old & CLOSURE_REMAINING_MASK))
			return false;

	} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));

	return true;
}

/**
 * closure_init - Initialize a closure, setting the refcount to 1
 * @cl:		closure to initialize
 * @parent:	parent of the new closure. cl will take a refcount on it for its
 *		lifetime; may be NULL.
 */
static inline void closure_init(struct closure *cl, struct closure *parent)
{
	cl->fn = NULL;
	cl->parent = parent;
	if (parent)
		closure_get(parent);

	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
	cl->closure_get_happened = false;

	closure_debug_create(cl);
	closure_set_ip(cl);
}

/* Initialize an on-stack closure (no parent, no debug list tracking) */
static inline void closure_init_stack(struct closure *cl)
{
	memset(cl, 0, sizeof(struct closure));
	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
#ifdef CONFIG_DEBUG_CLOSURES
	cl->magic = CLOSURE_MAGIC_STACK;
#endif
}

/* As closure_init_stack(), but with release ordering on ->remaining */
static inline void closure_init_stack_release(struct closure *cl)
{
	memset(cl, 0, sizeof(struct closure));
	atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
#ifdef CONFIG_DEBUG_CLOSURES
	cl->magic = CLOSURE_MAGIC_STACK;
#endif
}

/**
 * closure_wake_up - wake up all closures on a wait list,
 * with memory barrier
 */
static inline void closure_wake_up(struct closure_waitlist *list)
{
	/* Memory barrier for the wait list */
	smp_mb();
	__closure_wake_up(list);
}

/* Declare/define a closure callback with the work_struct-based signature */
#define CLOSURE_CALLBACK(name)	void name(struct work_struct *ws)
/* Inside a CLOSURE_CALLBACK body: recover the closure and its container */
#define closure_type(name, type, member)				\
	struct closure *cl = container_of(ws, struct closure, work);	\
	type *name = container_of(cl, type, member)

/**
 * continue_at - jump to another function with barrier
 *
 * After @cl is no longer waiting on anything (i.e. all outstanding refs have
 * been dropped with closure_put()), it will resume execution at @fn running out
 * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
 *
 * This is because after calling continue_at() you no longer have a ref on @cl,
 * and whatever @cl owns may be freed out from under you - a running closure fn
 * has a ref on its own closure which continue_at() drops.
 *
 * Note you are expected to immediately return after using this macro.
 */
#define continue_at(_cl, _fn, _wq)					\
do {									\
	set_closure_fn(_cl, _fn, _wq);					\
	closure_sub(_cl, CLOSURE_RUNNING + 1);				\
} while (0)

/**
 * closure_return - finish execution of a closure
 *
 * This is used to indicate that @cl is finished: when all outstanding refs on
 * @cl have been dropped @cl's ref on its parent closure (as passed to
 * closure_init()) will be dropped, if one was specified - thus this can be
 * thought of as returning to the parent closure.
 */
#define closure_return(_cl)	continue_at((_cl), NULL, NULL)

void closure_return_sync(struct closure *cl);

/**
 * continue_at_nobarrier - jump to another function without barrier
 *
 * Causes @fn to be executed out of @cl, in @wq context (or called directly if
 * @wq is NULL).
 *
 * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
 * thus it's not safe to touch anything protected by @cl after a
 * continue_at_nobarrier().
 */
#define continue_at_nobarrier(_cl, _fn, _wq)				\
do {									\
	set_closure_fn(_cl, _fn, _wq);					\
	closure_queue(_cl);						\
} while (0)

/**
 * closure_return_with_destructor - finish execution of a closure,
 * with destructor
 *
 * Works like closure_return(), except @destructor will be called when all
 * outstanding refs on @cl have been dropped; @destructor may be used to safely
 * free the memory occupied by @cl, and it is called with the ref on the parent
 * closure still held - so @destructor could safely return an item to a
 * freelist protected by @cl's parent.
 */
#define closure_return_with_destructor(_cl, _destructor)		\
do {									\
	set_closure_fn(_cl, _destructor, NULL);				\
	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
} while (0)

/**
 * closure_call - execute @fn out of a new, uninitialized closure
 *
 * Typically used when running out of one closure, and we want to run @fn
 * asynchronously out of a new closure - @parent will then wait for @cl to
 * finish.
 */
static inline void closure_call(struct closure *cl, closure_fn fn,
				struct workqueue_struct *wq,
				struct closure *parent)
{
	closure_init(cl, parent);
	continue_at_nobarrier(cl, fn, wq);
}

/* Sleep on @waitlist until @_cond becomes true (re-checked on each wakeup) */
#define __closure_wait_event(waitlist, _cond)				\
do {									\
	struct closure cl;						\
									\
	closure_init_stack(&cl);					\
									\
	while (1) {							\
		closure_wait(waitlist, &cl);				\
		if (_cond)						\
			break;						\
		closure_sync(&cl);					\
	}								\
	closure_wake_up(waitlist);					\
	closure_sync(&cl);						\
} while (0)

/* Fast path: skip the waitlist dance entirely if @_cond already holds */
#define closure_wait_event(waitlist, _cond)				\
do {									\
	if (!(_cond))							\
		__closure_wait_event(waitlist, _cond);			\
} while (0)

/* Timed variant: waits until @_cond or the absolute deadline @_until */
#define __closure_wait_event_timeout(waitlist, _cond, _until)		\
({									\
	struct closure cl;						\
	long _t;							\
									\
	closure_init_stack(&cl);					\
									\
	while (1) {							\
		closure_wait(waitlist, &cl);				\
		if (_cond) {						\
			_t = max_t(long, 1L, _until - jiffies);		\
			break;						\
		}							\
		_t = max_t(long, 0L, _until - jiffies);			\
		if (!_t)						\
			break;						\
		closure_sync_timeout(&cl, _t);				\
	}								\
	closure_wake_up(waitlist);					\
	closure_sync(&cl);						\
	_t;								\
})

/*
 * Returns 0 if timeout expired, remaining time in jiffies (at least 1) if
 * condition became true
 */
#define closure_wait_event_timeout(waitlist, _cond, _timeout)		\
({									\
	unsigned long _until = jiffies + _timeout;			\
	(_cond)								\
		? max_t(long, 1L, _until - jiffies)			\
		: __closure_wait_event_timeout(waitlist, _cond, _until);\
})

#endif /* _LINUX_CLOSURE_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 */

#include <linux/types.h>

#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"

/* Direction of a back-channel (PF<->VF mailbox) message */
#define QLC_BC_COMMAND	0
#define QLC_BC_RESPONSE	1

#define QLC_MBOX_RESP_TIMEOUT		(10 * HZ)
#define QLC_MBOX_CH_FREE_TIMEOUT	(10 * HZ)

/* Bit positions inside a back-channel event word */
#define QLC_BC_MSG	0
#define QLC_BC_CFREE	1
#define QLC_BC_FLR	2

/* A back-channel message is 1024 bytes: 16-byte header + payload */
#define QLC_BC_HDR_SZ		16
#define QLC_BC_PAYLOAD_SZ	(1024 - QLC_BC_HDR_SZ)

#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF		2048
#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF	512

#define QLC_83XX_VF_RESET_FAIL_THRESH	8
#define QLC_BC_CMD_MAX_RETRY_CNT	5

static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
				  struct qlcnic_cmd_args *);
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
					struct qlcnic_cmd_args *);

/*
 * Hardware ops used when running as an SR-IOV VF: mailbox commands are
 * routed through qlcnic_sriov_issue_cmd(), everything else reuses the
 * generic 83xx implementations.
 */
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
	.read_crb			= qlcnic_83xx_read_crb,
	.write_crb			= qlcnic_83xx_write_crb,
	.read_reg			= qlcnic_83xx_rd_reg_indirect,
	.write_reg			= qlcnic_83xx_wrt_reg_indirect,
	.get_mac_address		= qlcnic_83xx_get_mac_address,
	.setup_intr			= qlcnic_83xx_setup_intr,
	.alloc_mbx_args			= qlcnic_83xx_alloc_mbx_args,
	.mbx_cmd			= qlcnic_sriov_issue_cmd,
	.get_func_no			= qlcnic_83xx_get_func_no,
	.api_lock			= qlcnic_83xx_cam_lock,
	.api_unlock			= qlcnic_83xx_cam_unlock,
	.process_lb_rcv_ring_diag	= qlcnic_83xx_process_rcv_ring_diag,
	.create_rx_ctx			= qlcnic_83xx_create_rx_ctx,
	.create_tx_ctx			= qlcnic_83xx_create_tx_ctx,
	.del_rx_ctx			= qlcnic_83xx_del_rx_ctx,
	.del_tx_ctx			= qlcnic_83xx_del_tx_ctx,
	.setup_link_event		= qlcnic_83xx_setup_link_event,
	.get_nic_info			= qlcnic_83xx_get_nic_info,
	.get_pci_info			= qlcnic_83xx_get_pci_info,
	.set_nic_info			= qlcnic_83xx_set_nic_info,
	.change_macvlan			= qlcnic_83xx_sre_macaddr_change,
	.napi_enable			= qlcnic_83xx_napi_enable,
	.napi_disable			= qlcnic_83xx_napi_disable,
	.config_intr_coal		= qlcnic_83xx_config_intr_coal,
	.config_rss			= qlcnic_83xx_config_rss,
	.config_hw_lro			= qlcnic_83xx_config_hw_lro,
	.config_promisc_mode		= qlcnic_83xx_nic_set_promisc,
	.change_l2_filter		= qlcnic_83xx_change_l2_filter,
	.get_board_info			= qlcnic_83xx_get_port_info,
	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,
	.enable_sds_intr		= qlcnic_83xx_enable_sds_intr,
	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
	.encap_rx_offload		= qlcnic_83xx_encap_rx_offload,
	.encap_tx_offload		= qlcnic_83xx_encap_tx_offload,
};

static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode	= qlcnic_config_bridged_mode,
	.config_led		= qlcnic_config_led,
	.cancel_idc_work	= qlcnic_sriov_vf_cancel_fw_work,
	.napi_add		= qlcnic_83xx_napi_add,
	.napi_del		= qlcnic_83xx_napi_del,
	.shutdown		= qlcnic_sriov_vf_shutdown,
	.resume			= qlcnic_sriov_vf_resume,
	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr	= qlcnic_83xx_clear_legacy_intr,
};

/* Back-channel command descriptors: {opcode, in_args, out_args} */
static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};

/* True if the event word carries a back-channel message */
static inline bool qlcnic_sriov_bc_msg_check(u32 val)
{
	return (val & (1 << QLC_BC_MSG)) ? true : false;
}

/* True if the event word signals that the back-channel is free */
static inline bool qlcnic_sriov_channel_free_check(u32 val)
{
	return (val & (1 << QLC_BC_CFREE)) ?
true : false;
}

/* True if the event word signals a Function Level Reset */
static inline bool qlcnic_sriov_flr_check(u32 val)
{
	return (val & (1 << QLC_BC_FLR)) ? true : false;
}

/* Extract the target PCI function id from a back-channel event word */
static inline u8 qlcnic_sriov_target_func_id(u32 val)
{
	return (val >> 4) & 0xff;
}

/*
 * Compute the virtual PCI function number of VF @vf_id from the PF's
 * SR-IOV capability (VF offset/stride).  A VF itself always returns 0.
 */
static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
	struct pci_dev *dev = adapter->pdev;
	int pos;
	u16 stride, offset;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	return (dev->devfn + offset + stride * vf_id) & 0xff;
}

/*
 * Allocate the SR-IOV state: the per-VF info array, the two back-channel
 * workqueues ("bc-trans" and "async") and, on the PF, one vport per VF
 * with a random MAC address.
 */
int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
{
	struct qlcnic_sriov *sriov;
	struct qlcnic_back_channel *bc;
	struct workqueue_struct *wq;
	struct qlcnic_vport *vp;
	struct qlcnic_vf_info *vf;
	int err, i;

	if (!qlcnic_sriov_enable_check(adapter))
		return -EIO;

	sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
	if (!sriov)
		return -ENOMEM;

	adapter->ahw->sriov = sriov;
	sriov->num_vfs = num_vfs;
	bc = &sriov->bc;
	sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
				 GFP_KERNEL);
	if (!sriov->vf_info) {
		err = -ENOMEM;
		goto qlcnic_free_sriov;
	}

	wq = create_singlethread_workqueue("bc-trans");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Cannot create bc-trans workqueue\n");
		goto qlcnic_free_vf_info;
	}

	bc->bc_trans_wq = wq;

	wq = create_singlethread_workqueue("async");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
		goto qlcnic_destroy_trans_wq;
	}

	bc->bc_async_wq = wq;

	INIT_LIST_HEAD(&bc->async_cmd_list);
	INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
	spin_lock_init(&bc->queue_lock);
	bc->adapter = adapter;

	for (i = 0; i < num_vfs; i++) {
		vf = &sriov->vf_info[i];
		vf->adapter = adapter;
		vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
		mutex_init(&vf->send_cmd_lock);
		spin_lock_init(&vf->vlan_list_lock);
		INIT_LIST_HEAD(&vf->rcv_act.wait_list);
		INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
		spin_lock_init(&vf->rcv_act.lock);
		spin_lock_init(&vf->rcv_pend.lock);
		init_completion(&vf->ch_free_cmpl);

		INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);

		if (qlcnic_sriov_pf_check(adapter)) {
			vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
			if (!vp) {
				err = -ENOMEM;
				goto qlcnic_destroy_async_wq;
			}
			sriov->vf_info[i].vp = vp;
			vp->vlan_mode = QLC_GUEST_VLAN_MODE;
			vp->max_tx_bw = MAX_BW;
			vp->min_tx_bw = MIN_BW;
			vp->spoofchk = false;
			eth_random_addr(vp->mac);
			dev_info(&adapter->pdev->dev,
				 "MAC Address %pM is configured for VF %d\n",
				 vp->mac, i);
		}
	}

	return 0;

qlcnic_destroy_async_wq:
	/* unwind only the vports allocated so far */
	while (i--)
		kfree(sriov->vf_info[i].vp);
	destroy_workqueue(bc->bc_async_wq);
qlcnic_destroy_trans_wq:
	destroy_workqueue(bc->bc_trans_wq);
qlcnic_free_vf_info:
	kfree(sriov->vf_info);
qlcnic_free_sriov:
	kfree(adapter->ahw->sriov);
	return err;
}

/* Drain a transaction list, freeing each queued transaction's buffers */
void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}

/* Tear down everything created by qlcnic_sriov_init() */
void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);
for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}

/* VF-side teardown: tell the PF the channel is going away, then free state */
static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}

void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return;

	qlcnic_sriov_free_vlans(adapter);

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}

/*
 * Post one fragment of a back-channel message through the 83xx mailbox
 * and wait for its completion.  Returns the mailbox response opcode.
 */
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct qlcnic_cmd_args cmd;
	unsigned long timeout;
	int err;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd.hdr = hdr;
	cmd.pay = pay;
	cmd.pay_size = size;
	cmd.func_num = pci_func;
	cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
	cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		return err;
	}

	if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		flush_workqueue(mbx->work_q);
	}

	return cmd.rsp_opcode;
}

/* Default and maximum RX/TX descriptor counts for a VF */
static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
{
	adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
	adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	adapter->num_txd = MAX_CMD_DESCRIPTORS;
	adapter->max_rds_rings = MAX_RDS_RINGS;
}

/*
 * Query vport limits/capabilities from firmware into @npar_info.
 * Each BIT_n in the status word gates validity of one response field.
 */
int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
				   struct qlcnic_info *npar_info, u16 vport_id)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 status;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		return err;

	cmd.req.arg[1] = vport_id << 16 | 0x1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to get vport info, err=%d\n", err);
		qlcnic_free_mbx_args(&cmd);
		return err;
	}

	status = cmd.rsp.arg[2] & 0xffff;
	if (status & BIT_0)
		npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
	if (status & BIT_1)
		npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
	if (status & BIT_2)
		npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
	if (status & BIT_3)
		npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
	if (status & BIT_4)
		npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
	if (status & BIT_5)
		npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
	if (status & BIT_6)
		npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
	if (status & BIT_7)
		npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
	if (status & BIT_8)
		npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
	if (status & BIT_9)
		npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);

	npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
	npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
	npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
	npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);

	dev_info(dev,
		 "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
		 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
		 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
		 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
		 npar_info->min_tx_bw, npar_info->max_tx_bw,
		 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
		 npar_info->max_rx_mcast_mac_filters,
		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
		 npar_info->max_remote_ipv6_addrs);

	qlcnic_free_mbx_args(&cmd);
	return err;
}

/* PVID mode: record the port VLAN id and disable VF tagging */
static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
				      struct qlcnic_cmd_args *cmd)
{
	adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
	adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	return 0;
}

/* Guest-VLAN mode: copy the PF-provided list of allowed VLAN ids */
static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
					    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	int i, num_vlans, ret;
	u16 *vlans;

	if (sriov->allowed_vlans)
		return 0;

	sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
	sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
	dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
		 sriov->num_allowed_vlans);

	ret = qlcnic_sriov_alloc_vlans(adapter);
	if (ret)
		return ret;

	if (!sriov->any_vlan)
		return 0;

	num_vlans = sriov->num_allowed_vlans;
	sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
	if (!sriov->allowed_vlans)
		return -ENOMEM;

	vlans = (u16 *)&cmd->rsp.arg[3];
	for (i = 0; i < num_vlans; i++)
		sriov->allowed_vlans[i] = vlans[i];

	return 0;
}

/* Fetch the VLAN ACL from the PF and apply the reported VLAN mode */
static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(cmd));
	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
	if (ret)
		return ret;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
			ret);
	} else {
		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
		switch (sriov->vlan_mode) {
		case QLC_GUEST_VLAN_MODE:
			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
			break;
		case QLC_PVID_MODE:
			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
			break;
		}
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static int
qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_info nic_info;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}

/*
 * Bring up a VF end to end: interrupts, mailbox, back-channel setup,
 * channel handshake with the PF, driver init, netdev registration, and
 * finally schedule the device-state poll work.  Unwinds via gotos.
 */
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter)
{
	int err;

	adapter->flags |= QLCNIC_VLAN_FILTERING;
	adapter->ahw->total_nic_func = 1;
	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "Device does not support MSI interrupts\n");

	/* compute and set default and max tx/sds rings */
	qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
	qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);

	err = qlcnic_setup_intr(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_sriov_get_vf_acl(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);

	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}

/* Poll (20ms steps) until firmware reports READY, bounded by fw_fail_cnt */
static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
{
	u32 state;

	do {
		msleep(20);
		if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
			return -EIO;
		state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	} while (state != QLC_83XX_IDC_DEV_READY);

	return 0;
}

/* Top-level VF probe path: wait for firmware, then set up the VF */
int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter);
	if (err)
		return err;

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

/* Select the VF operation mode and nic ops template */
void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
	dev_info(&adapter->pdev->dev,
		 "HAL Version: %d Non Privileged SRIOV function\n",
		 ahw->fw_hal_version);
	adapter->nic_ops = &qlcnic_sriov_vf_ops;
	set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
	return;
}

void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context
*ahw)
{
	ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
}

/*
 * Size in bytes of fragment @curr_frag of a @real_pay_size payload:
 * a full QLC_BC_PAYLOAD_SZ for every fragment but the last, which
 * carries the remainder.
 */
static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
{
	u32 pay_size;

	pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);

	if (pay_size)
		pay_size = QLC_BC_PAYLOAD_SZ;
	else
		pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;

	return pay_size;
}

/* Map a PCI function number to its index in the vf_info array (PF only) */
int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
{
	struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
	u8 i;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
		if (vf_info[i].pci_func == pci_func)
			return i;
	}

	return -EINVAL;
}

static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
{
	*trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
	if (!*trans)
		return -ENOMEM;

	init_completion(&(*trans)->resp_cmpl);
	return 0;
}

static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
					    u32 size)
{
	*hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;

	return 0;
}

/*
 * Allocate req/rsp argument arrays for back-channel command @type,
 * using the sizes recorded in qlcnic_sriov_bc_mbx_tbl.
 */
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
{
	const struct qlcnic_mailbox_metadata *mbx_tbl;
	int i, size;

	mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);

	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->op_type = QLC_BC_CMD;
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			mbx->req.arg[0] = (type | (mbx->req.num << 16) |
					   (3 << 29));
			mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Fill in a transaction's per-fragment headers.  For a command this also
 * sizes the payloads from @cmd and allocates the req/rsp header arrays;
 * for a response the payload pointers come from the existing transaction.
 */
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		num_frags = t_num_frags;
		hdr = trans->req_hdr;
	} else {
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		cmd->cmd_op = cmd_op;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
		cmd->op_type = trans->req_hdr->op_type;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;

	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}

	return 0;
}

static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
{
	if (!trans)
		return;
	kfree(trans->req_hdr);
	kfree(trans->rsp_hdr);
	kfree(trans);
}

/*
 * Detach a finished transaction.  For a response, unlink it from rcv_act
 * and report whether more are queued; for a command, clear send state.
 */
static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
				    struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_trans_list *t_list;
	unsigned long flags;
	int ret = 0;

	if
(type == QLC_BC_RESPONSE) {
		t_list = &vf->rcv_act;
		spin_lock_irqsave(&t_list->lock, flags);
		t_list->count--;
		list_del(&trans->list);
		if (t_list->count > 0)
			ret = 1;
		spin_unlock_irqrestore(&t_list->lock, flags);
	}

	if (type == QLC_BC_COMMAND) {
		while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
			msleep(100);
		vf->send_cmd = NULL;
		clear_bit(QLC_BC_VF_SEND, &vf->state);
	}

	return ret;
}

/* Queue the VF's transaction worker unless an FLR or FW reset is pending */
static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
					 struct qlcnic_vf_info *vf,
					 work_func_t func)
{
	if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
	    vf->adapter->need_fw_reset)
		return;

	queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
}

static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
{
	struct completion *cmpl = &trans->resp_cmpl;

	if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
		trans->trans_state = QLC_END;
	else
		trans->trans_state = QLC_ABORT;

	return;
}

/* Advance to the next fragment, or to the state that ends this direction */
static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
					    u8 type)
{
	if (type == QLC_BC_RESPONSE) {
		trans->curr_rsp_frag++;
		if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_END;
	} else {
		trans->curr_req_frag++;
		if (trans->curr_req_frag < trans->req_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_WAIT_FOR_RESP;
	}
}

static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
					       u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct completion *cmpl = &vf->ch_free_cmpl;

	if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
		trans->trans_state = QLC_ABORT;
		return;
	}

	clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
	qlcnic_sriov_handle_multi_frags(trans, type);
}

/* Copy an incoming message's header and payload out of the mailbox regs */
static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
				     u32 *hdr, u32 *pay, u32 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	u8 i, max = 2, hdr_size, j;

	hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	max = (size / sizeof(u32)) + hdr_size;

	/* message words start at mailbox register 2 */
	for (i = 2, j = 0; j < hdr_size; i++, j++)
		*(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
	for (; j < max; i++, j++)
		*(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}

/* Busy-wait (up to 10000 x 1ms) to claim the VF's back-channel */
static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
{
	int ret = -EBUSY;
	u32 timeout = 10000;

	do {
		if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
			ret = 0;
			break;
		}
		mdelay(1);
	} while (--timeout);

	return ret;
}

/* Post the current fragment of @trans in the given direction */
static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	u32 pay_size;
	u32 *hdr, *pay;
	int ret;
	u8 pci_func = trans->func_id;

	if (__qlcnic_sriov_issue_bc_post(vf))
		return -EBUSY;

	if (type == QLC_BC_COMMAND) {
		hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
		pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       trans->curr_req_frag);
		pay_size = (pay_size / sizeof(u32));
	} else {
		hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
		pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
		pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
						       trans->curr_rsp_frag);
		pay_size = (pay_size / sizeof(u32));
	}

	ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
				       pci_func, pay_size);
	return ret;
}

/*
 * Drive a transaction's state machine (QLC_INIT -> ... -> QLC_END/ABORT)
 * until it completes.  An FLR or pending FW reset forces QLC_ABORT.
 */
static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
				      struct qlcnic_vf_info *vf, u8 type)
{
	bool flag = true;
	int err = -EIO;

	while (flag) {
		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
		    vf->adapter->need_fw_reset)
			trans->trans_state = QLC_ABORT;

		switch (trans->trans_state) {
		case QLC_INIT:
			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
			if (qlcnic_sriov_issue_bc_post(trans, type))
				trans->trans_state = QLC_ABORT;
			break;
		case QLC_WAIT_FOR_CHANNEL_FREE:
			qlcnic_sriov_wait_for_channel_free(trans, type);
			break;
		case QLC_WAIT_FOR_RESP:
			qlcnic_sriov_wait_for_resp(trans);
			break;
		case QLC_END:
			err = 0;
			flag = false;
			break;
		case QLC_ABORT:
			err = -EIO;
			flag = false;
			clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
			break;
		default:
			err = -EIO;
			flag = false;
		}
	}

	return err;
}

static int
qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
			 struct qlcnic_bc_trans *trans, int pci_func)
{
	struct qlcnic_vf_info *vf;
	int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return -EIO;

	vf = &adapter->ahw->sriov->vf_info[index];
	trans->vf = vf;
	trans->func_id = pci_func;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
		/* before the channel is up, only CHANNEL_INIT may pass */
		if (qlcnic_sriov_pf_check(adapter))
			return -EIO;
		if (qlcnic_sriov_vf_check(adapter) &&
		    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
			return -EIO;
	}

	mutex_lock(&vf->send_cmd_lock);
	vf->send_cmd = trans;
	err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
	qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
	mutex_unlock(&vf->send_cmd_lock);
	return err;
}

/* On the PF, process a VF command; otherwise flag the response as invalid */
static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
					  struct qlcnic_bc_trans *trans,
					  struct qlcnic_cmd_args *cmd)
{
#ifdef CONFIG_QLCNIC_SRIOV
	if (qlcnic_sriov_pf_check(adapter)) {
		qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
		return;
	}
#endif
	cmd->rsp.arg[0] |= (0x9 << 25);
	return;
}

/*
 * Worker: take the first transaction on rcv_act, run it, send the
 * response, then reschedule itself if more transactions are queued.
 */
static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
	struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
						 trans_work);
	struct qlcnic_bc_trans *trans = NULL;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u8 req;

	if (adapter->need_fw_reset)
		return;

	if (test_bit(QLC_BC_VF_FLR, &vf->state))
		return;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	trans = list_first_entry(&vf->rcv_act.wait_list,
				 struct qlcnic_bc_trans, list);
	adapter = vf->adapter;

	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
					QLC_BC_RESPONSE))
		goto cleanup_trans;

	__qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
	trans->trans_state = QLC_INIT;
	__qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
	qlcnic_free_mbx_args(&cmd);
	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
	qlcnic_sriov_cleanup_transaction(trans);
	if (req)
		qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
}

/* Pull one response fragment for the command this VF has in flight */
static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
					struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	u32 pay_size;

	if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
		return;

	trans = vf->send_cmd;

	if (trans == NULL)
		goto clear_send;

	if (trans->trans_id != hdr->seq_id)
		goto clear_send;

	pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
					       trans->curr_rsp_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
				 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
				 pay_size);
	if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
		goto clear_send;

	/* last fragment: wake the sender waiting in wait_for_resp */
	complete(&trans->resp_cmpl);

clear_send:
	clear_bit(QLC_BC_VF_SEND, &vf->state);
}

/* Append @trans to rcv_act; kick the worker if the list was empty */
int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				struct qlcnic_vf_info *vf,
				struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	t_list->count++;
	list_add_tail(&trans->list, &t_list->wait_list);
	if (t_list->count == 1)
		qlcnic_sriov_schedule_bc_cmd(sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
	return 0;
}

static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				     struct qlcnic_vf_info *vf,
				     struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	spin_lock(&t_list->lock);

	__qlcnic_sriov_add_act_list(sriov, vf, trans);

	spin_unlock(&t_list->lock);
	return 0;
}

/*
 * Merge a continuation fragment (frag_num > 1) into its pending
 * transaction; when the last fragment arrives, move it to the act list.
 */
static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
					      struct qlcnic_vf_info *vf,
					      struct qlcnic_bc_hdr *hdr)
{
	struct qlcnic_bc_trans *trans = NULL;
	struct list_head *node;
	u32 pay_size, curr_frag;
	u8 found = 0, active = 0;

	spin_lock(&vf->rcv_pend.lock);
	if (vf->rcv_pend.count > 0) {
		list_for_each(node, &vf->rcv_pend.wait_list) {
			trans = list_entry(node, struct qlcnic_bc_trans, list);
			if (trans->trans_id == hdr->seq_id) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		curr_frag = trans->curr_req_frag;
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       curr_frag);
		qlcnic_sriov_pull_bc_msg(vf->adapter,
					 (u32
*)(trans->req_hdr + curr_frag),
					 (u32 *)(trans->req_pay + curr_frag),
					 pay_size);
		trans->curr_req_frag++;
		if (trans->curr_req_frag >= hdr->num_frags) {
			vf->rcv_pend.count--;
			list_del(&trans->list);
			active = 1;
		}
	}
	spin_unlock(&vf->rcv_pend.lock);

	if (active)
		if (qlcnic_sriov_add_act_list(sriov, vf, trans))
			qlcnic_sriov_cleanup_transaction(trans);

	return;
}

/*
 * Handle an incoming back-channel command fragment: either merge it into
 * a pending multi-fragment transaction, or start a new transaction and
 * queue it (complete -> rcv_act, partial -> rcv_pend).
 */
static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
				       struct qlcnic_bc_hdr *hdr,
				       struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u32 pay_size;
	int err;
	u8 cmd_op;

	if (adapter->need_fw_reset)
		return;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
	    hdr->op_type != QLC_BC_CMD &&
	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
		return;

	if (hdr->frag_num > 1) {
		qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
		return;
	}

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd_op = hdr->cmd_op;
	if (qlcnic_sriov_alloc_bc_trans(&trans))
		return;

	if (hdr->op_type == QLC_BC_CMD)
		err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
	else
		err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

	if (err) {
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	cmd.op_type = hdr->op_type;
	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
					QLC_BC_COMMAND)) {
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
					       trans->curr_req_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->req_hdr + trans->curr_req_frag),
				 (u32 *)(trans->req_pay + trans->curr_req_frag),
				 pay_size);
	trans->func_id = vf->pci_func;
	trans->vf = vf;
	trans->trans_id = hdr->seq_id;
	trans->curr_req_frag++;

	if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
		return;

	if (trans->curr_req_frag == trans->req_hdr->num_frags) {
		if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
			qlcnic_free_mbx_args(&cmd);
			qlcnic_sriov_cleanup_transaction(trans);
		}
	} else {
		spin_lock(&vf->rcv_pend.lock);
		list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
		vf->rcv_pend.count++;
		spin_unlock(&vf->rcv_pend.lock);
	}
}

/* Read the BC header from mailbox registers 2-5 and dispatch by type */
static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_hdr hdr;
	u32 *ptr = (u32 *)&hdr;
	u8 msg_type, i;

	for (i = 2; i < 6; i++)
		ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
	msg_type = hdr.msg_type;

	switch (msg_type) {
	case QLC_BC_COMMAND:
		qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
		break;
	case QLC_BC_RESPONSE:
		qlcnic_sriov_handle_bc_resp(&hdr, vf);
		break;
	}
}

static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_adapter *adapter = vf->adapter;

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_handle_flr(sriov, vf);
	else
		dev_err(&adapter->pdev->dev,
			"Invalid event to VF. VF should not get FLR event\n");
}

/* Event entry point: route channel-free, FLR and BC-message events */
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
{
	struct qlcnic_vf_info *vf;
	struct qlcnic_sriov *sriov;
	int index;
	u8 pci_func;

	sriov = adapter->ahw->sriov;
	pci_func = qlcnic_sriov_target_func_id(event);
	index = qlcnic_sriov_func_to_index(adapter, pci_func);
	if (index < 0)
		return;

	vf = &sriov->vf_info[index];
	vf->pci_func = pci_func;

	if (qlcnic_sriov_channel_free_check(event))
		complete(&vf->ch_free_cmpl);

	if (qlcnic_sriov_flr_check(event)) {
		qlcnic_sriov_handle_flr_event(sriov, vf);
		return;
	}

	if (qlcnic_sriov_bc_msg_check(event))
		qlcnic_sriov_handle_msg_event(sriov, vf);
}

/* Enable or disable back-channel event delivery via firmware command */
int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
{
	struct qlcnic_cmd_args cmd;
	int err;

	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return 0;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
		return -ENOMEM;

	if (enable)
		cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);

	err = qlcnic_83xx_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to %s bc events, err=%d\n",
			(enable ?
"enable" : "disable"), err); } qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans) { u8 max = QLC_BC_CMD_MAX_RETRY_CNT; u32 state; state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); if (state == QLC_83XX_IDC_DEV_READY) { msleep(20); clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state); trans->trans_state = QLC_INIT; if (++adapter->fw_fail_cnt > max) return -EIO; else return 0; } return -EIO; } static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; struct device *dev = &adapter->pdev->dev; struct qlcnic_bc_trans *trans; int err; u32 rsp_data, opcode, mbx_err_code, rsp; u16 seq = ++adapter->ahw->sriov->bc.trans_counter; u8 func = ahw->pci_func; rsp = qlcnic_sriov_alloc_bc_trans(&trans); if (rsp) goto free_cmd; rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); if (rsp) goto cleanup_transaction; retry: if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) { rsp = -EIO; QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n", QLCNIC_MBX_RSP(cmd->req.arg[0]), func); goto err_out; } err = qlcnic_sriov_send_bc_cmd(adapter, trans, func); if (err) { dev_err(dev, "MBX command 0x%x timed out for VF %d\n", (cmd->req.arg[0] & 0xffff), func); rsp = QLCNIC_RCODE_TIMEOUT; /* After adapter reset PF driver may take some time to * respond to VF's request. Retry request till maximum retries. 
*/ if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) && !qlcnic_sriov_retry_bc_cmd(adapter, trans)) goto retry; goto err_out; } rsp_data = cmd->rsp.arg[0]; mbx_err_code = QLCNIC_MBX_STATUS(rsp_data); opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]); if ((mbx_err_code == QLCNIC_MBX_RSP_OK) || (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) { rsp = QLCNIC_RCODE_SUCCESS; } else { if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { rsp = QLCNIC_RCODE_SUCCESS; } else { rsp = mbx_err_code; if (!rsp) rsp = 1; dev_err(dev, "MBX command 0x%x failed with err:0x%x for VF %d\n", opcode, mbx_err_code, func); } } err_out: if (rsp == QLCNIC_RCODE_TIMEOUT) { ahw->reset_context = 1; adapter->need_fw_reset = 1; clear_bit(QLC_83XX_MBX_READY, &mbx->status); } cleanup_transaction: qlcnic_sriov_cleanup_transaction(trans); free_cmd: if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { qlcnic_free_mbx_args(cmd); kfree(cmd); } return rsp; } static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) return qlcnic_sriov_async_issue_cmd(adapter, cmd); else return __qlcnic_sriov_issue_cmd(adapter, cmd); } static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op) { struct qlcnic_cmd_args cmd; struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0]; int ret; memset(&cmd, 0, sizeof(cmd)); if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op)) return -ENOMEM; ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) { dev_err(&adapter->pdev->dev, "Failed bc channel %s %d\n", cmd_op ? 
"term" : "init", ret); goto out; } cmd_op = (cmd.rsp.arg[0] & 0xff); if (cmd.rsp.arg[0] >> 25 == 2) return 2; if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) set_bit(QLC_BC_VF_STATE, &vf->state); else clear_bit(QLC_BC_VF_STATE, &vf->state); out: qlcnic_free_mbx_args(&cmd); return ret; } static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, enum qlcnic_mac_type mac_type) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf; u16 vlan_id; int i; vf = &adapter->ahw->sriov->vf_info[0]; if (!qlcnic_sriov_check_any_vlan(vf)) { qlcnic_nic_add_mac(adapter, mac, 0, mac_type); } else { spin_lock(&vf->vlan_list_lock); for (i = 0; i < sriov->num_allowed_vlans; i++) { vlan_id = vf->sriov_vlans[i]; if (vlan_id) qlcnic_nic_add_mac(adapter, mac, vlan_id, mac_type); } spin_unlock(&vf->vlan_list_lock); if (qlcnic_84xx_check(adapter)) qlcnic_nic_add_mac(adapter, mac, 0, mac_type); } } void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) { struct list_head *head = &bc->async_cmd_list; struct qlcnic_async_cmd *entry; flush_workqueue(bc->bc_async_wq); cancel_work_sync(&bc->vf_async_work); spin_lock(&bc->queue_lock); while (!list_empty(head)) { entry = list_entry(head->next, struct qlcnic_async_cmd, list); list_del(&entry->list); kfree(entry->cmd); kfree(entry); } spin_unlock(&bc->queue_lock); } void qlcnic_sriov_vf_set_multi(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct netdev_hw_addr *ha; u32 mode = VPORT_MISS_MODE_DROP; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return; if (netdev->flags & IFF_PROMISC) { if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) mode = VPORT_MISS_MODE_ACCEPT_ALL; } else if ((netdev->flags & IFF_ALLMULTI) || (netdev_mc_count(netdev) > ahw->max_mc_count)) { mode = 
VPORT_MISS_MODE_ACCEPT_MULTI; } else { qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC); if (!netdev_mc_empty(netdev)) { qlcnic_flush_mcast_mac(adapter); netdev_for_each_mc_addr(ha, netdev) qlcnic_vf_add_mc_list(netdev, ha->addr, QLCNIC_MULTICAST_MAC); } } /* configure unicast MAC address, if there is not sufficient space * to store all the unicast addresses then enable promiscuous mode */ if (netdev_uc_count(netdev) > ahw->max_uc_count) { mode = VPORT_MISS_MODE_ACCEPT_ALL; } else if (!netdev_uc_empty(netdev)) { netdev_for_each_uc_addr(ha, netdev) qlcnic_vf_add_mc_list(netdev, ha->addr, QLCNIC_UNICAST_MAC); } if (adapter->pdev->is_virtfn) { if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { qlcnic_alloc_lb_filters_mem(adapter); adapter->drv_mac_learn = true; adapter->rx_mac_learn = true; } else { adapter->drv_mac_learn = false; adapter->rx_mac_learn = false; } } qlcnic_nic_set_promisc(adapter, mode); } static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) { struct qlcnic_async_cmd *entry, *tmp; struct qlcnic_back_channel *bc; struct qlcnic_cmd_args *cmd; struct list_head *head; LIST_HEAD(del_list); bc = container_of(work, struct qlcnic_back_channel, vf_async_work); head = &bc->async_cmd_list; spin_lock(&bc->queue_lock); list_splice_init(head, &del_list); spin_unlock(&bc->queue_lock); list_for_each_entry_safe(entry, tmp, &del_list, list) { list_del(&entry->list); cmd = entry->cmd; __qlcnic_sriov_issue_cmd(bc->adapter, cmd); kfree(entry); } if (!list_empty(head)) queue_work(bc->bc_async_wq, &bc->vf_async_work); return; } static struct qlcnic_async_cmd * qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, struct qlcnic_cmd_args *cmd) { struct qlcnic_async_cmd *entry = NULL; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return NULL; entry->cmd = cmd; spin_lock(&bc->queue_lock); list_add_tail(&entry->list, &bc->async_cmd_list); spin_unlock(&bc->queue_lock); return entry; } static void 
qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, struct qlcnic_cmd_args *cmd) { struct qlcnic_async_cmd *entry = NULL; entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); if (!entry) { qlcnic_free_mbx_args(cmd); kfree(cmd); return; } queue_work(bc->bc_async_wq, &bc->vf_async_work); } static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; if (adapter->need_fw_reset) return -EIO; qlcnic_sriov_schedule_async_cmd(bc, cmd); return 0; } static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) { int err; adapter->need_fw_reset = 0; qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); qlcnic_83xx_enable_mbx_interrupt(adapter); err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) return err; err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); if (err) goto err_out_cleanup_bc_intr; err = qlcnic_sriov_vf_init_driver(adapter); if (err) goto err_out_term_channel; return 0; err_out_term_channel: qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); err_out_cleanup_bc_intr: qlcnic_sriov_cfg_bc_intr(adapter, 0); return err; } static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (netif_running(netdev)) { if (!qlcnic_up(adapter, netdev)) qlcnic_restore_indev_addr(netdev, NETDEV_UP); } netif_device_attach(netdev); } static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl; struct net_device *netdev = adapter->netdev; u8 i, max_ints = ahw->num_msix - 1; netif_device_detach(netdev); qlcnic_83xx_detach_mailbox_work(adapter); qlcnic_83xx_disable_mbx_intr(adapter); if (netif_running(netdev)) qlcnic_down(adapter, netdev); for (i = 0; i < max_ints; i++) { intr_tbl[i].id = i; intr_tbl[i].enabled = 0; intr_tbl[i].src = 0; } ahw->reset_context = 0; } 
/* After the device transitions back to READY from NEED_RESET/INIT,
 * reinitialize the driver and reattach the netdev; log the IDC state
 * on failure. Always returns 0 (poll loop continues either way). */
static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
	    (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
		if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
			qlcnic_sriov_vf_attach(adapter);
			adapter->fw_fail_cnt = 0;
			dev_info(dev,
				 "%s: Reinitialization of VF 0x%x done after FW reset\n",
				 __func__, func);
		} else {
			dev_err(dev,
				"%s: Reinitialization of VF 0x%x failed after FW reset\n",
				__func__, func);
			state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
			dev_info(dev, "Current state 0x%x after FW reset\n",
				 state);
		}
	}

	return 0;
}

/* Handle a context reset request while the device stays READY. The first
 * two attempts only flag a FW reset and wait; past that the VF is torn
 * down and reinitialized, and past QLC_83XX_VF_RESET_FAIL_THRESH the
 * interface is shut down entirely (-EIO). */
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	adapter->reset_ctx_cnt++;

	/* Skip the context reset and check if FW is hung */
	if (adapter->reset_ctx_cnt < 3) {
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		dev_info(dev,
			 "Resetting context, wait here to check if FW is in failed state\n");
		return 0;
	}

	/* Check if number of resets exceed the threshold.
	 * If it exceeds the threshold just fail the VF.
	 */
	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
		adapter->tx_timeo_cnt = 0;
		adapter->fw_fail_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		qlcnic_sriov_vf_detach(adapter);
		dev_err(dev,
			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
		return -EIO;
	}

	dev_info(dev, "Resetting context of VF 0x%x\n", func);
	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
		 __func__, adapter->reset_ctx_cnt, func);
	set_bit(__QLCNIC_RESETTING, &adapter->state);
	adapter->need_fw_reset = 1;
	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	qlcnic_sriov_vf_detach(adapter);
	adapter->need_fw_reset = 0;

	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
		qlcnic_sriov_vf_attach(adapter);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		adapter->fw_fail_cnt = 0;
		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
	} else {
		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
			__func__, func);
		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
	}

	return 0;
}

/* IDC READY state handler: recover from a prior non-READY state, or
 * perform a context reset if one was flagged. */
static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int ret = 0;

	if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
		ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
	else if (ahw->reset_context)
		ret = qlcnic_sriov_vf_handle_context_reset(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return ret;
}

/* IDC FAILED state handler: detach (if previously READY) and stop the
 * poll loop by returning -EIO with MODULE_LOADED cleared. */
static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_err(&adapter->pdev->dev, "Device is in failed state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
		qlcnic_sriov_vf_detach(adapter);

	clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return -EIO;
}

/* IDC NEED_QUISCENT handler: quiesce the VF when leaving READY. */
static int
qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		qlcnic_sriov_vf_detach(adapter);
	}

	return 0;
}

/* IDC NEED_RESET/INIT handler: firmware hang detected while previously
 * READY — quiesce the VF and wait for the device to come back. */
static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
	u8 func = adapter->ahw->pci_func;

	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
		dev_err(&adapter->pdev->dev,
			"Firmware hang detected by VF 0x%x\n", func);
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		qlcnic_sriov_vf_detach(adapter);
	}

	return 0;
}

/* Catch-all for unrecognized IDC states: log and keep polling. */
static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
{
	dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n",
		__func__);
	return 0;
}

/* Periodic housekeeping run on every poll tick. */
static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
{
	if (adapter->fhash.fnum)
		qlcnic_prune_lb_filters(adapter);
}

/* Delayed-work IDC poll loop: read the current device state, dispatch to
 * the matching state handler, then reschedule itself unless a handler
 * failed or the module is being unloaded. */
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
	struct qlcnic_adapter *adapter;
	struct qlc_83xx_idc *idc;
	int ret = 0;

	adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
	idc = &adapter->ahw->idc;
	idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);

	switch (idc->curr_state) {
	case QLC_83XX_IDC_DEV_READY:
		ret = qlcnic_sriov_vf_idc_ready_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_RESET:
	case QLC_83XX_IDC_DEV_INIT:
		ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_QUISCENT:
		ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_FAILED:
		ret = qlcnic_sriov_vf_idc_failed_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_QUISCENT:
		break;
	default:
		ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
	}

	idc->prev_state = idc->curr_state;
	qlcnic_sriov_vf_periodic_tasks(adapter);

	if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
		qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
				     idc->delay);
}

/* Stop the poll loop: win the RESETTING bit (spinning with a 20 ms
 * backoff), clear MODULE_LOADED so the work won't reschedule, then
 * cancel any pending run. */
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(20);

	clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	cancel_delayed_work_sync(&adapter->fw_work);
}

/* Return 0 if vlan_id is present in the VF's configured VLAN list,
 * -EINVAL otherwise. */
static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
				      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i, err = -EINVAL;

	if (!vf->sriov_vlans)
		return err;

	spin_lock_bh(&vf->vlan_list_lock);

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (vf->sriov_vlans[i] == vlan_id) {
			err = 0;
			break;
		}
	}

	spin_unlock_bh(&vf->vlan_list_lock);
	return err;
}

/* Fail (-EINVAL) when the VF already holds its quota of VLANs. */
static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
					   struct qlcnic_vf_info *vf)
{
	int err = 0;

	spin_lock_bh(&vf->vlan_list_lock);

	if (vf->num_vlan >= sriov->num_allowed_vlans)
		err = -EINVAL;

	spin_unlock_bh(&vf->vlan_list_lock);
	return err;
}

/* Validate a guest VLAN add/delete request against the VLAN mode, the
 * per-VF quota and (if restricted) the PF's allowed-VLAN list. */
static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
					  u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	bool vlan_exist;
	u8 allowed = 0;
	int i;

	vf = &adapter->ahw->sriov->vf_info[0];
	vlan_exist = qlcnic_sriov_check_any_vlan(vf);
	if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
		return -EINVAL;

	if (enable) {
		/* 83xx VFs support a single guest VLAN at a time */
		if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
			return -EINVAL;

		if (qlcnic_sriov_validate_num_vlans(sriov, vf))
			return -EINVAL;

		if (sriov->any_vlan) {
			for (i = 0; i < sriov->num_allowed_vlans; i++) {
				if (sriov->allowed_vlans[i] == vid)
					allowed = 1;
			}

			if (!allowed)
				return -EINVAL;
		}
	} else {
		/* Deleting: the VLAN must actually be configured */
		if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
			return -EINVAL;
	}

	return 0;
}

/* Add or delete a VLAN id in the VF's list under vlan_list_lock. */
static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
					enum qlcnic_vlan_operations opcode)
{
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_sriov *sriov;

	sriov = adapter->ahw->sriov;

	if (!vf->sriov_vlans)
		return;

	spin_lock_bh(&vf->vlan_list_lock);

	switch (opcode) {
	case QLC_VLAN_ADD:
		qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
		break;
	case QLC_VLAN_DELETE:
		qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
		break;
	default:
		netdev_err(adapter->netdev, "Invalid VLAN operation\n");
	}

	spin_unlock_bh(&vf->vlan_list_lock);
	return;
}

/* Configure a guest VLAN for the VF: validate, send CFG_GUEST_VLAN over
 * the back channel, then refresh the local VLAN list and MAC filters. */
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
				   u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_vf_info *vf;
	struct qlcnic_cmd_args cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	if (vid == 0)
		return 0;

	vf = &adapter->ahw->sriov->vf_info[0];
	ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
	if (ret)
		return ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
	if (ret)
		return ret;

	/* arg[1]: bit 0 = enable/disable, bits 31:16 = VLAN id */
	cmd.req.arg[1] = (enable & 1) | vid << 16;

	qlcnic_sriov_cleanup_async_list(&sriov->bc);
	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure guest VLAN, err=%d\n", ret);
	} else {
		netif_addr_lock_bh(netdev);
		qlcnic_free_mac_list(adapter);
		netif_addr_unlock_bh(netdev);

		if (enable)
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
		else
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);

		/* Re-derive MAC filters now that the VLAN set changed */
		netif_addr_lock_bh(netdev);
		qlcnic_set_multi(netdev);
		netif_addr_unlock_bh(netdev);
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

/* Delete every MAC filter on the adapter's list and free the entries. */
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
{
	struct list_head *head = &adapter->mac_list;
	struct qlcnic_mac_vlan_list *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_vlan_list,
				 list);
		qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
					  QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}

/* PCI shutdown handler for the VF: detach, stop IDC work, bring the
 * interface down, tear the back channel down and save PCI state. */
static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	qlcnic_cancel_idc_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	qlcnic_83xx_disable_mbx_intr(adapter);
	cancel_delayed_work_sync(&adapter->idc_aen_work);

	return pci_save_state(pdev);
}

/* Resume handler: re-enable the back channel, bring the interface back
 * up and restart the IDC poll loop. */
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
	struct net_device *netdev = adapter->netdev;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	qlcnic_83xx_enable_mbx_interrupt(adapter);
	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (!err) {
		if (netif_running(netdev)) {
			err = qlcnic_up(adapter, netdev);
			if (!err)
				qlcnic_restore_indev_addr(netdev, NETDEV_UP);
		}
	}

	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     idc->delay);
	return err;
}

/* Allocate the per-VF VLAN arrays (num_allowed_vlans entries each).
 * NOTE(review): on mid-loop ENOMEM, earlier allocations are left for
 * qlcnic_sriov_free_vlans() to release — verify callers always pair the
 * two on the error path. */
int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	int i;

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
					  sizeof(*vf->sriov_vlans), GFP_KERNEL);
		if (!vf->sriov_vlans)
			return -ENOMEM;
	}

	return 0;
}

/* Free the per-VF VLAN arrays and NULL the pointers. */
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	int i;

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		kfree(vf->sriov_vlans);
		vf->sriov_vlans = NULL;
	}
}

/* Record vlan_id in the first free slot of the VF's VLAN array.
 * Caller holds vf->vlan_list_lock. */
void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
			      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i;

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (!vf->sriov_vlans[i]) {
			vf->sriov_vlans[i] = vlan_id;
			vf->num_vlan++;
			return;
		}
	}
}

/* Clear vlan_id from the VF's VLAN array. Caller holds vlan_list_lock. */
void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
			      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i;

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (vf->sriov_vlans[i] == vlan_id) {
			vf->sriov_vlans[i] = 0;
			vf->num_vlan--;
			return;
		}
	}
}

/* True when the VF has at least one VLAN configured. */
bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
{
	bool err = false;

	spin_lock_bh(&vf->vlan_list_lock);

	if (vf->num_vlan)
		err = true;

	spin_unlock_bh(&vf->vlan_list_lock);
	return err;
}
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI I/O adapter configuration related functions.
 *
 * Copyright IBM Corp. 2016
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>

#include <asm/sclp.h>

#include "sclp.h"

/* SCLP command words for (de)configuring a PCI I/O adapter */
#define SCLP_CMDW_CONFIGURE_PCI		0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI	0x001b0001
#define SCLP_ATYPE_PCI			2

/* Error-notify action qualifiers accepted in a zpci error report */
#define SCLP_ERRNOTIFY_AQ_RESET		0
#define SCLP_ERRNOTIFY_AQ_REPAIR	1
#define SCLP_ERRNOTIFY_AQ_INFO_LOG	2
#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA	3

/* Serializes sclp_pci_report(): one error-notify request at a time */
static DEFINE_MUTEX(sclp_pci_mutex);
static struct sclp_register sclp_pci_event = {
	.send_mask = EVTYP_ERRNOTIFY_MASK,
};

/* Event buffer for an error-notify request; trailing flexible array
 * carries the adapter-specific report data. */
struct err_notify_evbuf {
	struct evbuf_header header;
	u8 action;
	u8 atype;
	u32 fh;
	u32 fid;
	u8 data[];
} __packed;

struct err_notify_sccb {
	struct sccb_header header;
	struct err_notify_evbuf evbuf;
} __packed;

struct pci_cfg_sccb {
	struct sccb_header header;
	u8 atype;		/* adapter type */
	u8 reserved1;
	u16 reserved2;
	u32 aid;		/* adapter identifier */
} __packed;

/*
 * Issue a synchronous PCI (de)configure SCLP request for function id @fid.
 * Response codes 0x0020 and 0x0120 are treated as success; anything else
 * is logged and mapped to -EIO.
 */
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
	struct pci_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_PCI_RECONFIG)
		return -EOPNOTSUPP;

	/* SCCB must be below 2 GiB, hence GFP_DMA */
	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;

	sccb->header.length = PAGE_SIZE;
	sccb->atype = SCLP_ATYPE_PCI;
	sccb->aid = fid;
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/* Configure (bring online) the PCI function @fid. */
int sclp_pci_configure(u32 fid)
{
	return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);

/* Deconfigure (take offline) the PCI function @fid. */
int sclp_pci_deconfigure(u32 fid)
{
	return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);

/* Request callback: wake the waiter in sclp_pci_report(). */
static void sclp_pci_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

/*
 * Validate an incoming zpci error report: version 1 only, a known action
 * qualifier, and a payload that fits in the SCCB page.
 */
static int sclp_pci_check_report(struct zpci_report_error_header *report)
{
	if (report->version != 1)
		return -EINVAL;

	switch (report->action) {
	case SCLP_ERRNOTIFY_AQ_RESET:
	case SCLP_ERRNOTIFY_AQ_REPAIR:
	case SCLP_ERRNOTIFY_AQ_INFO_LOG:
	case SCLP_ERRNOTIFY_AQ_OPTICS_DATA:
		break;
	default:
		return -EINVAL;
	}

	if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
		return -EINVAL;

	return 0;
}

/*
 * Send a zpci error report to the Support Element via an error-notify
 * event. Registers the event type for the duration of the call, builds
 * the SCCB, submits the request and waits for completion. Serialized by
 * sclp_pci_mutex.
 */
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct err_notify_sccb *sccb;
	struct sclp_req req;
	int ret;

	ret = sclp_pci_check_report(report);
	if (ret)
		return ret;

	mutex_lock(&sclp_pci_mutex);
	ret = sclp_register(&sclp_pci_event);
	if (ret)
		goto out_unlock;

	if (!(sclp_pci_event.sclp_receive_mask & EVTYP_ERRNOTIFY_MASK)) {
		ret = -EOPNOTSUPP;
		goto out_unregister;
	}

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb) {
		ret = -ENOMEM;
		goto out_unregister;
	}

	memset(&req, 0, sizeof(req));
	req.callback_data = &completion;
	req.callback = sclp_pci_callback;
	req.command = SCLP_CMDW_WRITE_EVENT_DATA;
	req.status = SCLP_REQ_FILLED;
	req.sccb = sccb;

	sccb->evbuf.header.length = sizeof(sccb->evbuf) + report->length;
	sccb->evbuf.header.type = EVTYP_ERRNOTIFY;
	sccb->header.length = sizeof(sccb->header) + sccb->evbuf.header.length;

	sccb->evbuf.action = report->action;
	sccb->evbuf.atype = SCLP_ATYPE_PCI;
	sccb->evbuf.fh = fh;
	sccb->evbuf.fid = fid;

	memcpy(sccb->evbuf.data, report->data, report->length);

	ret = sclp_add_request(&req);
	if (ret)
		goto out_free_req;

	wait_for_completion(&completion);
	if (req.status != SCLP_REQ_DONE) {
		pr_warn("request failed (status=0x%02x)\n",
			req.status);
		ret = -EIO;
		goto out_free_req;
	}

	if (sccb->header.response_code != 0x0020) {
		pr_warn("request failed with response code 0x%x\n",
			sccb->header.response_code);
		ret = -EIO;
	}

out_free_req:
	free_page((unsigned long) sccb);
out_unregister:
	sclp_unregister(&sclp_pci_event);
out_unlock:
	mutex_unlock(&sclp_pci_mutex);
	return ret;
}
// SPDX-License-Identifier: GPL-2.0-only OR MIT /dts-v1/; #include "mt7981b.dtsi" / { compatible = "openwrt,one", "mediatek,mt7981b"; model = "OpenWrt One"; memory@40000000 { reg = <0 0x40000000 0 0x40000000>; device_type = "memory"; }; };
// SPDX-License-Identifier: GPL-2.0-only /* * Overview: * This is the generic MTD driver for NAND flash devices. It should be * capable of working with almost all NAND chips currently available. * * Additional technical information is available on * http://www.linux-mtd.infradead.org/doc/nand.html * * Copyright (C) 2000 Steven J. Hill ([email protected]) * 2002-2006 Thomas Gleixner ([email protected]) * * Credits: * David Woodhouse for adding multichip support * * Aleph One Ltd. and Toby Churchill Ltd. for supporting the * rework for 2K page size chips * * TODO: * Enable cached programming for 2k page size chips * Check, if mtd->ecctype should be set to MTD_ECC_HW * if we have HW ECC support. * BBT table is not serialized, has to be fixed */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/nand-ecc-sw-bch.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/mtd/partitions.h> #include <linux/of.h> #include <linux/gpio/consumer.h> #include "internals.h" static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page, struct mtd_pairing_info *info) { int lastpage = (mtd->erasesize / mtd->writesize) - 1; int dist = 3; if (page == lastpage) dist = 2; if (!page || (page & 1)) { info->group = 0; info->pair = (page + 1) / 2; } else { info->group = 1; info->pair = (page + 1 - dist) / 2; } return 0; } static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd, const struct mtd_pairing_info *info) { int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2; int page = info->pair * 2; int dist = 3; if (!info->group && !info->pair) return 0; if (info->pair == lastpair && info->group) dist = 2; if (!info->group) page--; else 
if (info->pair) page += dist - 1; if (page >= mtd->erasesize / mtd->writesize) return -EINVAL; return page; } const struct mtd_pairing_scheme dist3_pairing_scheme = { .ngroups = 2, .get_info = nand_pairing_dist3_get_info, .get_wunit = nand_pairing_dist3_get_wunit, }; static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len) { int ret = 0; /* Start address must align on block boundary */ if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) { pr_debug("%s: unaligned address\n", __func__); ret = -EINVAL; } /* Length must align on block boundary */ if (len & ((1ULL << chip->phys_erase_shift) - 1)) { pr_debug("%s: length not block aligned\n", __func__); ret = -EINVAL; } return ret; } /** * nand_extract_bits - Copy unaligned bits from one buffer to another one * @dst: destination buffer * @dst_off: bit offset at which the writing starts * @src: source buffer * @src_off: bit offset at which the reading starts * @nbits: number of bits to copy from @src to @dst * * Copy bits from one memory region to another (overlap authorized). */ void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, unsigned int src_off, unsigned int nbits) { unsigned int tmp, n; dst += dst_off / 8; dst_off %= 8; src += src_off / 8; src_off %= 8; while (nbits) { n = min3(8 - dst_off, 8 - src_off, nbits); tmp = (*src >> src_off) & GENMASK(n - 1, 0); *dst &= ~GENMASK(n - 1 + dst_off, dst_off); *dst |= tmp << dst_off; dst_off += n; if (dst_off >= 8) { dst++; dst_off -= 8; } src_off += n; if (src_off >= 8) { src++; src_off -= 8; } nbits -= n; } } EXPORT_SYMBOL_GPL(nand_extract_bits); /** * nand_select_target() - Select a NAND target (A.K.A. die) * @chip: NAND chip object * @cs: the CS line to select. Note that this CS id is always from the chip * PoV, not the controller one * * Select a NAND target so that further operations executed on @chip go to the * selected NAND target. 
*/ void nand_select_target(struct nand_chip *chip, unsigned int cs) { /* * cs should always lie between 0 and nanddev_ntargets(), when that's * not the case it's a bug and the caller should be fixed. */ if (WARN_ON(cs > nanddev_ntargets(&chip->base))) return; chip->cur_cs = cs; if (chip->legacy.select_chip) chip->legacy.select_chip(chip, cs); } EXPORT_SYMBOL_GPL(nand_select_target); /** * nand_deselect_target() - Deselect the currently selected target * @chip: NAND chip object * * Deselect the currently selected NAND target. The result of operations * executed on @chip after the target has been deselected is undefined. */ void nand_deselect_target(struct nand_chip *chip) { if (chip->legacy.select_chip) chip->legacy.select_chip(chip, -1); chip->cur_cs = -1; } EXPORT_SYMBOL_GPL(nand_deselect_target); /** * nand_release_device - [GENERIC] release chip * @chip: NAND chip object * * Release chip lock and wake up anyone waiting on the device. */ static void nand_release_device(struct nand_chip *chip) { /* Release the controller and the chip */ mutex_unlock(&chip->controller->lock); mutex_unlock(&chip->lock); } /** * nand_bbm_get_next_page - Get the next page for bad block markers * @chip: NAND chip object * @page: First page to start checking for bad block marker usage * * Returns an integer that corresponds to the page offset within a block, for * a page that is used to store bad block markers. If no more pages are * available, -EINVAL is returned. 
*/ int nand_bbm_get_next_page(struct nand_chip *chip, int page) { struct mtd_info *mtd = nand_to_mtd(chip); int last_page = ((mtd->erasesize - mtd->writesize) >> chip->page_shift) & chip->pagemask; unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE | NAND_BBM_LASTPAGE; if (page == 0 && !(chip->options & bbm_flags)) return 0; if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE) return 0; if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) return 1; if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) return last_page; return -EINVAL; } /** * nand_block_bad - [DEFAULT] Read bad block marker from the chip * @chip: NAND chip object * @ofs: offset from device start * * Check, if the block is bad. */ static int nand_block_bad(struct nand_chip *chip, loff_t ofs) { int first_page, page_offset; int res; u8 bad; first_page = (int)(ofs >> chip->page_shift) & chip->pagemask; page_offset = nand_bbm_get_next_page(chip, 0); while (page_offset >= 0) { res = chip->ecc.read_oob(chip, first_page + page_offset); if (res < 0) return res; bad = chip->oob_poi[chip->badblockpos]; if (likely(chip->badblockbits == 8)) res = bad != 0xFF; else res = hweight8(bad) < chip->badblockbits; if (res) return res; page_offset = nand_bbm_get_next_page(chip, page_offset + 1); } return 0; } /** * nand_region_is_secured() - Check if the region is secured * @chip: NAND chip object * @offset: Offset of the region to check * @size: Size of the region to check * * Checks if the region is secured by comparing the offset and size with the * list of secure regions obtained from DT. Returns true if the region is * secured else false. 
 */
static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
{
	int i;

	/* Skip touching the secure regions if present */
	for (i = 0; i < chip->nr_secure_regions; i++) {
		const struct nand_secure_region *region = &chip->secure_regions[i];

		/* Overlap test: [offset, offset+size) vs the secure region. */
		if (offset + size <= region->offset ||
		    offset >= region->offset + region->size)
			continue;

		pr_debug("%s: Region 0x%llx - 0x%llx is secured!", __func__,
			 offset, offset + size);

		return true;
	}

	return false;
}

static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->options & NAND_NO_BBM_QUIRK)
		return 0;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, ofs, mtd->erasesize))
		return -EIO;

	/* In expert analysis mode, pretend every block is good. */
	if (mtd_check_expert_analysis_mode())
		return 0;

	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}

/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access
 */
static void nand_get_device(struct nand_chip *chip)
{
	/* Wait until the device is resumed. */
	while (1) {
		mutex_lock(&chip->lock);
		if (!chip->suspended) {
			/* Chip lock held: now take the controller lock too. */
			mutex_lock(&chip->controller->lock);
			return;
		}
		mutex_unlock(&chip->lock);

		/* Drop the lock while sleeping so resume can proceed. */
		wait_event(chip->resume_wq, !chip->suspended);
	}
}

/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @chip: NAND chip object
 *
 * Check, if the device is write protected. The function expects, that the
 * device is already selected.
 */
static int nand_check_wp(struct nand_chip *chip)
{
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* controller responsible for NAND write protect */
	if (chip->controller->controller_wp)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	/* WP bit set in STATUS means *writable*; 1 means protected here. */
	return status & NAND_STATUS_WP ?
0 : 1;
}

/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw/place modes: copy at the caller-provided OOB offset. */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Auto mode: scatter bytes into the free OOB areas per layout. */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}

/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Usable OOB bytes per page for the requested mode. */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, ops->ooblen))
		return -EIO;

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}

/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: align the offset and write a full word marker. */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	page_offset = nand_bbm_get_next_page(chip, 0);

	/* Write the marker to every page that may carry a BBM. */
	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Keep the first error but still try the remaining pages. */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}

/**
 * nand_markbad_bbm - mark a block by updating the BBM
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 */
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->legacy.block_markbad)
		return chip->legacy.block_markbad(chip, ofs);

	return nand_default_block_markbad(chip, ofs);
}
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase failure is ignored on purpose: block is bad anyway. */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(chip);
		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Retain the first error, per the kerneldoc above. */
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}

/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Without a BBT nothing can be reserved. */
	if (!chip->bbt)
		return 0;
	/* Return info from the table */
	return nand_isreserved_bbt(chip, ofs);
}

/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 * @allowbbt: 1, if it's allowed to access the bbt area
 *
 * Check, if the block is bad.
Either by reading the bad block table or
 * calling of the scan function.
 */
static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
	/* Return info from the table */
	if (chip->bbt)
		return nand_isbad_bbt(chip, ofs, allowbbt);

	/* No BBT: read the on-flash bad block marker instead. */
	return nand_isbad_bbm(chip, ofs);
}

/**
 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 * If that does not happen within the specified timeout, -ETIMEDOUT is
 * returned.
 *
 * This helper is intended to be used when the controller does not have access
 * to the NAND R/B pin.
 *
 * Be aware that calling this helper from an ->exec_op() implementation means
 * ->exec_op() must be re-entrant.
 *
 * Return 0 if the NAND chip is ready, a negative error otherwise.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_interface_config *conf;
	u8 status = 0;
	int ret;

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg. */
	conf = nand_get_interface_config(chip);
	ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));

	/* Put the chip in READ_STATUS mode once; then only read data bytes. */
	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	/*
	 * +1 below is necessary because if we are now in the last fraction
	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
	 * small jiffy fraction - possibly leading to false timeout
	 */
	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		ret = nand_read_data_op(chip, &status, sizeof(status), true,
					false);
		if (ret)
			break;

		if (status & NAND_STATUS_READY)
			break;

		/*
		 * Typical lowest execution time for a tR on most NANDs is 10us,
		 * use this as polling delay before doing something smarter (ie.
		 * deriving a delay from the timeout value, timeout_ms/ratio).
*/ udelay(10); } while (time_before(jiffies, timeout_ms)); /* * We have to exit READ_STATUS mode in order to read real data on the * bus in case the WAITRDY instruction is preceding a DATA_IN * instruction. */ nand_exit_status_op(chip); if (ret) return ret; return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT; }; EXPORT_SYMBOL_GPL(nand_soft_waitrdy); /** * nand_gpio_waitrdy - Poll R/B GPIO pin until ready * @chip: NAND chip structure * @gpiod: GPIO descriptor of R/B pin * @timeout_ms: Timeout in ms * * Poll the R/B GPIO pin until it becomes ready. If that does not happen * whitin the specified timeout, -ETIMEDOUT is returned. * * This helper is intended to be used when the controller has access to the * NAND R/B pin over GPIO. * * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise. */ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms) { /* * Wait until R/B pin indicates chip is ready or timeout occurs. * +1 below is necessary because if we are now in the last fraction * of jiffy and msecs_to_jiffies is 1 then we will wait only that * small jiffy fraction - possibly leading to false timeout. */ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; do { if (gpiod_get_value_cansleep(gpiod)) return 0; cond_resched(); } while (time_before(jiffies, timeout_ms)); return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT; }; EXPORT_SYMBOL_GPL(nand_gpio_waitrdy); /** * panic_nand_wait - [GENERIC] wait until the command is done * @chip: NAND chip structure * @timeo: timeout * * Wait for command done. This is a helper function for nand_wait used when * we are in interrupt context. May happen when in panic and trying to write * an oops through mtdoops. 
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;

	/* Busy-poll once per millisecond; no sleeping in panic context. */
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/* No dev_ready callback: read the STATUS byte instead. */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true, false);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}

/* True if the chip advertises GET FEATURES support for @addr. */
static bool nand_supports_get_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.get_feature_list));
}

/* True if the chip advertises SET FEATURES support for @addr. */
static bool nand_supports_set_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.set_feature_list));
}

/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */
	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}

/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * A nand_reset_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->best_interface_config)
		return 0;

	/* Encode the ONFI timing-mode feature byte: mode + interface type. */
	request = chip->best_interface_config->timings.mode;
	if (nand_interface_is_sdr(chip->best_interface_config))
		request |= ONFI_DATA_INTERFACE_SDR;
	else
		request |= ONFI_DATA_INTERFACE_NVDDR;
	tmode_param[0] = request;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	/* The chip must echo back exactly the requested feature byte. */
	if (request != tmode_param[0]) {
		pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
			nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
			chip->best_interface_config->timings.mode);
		pr_debug("NAND chip would work in %s timing mode %d\n",
			 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
			 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}

/**
 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
 *                                NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_sdr_timings(struct nand_chip *chip,
				 struct nand_interface_config *iface,
				 struct nand_sdr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_SDR_IFACE;

	if (spec_timings) {
		iface->timings.sdr = *spec_timings;
		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest timing mode advertised in the ONFI parameter page. */
		best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
	}

	/* Try modes from fastest to slowest until the controller accepts one. */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}

/**
 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
 *                                  NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_nvddr_timings(struct nand_chip *chip,
				   struct nand_interface_config *iface,
				   struct nand_nvddr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_NVDDR_IFACE;

	if (spec_timings) {
		iface->timings.nvddr = *spec_timings;
		iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest NV-DDR mode advertised in the ONFI parameter page. */
		best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
	}

	/* Try modes from fastest to slowest until the controller accepts one. */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}

/**
 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
 *                            NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
static int nand_choose_best_timings(struct nand_chip *chip,
				    struct nand_interface_config *iface)
{
	int ret;

	/* Try the fastest timings: NV-DDR */
	ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
	if (!ret)
		return 0;

	/* Fallback to SDR timings otherwise */
	return nand_choose_best_sdr_timings(chip, iface, NULL);
}

/**
 * nand_choose_interface_config - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver. Eventually let the NAND manufacturer driver propose his own
 * set of timings.
 *
 * After this function nand_chip->interface_config is initialized with the best
 * timing mode available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_choose_interface_config(struct nand_chip *chip)
{
	struct nand_interface_config *iface;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	iface = kzalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	if (chip->ops.choose_interface_config)
		ret = chip->ops.choose_interface_config(chip, iface);
	else
		ret = nand_choose_best_timings(chip, iface);

	/*
	 * On success @iface is retained (the helpers store it in
	 * chip->best_interface_config); only free it on failure.
	 */
	if (ret)
		kfree(iface);

	return ret;
}

/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* writesize == 0 only before the chip has been identified. */
	bool ident_stage = !mtd->writesize;

	/* Bypass all checks during NAND identification */
	if (likely(!ident_stage)) {
		/* Make sure the offset is less than the actual page size. */
		if (offset_in_page > mtd->writesize + mtd->oobsize)
			return -EINVAL;

		/*
		 * On small page NANDs, there's a dedicated command to access the OOB
		 * area, and the column address is relative to the start of the OOB
		 * area, not the start of the page. Adjust the address accordingly.
		 */
		if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
			offset_in_page -= mtd->writesize;

		/*
		 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
		 * wide, then it must be divided by 2.
		 */
		if (chip->options & NAND_BUSWIDTH_16) {
			if (WARN_ON(offset_in_page % 2))
				return -EINVAL;

			offset_in_page /= 2;
		}
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (!ident_stage && mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}

/* READ PAGE for small page chips: no READSTART, column implied by opcode. */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Pick READ0/READ1/READOOB depending on where the column lands. */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: 2 cycles, plus a third on large-density chips. */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/* READ PAGE for large page chips: READ0 + addresses + READSTART. */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op =
		NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: cycles 2-3, plus a fourth on large-density chips. */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
{
	/* lun is expected to be very small */
	return (lun * pages_per_lun) + pages_per_lun - 1;
}

static void rawnand_cap_cont_reads(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	unsigned int ppl, first_lun, last_lun;

	memorg = nanddev_get_memorg(&chip->base);
	ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
	first_lun = chip->cont_read.first_page / ppl;
	last_lun = chip->cont_read.last_page / ppl;

	/* Prevent sequential cache reads across LUN boundaries */
	if (first_lun != last_lun)
		chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun);
	else
		chip->cont_read.pause_page = chip->cont_read.last_page;

	/*
	 * If the window starts exactly on the pause page, shift it forward and
	 * pick the next pause point within the following LUN.
	 */
	if (chip->cont_read.first_page == chip->cont_read.pause_page) {
		chip->cont_read.first_page++;
		chip->cont_read.pause_page = min(chip->cont_read.last_page,
						 rawnand_last_page_of_lun(ppl, first_lun + 1));
	}

	/* Nothing left to read sequentially. */
	if (chip->cont_read.first_page >= chip->cont_read.last_page)
		chip->cont_read.ongoing = false;
}

/* Sequential cache read: start sequence, or continuation, per @page. */
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
					  unsigned int offset_in_page, void *buf,
					  unsigned int len, bool check_only)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr start_instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
		NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_op_instr cont_instrs[] = {
		/* READCACHEEND on the pause page terminates the sequence. */
		NAND_OP_CMD(page == chip->cont_read.pause_page ?
			    NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs);
	struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs);
	int ret;

	/* Drop the trailing DATA_IN instructions if len is set to 0. */
	if (!len) {
		start_op.ninstrs--;
		cont_op.ninstrs--;
	}

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		start_instrs[1].ctx.addr.naddrs++;
	}

	/* Check if cache reads are supported */
	if (check_only) {
		if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op))
			return -EOPNOTSUPP;

		return 0;
	}

	if (page == chip->cont_read.first_page)
		ret = nand_exec_op(chip, &start_op);
	else
		ret = nand_exec_op(chip, &cont_op);
	if (ret)
		return ret;

	if (!chip->cont_read.ongoing)
		return 0;

	/* Advance the sequential-read window bookkeeping. */
	if (page == chip->cont_read.last_page) {
		chip->cont_read.ongoing = false;
	} else if (page == chip->cont_read.pause_page) {
		chip->cont_read.first_page++;
		rawnand_cap_cont_reads(chip);
	}

	return 0;
}

static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
{
	return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
}

/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		if (mtd->writesize > 512) {
			/* Prefer the sequential cache read path when active. */
			if (rawnand_cont_read_ongoing(chip, page))
				return nand_lp_exec_cont_read_page_op(chip, page,
								      offset_in_page,
								      buf, len, false);
			else
				return nand_lp_exec_read_page_op(chip, page,
								 offset_in_page, buf,
								 len);
		}

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	/* Legacy path: issue READ0 through the driver's cmdfunc. */
	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);

/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			/* Parameter page is always read with 8-bit accesses. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0.
		 */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: read byte by byte through read_byte(). */
	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}

/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* writesize == 0 only before the chip has been identified. */
	bool ident_stage = !mtd->writesize;

	if (len && !buf)
		return -EINVAL;

	if (!ident_stage) {
		if (offset_in_page + len > mtd->writesize + mtd->oobsize)
			return -EINVAL;

		/* Small page NANDs do not support column change. */
		if (mtd->writesize <= 512)
			return -ENOTSUPP;
	}

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0.
		 */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: issue RNDOUT through the driver's cmdfunc. */
	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);

/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	/* OOB is read as a page read with the column past the data area. */
	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);

/* Common PROG PAGE sequence; @prog selects begin-only vs full program. */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		/* naddrs is patched below once the cycle count is known. */
		NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
	};
	struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
							      instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);

	if (naddrs < 0)
		return naddrs;

	/* Append the row (page) address cycles after the column cycles. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	/* instrs[2] is the ADDR instruction declared above. */
	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	return nand_exec_op(chip, &op);
}

/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* prog=false: stop after the data cycles, no PAGEPROG yet. */
	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);

/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf,
							       tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read back the status register to detect program failure. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);

/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 status;
	int ret;

	/* A full page program without data makes no sense. */
	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* prog=true: run the full SEQIN..PAGEPROG sequence. */
		ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					     len, true);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);

/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change.
 */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs,
				     NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* instrs[2] is the DATA_OUT instruction declared above. */
		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);

/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf, *ddrbuf = NULL;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* READ_ID data bytes are received twice in NV-DDR mode */
		if (len && nand_interface_is_nvddr(conf)) {
			/* Double-size bounce buffer to hold the duplicates. */
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			/* instrs[2] is the 8BIT_DATA_IN declared above. */
			instrs[2].ctx.data.len *= 2;
			instrs[2].ctx.data.buf.in = ddrbuf;
		}

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && len && nand_interface_is_nvddr(conf)) {
			/* Keep only one byte of each duplicated pair. */
			for (i = 0; i < len; i++)
				id[i] = ddrbuf[i * 2];
		}

		/* kfree(NULL) is a no-op, so this is safe in SDR mode too. */
		kfree(ddrbuf);

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);

/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* The status data byte will be received twice in NV-DDR mode */
		if (status && nand_interface_is_nvddr(conf)) {
			/* instrs[1] is the 8BIT_DATA_IN declared above. */
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		/* Caller may only want to issue the command, not the read. */
		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);

/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command to avoid reading only the status until a new read command is sent.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_exit_status_op);

/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * This function sends an ERASE command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	/* Convert the eraseblock index into its first page number. */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		/* Row-only addressing: third cycle used when NAND_ROW_ADDR_3. */
		u8 addrs[3] = {	page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
					 0),
		};
		struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
								      instrs);

		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);

		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);

/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a SET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      NAND_COMMON_TIMING_NS(conf,
								    tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a GET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	/* NV-DDR duplicates each byte: bounce buffer is twice the length. */
	u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* GET_FEATURE data bytes are received twice in NV-DDR mode */
		if (nand_interface_is_nvddr(conf)) {
			/* instrs[3] is the 8BIT_DATA_IN declared above. */
			instrs[3].ctx.data.len *= 2;
			instrs[3].ctx.data.buf.in = ddrbuf;
		}

		ret = nand_exec_op(chip, &op);
		if (nand_interface_is_nvddr(conf)) {
			/* Keep only one byte of each duplicated pair. */
			for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
				params[i] = ddrbuf[i * 2];
		}

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}

/*
 * Wait for the chip to become ready again.
 * NOTE(review): despite the parameter names, the values are fed through
 * PSEC_TO_MSEC()/PSEC_TO_NSEC(), i.e. callers apparently pass picoseconds —
 * confirm against the call sites.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}

/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * This function sends a RESET command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);

/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only checks if the
 *              controller driver supports it
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || (!check_only && !buf))
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		u8 *ddrbuf = NULL;
		int ret, i;

		instrs[0].ctx.data.force_8bit = force_8bit;

		/*
		 * Parameter payloads (ID, status, features, etc) do not go
		 * through the same pipeline as regular data, hence the
		 * force_8bit flag must be set and this also indicates that in
		 * case NV-DDR timings are being used the data will be received
		 * twice.
 */
		if (force_8bit && nand_interface_is_nvddr(conf)) {
			/* Double-size bounce buffer for the duplicated bytes. */
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[0].ctx.data.len *= 2;
			instrs[0].ctx.data.buf.in = ddrbuf;
		}

		if (check_only) {
			/* Only validate controller support, do not execute. */
			ret = nand_check_op(chip, &op);
			kfree(ddrbuf);
			return ret;
		}

		ret = nand_exec_op(chip, &op);
		if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
			u8 *dst = buf;

			/* Keep only one byte of each duplicated pair. */
			for (i = 0; i < len; i++)
				dst[i] = ddrbuf[i * 2];
		}

		/* kfree(NULL) is a no-op, so this is safe in SDR mode too. */
		kfree(ddrbuf);

		return ret;
	}

	/* The legacy path can always do a plain bus read. */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);

/**
 * nand_write_data_op - Write data from the NAND
 * @chip: The NAND chip
 * @buf: buffer containing the data to send on the bus
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function does a raw data write on the bus. Usually used after launching
 * another NAND operation like nand_write_page_begin_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);

/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};

/**
 * nand_op_parser_must_split_instr - Checks if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: pointer to the instruction to check
 * @start_offset: this is an in/out parameter. If @instr has already been
 *		  split, then @start_offset is the offset from which to start
 *		  (either an address cycle or an offset in the data buffer).
 *		  Conversely, if the function returns true (ie. instr must be
 *		  split), this parameter is updated to point to the first
 *		  data/address cycle that has not been taken care of.
 *
 * Some NAND controllers are limited and cannot send X address cycles with a
 * unique operation, or cannot read/write more than Y bytes at the same time.
 * In this case, split the instruction that does not fit in a single
 * controller-operation into two or more chunks.
 *
 * Returns true if the instruction must be split, false otherwise.
 * The @start_offset parameter is also updated to the offset at which the next
 * bundle of instruction must start (if an address or a data instruction).
 */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit": never split. */
		if (!pat->ctx.addr.maxcycles)
			break;

		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit": never split. */
		if (!pat->ctx.data.maxlen)
			break;

		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		break;
	}

	return false;
}

/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false ortherwise. When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver.
 */
static bool nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
				     struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = " ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Mark the instructions belonging to the current subop. */
		if (instr == &ctx->subop.instrs[0])
			prefix = " ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = " ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif

/*
 * Compare two parser contexts: a context that consumed more instructions,
 * or consumed more of the last (split) instruction, is considered "bigger".
 * Used below to pick the pattern covering the most work per subop.
 */
static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
				  const struct nand_op_parser_ctx *b)
{
	if (a->subop.ninstrs < b->subop.ninstrs)
		return -1;
	else if (a->subop.ninstrs > b->subop.ninstrs)
		return 1;

	if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
		return -1;
	else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
		return 1;

	return 0;
}

/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/* Try every pattern, keep the one consuming the most work. */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/* A split instruction is re-used by the next subop. */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);

/* True iff @instr is a DATA_IN or DATA_OUT instruction. */
static bool nand_instr_is_data(const struct nand_op_instr *instr)
{
	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
			 instr->type == NAND_OP_DATA_OUT_INSTR);
}

/* True iff @instr_idx is a valid index into @subop's instruction array. */
static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
				      unsigned int instr_idx)
{
	return subop && instr_idx < subop->ninstrs;
}

/*
 * Only the first instruction of a subop can have a non-zero start offset
 * (it may be the second half of a split instruction).
 */
static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
					     unsigned int instr_idx)
{
	if (instr_idx)
		return 0;

	return subop->first_instr_start_off;
}

/**
 * nand_subop_get_addr_start_off - Get the start offset in an address array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.addrs field of address instructions. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the offset of the first cycle to issue.
 */
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);

/**
 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of a data instruction. This is wrong as instructions
 * might be split.
 *
 * Given an address instruction, returns the number of address cycle to issue.
 */
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int instr_idx)
{
	int start_off, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	start_off = nand_subop_get_addr_start_off(subop, instr_idx);

	/* The last instruction of a subop may have been split. */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);

/**
 * nand_subop_get_data_start_off - Get the start offset in a data array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->buf.{in,out} field of data instructions. This is wrong as data
 * instructions might be split.
 *
 * Given a data instruction, returns the offset to start from.
 */
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);

/**
 * nand_subop_get_data_len - Get the number of bytes to retrieve
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->len field of a data instruction. This is wrong as data instructions
 * might be split.
 *
 * Returns the length of the chunk of data to send/receive.
 */
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int instr_idx)
{
	int start_off = 0, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	start_off = nand_subop_get_data_start_off(subop, instr_idx);

	/* The last instruction of a subop may have been split. */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.data.len;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_len);

/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_interface for details), do the reset operation, and apply
 * back the previous timings.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	ret = nand_setup_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);

/**
 * nand_get_features - wrapper to perform a GET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four bytes array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_get_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	if (!nand_supports_get_features(chip, addr))
		return -ENOTSUPP;

	/* A vendor/controller override takes precedence over the core op. */
	if (chip->legacy.get_features)
		return chip->legacy.get_features(chip, addr, subfeature_param);

	return nand_get_features_op(chip, addr, subfeature_param);
}

/**
 * nand_set_features - wrapper to perform a SET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four bytes array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_set_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	if (!nand_supports_set_features(chip, addr))
		return -ENOTSUPP;

	/* A vendor/controller override takes precedence over the core op. */
	if (chip->legacy.set_features)
		return chip->legacy.set_features(chip, addr, subfeature_param);

	return nand_set_features_op(chip, addr, subfeature_param);
}

/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a buffer contains only 0xff, which means the underlying region
 * has been erased and is ready to be programmed.
 * The bitflips_threshold specify the maximum number of bitflips before
 * considering the region is not erased.
 * Note: The logic of this function has been extracted from the memweight
 * implementation, except that nand_check_erased_buf function exit before
 * testing the whole buffer if the number of bitflips exceed the
 * bitflips_threshold value.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold.
*/ static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) { const unsigned char *bitmap = buf; int bitflips = 0; int weight; for (; len && ((uintptr_t)bitmap) % sizeof(long); len--, bitmap++) { weight = hweight8(*bitmap); bitflips += BITS_PER_BYTE - weight; if (unlikely(bitflips > bitflips_threshold)) return -EBADMSG; } for (; len >= sizeof(long); len -= sizeof(long), bitmap += sizeof(long)) { unsigned long d = *((unsigned long *)bitmap); if (d == ~0UL) continue; weight = hweight_long(d); bitflips += BITS_PER_LONG - weight; if (unlikely(bitflips > bitflips_threshold)) return -EBADMSG; } for (; len > 0; len--, bitmap++) { weight = hweight8(*bitmap); bitflips += BITS_PER_BYTE - weight; if (unlikely(bitflips > bitflips_threshold)) return -EBADMSG; } return bitflips; } /** * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only * 0xff data * @data: data buffer to test * @datalen: data length * @ecc: ECC buffer * @ecclen: ECC length * @extraoob: extra OOB buffer * @extraooblen: extra OOB length * @bitflips_threshold: maximum number of bitflips * * Check if a data buffer and its associated ECC and OOB data contains only * 0xff pattern, which means the underlying region has been erased and is * ready to be programmed. * The bitflips_threshold specify the maximum number of bitflips before * considering the region as not erased. * * Note: * 1/ ECC algorithms are working on pre-defined block sizes which are usually * different from the NAND page size. When fixing bitflips, ECC engines will * report the number of errors per chunk, and the NAND core infrastructure * expect you to return the maximum number of bitflips for the whole page. * This is why you should always use this function on a single chunk and * not on the whole page. After checking each chunk you should update your * max_bitflips value accordingly. 
* 2/ When checking for bitflips in erased pages you should not only check * the payload data but also their associated ECC data, because a user might * have programmed almost all bits to 1 but a few. In this case, we * shouldn't consider the chunk as erased, and checking ECC bytes prevent * this case. * 3/ The extraoob argument is optional, and should be used if some of your OOB * data are protected by the ECC engine. * It could also be used if you support subpages and want to attach some * extra OOB data to an ECC chunk. * * Returns a positive number of bitflips less than or equal to * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the * threshold. In case of success, the passed buffers are filled with 0xff. */ int nand_check_erased_ecc_chunk(void *data, int datalen, void *ecc, int ecclen, void *extraoob, int extraooblen, int bitflips_threshold) { int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0; data_bitflips = nand_check_erased_buf(data, datalen, bitflips_threshold); if (data_bitflips < 0) return data_bitflips; bitflips_threshold -= data_bitflips; ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold); if (ecc_bitflips < 0) return ecc_bitflips; bitflips_threshold -= ecc_bitflips; extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen, bitflips_threshold); if (extraoob_bitflips < 0) return extraoob_bitflips; if (data_bitflips) memset(data, 0xff, datalen); if (ecc_bitflips) memset(ecc, 0xff, ecclen); if (extraoob_bitflips) memset(extraoob, 0xff, extraooblen); return data_bitflips + ecc_bitflips + extraoob_bitflips; } EXPORT_SYMBOL(nand_check_erased_ecc_chunk); /** * nand_read_page_raw_notsupp - dummy read raw page function * @chip: nand chip info structure * @buf: buffer to store read data * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * Returns -ENOTSUPP unconditionally. 
*/ int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf, int oob_required, int page) { return -ENOTSUPP; } /** * nand_read_page_raw - [INTERN] read raw page data without ecc * @chip: nand chip info structure * @buf: buffer to store read data * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * Not for syndrome calculating ECC controllers, which use a special oob layout. */ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); int ret; ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize); if (ret) return ret; if (oob_required) { ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false); if (ret) return ret; } return 0; } EXPORT_SYMBOL(nand_read_page_raw); /** * nand_monolithic_read_page_raw - Monolithic page read in raw mode * @chip: NAND chip info structure * @buf: buffer to store read data * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * This is a raw page read, ie. without any error detection/correction. * Monolithic means we are requesting all the relevant data (main plus * eventually OOB) to be loaded in the NAND cache and sent over the * bus (from the NAND chip to the NAND controller) in a single * operation. This is an alternative to nand_read_page_raw(), which * first reads the main data, and if the OOB data is requested too, * then reads more data on the bus. 
*/ int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); unsigned int size = mtd->writesize; u8 *read_buf = buf; int ret; if (oob_required) { size += mtd->oobsize; if (buf != chip->data_buf) read_buf = nand_get_data_buf(chip); } ret = nand_read_page_op(chip, page, 0, read_buf, size); if (ret) return ret; if (buf != chip->data_buf) memcpy(buf, read_buf, mtd->writesize); return 0; } EXPORT_SYMBOL(nand_monolithic_read_page_raw); /** * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc * @chip: nand chip info structure * @buf: buffer to store read data * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * We need a special oob layout and handling even when OOB isn't used. */ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); int eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; uint8_t *oob = chip->oob_poi; int steps, size, ret; ret = nand_read_page_op(chip, page, 0, NULL, 0); if (ret) return ret; for (steps = chip->ecc.steps; steps > 0; steps--) { ret = nand_read_data_op(chip, buf, eccsize, false, false); if (ret) return ret; buf += eccsize; if (chip->ecc.prepad) { ret = nand_read_data_op(chip, oob, chip->ecc.prepad, false, false); if (ret) return ret; oob += chip->ecc.prepad; } ret = nand_read_data_op(chip, oob, eccbytes, false, false); if (ret) return ret; oob += eccbytes; if (chip->ecc.postpad) { ret = nand_read_data_op(chip, oob, chip->ecc.postpad, false, false); if (ret) return ret; oob += chip->ecc.postpad; } } size = mtd->oobsize - (oob - chip->oob_poi); if (size) { ret = nand_read_data_op(chip, oob, size, false, false); if (ret) return ret; } return 0; } /** * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function * @chip: nand chip info structure * @buf: buffer to store read data * 
@oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Raw read of main + OOB; correction is done in software below. */
	chip->ecc.read_page_raw(chip, buf, 1, page);

	/* Compute the ECC of what was actually read, one step at a time. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Extract the on-flash ECC bytes from the OOB area. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Compare stored vs computed ECC and fix correctable bitflips. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/**
 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
 * @chip: nand chip info structure
 * @data_offs: offset of requested data within the page
 * @readlen: data length
 * @bufpoi: buffer to store read data
 * @page: page number to read
 */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* ECC bytes are scattered: read the whole OOB area. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Pull only the relevant stored ECC bytes out of the OOB buffer. */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each step of the fragment that was read. */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob layout.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Arm the HW ECC engine before each data chunk, then latch its result. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* The stored ECC lives in the OOB area, read it in one go. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Compare stored vs computed ECC and fix correctable bitflips. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/**
 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
 *                                  data read from OOB area
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, which requires the ECC data to be
 * extracted from the OOB before the actual data is read.
 */
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	/* Move read cursor to start of page */
	ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
	if (ret)
		return ret;

	/* Stored ECC is now available before the data is transferred. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		/* Arm the HW engine, read one chunk, then let HW correct it. */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);

/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Layout per step: data [prepad] ecc [postpad]; HW corrects inline. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome mode before the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}

/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: NAND chip object
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Flat copy starting at the caller-requested OOB offset. */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Only return the free (non-ECC) OOB bytes per the layout. */
		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}

static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
				      u32 readlen, int col)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int first_page, last_page;

	chip->cont_read.ongoing = false;

	if (!chip->controller->supported_op.cont_read)
		return;

	/*
	 * Don't bother making any calculations if the length is too small.
	 * Side effect: avoids possible integer underflows below.
	 */
	if (readlen < (2 * mtd->writesize))
		return;

	/* Derive the page where continuous read should start (the first full page read) */
	first_page = page;
	if (col)
		first_page++;

	/* Derive the page where continuous read should stop (the last full page read) */
	last_page = page + ((col + readlen) / mtd->writesize) - 1;

	/* Configure and enable continuous read when suitable */
	if (first_page < last_page) {
		chip->cont_read.first_page = first_page;
		chip->cont_read.last_page = last_page;
		chip->cont_read.ongoing = true;
		/* May reset the ongoing flag */
		rawnand_cap_cont_reads(chip);
	}
}

/* Drop the first page from the continuous-read window once it was served. */
static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
{
	if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
		return;

	chip->cont_read.first_page++;
	rawnand_cap_cont_reads(chip);
}

/**
 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
 * @chip: NAND chip object
 * @retry_mode: the retry mode to use
 *
 * Some vendors supply a special command to shift the Vt threshold, to be used
 * when there are too many bitflips in a page (i.e., ECC error). After setting
 * a new threshold, the host should retry reading the page.
 */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	if (!chip->ops.setup_read_retry)
		return -EOPNOTSUPP;

	return chip->ops.setup_read_retry(chip, retry_mode);
}

/* Wait for R/B after a read on chips that require it (NAND_NEED_READRDY). */
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_interface_config *conf;

	if (!(chip->options & NAND_NEED_READRDY))
		return;

	conf = nand_get_interface_config(chip);
	WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
}

/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	/* Continuous reads are only usable with ECC-corrected page reads. */
	if (likely(ops->mode != MTD_OPS_RAW))
		rawnand_enable_cont_reads(chip, page, readlen, col);

	while (1) {
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/* Partial pages, or DMA-unfriendly buffers, go via data_buf. */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back the data in the initial buffer when reading
			 * partial pages or when a bounce buffer is required.
			 */
			if (use_bounce_buf) {
				/* Only cache clean, fully-read, ECC'd pages. */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/* New uncorrectable errors? Try the next retry mode. */
			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
								    retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page cache hit: serve the request from data_buf. */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);

			rawnand_cont_read_skip_first_page(chip, page);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	if (WARN_ON_ONCE(chip->cont_read.ongoing))
		chip->cont_read.ongoing = false;

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @chip: nand chip info structure
 * @page: page number to read
 */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);

/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @chip: nand chip info structure
 * @page: page number to read
 */
static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int length = mtd->oobsize;
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = chip->oob_poi;
	int i, toread, sndrnd = 0, pos, ret;

	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		if (sndrnd) {
			/* NOTE(review): this inner ret shadows the outer one. */
			int ret;

			/* Seek to the OOB chunk of step i. */
			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize > 512)
				ret = nand_change_read_column_op(chip, pos,
								 NULL, 0,
								 false);
			else
				ret = nand_read_page_op(chip, page, pos, NULL,
							0);

			if (ret)
				return ret;
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);

		ret = nand_read_data_op(chip, bufpoi, toread, false, false);
		if (ret)
			return ret;

		bufpoi += toread;
		length -= toread;
	}
	if (length > 0) {
		ret = nand_read_data_op(chip, bufpoi, length, false, false);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @chip: nand chip info structure
 * @page: page number to write
 */
int nand_write_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);

/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/* Small pages: pad the skipped data with 0xff. */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Large pages: jump over the data area. */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
*/ static int nand_do_read_oob(struct nand_chip *chip, loff_t from, struct mtd_oob_ops *ops) { struct mtd_info *mtd = nand_to_mtd(chip); unsigned int max_bitflips = 0; int page, realpage, chipnr; struct mtd_ecc_stats stats; int readlen = ops->ooblen; int len; uint8_t *buf = ops->oobbuf; int ret = 0; pr_debug("%s: from = 0x%08Lx, len = %i\n", __func__, (unsigned long long)from, readlen); /* Check if the region is secured */ if (nand_region_is_secured(chip, from, readlen)) return -EIO; stats = mtd->ecc_stats; len = mtd_oobavail(mtd, ops); chipnr = (int)(from >> chip->chip_shift); nand_select_target(chip, chipnr); /* Shift to get page */ realpage = (int)(from >> chip->page_shift); page = realpage & chip->pagemask; while (1) { if (ops->mode == MTD_OPS_RAW) ret = chip->ecc.read_oob_raw(chip, page); else ret = chip->ecc.read_oob(chip, page); if (ret < 0) break; len = min(len, readlen); buf = nand_transfer_oob(chip, buf, ops, len); nand_wait_readrdy(chip); max_bitflips = max_t(unsigned int, max_bitflips, ret); readlen -= len; if (!readlen) break; /* Increment page address */ realpage++; page = realpage & chip->pagemask; /* Check, if we cross a chip boundary */ if (!page) { chipnr++; nand_deselect_target(chip); nand_select_target(chip, chipnr); } } nand_deselect_target(chip); ops->oobretlen = ops->ooblen - readlen; if (ret < 0) return ret; if (mtd->ecc_stats.failed - stats.failed) return -EBADMSG; return max_bitflips; } /** * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band * @mtd: MTD device structure * @from: offset to read from * @ops: oob operation description structure * * NAND read data and/or out-of-band data. 
 */
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_ecc_stats old_stats;
	int ret;

	ops->retlen = 0;

	if (ops->mode != MTD_OPS_PLACE_OOB &&
	    ops->mode != MTD_OPS_AUTO_OOB &&
	    ops->mode != MTD_OPS_RAW)
		return -ENOTSUPP;

	nand_get_device(chip);

	old_stats = mtd->ecc_stats;

	/* OOB-only request vs data (+ optional OOB) request. */
	if (!ops->datbuf)
		ret = nand_do_read_oob(chip, from, ops);
	else
		ret = nand_do_read_ops(chip, from, ops);

	/* Report per-request deltas to the caller-provided stats struct. */
	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	nand_release_device(chip);
	return ret;
}

/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}

/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Latch the main data first, then append OOB before programming. */
	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);

/**
 * nand_monolithic_write_page_raw - Monolithic page write in raw mode
 * @chip: NAND chip info structure
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * This is a raw page write, ie.
without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * eventually OOB) to be sent over the bus and effectively programmed
 * into the NAND chip arrays in a single operation. This is an
 * alternative to nand_write_page_raw(), which first sends the main
 * data, then eventually send the OOB data by latching more data
 * cycles on the NAND bus, and finally sends the program command to
 * synchronize the NAND chip cache.
 */
int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *write_buf = (u8 *)buf;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Main + OOB must be contiguous: stage the caller's data in
		 * the chip's internal buffer (OOB already lives there).
		 */
		if (buf != chip->data_buf) {
			write_buf = nand_get_data_buf(chip);
			memcpy(write_buf, buf, mtd->writesize);
		}
	}

	return nand_prog_page_op(chip, page, 0, write_buf, size);
}
EXPORT_SYMBOL(nand_monolithic_write_page_raw);

/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Interleaved layout: data [prepad] ecc [postpad], per ECC step. */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write whatever free OOB bytes remain after the last chunk. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Place the computed ECC bytes into the OOB buffer per the layout. */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Delegate the actual programming to the raw page writer. */
	return chip->ecc.write_page_raw(chip, buf, 1, page);
}

/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 *
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine before the data flows past it */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		/* Read back the ECC the engine computed for this step */
		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Scatter the ECC bytes into their OOB layout positions */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	/* First and last ECC step actually covered by [offset, offset+len) */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this includes the masked value (0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Per step: <data><prepad><ecc (engine-computed)><postpad> */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Fetch the syndrome the hw generator produced for this step */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page - write one page
 * @chip: NAND chip descriptor
 * @offset: address offset within the page
 * @data_len: length of actual data to be written
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @raw: use _raw version of write_page
 */
static int nand_write_page(struct nand_chip *chip, uint32_t offset,
			   int data_len, const uint8_t *buf, int oob_required,
			   int page, int raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status, subpage;

	/* A partial-page write needs the subpage path, when available */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
	    chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);
	else
		subpage = 0;

	if (unlikely(raw))
		status = chip->ecc.write_page_raw(chip, buf, oob_required,
						  page);
	else if (subpage)
		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
						 oob_required, page);
	else
		status = chip->ecc.write_page(chip, buf, oob_required, page);

	if (status < 0)
		return status;

	return 0;
}

#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, writelen))
		return -EIO;

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Partial-page writes always go through the bounce buffer
		 * (the rest of the page must be padded with 0xff); DMA-based
		 * controllers additionally need it for unmapped or misaligned
		 * user buffers.
		 */
		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}

/**
 * panic_nand_write - [MTD Interface] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write with ECC. Used when performing writes in interrupt context, this
 * may for example be called by mtdoops when writing an oops while in panic.
 */
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const uint8_t *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(to >> chip->chip_shift);
	struct mtd_oob_ops ops;
	int ret;

	/* No locking here: we are in panic context, select directly */
	nand_select_target(chip, chipnr);

	/* Wait for the device to get ready */
	panic_nand_wait(chip, 400);

	memset(&ops, 0, sizeof(ops));
	ops.len = len;
	ops.datbuf = (uint8_t *)buf;
	ops.mode = MTD_OPS_PLACE_OOB;

	ret = nand_do_write_ops(chip, to, &ops);

	*retlen = ops.retlen;
	return ret;
}

/**
 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 */
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	ops->retlen = 0;

	nand_get_device(chip);

	/* Only the three supported OOB modes may proceed */
	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;

	default:
		goto out;
	}

	/* OOB-only write when no data buffer was supplied */
	if (!ops->datbuf)
		ret = nand_do_write_oob(chip, to, ops);
	else
		ret = nand_do_write_ops(chip, to, ops);

out:
	nand_release_device(chip);
	return ret;
}

/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}

/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, instr->addr, instr->len))
		return -EIO;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n", __func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		loff_t ofs = (loff_t)page << chip->page_shift;

		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
				    __func__, (unsigned long long)ofs);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->fail_addr = ofs;
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}

/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);
	/* Release it and go back */
	nand_release_device(chip);
}

/**
 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(offs >> chip->chip_shift);
	int ret;

	/* Select the NAND device */
	nand_get_device(chip);

	nand_select_target(chip, chipnr);

	ret = nand_block_checkbad(chip, offs, 0);

	nand_deselect_target(chip);
	nand_release_device(chip);

	return ret;
}

/**
 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 */
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int ret;

	ret = nand_block_isbad(mtd, ofs);
	if (ret) {
		/* If it was bad already, return success and do nothing */
		if
		(ret > 0)
			return 0;
		return ret;
	}

	return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
}

/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_suspend(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	mutex_lock(&chip->lock);
	if (chip->ops.suspend)
		ret = chip->ops.suspend(chip);
	/* Only mark suspended when the vendor hook (if any) succeeded */
	if (!ret)
		chip->suspended = 1;
	mutex_unlock(&chip->lock);

	return ret;
}

/**
 * nand_resume - [MTD Interface] Resume the NAND flash
 * @mtd: MTD device structure
 */
static void nand_resume(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	mutex_lock(&chip->lock);
	if (chip->suspended) {
		if (chip->ops.resume)
			chip->ops.resume(chip);
		chip->suspended = 0;
	} else {
		pr_err("%s called for a chip which is not in suspended state\n",
			__func__);
	}
	mutex_unlock(&chip->lock);

	/* Unblock anyone who waited for the chip to come back */
	wake_up_all(&chip->resume_wq);
}

/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_suspend(mtd);
}

/**
 * nand_lock - [MTD Interface] Lock the NAND flash
 * @mtd: MTD device structure
 * @ofs: offset byte address
 * @len: number of bytes to lock (must be a multiple of block/page size)
 */
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->ops.lock_area)
		return -ENOTSUPP;

	return chip->ops.lock_area(chip, ofs, len);
}

/**
 * nand_unlock - [MTD Interface] Unlock the NAND flash
 * @mtd: MTD device structure
 * @ofs: offset byte address
 * @len: number of bytes to unlock (must be a multiple of block/page size)
 */
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->ops.unlock_area)
		return -ENOTSUPP;

	return chip->ops.unlock_area(chip, ofs, len);
}

/* Set default functions */
static void
nand_set_defaults(struct nand_chip *chip) { /* If no controller is provided, use the dummy, legacy one. */ if (!chip->controller) { chip->controller = &chip->legacy.dummy_controller; nand_controller_init(chip->controller); } nand_legacy_set_defaults(chip); if (!chip->buf_align) chip->buf_align = 1; } /* Sanitize ONFI strings so we can safely print them */ void sanitize_string(uint8_t *s, size_t len) { ssize_t i; /* Null terminate */ s[len - 1] = 0; /* Remove non printable chars */ for (i = 0; i < len - 1; i++) { if (s[i] < ' ' || s[i] > 127) s[i] = '?'; } /* Remove trailing spaces */ strim(s); } /* * nand_id_has_period - Check if an ID string has a given wraparound period * @id_data: the ID string * @arrlen: the length of the @id_data array * @period: the period of repitition * * Check if an ID string is repeated within a given sequence of bytes at * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a * period of 3). This is a helper function for nand_id_len(). Returns non-zero * if the repetition has a period of @period; otherwise, returns zero. */ static int nand_id_has_period(u8 *id_data, int arrlen, int period) { int i, j; for (i = 0; i < period; i++) for (j = i + period; j < arrlen; j += period) if (id_data[i] != id_data[j]) return 0; return 1; } /* * nand_id_len - Get the length of an ID string returned by CMD_READID * @id_data: the ID string * @arrlen: the length of the @id_data array * Returns the length of the ID string, according to known wraparound/trailing * zero patterns. If no pattern exists, returns the length of the array. 
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int last_nonzero, period;

	/* Find last non-zero byte */
	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
		if (id_data[last_nonzero])
			break;

	/* All zeros */
	if (last_nonzero < 0)
		return 0;

	/* Calculate wraparound period */
	for (period = 1; period < arrlen; period++)
		if (nand_id_has_period(id_data, arrlen, period))
			break;

	/* There's a repeated pattern */
	if (period < arrlen)
		return period;

	/* There are trailing zeros */
	if (last_nonzero < arrlen - 1)
		return last_nonzero + 1;

	/* No pattern detected */
	return arrlen;
}

/* Extract the bits of per cell from the 3rd byte of the extended ID */
static int nand_get_bits_per_cell(u8 cellinfo)
{
	int bits;

	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
	bits >>= NAND_CI_CELLTYPE_SHIFT;
	/* Field encodes (bits per cell - 1): 0 -> SLC, 1 -> MLC, ... */
	return bits + 1;
}

/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize.
	 * Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);

/*
 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
 * decodes a matching ID table entry and assigns the MTD size parameters for
 * the chip.
 */
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
	mtd->erasesize = type->erasesize;
	memorg->pagesize = type->pagesize;
	mtd->writesize = memorg->pagesize;
	/* Legacy small-page parts carry 1/32 of the page size as OOB */
	memorg->oobsize = memorg->pagesize / 32;
	mtd->oobsize = memorg->oobsize;

	/* All legacy ID NAND are small-page, SLC */
	memorg->bits_per_cell = 1;
}

/*
 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
 * heuristic patterns using various detected parameters (e.g., manufacturer,
 * page size, cell-type information).
 */
static void nand_decode_bbm_options(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Set the bad block position */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_BBM_POS_LARGE;
	else
		chip->badblockpos = NAND_BBM_POS_SMALL;
}

/* A non-zero id_len marks a "full-id" table entry (match on raw ID bytes) */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}

static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		/* Full match: take geometry straight from the table entry */
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;

		/* ECC requirements also come from the table */
		requirements.strength = NAND_ECC_STRENGTH(type);
		requirements.step_size = NAND_ECC_STEP(type);
		nanddev_set_ecc_requirements(base, &requirements);

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}

	return false;
}

/*
 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
 * compliant and does not have a full-id or legacy-id entry in the nand_ids
 * table.
 */
static void nand_manufacturer_detect(struct nand_chip *chip)
{
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		struct nand_memory_organization *memorg;

		memorg = nanddev_get_memorg(&chip->base);

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
	} else {
		nand_decode_ext_id(chip);
	}
}

/*
 * Manufacturer initialization. This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific initialization code in
 * their ->init() hook.
 */
static int nand_manufacturer_init(struct nand_chip *chip)
{
	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
	    !chip->manufacturer.desc->ops->init)
		return 0;

	return chip->manufacturer.desc->ops->init(chip);
}

/*
 * Manufacturer cleanup. This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific cleanup code in their
 * ->cleanup() hook.
 */
static void nand_manufacturer_cleanup(struct nand_chip *chip)
{
	/* Release manufacturer private data */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->cleanup)
		chip->manufacturer.desc->ops->cleanup(chip);
}

/* Printable manufacturer name, never NULL */
static const char *
nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
{
	return manufacturer_desc ?
		manufacturer_desc->name : "Unknown";
}

/* Probe whether the controller can do data-only (no-command) reads */
static void rawnand_check_data_only_read_support(struct nand_chip *chip)
{
	/* Use an arbitrary size for the check */
	if (!nand_read_data_op(chip, NULL, SZ_512, true, true))
		chip->controller->supported_op.data_only_read = 1;
}

static void rawnand_early_check_supported_ops(struct nand_chip *chip)
{
	/* The supported_op fields should not be set by individual drivers */
	WARN_ON_ONCE(chip->controller->supported_op.data_only_read);

	if (!nand_has_exec_op(chip))
		return;

	rawnand_check_data_only_read_support(chip);
}

/* Probe whether sequential cached (continuous) page reads work */
static void rawnand_check_cont_read_support(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (!chip->parameters.supports_read_cache)
		return;

	/* Read-retry and cached reads do not mix */
	if (chip->read_retries)
		return;

	if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL,
					    mtd->writesize, true))
		chip->controller->supported_op.cont_read = 1;
}

static void rawnand_late_check_supported_ops(struct nand_chip *chip)
{
	/* The supported_op fields should not be set by individual drivers */
	WARN_ON_ONCE(chip->controller->supported_op.cont_read);

	/*
	 * Too many devices do not support sequential cached reads with on-die
	 * ECC correction enabled, so in this case refuse to perform the
	 * automation.
	 */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
		return;

	if (!nand_has_exec_op(chip))
		return;

	/*
	 * For now, continuous reads can only be used with the core page helpers.
	 * This can be extended later.
	 */
	if (!(chip->ecc.read_page == nand_read_page_hwecc ||
	      chip->ecc.read_page == nand_read_page_syndrome ||
	      chip->ecc.read_page == nand_read_page_swecc))
		return;

	rawnand_check_cont_read_support(chip);
}

/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	rawnand_early_check_supported_ops(chip);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as on some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* Walk the table: full-id entries match on raw ID bytes, others on dev_id */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ?
			16 : 8);
		ret = -EINVAL;
		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	/* Devices bigger than 2^(16 + page_shift) need a 3rd row address byte */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ?
		"SLC" : "MLC", mtd->erasesize >> 10, mtd->writesize,
		mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}

/* Map the legacy "nand-ecc-mode" DT string to an ECC engine type */
static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
{
	enum nand_ecc_legacy_mode {
		NAND_ECC_INVALID,
		NAND_ECC_NONE,
		NAND_ECC_SOFT,
		NAND_ECC_SOFT_BCH,
		NAND_ECC_HW,
		NAND_ECC_HW_SYNDROME,
		NAND_ECC_ON_DIE,
	};
	const char * const nand_ecc_legacy_modes[] = {
		[NAND_ECC_NONE]		= "none",
		[NAND_ECC_SOFT]		= "soft",
		[NAND_ECC_SOFT_BCH]	= "soft_bch",
		[NAND_ECC_HW]		= "hw",
		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
		[NAND_ECC_ON_DIE]	= "on-die",
	};
	enum nand_ecc_legacy_mode eng_type;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err)
		return NAND_ECC_ENGINE_TYPE_INVALID;

	for (eng_type = NAND_ECC_NONE;
	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
			switch (eng_type) {
			case NAND_ECC_NONE:
				return NAND_ECC_ENGINE_TYPE_NONE;

			case NAND_ECC_SOFT:
			case NAND_ECC_SOFT_BCH:
				return NAND_ECC_ENGINE_TYPE_SOFT;

			case NAND_ECC_HW:
			case NAND_ECC_HW_SYNDROME:
				return NAND_ECC_ENGINE_TYPE_ON_HOST;

			case NAND_ECC_ON_DIE:
				return NAND_ECC_ENGINE_TYPE_ON_DIE;

			default:
				break;
			}
		}
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}

/* Legacy "hw_syndrome" implies interleaved ECC placement */
static enum nand_ecc_placement
of_get_rawnand_ecc_placement_legacy(struct device_node *np)
{
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (!err) {
		if (!strcasecmp(pm, "hw_syndrome"))
			return NAND_ECC_PLACEMENT_INTERLEAVED;
	}

	return NAND_ECC_PLACEMENT_UNKNOWN;
}

/* Legacy "soft"/"soft_bch" strings also encode the software ECC algorithm */
static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
{
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (!err) {
		if (!strcasecmp(pm, "soft"))
			return NAND_ECC_ALGO_HAMMING;
		else if (!strcasecmp(pm, "soft_bch"))
			return NAND_ECC_ALGO_BCH;
	}

	return NAND_ECC_ALGO_UNKNOWN;
}

static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
	struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;

	if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);

	if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
		user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);

	if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
		user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
}

/* Parse the optional "nand-bus-width" DT property (only 8 or 16 is valid). */
static int of_get_nand_bus_width(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	u32 val;
	int ret;

	ret = of_property_read_u32(dn, "nand-bus-width", &val);
	if (ret == -EINVAL)
		/* Buswidth defaults to 8 if the property does not exist. */
		return 0;
	else if (ret)
		return ret;

	if (val == 16)
		chip->options |= NAND_BUSWIDTH_16;
	else if (val != 8)
		return -EINVAL;

	return 0;
}

/*
 * Parse the optional "secure-regions" DT property: pairs of u64 cells
 * (offset, size) describing flash areas the driver must not access at
 * runtime. Allocates chip->secure_regions (freed in nand_cleanup() and on
 * the nand_scan_tail() error path).
 */
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct property *prop;
	int nr_elem, i, j;

	/* Only proceed if the "secure-regions" property is present in DT */
	prop = of_find_property(dn, "secure-regions", NULL);
	if (!prop)
		return 0;

	nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
	if (nr_elem <= 0)
		return nr_elem;

	/* Each region is described by two u64 values: offset, then size */
	chip->nr_secure_regions = nr_elem / 2;
	chip->secure_regions = kcalloc(chip->nr_secure_regions,
				       sizeof(*chip->secure_regions),
				       GFP_KERNEL);
	if (!chip->secure_regions)
		return -ENOMEM;

	for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
		of_property_read_u64_index(dn, "secure-regions", j,
					   &chip->secure_regions[i].offset);
		of_property_read_u64_index(dn, "secure-regions", j + 1,
					   &chip->secure_regions[i].size);
	}

	return 0;
}

/**
 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
 * @dev: Device that will be parsed. Also used for managed allocations.
 * @cs_array: Array of GPIO desc pointers allocated on success
 * @ncs_array: Number of entries in @cs_array updated on success.
* @return 0 on success, an error otherwise.
 */
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
			     unsigned int *ncs_array)
{
	struct gpio_desc **descs;
	int ndescs, i;

	ndescs = gpiod_count(dev, "cs");
	if (ndescs < 0) {
		/* Not an error: a controller may have no CS GPIOs at all */
		dev_dbg(dev, "No valid cs-gpios property\n");
		return 0;
	}

	descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < ndescs; i++) {
		descs[i] = gpiod_get_index_optional(dev, "cs", i,
						    GPIOD_OUT_HIGH);
		if (IS_ERR(descs[i]))
			return PTR_ERR(descs[i]);
	}

	*ncs_array = ndescs;
	*cs_array = descs;

	return 0;
}
EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);

/*
 * Parse the generic NAND DT properties attached to this chip and derive the
 * initial ECC engine configuration from the user/DT preferences.
 */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);
	int ret;

	if (!dn)
		return 0;

	ret = of_get_nand_bus_width(chip);
	if (ret)
		return ret;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_property_read_bool(dn, "nand-on-flash-bbt"))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
*/
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}

/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident().
*/
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}

/* Free the allocations done during the identification phase. */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}

/*
 * Bind the software Hamming ECC engine to @chip and mirror the resulting
 * configuration into the raw NAND ECC fields.
 */
int rawnand_sw_hamming_init(struct nand_chip *chip)
{
	struct nand_ecc_sw_hamming_conf *engine_conf;
	struct nand_device *base = &chip->base;
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
	base->ecc.user_conf.strength = chip->ecc.strength;
	base->ecc.user_conf.step_size = chip->ecc.size;

	ret = nand_ecc_sw_hamming_init_ctx(base);
	if (ret)
		return ret;

	engine_conf = base->ecc.ctx.priv;

	if
(chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
		engine_conf->sm_order = true;

	/* Mirror the effective configuration back into the raw NAND layer */
	chip->ecc.size = base->ecc.ctx.conf.step_size;
	chip->ecc.strength = base->ecc.ctx.conf.strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_hamming_init);

/* Compute the software Hamming ECC bytes for @buf into @code. */
int rawnand_sw_hamming_calculate(struct nand_chip *chip,
				 const unsigned char *buf,
				 unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_calculate(base, buf, code);
}
EXPORT_SYMBOL(rawnand_sw_hamming_calculate);

/* Correct @buf using the read and freshly calculated Hamming ECC bytes. */
int rawnand_sw_hamming_correct(struct nand_chip *chip,
			       unsigned char *buf,
			       unsigned char *read_ecc,
			       unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_hamming_correct);

/* Tear down the software Hamming ECC context bound to @chip. */
void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;

	nand_ecc_sw_hamming_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);

/*
 * Bind the software BCH ECC engine to @chip and mirror the resulting
 * configuration into the raw NAND ECC fields.
 */
int rawnand_sw_bch_init(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;
	const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
	base->ecc.user_conf.step_size = chip->ecc.size;
	base->ecc.user_conf.strength = chip->ecc.strength;

	ret = nand_ecc_sw_bch_init_ctx(base);
	if (ret)
		return ret;

	/* Mirror the effective configuration back into the raw NAND layer */
	chip->ecc.size = ecc_conf->step_size;
	chip->ecc.strength = ecc_conf->strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_bch_init);

/* Compute the software BCH ECC bytes for @buf into @code. */
static int rawnand_sw_bch_calculate(struct nand_chip *chip,
				    const unsigned char *buf,
				    unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return
nand_ecc_sw_bch_calculate(base, buf, code);
}

/* Correct @buf using the read and freshly calculated BCH ECC bytes. */
int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
			   unsigned char *read_ecc, unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_bch_correct);

/* Tear down the software BCH ECC context bound to @chip. */
void rawnand_sw_bch_cleanup(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;

	nand_ecc_sw_bch_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_bch_cleanup);

/*
 * Fill in the default page/OOB accessors for an on-host (controller side)
 * ECC engine, depending on the requested ECC placement.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;
	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The driver must supply either the low-level ECC hooks
		 * (hwctl/calculate/correct) or complete custom page accessors.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function?
*/
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}

/*
 * Select and initialize one of the software ECC engines (Hamming or BCH)
 * and install the matching default page/OOB accessors.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = rawnand_sw_hamming_calculate;
		ecc->correct = rawnand_sw_hamming_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		ret = rawnand_sw_hamming_init(chip);
		if (ret) {
			WARN(1, "Hamming ECC initialization failed!\n");
			return ret;
		}

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = rawnand_sw_bch_calculate;
		ecc->correct = rawnand_sw_bch_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw
= nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
		    mtd->ooblayout != nand_get_large_page_ooblayout())
			nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;

		ret = rawnand_sw_bch_init(chip);
		if (ret) {
			WARN(1, "BCH ECC initialization failed!\n");
			return ret;
		}

		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}

/**
 * nand_check_ecc_caps - check the sanity of preset ECC settings
 * @chip: nand chip info structure
 * @caps: ECC caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * When ECC step size and strength are already set, check if they are supported
 * by the controller and the calculated ECC bytes fit within the chip's OOB.
 * On success, the calculated ECC bytes is set.
 */
static int
nand_check_ecc_caps(struct nand_chip *chip,
		    const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int preset_step = chip->ecc.size;
	int preset_strength = chip->ecc.strength;
	int ecc_bytes, nsteps = mtd->writesize / preset_step;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];

		if (stepinfo->stepsize != preset_step)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			if (stepinfo->strengths[j] != preset_strength)
				continue;

			ecc_bytes = caps->calc_ecc_bytes(preset_step,
							 preset_strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				return ecc_bytes;

			if (ecc_bytes * nsteps > oobavail) {
				/* NOTE(review): message lacks a trailing \n */
				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
				       preset_step, preset_strength);
				return -ENOSPC;
			}

			chip->ecc.bytes = ecc_bytes;

			return 0;
		}
	}

	/* NOTE(review): message lacks a trailing \n */
	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
	       preset_step, preset_strength);

	return -ENOTSUPP;
}

/**
 * nand_match_ecc_req -
meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
static int nand_match_ecc_req(struct nand_chip *chip,
			      const struct nand_ecc_caps *caps, int oobavail)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = requirements->step_size;
	int req_strength = requirements->strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulting reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
*/
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	/* INT_MAX sentinel untouched: no candidate matched */
	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int nand_maximize_ecc(struct nand_chip *chip,
			     const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength = 0, best_ecc_bytes = 0;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			if (ecc_bytes * nsteps > oobavail)
				continue;

			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
*/
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the ECC configuration according to following logic.
 *
 * 1. If both ECC step size and ECC strength are already set (usually by DT)
 *    then check if it is supported by this controller.
 * 2. If the user provided the nand-ecc-maximize property, then select maximum
 *    ECC strength.
 * 3. Otherwise, try to match the ECC step size and ECC strength closest
 *    to the chip's requirement. If available OOB size can't fit the chip
 *    requirement then fallback to the maximum ECC step size and ECC strength.
 *
 * On success, the chosen ECC settings are set.
*/
int nand_ecc_choose_conf(struct nand_chip *chip,
			 const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);

	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
		return -EINVAL;

	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, caps, oobavail);

	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
		return nand_maximize_ecc(chip, caps, oobavail);

	if (!nand_match_ecc_req(chip, caps, oobavail))
		return 0;

	return nand_maximize_ecc(chip, caps, oobavail);
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);

/* Erase the eraseblock described by @pos (generic NAND ops hook). */
static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	unsigned int eb = nanddev_pos_to_row(nand, pos);
	int ret;

	/* Convert the row address into an eraseblock index */
	eb >>= nand->rowconv.eraseblock_addr_shift;

	nand_select_target(chip, pos->target);
	ret = nand_erase_op(chip, eb);
	nand_deselect_target(chip);

	return ret;
}

/* Mark the block at @pos bad (generic NAND ops hook). */
static int rawnand_markbad(struct nand_device *nand,
			   const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);

	return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
}

/* Check whether the block at @pos is bad (generic NAND ops hook). */
static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	int ret;

	nand_select_target(chip, pos->target);
	ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
	nand_deselect_target(chip);

	return ret;
}

static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};

/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
*/
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *base = &chip->base;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the NAND
	 * chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_BCH) &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_HAMMING)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  nand_get_large_page_hamming_ooblayout());
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
*/
			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = nand_set_ecc_on_host_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		ecc->algo = NAND_ECC_ALGO_HAMMING;
		fallthrough;

	case NAND_ECC_ENGINE_TYPE_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;
		break;

	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_ENGINE_TYPE_NONE:
		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* ECC buffers are only needed when the engine computes/corrects ECC */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* Propagate ECC info to the generic NAND and MTD layers */
	mtd->ecc_strength = ecc->strength;
	if (!base->ecc.ctx.conf.strength)
		base->ecc.ctx.conf.strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;
	if (!base->ecc.ctx.conf.step_size)
		base->ecc.ctx.conf.step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	if (!ecc->steps)
		ecc->steps = mtd->writesize / ecc->size;
	if (!base->ecc.ctx.nsteps)
		base->ecc.ctx.nsteps = ecc->steps;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (!ecc->total) {
		ecc->total = ecc->steps * ecc->bytes;
		chip->base.ecc.ctx.total = ecc->total;
	}

	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
*/
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;
	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_is_strong_enough(&chip->base))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
*/
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Find the fastest data interface for this chip */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	rawnand_late_check_supported_ops(chip);

	/*
	 * Look for secure regions in the NAND chip. These regions are supposed
	 * to be protected by a secure element like Trustzone. So the read/write
	 * accesses to these regions will be blocked in the runtime by this
	 * driver.
	 */
	ret = of_get_nand_secure_regions(chip);
	if (ret)
		goto err_free_interface_config;

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_secure_regions;

	return 0;

err_free_secure_regions:
	kfree(chip->secure_regions);

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}

/* Give the controller driver a chance to tie its data to the chip. */
static int nand_attach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->attach_chip)
		return chip->controller->ops->attach_chip(chip);

	return 0;
}

/* Counterpart of nand_attach(): release controller-specific chip data. */
static void nand_detach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->detach_chip)
		chip->controller->ops->detach_chip(chip);
}

/**
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for.
 * @ids: optional flash IDs table
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
*/
int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
		       struct nand_flash_dev *ids)
{
	int ret;

	if (!maxchips)
		return -EINVAL;

	ret = nand_scan_ident(chip, maxchips, ids);
	if (ret)
		return ret;

	ret = nand_attach(chip);
	if (ret)
		goto cleanup_ident;

	ret = nand_scan_tail(chip);
	if (ret)
		goto detach_chip;

	return 0;

detach_chip:
	nand_detach(chip);
cleanup_ident:
	nand_scan_ident_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);

/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
	/* Release the software ECC engine context, if one was bound */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
		if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
			rawnand_sw_hamming_cleanup(chip);
		else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			rawnand_sw_bch_cleanup(chip);
	}

	nanddev_cleanup(&chip->base);

	/* Free secure regions data */
	kfree(chip->secure_regions);

	/* Free bad block table memory */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free the data interface */
	kfree(chip->best_interface_config);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <[email protected]>");
MODULE_AUTHOR("Thomas Gleixner <[email protected]>");
MODULE_DESCRIPTION("Generic NAND flash driver code");
// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"
#include "tailcall_bpf2bpf_hierarchy2.skel.h"
#include "tailcall_bpf2bpf_hierarchy3.skel.h"
#include "tailcall_freplace.skel.h"
#include "tc_bpf2bpf.skel.h"
#include "tailcall_fail.skel.h"

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* Populate every jmp_table slot with its classifier_%d program */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Delete slots one by one; each run should hit the next classifier */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* All slots empty: test expects the fall-through retval 3 */
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* Re-populate all slots (nop->jmp rewrites again) */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* Install programs in reverse order (jmp->jmp rewrites) */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	/* Deleting an already-empty slot must fail with ENOENT (nop->nop) */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
*/ static void test_tailcall_2(void) { int err, map_fd, prog_fd, main_fd, i; struct bpf_map *prog_array; struct bpf_program *prog; struct bpf_object *obj; char prog_name[32]; char buff[128] = {}; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = buff, .data_size_in = sizeof(buff), .repeat = 1, ); err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; prog = bpf_object__find_program_by_name(obj, "entry"); if (CHECK_FAIL(!prog)) goto out; main_fd = bpf_program__fd(prog); if (CHECK_FAIL(main_fd < 0)) goto out; prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); if (CHECK_FAIL(!prog_array)) goto out; map_fd = bpf_map__fd(prog_array); if (CHECK_FAIL(map_fd < 0)) goto out; for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); if (CHECK_FAIL(!prog)) goto out; prog_fd = bpf_program__fd(prog); if (CHECK_FAIL(prog_fd < 0)) goto out; err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); if (CHECK_FAIL(err)) goto out; } err = bpf_prog_test_run_opts(main_fd, &topts); ASSERT_OK(err, "tailcall"); ASSERT_EQ(topts.retval, 2, "tailcall retval"); i = 2; err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; err = bpf_prog_test_run_opts(main_fd, &topts); ASSERT_OK(err, "tailcall"); ASSERT_EQ(topts.retval, 1, "tailcall retval"); i = 0; err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; err = bpf_prog_test_run_opts(main_fd, &topts); ASSERT_OK(err, "tailcall"); ASSERT_EQ(topts.retval, 3, "tailcall retval"); out: bpf_object__close(obj); } static void test_tailcall_count(const char *which, bool test_fentry, bool test_fexit) { struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL; struct bpf_link *fentry_link = NULL, *fexit_link = NULL; int err, map_fd, prog_fd, main_fd, data_fd, i, val; struct bpf_map *prog_array, *data_map; struct bpf_program *prog; 
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	/* classifier_0 tail-calls into jmp_table slot 0 (itself) */
	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* Counter lives in the object's internal .bss map ("tailcall.bss") */
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	/* With the slot cleared, the entry program returns 0 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	/* All destroy/close helpers tolerate NULL, so unconditional cleanup */
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects indirect jump
 * for the case where the key is not known. Latter is passed via global
 * data to select different targets we can compare return value of.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Select the tail call key at runtime via global data (bss slot 0) */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	/* Same, but with the selected slot deleted: expect fall-through 3 */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates
 * an indirect jump when the keys are const but different from different branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	/* Branch selector: sentinel values steer to per-branch const keys */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 purpose is to make sure that tailcalls are working
 * correctly in correlation with BPF subprograms
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that subprog can access ctx and entry prog that
	 * called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	/* classifier_0 in slot 0: self-recursive tail call via subprog */
	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	/* Limit counter must be exactly 33 */
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have the tailcalls
 * in them
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval,
		  sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
 * across tailcalls combined with bpf2bpf calls. for making sure that tailcall
 * counter behaves correctly, bpf program will go through following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through first two tailcalls and start counting from the subprog2 where
 * the loop begins. At the end of the test make sure that the global counter is
 * equal to 31, because tailcall counter includes the first two tailcalls
 * whereas global counter is incremented only on loop presented on flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force verifier to patch instructions. This allows us to ensure jump
 * logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	/* Reset global state: noise flag and loop counter in bss */
	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	/* See flow comment above: loop counter must end at 31 */
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on stack which is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	/* Verify the program's "done" flag in bss was set to 1 */
	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	/* prog_fd is reassigned here: from entry's fd to classifier_0's fd */
	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	/* Trace classifier_0 itself (the bpf2bpf caller), not subprog_tail */
	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	/* Run classifier_0 directly (its fd is in prog_fd at this point) */
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	/* fentry on the caller fires once per test run */
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

#define JMP_TABLE "/sys/fs/bpf/jmp_table"

/* Stop flag for the poke_update() thread.
 * NOTE(review): plain int read/written concurrently by two threads without
 * synchronization; presumably fine for a test's stop flag, but an atomic
 * would be cleaner — TODO confirm intent.
 */
static int poke_thread_exit;

/* Toggler thread: repeatedly swaps jmp_table slot 0 between call1 and call2
 * until poke_thread_exit is set.
 */
static void *poke_update(void *arg)
{
	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
	struct tailcall_poke *call = arg;

	map_fd = bpf_map__fd(call->maps.jmp_table);
	prog1_fd = bpf_program__fd(call->progs.call1);
	prog2_fd = bpf_program__fd(call->progs.call2);

	while (!poke_thread_exit) {
		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
	}

	return NULL;
}

/*
 * We are trying to hit prog array update during another program load
 * that shares the same prog array map.
 *
 * For that we share the jmp_table map between two skeleton instances
 * by pinning the jmp_table to same path. Then first skeleton instance
 * periodically updates jmp_table in 'poke update' thread while we load
 * the second skeleton instance in the main thread.
 */
static void test_tailcall_poke(void)
{
	struct tailcall_poke *call, *test;
	int err, cnt = 10;
	pthread_t thread;

	/* Make sure no stale pin from a previous run is left behind */
	unlink(JMP_TABLE);

	call = tailcall_poke__open_and_load();
	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
		return;

	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
	if (!ASSERT_OK(err, "bpf_map__pin"))
		goto out;

	err = pthread_create(&thread, NULL, poke_update, call);
	if (!ASSERT_OK(err, "new toggler"))
		goto out;

	/* Repeatedly load a second skeleton sharing the pinned jmp_table
	 * while the toggler thread keeps poking it.
	 */
	while (cnt--) {
		test = tailcall_poke__open();
		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
			break;

		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
		if (!ASSERT_OK(err, "bpf_map__pin")) {
			tailcall_poke__destroy(test);
			break;
		}

		bpf_program__set_autoload(test->progs.test, true);
		bpf_program__set_autoload(test->progs.call1, false);
		bpf_program__set_autoload(test->progs.call2, false);

		err = tailcall_poke__load(test);
		tailcall_poke__destroy(test);
		if (!ASSERT_OK(err, "tailcall_poke__load"))
			break;
	}

	poke_thread_exit = 1;
	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");

out:
	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
	tailcall_poke__destroy(call);
}

/* Shared helper for the tail call hierarchy tests: loads @which, optionally
 * traces either the entry program (test_fentry_entry) or its subprog_tail
 * (test_fentry/test_fexit), runs it and checks the tail call counters.
 */
static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
					  bool test_fexit,
					  bool test_fentry_entry)
{
	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	struct bpf_program *prog, *fentry_prog;
	struct bpf_map *prog_array, *data_map;
	int fentry_prog_fd;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (!ASSERT_OK(err, "load obj"))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (!ASSERT_OK_PTR(prog, "find entry prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
		goto out;

	if (test_fentry_entry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		fentry_prog = bpf_object__find_program_by_name(fentry_obj,
							       "fentry");
		/* NOTE(review): asserts on 'prog' but the lookup result was
		 * stored in 'fentry_prog' — a NULL fentry_prog would slip
		 * through; presumably this should check 'fentry_prog'.
		 */
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(fentry_prog, prog_fd,
						     "entry");
		if (!ASSERT_OK(err, "set_attach_target entry"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(fentry_prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;

		fentry_prog_fd = bpf_program__fd(fentry_prog);
		if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
			goto out;

		prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		/* The fentry program becomes the tail call target */
		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;

	} else {
		prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		/* The entry program tail-calls itself via slot 0 */
		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;
	}

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	main_data_fd = bpf_map__fd(data_map);
	if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		fentry_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fentry_data_fd, 0,
			       "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 68, "fentry count");
	}

	if (test_fexit) {
		data_map
= bpf_object__find_map_by_name(fexit_obj, ".bss"); if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), "find tailcall_bpf2bpf_fexit.bss map")) goto out; fexit_data_fd = bpf_map__fd(data_map); if (!ASSERT_GE(fexit_data_fd, 0, "find tailcall_bpf2bpf_fexit.bss map fd")) goto out; i = 0; err = bpf_map_lookup_elem(fexit_data_fd, &i, &val); ASSERT_OK(err, "fexit count"); ASSERT_EQ(val, 68, "fexit count"); } i = 0; err = bpf_map_delete_elem(map_fd, &i); if (!ASSERT_OK(err, "delete_elem from jmp_table")) goto out; err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "tailcall"); ASSERT_EQ(topts.retval, 1, "tailcall retval"); i = 0; err = bpf_map_lookup_elem(main_data_fd, &i, &val); ASSERT_OK(err, "tailcall count"); ASSERT_EQ(val, 35, "tailcall count"); if (test_fentry) { i = 0; err = bpf_map_lookup_elem(fentry_data_fd, &i, &val); ASSERT_OK(err, "fentry count"); ASSERT_EQ(val, 70, "fentry count"); } if (test_fexit) { i = 0; err = bpf_map_lookup_elem(fexit_data_fd, &i, &val); ASSERT_OK(err, "fexit count"); ASSERT_EQ(val, 70, "fexit count"); } out: bpf_link__destroy(fentry_link); bpf_link__destroy(fexit_link); bpf_object__close(fentry_obj); bpf_object__close(fexit_obj); bpf_object__close(obj); } /* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail * call limit enforcement matches with expectations when tailcalls are preceded * with two bpf2bpf calls. * * subprog --tailcall-> entry * entry < * subprog --tailcall-> entry */ static void test_tailcall_bpf2bpf_hierarchy_1(void) { test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o", false, false, false); } /* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the * tail call limit enforcement matches with expectations when tailcalls are * preceded with two bpf2bpf calls, and the two subprogs are traced by fentry. 
*/ static void test_tailcall_bpf2bpf_hierarchy_fentry(void) { test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o", true, false, false); } /* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the tail * call limit enforcement matches with expectations when tailcalls are preceded * with two bpf2bpf calls, and the two subprogs are traced by fexit. */ static void test_tailcall_bpf2bpf_hierarchy_fexit(void) { test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o", false, true, false); } /* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of * the tail call limit enforcement matches with expectations when tailcalls are * preceded with two bpf2bpf calls, and the two subprogs are traced by both * fentry and fexit. */ static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void) { test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o", true, true, false); } /* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of * the tail call limit enforcement matches with expectations when tailcalls are * preceded with two bpf2bpf calls in fentry. 
*/ static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void) { test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true); } /* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail * call limit enforcement matches with expectations: * * subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0 * entry < * subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1 */ static void test_tailcall_bpf2bpf_hierarchy_2(void) { RUN_TESTS(tailcall_bpf2bpf_hierarchy2); } /* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail * call limit enforcement matches with expectations: * * subprog with jmp_table0 to classifier_0 * entry --tailcall-> classifier_0 < * subprog with jmp_table1 to classifier_0 */ static void test_tailcall_bpf2bpf_hierarchy_3(void) { RUN_TESTS(tailcall_bpf2bpf_hierarchy3); } /* test_tailcall_freplace checks that the freplace prog fails to update the * prog_array map, no matter whether the freplace prog attaches to its target. */ static void test_tailcall_freplace(void) { struct tailcall_freplace *freplace_skel = NULL; struct bpf_link *freplace_link = NULL; struct bpf_program *freplace_prog; struct tc_bpf2bpf *tc_skel = NULL; int prog_fd, tc_prog_fd, map_fd; char buff[128] = {}; int err, key; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = buff, .data_size_in = sizeof(buff), .repeat = 1, ); freplace_skel = tailcall_freplace__open(); if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open")) return; tc_skel = tc_bpf2bpf__open_and_load(); if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load")) goto out; tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); freplace_prog = freplace_skel->progs.entry_freplace; err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd, "subprog_tc"); if (!ASSERT_OK(err, "set_attach_target")) goto out; err = tailcall_freplace__load(freplace_skel); if (!ASSERT_OK(err, "tailcall_freplace__load")) goto out; map_fd = bpf_map__fd(freplace_skel->maps.jmp_table); prog_fd = 
bpf_program__fd(freplace_prog); key = 0; err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); ASSERT_ERR(err, "update jmp_table failure"); freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd, "subprog_tc"); if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) goto out; err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); ASSERT_ERR(err, "update jmp_table failure"); out: bpf_link__destroy(freplace_link); tailcall_freplace__destroy(freplace_skel); tc_bpf2bpf__destroy(tc_skel); } /* test_tailcall_bpf2bpf_freplace checks the failure that fails to attach a tail * callee prog with freplace prog or fails to update an extended prog to * prog_array map. */ static void test_tailcall_bpf2bpf_freplace(void) { struct tailcall_freplace *freplace_skel = NULL; struct bpf_link *freplace_link = NULL; struct tc_bpf2bpf *tc_skel = NULL; char buff[128] = {}; int prog_fd, map_fd; int err, key; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = buff, .data_size_in = sizeof(buff), .repeat = 1, ); tc_skel = tc_bpf2bpf__open_and_load(); if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load")) goto out; prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); freplace_skel = tailcall_freplace__open(); if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open")) goto out; err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace, prog_fd, "subprog_tc"); if (!ASSERT_OK(err, "set_attach_target")) goto out; err = tailcall_freplace__load(freplace_skel); if (!ASSERT_OK(err, "tailcall_freplace__load")) goto out; /* OK to attach then detach freplace prog. */ freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, prog_fd, "subprog_tc"); if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) goto out; err = bpf_link__destroy(freplace_link); if (!ASSERT_OK(err, "destroy link")) goto out; /* OK to update prog_array map then delete element from the map. 
*/ key = 0; map_fd = bpf_map__fd(freplace_skel->maps.jmp_table); err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); if (!ASSERT_OK(err, "update jmp_table")) goto out; err = bpf_map_delete_elem(map_fd, &key); if (!ASSERT_OK(err, "delete_elem from jmp_table")) goto out; /* Fail to attach a tail callee prog with freplace prog. */ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); if (!ASSERT_OK(err, "update jmp_table")) goto out; freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, prog_fd, "subprog_tc"); if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure")) goto out; err = bpf_map_delete_elem(map_fd, &key); if (!ASSERT_OK(err, "delete_elem from jmp_table")) goto out; /* Fail to update an extended prog to prog_array map. */ freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, prog_fd, "subprog_tc"); if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) goto out; err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); if (!ASSERT_ERR(err, "update jmp_table failure")) goto out; out: bpf_link__destroy(freplace_link); tailcall_freplace__destroy(freplace_skel); tc_bpf2bpf__destroy(tc_skel); } static void test_tailcall_failure() { RUN_TESTS(tailcall_fail); } void test_tailcalls(void) { if (test__start_subtest("tailcall_1")) test_tailcall_1(); if (test__start_subtest("tailcall_2")) test_tailcall_2(); if (test__start_subtest("tailcall_3")) test_tailcall_3(); if (test__start_subtest("tailcall_4")) test_tailcall_4(); if (test__start_subtest("tailcall_5")) test_tailcall_5(); if (test__start_subtest("tailcall_6")) test_tailcall_6(); if (test__start_subtest("tailcall_bpf2bpf_1")) test_tailcall_bpf2bpf_1(); if (test__start_subtest("tailcall_bpf2bpf_2")) test_tailcall_bpf2bpf_2(); if (test__start_subtest("tailcall_bpf2bpf_3")) test_tailcall_bpf2bpf_3(); if (test__start_subtest("tailcall_bpf2bpf_4")) test_tailcall_bpf2bpf_4(false); if (test__start_subtest("tailcall_bpf2bpf_5")) 
test_tailcall_bpf2bpf_4(true); if (test__start_subtest("tailcall_bpf2bpf_6")) test_tailcall_bpf2bpf_6(); if (test__start_subtest("tailcall_bpf2bpf_fentry")) test_tailcall_bpf2bpf_fentry(); if (test__start_subtest("tailcall_bpf2bpf_fexit")) test_tailcall_bpf2bpf_fexit(); if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit")) test_tailcall_bpf2bpf_fentry_fexit(); if (test__start_subtest("tailcall_bpf2bpf_fentry_entry")) test_tailcall_bpf2bpf_fentry_entry(); if (test__start_subtest("tailcall_poke")) test_tailcall_poke(); if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1")) test_tailcall_bpf2bpf_hierarchy_1(); if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry")) test_tailcall_bpf2bpf_hierarchy_fentry(); if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit")) test_tailcall_bpf2bpf_hierarchy_fexit(); if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit")) test_tailcall_bpf2bpf_hierarchy_fentry_fexit(); if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry")) test_tailcall_bpf2bpf_hierarchy_fentry_entry(); test_tailcall_bpf2bpf_hierarchy_2(); test_tailcall_bpf2bpf_hierarchy_3(); if (test__start_subtest("tailcall_freplace")) test_tailcall_freplace(); if (test__start_subtest("tailcall_bpf2bpf_freplace")) test_tailcall_bpf2bpf_freplace(); if (test__start_subtest("tailcall_failure")) test_tailcall_failure(); }
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2022 MediaTek Inc. */ #include <dt-bindings/clock/mediatek,mt8365-clk.h> #include <linux/clk-provider.h> #include <linux/platform_device.h> #include "clk-gate.h" #include "clk-mtk.h" static const struct mtk_gate_regs mfg0_cg_regs = { .set_ofs = 0x4, .clr_ofs = 0x8, .sta_ofs = 0x0, }; static const struct mtk_gate_regs mfg1_cg_regs = { .set_ofs = 0x280, .clr_ofs = 0x280, .sta_ofs = 0x280, }; #define GATE_MFG0(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &mfg0_cg_regs, _shift, \ &mtk_clk_gate_ops_setclr) #define GATE_MFG1(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &mfg1_cg_regs, _shift, \ &mtk_clk_gate_ops_no_setclr) static const struct mtk_gate mfg_clks[] = { /* MFG0 */ GATE_MFG0(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0), /* MFG1 */ GATE_MFG1(CLK_MFG_MBIST_DIAG, "mfg_mbist_diag", "mbist_diag_sel", 24), }; static const struct mtk_clk_desc mfg_desc = { .clks = mfg_clks, .num_clks = ARRAY_SIZE(mfg_clks), }; static const struct of_device_id of_match_clk_mt8365_mfg[] = { { .compatible = "mediatek,mt8365-mfgcfg", .data = &mfg_desc, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_mfg); static struct platform_driver clk_mt8365_mfg_drv = { .probe = mtk_clk_simple_probe, .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt8365-mfg", .of_match_table = of_match_clk_mt8365_mfg, }, }; module_platform_driver(clk_mt8365_mfg_drv); MODULE_DESCRIPTION("MediaTek MT8365 GPU mfg clocks driver"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<[email protected]>
 *	Robert Love	<[email protected]>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewriten to make use of the fsnotify infrastructure
 */

#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/mm.h>

#include "inotify.h"

/*
 * Check if 2 events contain the same information.
 */
static bool event_compare(struct fsnotify_event *old_fsn,
			  struct fsnotify_event *new_fsn)
{
	struct inotify_event_info *old, *new;

	old = INOTIFY_E(old_fsn);
	new = INOTIFY_E(new_fsn);
	/* IN_IGNORED events are never merged away. */
	if (old->mask & FS_IN_IGNORED)
		return false;
	if ((old->mask == new->mask) &&
	    (old->wd == new->wd) &&
	    (old->name_len == new->name_len) &&
	    (!old->name_len || !strcmp(old->name, new->name)))
		return true;
	return false;
}

/*
 * Merge callback for fsnotify_add_event(): drop the new event if it is
 * identical to the most recently queued one.
 */
static int inotify_merge(struct fsnotify_group *group,
			 struct fsnotify_event *event)
{
	struct list_head *list = &group->notification_list;
	struct fsnotify_event *last_event;

	last_event = list_entry(list->prev, struct fsnotify_event, list);
	return event_compare(last_event, event);
}

/*
 * Allocate and queue an inotify event for @inode_mark.  @name (may be NULL)
 * is copied into the event; its length determines the allocation size.
 * Returns 0 on success (or for a stale mark), -ENOMEM if allocation failed
 * (in which case an overflow event is queued instead).
 */
int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
			       struct inode *inode, struct inode *dir,
			       const struct qstr *name, u32 cookie)
{
	struct inotify_inode_mark *i_mark;
	struct inotify_event_info *event;
	struct fsnotify_event *fsn_event;
	struct fsnotify_group *group = inode_mark->group;
	int ret;
	int len = 0, wd;
	int alloc_len = sizeof(struct inotify_event_info);
	struct mem_cgroup *old_memcg;

	if (name) {
		len = name->len;
		alloc_len += len + 1; /* +1 for the NUL terminator */
	}

	pr_debug("%s: group=%p mark=%p mask=%x\n", __func__, group, inode_mark,
		 mask);

	i_mark = container_of(inode_mark, struct inotify_inode_mark,
			      fsn_mark);

	/*
	 * We can be racing with mark being detached. Don't report event with
	 * invalid wd.
	 */
	wd = READ_ONCE(i_mark->wd);
	if (wd == -1)
		return 0;
	/*
	 * Whoever is interested in the event, pays for the allocation. Do not
	 * trigger OOM killer in the target monitoring memcg as it may have
	 * security repercussion.
	 */
	old_memcg = set_active_memcg(group->memcg);
	event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	set_active_memcg(old_memcg);

	if (unlikely(!event)) {
		/*
		 * Treat lost event due to ENOMEM the same way as queue
		 * overflow to let userspace know event was lost.
		 */
		fsnotify_queue_overflow(group);
		return -ENOMEM;
	}

	/*
	 * We now report FS_ISDIR flag with MOVE_SELF and DELETE_SELF events
	 * for fanotify. inotify never reported IN_ISDIR with those events.
	 * It looks like an oversight, but to avoid the risk of breaking
	 * existing inotify programs, mask the flag out from those events.
	 */
	if (mask & (IN_MOVE_SELF | IN_DELETE_SELF))
		mask &= ~IN_ISDIR;

	fsn_event = &event->fse;
	fsnotify_init_event(fsn_event);
	event->mask = mask;
	event->wd = wd;
	event->sync_cookie = cookie;
	event->name_len = len;
	if (len)
		strcpy(event->name, name->name);

	ret = fsnotify_add_event(group, fsn_event, inotify_merge);
	if (ret) {
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);
	}

	/* One-shot watches are torn down after delivering their event. */
	if (inode_mark->flags & FSNOTIFY_MARK_FLAG_IN_ONESHOT)
		fsnotify_destroy_mark(inode_mark, group);

	return 0;
}

static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark,
				 struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(fsn_mark, group);
}

/*
 * This is NEVER supposed to be called.  Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down.  This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	static bool warned = false;

	/* Only warn once per boot to avoid log spam. */
	if (warned)
		return 0;

	warned = true;
	fsn_mark = p;
	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
		"idr.  Probably leaking memory\n", id, p, data);

	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it.  This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	if (fsn_mark)
		printk(KERN_WARNING "fsn_mark->group=%p wd=%d\n",
			fsn_mark->group, i_mark->wd);
	return 0;
}

static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the BUG in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_destroy(&group->inotify_data.idr);
	if (group->inotify_data.ucounts)
		dec_inotify_instances(group->inotify_data.ucounts);
}

static void inotify_free_event(struct fsnotify_group *group,
			       struct fsnotify_event *fsn_event)
{
	kfree(INOTIFY_E(fsn_event));
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

/* fsnotify backend operations implementing inotify semantics. */
const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_inode_event = inotify_handle_inode_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event = inotify_free_event,
	.freeing_mark = inotify_freeing_mark,
	.free_mark = inotify_free_mark,
};
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: The wqe valid
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
	} else {
		/* NULL sge writes an empty fragment carrying only the
		 * valid bit.
		 */
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct ib_sge *sge, u8 valid)
{
	/* GEN_1 hardware has no per-fragment valid bit. */
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static int irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return -EINVAL;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	struct irdma_qp_quanta *sq;
	u32 wqe_idx;

	/* Only act on every 128th index: pre-clear the NEXT 128-entry
	 * window ahead of the producer.
	 */
	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		sq = qp->sq_base + wqe_idx;
		/* Fill with the inverse of the polarity the window will use,
		 * so stale entries are never seen as valid; the pattern flips
		 * when the window wraps to index 0.
		 */
		if (wqe_idx)
			memset(sq, qp->swqe_polarity ? 0 : 0xFF,
			       128 * sizeof(*sq));
		else
			memset(sq, qp->swqe_polarity ? 0xFF : 0,
			       128 * sizeof(*sq));
	}
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	/* Ring the doorbell only if HW has not already caught up, taking
	 * ring wrap-around into account.
	 */
	if (sw_sq_head != qp->initial_ring.head) {
		if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		/* polarity flips each time the ring wraps */
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID,
						  qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

	return wqe;
}

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	int ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	read_fence |= info->read_fence;

	/* immediate data consumes the first fragment slot */
	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	int ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV :
			  IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	int ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return -EINVAL;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].length;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    frag_cnt ? op_info->sg_list : NULL,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	/* NOTE: GEN_1 swaps the mw/mr stag field placement relative to
	 * irdma_set_mw_bind_wqe() below.
	 */
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: Total inline data length
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
					 u32 num_sges, u8 polarity)
{
	u32 quanta_bytes_remaining = 16;
	int i;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* Remaining inline bytes reside after hdr */
				wqe += 16;
				quanta_bytes_remaining = 32;
			}
		}
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @wqe: pointer to wqe
 * @sge_list: table of pointers to inline data
 * @num_sges: number of SGE's
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
				   u32 num_sges, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 quanta_bytes_remaining = 8;
	bool first_quanta = true;
	int i;

	wqe += 8;

	for (i = 0; i < num_sges; i++) {
		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
		u32 sge_len = sge_list[i].length;

		while (sge_len) {
			u32 bytes_copied;

			bytes_copied = min(sge_len, quanta_bytes_remaining);
			memcpy(wqe, cur_sge, bytes_copied);
			wqe += bytes_copied;
			cur_sge += bytes_copied;
			quanta_bytes_remaining -= bytes_copied;
			sge_len -= bytes_copied;

			if (!quanta_bytes_remaining) {
				/* each subsequent 32-byte quantum keeps its
				 * last byte for the inline valid marker
				 */
				quanta_bytes_remaining = 31;

				/* Remaining inline bytes reside after hdr */
				if (first_quanta) {
					first_quanta = false;
					wqe += 16;
				} else {
					*wqe = inline_valid;
					wqe++;
				}
			}
		}
	}
	/* stamp the valid marker of a partially-filled final quantum */
	if (!first_quanta && quanta_bytes_remaining < 31)
		*(wqe + quanta_bytes_remaining) = inline_valid;
}

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u32 i, total_size = 0;
	u16 quanta;

	op_info = &info->op.rdma_write;

	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
		return -EINVAL;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].length;

	if (unlikely(total_size > qp->max_inline_data))
		return -EINVAL;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) | FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); if (info->imm_data_valid) set_64bit_val(wqe, 0, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list, op_info->num_lo_sges, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_uk_inline_send - inline send operation * @qp: hw qp ptr * @info: post sq information * @post_sq: flag to post sq */ int irdma_uk_inline_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; struct irdma_post_send *op_info; u64 hdr; u32 wqe_idx; bool read_fence = false; u32 i, total_size = 0; u16 quanta; op_info = &info->op.send; if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges)) return -EINVAL; for (i = 0; i < op_info->num_sges; i++) total_size += op_info->sg_list[i].length; if (unlikely(total_size > qp->max_inline_data)) return -EINVAL; quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size); wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); set_64bit_val(wqe, 16, FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) | FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp)); read_fence |= info->read_fence; hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) | FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) | FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) | FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, (info->imm_data_valid ? 
1 : 0)) | FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) | FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) | FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); if (info->imm_data_valid) set_64bit_val(wqe, 0, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list, op_info->num_sges, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_uk_stag_local_invalidate - stag invalidate operation * @qp: hw qp ptr * @info: post sq information * @post_sq: flag to post sq */ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; struct irdma_inv_local_stag *op_info; u64 hdr; u32 wqe_idx; bool local_fence = false; struct ib_sge sge = {}; op_info = &info->op.inv_local_stag; local_fence = info->local_fence; wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA, 0, info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); sge.lkey = op_info->target_stag; qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0); set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) | FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_uk_post_receive - post receive wqe * @qp: hw qp ptr * @info: post rq information */ int irdma_uk_post_receive(struct irdma_qp_uk *qp, struct 
irdma_post_rq_info *info) { u32 wqe_idx, i, byte_off; u32 addl_frag_cnt; __le64 *wqe; u64 hdr; if (qp->max_rq_frag_cnt < info->num_sges) return -EINVAL; wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx); if (!wqe) return -ENOMEM; qp->rq_wrid_array[wqe_idx] = info->wr_id; addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0; qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list, qp->rwqe_polarity); for (i = 1, byte_off = 32; i < info->num_sges; i++) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i], qp->rwqe_polarity); byte_off += 16; } /* if not an odd number set valid bit in next fragment */ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) && info->num_sges) { qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, qp->rwqe_polarity); if (qp->uk_attrs->hw_rev == IRDMA_GEN_2) ++addl_frag_cnt; } set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); return 0; } /** * irdma_uk_cq_resize - reset the cq buffer info * @cq: cq to resize * @cq_base: new cq buffer addr * @cq_size: number of cqes */ void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size) { cq->cq_base = cq_base; cq->cq_size = cq_size; IRDMA_RING_INIT(cq->cq_ring, cq->cq_size); cq->polarity = 1; } /** * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers * @cq: cq to resize * @cq_cnt: the count of the resized cq buffers */ void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt) { u64 temp_val; u16 sw_cq_sel; u8 arm_next_se; u8 arm_next; u8 arm_seq_num; get_64bit_val(cq->shadow_area, 32, &temp_val); sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val); sw_cq_sel += cq_cnt; arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val); arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val); arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, 
temp_val); temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next); set_64bit_val(cq->shadow_area, 32, temp_val); } /** * irdma_uk_cq_request_notification - cq notification request (door bell) * @cq: hw cq * @cq_notify: notification type */ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, enum irdma_cmpl_notify cq_notify) { u64 temp_val; u16 sw_cq_sel; u8 arm_next_se = 0; u8 arm_next = 0; u8 arm_seq_num; get_64bit_val(cq->shadow_area, 32, &temp_val); arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val); arm_seq_num++; sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val); arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val); arm_next_se |= 1; if (cq_notify == IRDMA_CQ_COMPL_EVENT) arm_next = 1; temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) | FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next); set_64bit_val(cq->shadow_area, 32, temp_val); dma_wmb(); /* make sure WQE is populated before valid bit is set */ writel(cq->cq_id, cq->cqe_alloc_db); } /** * irdma_uk_cq_poll_cmpl - get cq completion info * @cq: hw cq * @info: cq poll information returned */ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) { u64 comp_ctx, qword0, qword2, qword3; __le64 *cqe; struct irdma_qp_uk *qp; struct irdma_ring *pring = NULL; u32 wqe_idx; int ret_code; bool move_cq_head = true; u8 polarity; bool ext_valid; __le64 *ext_cqe; if (cq->avoid_mem_cflct) cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq); else cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq); get_64bit_val(cqe, 24, &qword3); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); if (polarity != cq->polarity) return -ENOENT; /* Ensure CQE contents are read after valid bit is checked */ dma_rmb(); 
ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3); if (ext_valid) { u64 qword6, qword7; u32 peek_head; if (cq->avoid_mem_cflct) { ext_cqe = (__le64 *)((u8 *)cqe + 32); get_64bit_val(ext_cqe, 24, &qword7); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7); } else { peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size; ext_cqe = cq->cq_base[peek_head].buf; get_64bit_val(ext_cqe, 24, &qword7); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7); if (!peek_head) polarity ^= 1; } if (polarity != cq->polarity) return -ENOENT; /* Ensure ext CQE contents are read after ext valid bit is checked */ dma_rmb(); info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7); if (info->imm_valid) { u64 qword4; get_64bit_val(ext_cqe, 0, &qword4); info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4); } info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7); info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7); if (info->ud_smac_valid || info->ud_vlan_valid) { get_64bit_val(ext_cqe, 16, &qword6); if (info->ud_vlan_valid) info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6); if (info->ud_smac_valid) { info->ud_smac[5] = qword6 & 0xFF; info->ud_smac[4] = (qword6 >> 8) & 0xFF; info->ud_smac[3] = (qword6 >> 16) & 0xFF; info->ud_smac[2] = (qword6 >> 24) & 0xFF; info->ud_smac[1] = (qword6 >> 32) & 0xFF; info->ud_smac[0] = (qword6 >> 40) & 0xFF; } } } else { info->imm_valid = false; info->ud_smac_valid = false; info->ud_vlan_valid = false; } info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3); info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3); info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3); if (info->error) { info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3); info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3); if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) { info->comp_status = IRDMA_COMPL_STATUS_FLUSHED; /* Set the min error to standard flush error code for remaining cqes */ if (info->minor_err != FLUSH_GENERAL_ERR) { qword3 &= 
~IRDMA_CQ_MINERR; qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR); set_64bit_val(cqe, 24, qword3); } } else { info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN; } } else { info->comp_status = IRDMA_COMPL_STATUS_SUCCESS; } get_64bit_val(cqe, 0, &qword0); get_64bit_val(cqe, 16, &qword2); info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0); info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2); info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2); get_64bit_val(cqe, 8, &comp_ctx); info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3); qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx; if (!qp || qp->destroy_pending) { ret_code = -EFAULT; goto exit; } wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3); info->qp_handle = (irdma_qp_handle)(unsigned long)qp; info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3); if (info->q_type == IRDMA_CQE_QTYPE_RQ) { u32 array_idx; array_idx = wqe_idx / qp->rq_wqe_size_multiplier; if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED || info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) { if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) { ret_code = -ENOENT; goto exit; } info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail]; array_idx = qp->rq_ring.tail; } else { info->wr_id = qp->rq_wrid_array[array_idx]; } info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0); if (qword3 & IRDMACQ_STAG) { info->stag_invalid_set = true; info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2); } else { info->stag_invalid_set = false; } IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1); if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) { qp->rq_flush_seen = true; if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) qp->rq_flush_complete = true; else move_cq_head = false; } pring = &qp->rq_ring; } else { /* q_type is IRDMA_CQE_QTYPE_SQ */ if (qp->first_sq_wq) { if (wqe_idx + 1 >= qp->conn_wqes) qp->first_sq_wq = false; if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) { IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); 
IRDMA_RING_MOVE_TAIL(cq->cq_ring); set_64bit_val(cq->shadow_area, 0, IRDMA_RING_CURRENT_HEAD(cq->cq_ring)); memset(info, 0, sizeof(struct irdma_cq_poll_info)); return irdma_uk_cq_poll_cmpl(cq, info); } } if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) { info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; if (!info->comp_status) info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len; info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3); IRDMA_RING_SET_TAIL(qp->sq_ring, wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta); } else { if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) { ret_code = -ENOENT; goto exit; } do { __le64 *sw_wqe; u64 wqe_qword; u32 tail; tail = qp->sq_ring.tail; sw_wqe = qp->sq_base[tail].elem; get_64bit_val(sw_wqe, 24, &wqe_qword); info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword); IRDMA_RING_SET_TAIL(qp->sq_ring, tail + qp->sq_wrtrk_array[tail].quanta); if (info->op_type != IRDMAQP_OP_NOP) { info->wr_id = qp->sq_wrtrk_array[tail].wrid; info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len; break; } } while (1); if (info->op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR) info->minor_err = FLUSH_MW_BIND_ERR; qp->sq_flush_seen = true; if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) qp->sq_flush_complete = true; } pring = &qp->sq_ring; } ret_code = 0; exit: if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) if (pring && IRDMA_RING_MORE_WORK(*pring)) move_cq_head = false; if (move_cq_head) { IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring)) cq->polarity ^= 1; if (ext_valid && !cq->avoid_mem_cflct) { IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring)) cq->polarity ^= 1; } IRDMA_RING_MOVE_TAIL(cq->cq_ring); if (!cq->avoid_mem_cflct && ext_valid) IRDMA_RING_MOVE_TAIL(cq->cq_ring); set_64bit_val(cq->shadow_area, 0, IRDMA_RING_CURRENT_HEAD(cq->cq_ring)); } else { qword3 &= ~IRDMA_CQ_WQEIDX; qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail); set_64bit_val(cqe, 24, 
qword3);
	}

	return ret_code;
}

/**
 * irdma_qp_round_up - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 *
 * Rounds wqdepth up to the next power of two by smearing the highest
 * set bit into all lower bit positions and incrementing.
 */
static int irdma_qp_round_up(u32 wqdepth)
{
	int scount = 1;

	/* classic round-up-to-power-of-2: propagate the top bit downwards */
	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and
 * inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64
 * bytes). For 4-7 SGE's and inline <= 101 Shift of 2 otherwise (wqe size of
 * 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		/* GEN_2+ thresholds differ from GEN_1 (39/101 vs 48) */
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ?
			 1 : 2;
	}
}

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 *
 * Returns 0 on success, -EINVAL if the rounded-up depth exceeds the HW
 * maximum. The depth is clamped up to the minimum HW WQ size.
 */
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *sqdepth)
{
	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

	/* reserve IRDMA_SQ_RSVD quanta before rounding to a power of two */
	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < min_size)
		*sqdepth = min_size;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return -EINVAL;

	return 0;
}

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 *
 * Returns 0 on success, -EINVAL if the rounded-up depth exceeds the HW
 * maximum. The depth is clamped up to the minimum HW WQ size.
 */
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *rqdepth)
{
	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;

	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < min_size)
		*rqdepth = min_size;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	return 0;
}

/* WQE helper ops for GEN_2+ hardware */
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

/* WQE helper ops for GEN_1 hardware (different inline layout/quanta) */
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection.
* @qp: hw qp (user and kernel) * @info: qp initialization info */ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) { u16 move_cnt = 1; if (!info->legacy_mode && (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)) move_cnt = 3; qp->conn_wqes = move_cnt; IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt); IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt); IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt); } /** * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ * @ukinfo: qp initialization info * @sq_shift: Returns shift of SQ * @rq_shift: Returns shift of RQ */ void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift, u8 *rq_shift) { bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2; irdma_get_wqe_shift(ukinfo->uk_attrs, imm_support ? ukinfo->max_sq_frag_cnt + 1 : ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, sq_shift); irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0, rq_shift); if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) { if (ukinfo->abi_ver > 4) *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; } } /** * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size. * @ukinfo: qp initialization info * @sq_depth: Returns depth of SQ * @sq_shift: Returns shift of SQ */ int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo, u32 *sq_depth, u8 *sq_shift) { bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2; int status; irdma_get_wqe_shift(ukinfo->uk_attrs, imm_support ? ukinfo->max_sq_frag_cnt + 1 : ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, sq_shift); status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size, *sq_shift, sq_depth); return status; } /** * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size. 
* @ukinfo: qp initialization info * @rq_depth: Returns depth of RQ * @rq_shift: Returns shift of RQ */ int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo, u32 *rq_depth, u8 *rq_shift) { int status; irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0, rq_shift); if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) { if (ukinfo->abi_ver > 4) *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; } status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size, *rq_shift, rq_depth); return status; } /** * irdma_uk_qp_init - initialize shared qp * @qp: hw qp (user and kernel) * @info: qp initialization info * * initializes the vars used in both user and kernel mode. * size of the wqe depends on numbers of max. fragements * allowed. Then size of wqe * the number of wqes should be the * amount of memory allocated for sq and rq. */ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) { int ret_code = 0; u32 sq_ring_size; qp->uk_attrs = info->uk_attrs; if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags || info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags) return -EINVAL; qp->qp_caps = info->qp_caps; qp->sq_base = info->sq; qp->rq_base = info->rq; qp->qp_type = info->type ? 
info->type : IRDMA_QP_TYPE_IWARP; qp->shadow_area = info->shadow_area; qp->sq_wrtrk_array = info->sq_wrtrk_array; qp->rq_wrid_array = info->rq_wrid_array; qp->wqe_alloc_db = info->wqe_alloc_db; qp->qp_id = info->qp_id; qp->sq_size = info->sq_size; qp->max_sq_frag_cnt = info->max_sq_frag_cnt; sq_ring_size = qp->sq_size << info->sq_shift; IRDMA_RING_INIT(qp->sq_ring, sq_ring_size); IRDMA_RING_INIT(qp->initial_ring, sq_ring_size); if (info->first_sq_wq) { irdma_setup_connection_wqes(qp, info); qp->swqe_polarity = 1; qp->first_sq_wq = true; } else { qp->swqe_polarity = 0; } qp->swqe_polarity_deferred = 1; qp->rwqe_polarity = 0; qp->rq_size = info->rq_size; qp->max_rq_frag_cnt = info->max_rq_frag_cnt; qp->max_inline_data = info->max_inline_data; qp->rq_wqe_size = info->rq_shift; IRDMA_RING_INIT(qp->rq_ring, qp->rq_size); qp->rq_wqe_size_multiplier = 1 << info->rq_shift; if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) qp->wqe_ops = iw_wqe_uk_ops_gen_1; else qp->wqe_ops = iw_wqe_uk_ops; return ret_code; } /** * irdma_uk_cq_init - initialize shared cq (user and kernel) * @cq: hw cq * @info: hw cq initialization info */ void irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info) { cq->cq_base = info->cq_base; cq->cq_id = info->cq_id; cq->cq_size = info->cq_size; cq->cqe_alloc_db = info->cqe_alloc_db; cq->cq_ack_db = info->cq_ack_db; cq->shadow_area = info->shadow_area; cq->avoid_mem_cflct = info->avoid_mem_cflct; IRDMA_RING_INIT(cq->cq_ring, cq->cq_size); cq->polarity = 1; } /** * irdma_uk_clean_cq - clean cq entries * @q: completion context * @cq: cq to clean */ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq) { __le64 *cqe; u64 qword3, comp_ctx; u32 cq_head; u8 polarity, temp; cq_head = cq->cq_ring.head; temp = cq->polarity; do { if (cq->avoid_mem_cflct) cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf; else cqe = cq->cq_base[cq_head].buf; get_64bit_val(cqe, 24, &qword3); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); if (polarity 
!= temp) break; /* Ensure CQE contents are read after valid bit is checked */ dma_rmb(); get_64bit_val(cqe, 8, &comp_ctx); if ((void *)(unsigned long)comp_ctx == q) set_64bit_val(cqe, 8, 0); cq_head = (cq_head + 1) % cq->cq_ring.size; if (!cq_head) temp ^= 1; } while (true); } /** * irdma_nop - post a nop * @qp: hw qp ptr * @wr_id: work request id * @signaled: signaled for completion * @post_sq: ring doorbell */ int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq) { __le64 *wqe; u64 hdr; u32 wqe_idx; struct irdma_post_sq_info info = {}; info.wr_id = wr_id; wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA, 0, &info); if (!wqe) return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); set_64bit_val(wqe, 0, 0); set_64bit_val(wqe, 8, 0); set_64bit_val(wqe, 16, 0); hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) | FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) | FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); dma_wmb(); /* make sure WQE is populated before valid bit is set */ set_64bit_val(wqe, 24, hdr); if (post_sq) irdma_uk_qp_post_wr(qp); return 0; } /** * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ * @frag_cnt: number of fragments * @quanta: quanta for frag_cnt */ int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) { switch (frag_cnt) { case 0: case 1: *quanta = IRDMA_QP_WQE_MIN_QUANTA; break; case 2: case 3: *quanta = 2; break; case 4: case 5: *quanta = 3; break; case 6: case 7: *quanta = 4; break; case 8: case 9: *quanta = 5; break; case 10: case 11: *quanta = 6; break; case 12: case 13: *quanta = 7; break; case 14: case 15: /* when immediate data is present */ *quanta = 8; break; default: return -EINVAL; } return 0; } /** * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ * @frag_cnt: number of fragments * @wqe_size: size in bytes given frag_cnt */ int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) { switch (frag_cnt) { case 0: case 1: *wqe_size = 32; 
break; case 2: case 3: *wqe_size = 64; break; case 4: case 5: case 6: case 7: *wqe_size = 128; break; case 8: case 9: case 10: case 11: case 12: case 13: case 14: *wqe_size = 256; break; default: return -EINVAL; } return 0; }
/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include "internals.h"

/* Per-IRQ ack handles, filled in by intc_set_ack_handle() under
 * intc_big_lock and read lock-free by intc_get_ack_handle().
 */
static unsigned long ack_handle[INTC_NR_IRQS];

/*
 * intc_grp_id - look up the group that contains @enum_id.
 *
 * Scans every group's enum_ids list (0-terminated) and returns the
 * group's own enum_id, or 0 if @enum_id belongs to no group.
 */
static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}

/*
 * _intc_mask_data - build a mask handle for @enum_id.
 *
 * Resumes scanning the mask registers from (*reg_idx, *fld_idx) so the
 * caller can iterate over multiple matches. On a match, packs the
 * register function, mode, enable/disable register indices and bit
 * position into a handle via _INTC_MK() and returns it; returns 0 when
 * no further match exists.
 */
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				/* separate set/clear registers */
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				/* single register: read-modify-write */
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			/* select 8/16/32-bit access variant */
			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

/*
 * intc_get_mask_handle - first mask handle for @enum_id.
 *
 * Falls back to the enum's group (one level, @do_grps) when no direct
 * mask register entry matches.
 */
unsigned int __init intc_get_mask_handle(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_get_mask_handle(desc, d,
					    intc_grp_id(desc, enum_id), 0);

	return 0;
}

/*
 * _intc_prio_data - build a priority handle for @enum_id.
 *
 * Same resumable-scan contract as _intc_mask_data(), but over the
 * priority registers with multi-bit fields of pr->field_width.
 */
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				/* a priority register must have a set_reg */
				if (!pr->set_reg)
					BUG();
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			/* field must fit inside the register */
			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

/*
 * intc_get_prio_handle - first priority handle for @enum_id.
 *
 * Falls back to the enum's group (one level, @do_grps) when no direct
 * priority register entry matches.
 */
unsigned int __init intc_get_prio_handle(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_get_prio_handle(desc, d,
					    intc_grp_id(desc, enum_id), 0);

	return 0;
}

/*
 * intc_ack_data - build an ack handle for @enum_id from the ack registers.
 *
 * Ack registers always use the single set_reg in read-modify-write
 * (MODE_ENABLE_REG) form. Returns 0 when @enum_id has no ack entry.
 */
static unsigned int intc_ack_data(struct intc_desc *desc,
				  struct intc_desc_int *d, intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->set_reg;
			reg_d = mr->set_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}

/*
 * intc_enable_disable - apply one mask/priority handle.
 *
 * Dispatches to the enable or disable register-function table for the
 * handle's mode and applies it to every per-CPU instance of the target
 * register (SMP_NR).
 */
static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	unsigned long (*fn)(unsigned long, unsigned long,
		   unsigned long (*)(unsigned long, unsigned long,
				     unsigned long),
		   unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			/* "noprio" variant: enables without touching priority */
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}

/*
 * intc_enable_disable_enum - enable or disable everything tied to @enum_id.
 *
 * Walks all mask bits and then all priority fields that match the enum,
 * applying each via intc_enable_disable(). The (i, j) cursor pair is
 * advanced across calls; j is bumped past each hit to resume the scan.
 */
void __init intc_enable_disable_enum(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);
}

/*
 * intc_get_sense_handle - build a sense-configuration handle for @enum_id.
 *
 * Sense registers are single-register, multi-bit fields; mode is fixed
 * at 0. Returns 0 when the enum has no sense entry.
 */
unsigned int __init intc_get_sense_handle(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			/* field must fit inside the register */
			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}

/*
 * intc_set_ack_handle - cache the ack handle for @irq.
 *
 * No-op when the controller has no ack registers; otherwise computes
 * and stores the handle under intc_big_lock.
 */
void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
			 struct intc_desc_int *d, intc_enum id)
{
	unsigned long flags;

	/*
	 * Nothing to do for this IRQ.
	 */
	if (!desc->hw.ack_regs)
		return;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	ack_handle[irq] = intc_ack_data(desc, d, id);
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}

/* intc_get_ack_handle - return the cached ack handle for @irq */
unsigned long intc_get_ack_handle(unsigned int irq)
{
	return ack_handle[irq];
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
 * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
 */

#include <linux/bits.h>
#include <linux/can/dev.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/units.h>

#define ACC_TS_FREQ_80MHZ (80 * HZ_PER_MHZ)
#define ACC_I2C_ADDON_DETECT_DELAY_MS 10

/* esdACC Overview Module register offsets */
#define ACC_OV_OF_PROBE 0x0000
#define ACC_OV_OF_VERSION 0x0004
#define ACC_OV_OF_INFO 0x0008
#define ACC_OV_OF_CANCORE_FREQ 0x000c
#define ACC_OV_OF_TS_FREQ_LO 0x0010
#define ACC_OV_OF_TS_FREQ_HI 0x0014
#define ACC_OV_OF_IRQ_STATUS_CORES 0x0018
#define ACC_OV_OF_TS_CURR_LO 0x001c
#define ACC_OV_OF_TS_CURR_HI 0x0020
#define ACC_OV_OF_IRQ_STATUS 0x0028
#define ACC_OV_OF_MODE 0x002c
#define ACC_OV_OF_BM_IRQ_COUNTER 0x0070
#define ACC_OV_OF_BM_IRQ_MASK 0x0074
#define ACC_OV_OF_MSI_DATA 0x0080
#define ACC_OV_OF_MSI_ADDRESSOFFSET 0x0084

/* Feature flags are contained in the upper 16 bit of the version
 * register at ACC_OV_OF_VERSION but only used with these masks after
 * extraction into an extra variable => (xx - 16).
 */
#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
#define ACC_OV_REG_FEAT_MASK_DAR BIT(30 - 16)

/* Bits of the overview-module MODE register */
#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
#define ACC_OV_REG_MODE_MASK_MODE_LED BIT(2)
#define ACC_OV_REG_MODE_MASK_TIMER_ENABLE BIT(4)
#define ACC_OV_REG_MODE_MASK_TIMER_ONE_SHOT BIT(5)
#define ACC_OV_REG_MODE_MASK_TIMER_ABSOLUTE BIT(6)
#define ACC_OV_REG_MODE_MASK_TIMER GENMASK(6, 4)
#define ACC_OV_REG_MODE_MASK_TS_SRC GENMASK(8, 7)
#define ACC_OV_REG_MODE_MASK_I2C_ENABLE BIT(11)
#define ACC_OV_REG_MODE_MASK_MSI_ENABLE BIT(14)
#define ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE BIT(15)
#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)

/* esdACC CAN Core Module register offsets */
#define ACC_CORE_OF_CTRL 0x0000
#define ACC_CORE_OF_STATUS_IRQ 0x0008
#define ACC_CORE_OF_BRP 0x000c
#define ACC_CORE_OF_BTR 0x0010
#define ACC_CORE_OF_FBTR 0x0014
#define ACC_CORE_OF_STATUS 0x0030
#define ACC_CORE_OF_TXFIFO_CONFIG 0x0048
#define ACC_CORE_OF_TXFIFO_STATUS 0x004c
#define ACC_CORE_OF_TX_STATUS_IRQ 0x0050
#define ACC_CORE_OF_TX_ABORT_MASK 0x0054
#define ACC_CORE_OF_BM_IRQ_COUNTER 0x0070
#define ACC_CORE_OF_TXFIFO_ID 0x00c0
#define ACC_CORE_OF_TXFIFO_DLC 0x00c4
#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc

/* CTRL register layout */
#define ACC_REG_CTRL_MASK_RESETMODE BIT(0)
#define ACC_REG_CTRL_MASK_LOM BIT(1)
#define ACC_REG_CTRL_MASK_STM BIT(2)
#define ACC_REG_CTRL_MASK_TRANSEN BIT(5)
#define ACC_REG_CTRL_MASK_TS BIT(6)
#define ACC_REG_CTRL_MASK_SCHEDULE BIT(7)
#define ACC_REG_CTRL_MASK_IE_RXTX BIT(8)
#define ACC_REG_CTRL_MASK_IE_TXERROR BIT(9)
#define ACC_REG_CTRL_MASK_IE_ERRWARN BIT(10)
#define ACC_REG_CTRL_MASK_IE_OVERRUN BIT(11)
#define ACC_REG_CTRL_MASK_IE_TSI BIT(12)
#define ACC_REG_CTRL_MASK_IE_ERRPASS BIT(13)
#define ACC_REG_CTRL_MASK_IE_ALI BIT(14)
#define ACC_REG_CTRL_MASK_IE_BUSERR BIT(15)

/* BRP and BTR register layout for CAN-Classic version */
#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
#define ACC_REG_BTR_CL_MASK_TSEG1 GENMASK(3, 0)
#define ACC_REG_BTR_CL_MASK_TSEG2 GENMASK(18, 16)
#define ACC_REG_BTR_CL_MASK_SJW GENMASK(25, 24)

/* BRP and BTR register layout for CAN-FD version */
#define ACC_REG_BRP_FD_MASK_BRP GENMASK(7, 0)
#define ACC_REG_BTR_FD_MASK_TSEG1 GENMASK(7, 0)
#define ACC_REG_BTR_FD_MASK_TSEG2 GENMASK(22, 16)
#define ACC_REG_BTR_FD_MASK_SJW GENMASK(30, 24)

/* 256 BM_MSGs of 32 byte size */
#define ACC_CORE_DMAMSG_SIZE 32U
#define ACC_CORE_DMABUF_SIZE (256U * ACC_CORE_DMAMSG_SIZE)

/* Message type identifiers found in the msg_id byte of a bus-master message */
enum acc_bmmsg_id {
	BM_MSG_ID_RXTXDONE = 0x01,
	BM_MSG_ID_TXABORT = 0x02,
	BM_MSG_ID_OVERRUN = 0x03,
	BM_MSG_ID_BUSERR = 0x04,
	BM_MSG_ID_ERRPASSIVE = 0x05,
	BM_MSG_ID_ERRWARN = 0x06,
	BM_MSG_ID_TIMESLICE = 0x07,
	BM_MSG_ID_HWTIMER = 0x08,
	BM_MSG_ID_HOTPLUG = 0x09,
};

/* The struct acc_bmmsg_* structure declarations that follow here provide
 * access to the ring buffer of bus master messages maintained by the FPGA
 * bus master engine. All bus master messages have the same size of
 * ACC_CORE_DMAMSG_SIZE and a minimum alignment of ACC_CORE_DMAMSG_SIZE in
 * memory.
 *
 * All structure members are natural aligned. Therefore we should not need
 * a __packed attribute. All struct acc_bmmsg_* declarations have at least
 * reserved* members to fill the structure to the full ACC_CORE_DMAMSG_SIZE.
 *
 * A failure of this property due padding will be detected at compile time
 * by static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE).
 */

struct acc_bmmsg_rxtxdone {
	u8 msg_id;
	u8 txfifo_level;
	u8 reserved1[2];
	u8 txtsfifo_level;
	u8 reserved2[3];
	u32 id;
	struct {
		u8 len;
		u8 txdfifo_idx;
		u8 zeroes8;
		u8 reserved;
	} acc_dlc;
	u8 data[CAN_MAX_DLEN];
	/* Time stamps in struct acc_ov::timestamp_frequency ticks. */
	u64 ts;
};

struct acc_bmmsg_txabort {
	u8 msg_id;
	u8 txfifo_level;
	u16 abort_mask;
	u8 txtsfifo_level;
	u8 reserved2[1];
	u16 abort_mask_txts;
	u64 ts;
	u32 reserved3[4];
};

struct acc_bmmsg_overrun {
	u8 msg_id;
	u8 txfifo_level;
	u8 lost_cnt;
	u8 reserved1;
	u8 txtsfifo_level;
	u8 reserved2[3];
	u64 ts;
	u32 reserved3[4];
};

struct acc_bmmsg_buserr {
	u8 msg_id;
	u8 txfifo_level;
	u8 ecc;
	u8 reserved1;
	u8 txtsfifo_level;
	u8 reserved2[3];
	u64 ts;
	u32 reg_status;
	u32 reg_btr;
	u32 reserved3[2];
};

struct acc_bmmsg_errstatechange {
	u8 msg_id;
	u8 txfifo_level;
	u8 reserved1[2];
	u8 txtsfifo_level;
	u8 reserved2[3];
	u64 ts;
	u32 reg_status;
	u32 reserved3[3];
};

struct acc_bmmsg_timeslice {
	u8 msg_id;
	u8 txfifo_level;
	u8 reserved1[2];
	u8 txtsfifo_level;
	u8 reserved2[3];
	u64 ts;
	u32 reserved3[4];
};

struct acc_bmmsg_hwtimer {
	u8 msg_id;
	u8 reserved1[3];
	u32 reserved2[1];
	u64 timer;
	u32 reserved3[4];
};

struct acc_bmmsg_hotplug {
	u8 msg_id;
	u8 reserved1[3];
	u32 reserved2[7];
};

union acc_bmmsg {
	u8 msg_id;
	struct acc_bmmsg_rxtxdone rxtxdone;
	struct acc_bmmsg_txabort txabort;
	struct acc_bmmsg_overrun overrun;
	struct acc_bmmsg_buserr buserr;
	struct acc_bmmsg_errstatechange errstatechange;
	struct acc_bmmsg_timeslice timeslice;
	struct acc_bmmsg_hwtimer hwtimer;
};

/* Check size of union acc_bmmsg to be of expected size. */
static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE);

struct acc_bmfifo {
	const union acc_bmmsg *messages;
	/* irq_cnt points to an u32 value where the esdACC FPGA deposits
	 * the bm_fifo head index in coherent DMA memory. Only bits 7..0
	 * are valid. Use READ_ONCE() to access this memory location.
	 */
	const u32 *irq_cnt;
	u32 local_irq_cnt;
	u32 msg_fifo_tail;
};

/* Per-core state: MMIO base, associated netdev, BM FIFO and TX FIFO cursors */
struct acc_core {
	void __iomem *addr;
	struct net_device *netdev;
	struct acc_bmfifo bmfifo;
	u8 tx_fifo_size;
	u8 tx_fifo_head;
	u8 tx_fifo_tail;
};

/* State of the esdACC overview module */
struct acc_ov {
	void __iomem *addr;
	struct acc_bmfifo bmfifo;
	u32 timestamp_frequency;
	u32 core_frequency;
	u16 version;
	u16 features;
	u8 total_cores;
	u8 active_cores;
};

struct acc_net_priv {
	struct can_priv can; /* must be the first member! */
	struct acc_core *core;
	struct acc_ov *ov;
};

/* Core register accessors; registers are accessed big endian (ioread32be) */
static inline u32 acc_read32(struct acc_core *core, unsigned short offs)
{
	return ioread32be(core->addr + offs);
}

static inline void acc_write32(struct acc_core *core,
			       unsigned short offs, u32 v)
{
	iowrite32be(v, core->addr + offs);
}

static inline void acc_write32_noswap(struct acc_core *core,
				      unsigned short offs, u32 v)
{
	iowrite32(v, core->addr + offs);
}

static inline void acc_set_bits(struct acc_core *core,
				unsigned short offs, u32 mask)
{
	u32 v = acc_read32(core, offs);

	v |= mask;
	acc_write32(core, offs, v);
}

static inline void acc_clear_bits(struct acc_core *core,
				  unsigned short offs, u32 mask)
{
	u32 v = acc_read32(core, offs);

	v &= ~mask;
	acc_write32(core, offs, v);
}

static inline int acc_resetmode_entered(struct acc_core *core)
{
	u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL);

	return (ctrl & ACC_REG_CTRL_MASK_RESETMODE) != 0;
}

/* Overview-module register accessors (same big endian access as the cores) */
static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
{
	return ioread32be(ov->addr + offs);
}

static inline void acc_ov_write32(struct acc_ov *ov,
				  unsigned short offs, u32 v)
{
	iowrite32be(v, ov->addr + offs);
}

static inline void acc_ov_set_bits(struct acc_ov *ov,
				   unsigned short offs, u32 b)
{
	u32 v = acc_ov_read32(ov, offs);

	v |= b;
	acc_ov_write32(ov, offs, v);
}

static inline void acc_ov_clear_bits(struct acc_ov *ov,
				     unsigned short offs, u32 b)
{
	u32 v = acc_ov_read32(ov, offs);

	v &= ~b;
	acc_ov_write32(ov, offs, v);
}

static inline void acc_reset_fpga(struct acc_ov *ov)
{
	acc_ov_write32(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_FPGA_RESET);

	/* (Re-)start and wait for completion of addon detection on the I^2C bus */
	acc_ov_set_bits(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_I2C_ENABLE);
	mdelay(ACC_I2C_ADDON_DETECT_DELAY_MS);
}

void acc_init_ov(struct acc_ov *ov, struct device *dev);
void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores,
		     const void *mem);
int acc_open(struct net_device *netdev);
int acc_close(struct net_device *netdev);
netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev);
int acc_get_berr_counter(const struct net_device *netdev,
			 struct can_berr_counter *bec);
int acc_set_mode(struct net_device *netdev, enum can_mode mode);
int acc_set_bittiming(struct net_device *netdev);
irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-footbridge/irq.c
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * Changelog:
 *  22-Aug-1998 RMK	Restructured IRQ routines
 *  03-Sep-1998 PJB	Merged CATS support
 *  20-Jan-1998 RMK	Started merge of EBSA286, CATS and NetWinder
 *  26-Jan-1999 PJB	Don't use IACK on CATS
 *  16-Mar-1999 RMK	Added autodetect of ISA PICs
 */
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#include <asm/mach/irq.h>

#include <mach/hardware.h>
#include <asm/hardware/dec21285.h>
#include <asm/irq.h>
#include <asm/mach-types.h>

#include "common.h"

/* Mask (disable) an IRQ on the low (master) PIC by setting its mask bit. */
static void isa_mask_pic_lo_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq & 7);

	outb(inb(PIC_MASK_LO) | mask, PIC_MASK_LO);
}

/* Acknowledge an IRQ on the low PIC: mask it, then issue a non-specific EOI. */
static void isa_ack_pic_lo_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq & 7);

	outb(inb(PIC_MASK_LO) | mask, PIC_MASK_LO);
	outb(0x20, PIC_LO);
}

/* Unmask (enable) an IRQ on the low PIC by clearing its mask bit. */
static void isa_unmask_pic_lo_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq & 7);

	outb(inb(PIC_MASK_LO) & ~mask, PIC_MASK_LO);
}

static struct irq_chip isa_lo_chip = {
	.irq_ack	= isa_ack_pic_lo_irq,
	.irq_mask	= isa_mask_pic_lo_irq,
	.irq_unmask	= isa_unmask_pic_lo_irq,
};

/* Mask (disable) an IRQ on the high (slave) PIC by setting its mask bit. */
static void isa_mask_pic_hi_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq & 7);

	outb(inb(PIC_MASK_HI) | mask, PIC_MASK_HI);
}

/*
 * Acknowledge an IRQ on the high PIC: mask it, EOI the cascade line on
 * the master (0x62 = specific EOI for IRQ2), then EOI the slave itself.
 */
static void isa_ack_pic_hi_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq & 7);

	outb(inb(PIC_MASK_HI) | mask, PIC_MASK_HI);
	outb(0x62, PIC_LO);
	outb(0x20, PIC_HI);
}

/* Unmask (enable) an IRQ on the high PIC by clearing its mask bit. */
static void isa_unmask_pic_hi_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq & 7);

	outb(inb(PIC_MASK_HI) & ~mask, PIC_MASK_HI);
}

static struct irq_chip isa_hi_chip = {
	.irq_ack	= isa_ack_pic_hi_irq,
	.irq_mask	= isa_mask_pic_hi_irq,
	.irq_unmask	= isa_unmask_pic_hi_irq,
};

/*
 * Chained handler for the cascaded ISA interrupt: read the vector from
 * the interrupt-acknowledge cycle and dispatch it, rejecting anything
 * outside the valid ISA IRQ range.
 */
static void isa_irq_handler(struct irq_desc *desc)
{
	unsigned int isa_irq = *(unsigned char *)PCIIACK_BASE;

	if (isa_irq < _ISA_IRQ(0) || isa_irq >= _ISA_IRQ(16)) {
		do_bad_IRQ(desc);
		return;
	}

	generic_handle_irq(isa_irq);
}

static struct resource pic1_resource = {
	.name	= "pic1",
	.start	= 0x20,
	.end	= 0x3f,
};

static struct resource pic2_resource = {
	.name	= "pic2",
	.start	= 0xa0,
	.end	= 0xbf,
};

/*
 * Probe for and initialise the cascaded ISA PIC pair, then wire all 16
 * ISA interrupts through @host_irq.  If the probe pattern does not read
 * back, no PIC is present and the ISA IRQs are left unconfigured.
 */
void __init isa_init_irq(unsigned int host_irq)
{
	unsigned int irq;

	/*
	 * Setup, and then probe for an ISA PIC
	 * If the PIC is not there, then we
	 * ignore the PIC.
	 */
	outb(0x11, PIC_LO);
	outb(_ISA_IRQ(0), PIC_MASK_LO);	/* IRQ number		*/
	outb(0x04, PIC_MASK_LO);	/* Slave on Ch2		*/
	outb(0x01, PIC_MASK_LO);	/* x86			*/
	outb(0xf5, PIC_MASK_LO);	/* pattern: 11110101	*/

	outb(0x11, PIC_HI);
	outb(_ISA_IRQ(8), PIC_MASK_HI);	/* IRQ number		*/
	outb(0x02, PIC_MASK_HI);	/* Slave on Ch1		*/
	outb(0x01, PIC_MASK_HI);	/* x86			*/
	outb(0xfa, PIC_MASK_HI);	/* pattern: 11111010	*/

	outb(0x0b, PIC_LO);
	outb(0x0b, PIC_HI);

	if (inb(PIC_MASK_LO) == 0xf5 && inb(PIC_MASK_HI) == 0xfa) {
		outb(0xff, PIC_MASK_LO);/* mask all IRQs	*/
		outb(0xff, PIC_MASK_HI);/* mask all IRQs	*/
	} else {
		printk(KERN_INFO "IRQ: ISA PIC not found\n");
		host_irq = (unsigned int)-1;
	}

	if (host_irq != (unsigned int)-1) {
		for (irq = _ISA_IRQ(0); irq < _ISA_IRQ(8); irq++) {
			irq_set_chip_and_handler(irq, &isa_lo_chip,
						 handle_level_irq);
			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
		}

		for (irq = _ISA_IRQ(8); irq < _ISA_IRQ(16); irq++) {
			irq_set_chip_and_handler(irq, &isa_hi_chip,
						 handle_level_irq);
			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
		}

		request_resource(&ioport_resource, &pic1_resource);
		request_resource(&ioport_resource, &pic2_resource);

		irq = IRQ_ISA_CASCADE;
		if (request_irq(irq, no_action, 0, "cascade", NULL))
			pr_err("Failed to request irq %u (cascade)\n", irq);

		irq_set_chained_handler(host_irq, isa_irq_handler);

		/*
		 * On the NetWinder, don't automatically
		 * enable ISA IRQ11 when it is requested.
		 * There appears to be a missing pull-up
		 * resistor on this line.
		 */
		if (machine_is_netwinder())
			irq_modify_status(_ISA_IRQ(11),
				IRQ_NOREQUEST | IRQ_NOPROBE, IRQ_NOAUTOEN);
	}
}
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852a.h"
#include "rtw8852a_rfk.h"
#include "rtw8852a_rfk_table.h"
#include "rtw8852a_table.h"

/* Map a PHY index to the RF path mask to calibrate (both paths unless DBCC). */
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,  PHY%d\n",
		    rtwdev->dbcc_en, phy_idx);

	if (!rtwdev->dbcc_en)
		return RF_AB;

	if (phy_idx == RTW89_PHY_0)
		return RF_A;
	else
		return RF_B;
}

/* BB and RF registers saved/restored around the calibration sequences */
static const u32 rtw8852a_backup_bb_regs[] = {0x2344, 0x58f0, 0x78f0};
static const u32 rtw8852a_backup_rf_regs[] = {0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852a_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852a_backup_rf_regs)

/* Save the BB registers listed in rtw8852a_backup_bb_regs. */
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		backup_bb_reg_val[i] =
			rtw89_phy_read32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
					      MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]backup bb reg : %x, value =%x\n",
			    rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

/* Save the RF registers of @rf_path listed in rtw8852a_backup_rf_regs. */
static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
			       u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		backup_rf_reg_val[i] =
			rtw89_read_rf(rtwdev, rf_path,
				      rtw8852a_backup_rf_regs[i], RFREG_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
			    rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

/* Restore the BB registers saved by _rfk_backup_bb_reg(). */
static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
				u32 backup_bb_reg_val[])
{
	u32 i;

	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
				       MASKDWORD, backup_bb_reg_val[i]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]restore bb reg : %x, value =%x\n",
			    rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
	}
}

/* Restore the RF registers of @rf_path saved by _rfk_backup_rf_reg(). */
static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
				u32 backup_rf_reg_val[], u8 rf_path)
{
	u32 i;

	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
		rtw89_write_rf(rtwdev, rf_path, rtw8852a_backup_rf_regs[i],
			       RFREG_MASK, backup_rf_reg_val[i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
	}
}

/* Poll every path in @kpath until its RF mode leaves value 2 (or timeout). */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u8 path;
	u32 rf_mode;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
					       2, 5000, false, rtwdev, path, 0x00,
					       RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
			    path, ret);
	}
}

/* Dump all stored DACK calibration results to the RFK debug log. */
static void _dack_dump(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;
	u8 t;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[0][0], dack->addck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->addck_d[1][0], dack->addck_d[1][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
		    dack->dadck_d[1][0], dack->dadck_d[1][1]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[0][0], dack->biask_d[0][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
		    dack->biask_d[1][0], dack->biask_d[1][1]);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[0][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][0][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		t = dack->msbk_d[1][1][i];
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
	}
}

/* Apply the AFE init register table. */
static void _afe_init(struct rtw89_dev *rtwdev)
{
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_afe_init_defs_tbl);
}

/* Read back the ADDCK (ADC DC offset calibration) results for both paths. */
static void _addck_backup(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_clr(rtwdev, R_S0_RXDC2, B_S0_RXDC2_SEL);
	dack->addck_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
							 B_S0_ADDCK_Q);
	dack->addck_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
							 B_S0_ADDCK_I);

	rtw89_phy_write32_clr(rtwdev, R_S1_RXDC2, B_S1_RXDC2_SEL);
	dack->addck_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
							 B_S1_ADDCK_Q);
	dack->addck_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
							 B_S1_ADDCK_I);
}

/* Write the stored ADDCK results back into the RX DC correction registers.
 * The Q value is split: upper bits go to RXDC2, lower 6 bits to RXDC.
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_I, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2, B_S0_RXDC2_Q2,
			       (dack->addck_d[0][1] >> 6));
	rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_Q,
			       (dack->addck_d[0][1] & 0x3f));
	rtw89_phy_write32_set(rtwdev, R_S0_RXDC2, B_S0_RXDC2_MEN);
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_I, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC2, B_S1_RXDC2_Q2,
			       (dack->addck_d[1][1] >> 6));
	rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_Q,
			       (dack->addck_d[1][1] & 0x3f));
	rtw89_phy_write32_set(rtwdev, R_S1_RXDC2, B_S1_RXDC2_EN);
}

/* Read back the S0 (path A) DACK results: MSBK table, bias and DAC DC. */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_set(rtwdev, R_S0_DACKI, B_S0_DACKI_EN);
	rtw89_phy_write32_set(rtwdev, R_S0_DACKQ, B_S0_DACKQ_EN);
	rtw89_phy_write32_set(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_S0_DACKI, B_S0_DACKI_AR, i);
		dack->msbk_d[0][0][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI7,
						  B_S0_DACKI7_K);
		rtw89_phy_write32_mask(rtwdev, R_S0_DACKQ, B_S0_DACKQ_AR, i);
		dack->msbk_d[0][1][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ7,
						  B_S0_DACKQ7_K);
	}

	dack->biask_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI2,
							 B_S0_DACKI2_K);
	dack->biask_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ2,
							 B_S0_DACKQ2_K);
	dack->dadck_d[0][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI8,
							B_S0_DACKI8_K) - 8;
	dack->dadck_d[0][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ8,
							B_S0_DACKQ8_K) - 8;
}

/* Read back the S1 (path B) DACK results: MSBK table, bias and DAC DC. */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_set(rtwdev, R_S1_DACKI, B_S1_DACKI_EN);
	rtw89_phy_write32_set(rtwdev, R_S1_DACKQ, B_S1_DACKQ_EN);
	rtw89_phy_write32_set(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);

	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
		rtw89_phy_write32_mask(rtwdev, R_S1_DACKI, B_S1_DACKI_AR, i);
		dack->msbk_d[1][0][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI7,
						  B_S1_DACKI_K);
		rtw89_phy_write32_mask(rtwdev, R_S1_DACKQ, B_S1_DACKQ_AR, i);
		dack->msbk_d[1][1][i] =
			(u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ7,
						  B_S1_DACKQ7_K);
	}

	dack->biask_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI2,
							 B_S1_DACKI2_K);
	dack->biask_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ2,
							 B_S1_DACKQ2_K);
	dack->dadck_d[1][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI8,
							B_S1_DACKI8_K) - 8;
	dack->dadck_d[1][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ8,
							B_S1_DACKQ8_K) - 8;
}

/* Write one path/index set of stored DACK results back to hardware.
 * Register blocks are laid out at 0x5e14.. with a 0x50 stride per index
 * and a 0x2000 stride per path; MSBK entries are packed four per dword.
 */
static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path path, u8 index)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 tmp = 0, tmp_offset, tmp_reg;
	u8 i;
	u32 idx_offset, path_offset;

	if (index == 0)
		idx_offset = 0;
	else
		idx_offset = 0x50;

	if (path == RF_PATH_A)
		path_offset = 0;
	else
		path_offset = 0x2000;

	tmp_offset = idx_offset + path_offset;
	/* msbk_d: 15/14/13/12 */
	tmp = 0x0;
	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
		tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
	tmp_reg = 0x5e14 + tmp_offset;
	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
	/* msbk_d: 11/10/9/8 */
	tmp = 0x0;
	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
		tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
	tmp_reg = 0x5e18 + tmp_offset;
	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
	/* msbk_d: 7/6/5/4 */
	tmp = 0x0;
	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
		tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
	tmp_reg = 0x5e1c + tmp_offset;
	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
	/* msbk_d: 3/2/1/0 */
	tmp = 0x0;
	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
		tmp |= dack->msbk_d[path][index][i] << (i * 8);
	tmp_reg = 0x5e20 + tmp_offset;
	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
	/* dadak_d/biask_d */
	tmp = 0x0;
	tmp = (dack->biask_d[path][index] << 22) |
	      (dack->dadck_d[path][index] << 14);
	tmp_reg = 0x5e24 + tmp_offset;
	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
}

/* Reload both DACK index sets for @path and run the per-path reload table. */
static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u8 i;

	for (i = 0; i < 2; i++)
		_dack_reload_by_path(rtwdev, path, i);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_dack_reload_defs_a_tbl,
				 &rtw8852a_rfk_dack_reload_defs_b_tbl);
}

#define ADDC_T_AVG 100

/* Average ADDC_T_AVG samples of the RX DC debug readback and log the result.
 * The dword packs the I component in bits 23..12 and Q in bits 11..0,
 * both as 12-bit signed values.
 */
static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	s32 dc_re = 0, dc_im = 0;
	u32 tmp;
	u32 i;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_check_addc_defs_a_tbl,
				 &rtw8852a_rfk_check_addc_defs_b_tbl);

	for (i = 0; i < ADDC_T_AVG; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
		dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
		dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
	}

	dc_re /= ADDC_T_AVG;
	dc_im /= ADDC_T_AVG;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
}

/* Run the ADC DC offset calibration (ADDCK) on both paths, polling the
 * completion bit and recording a timeout flag on failure.
 */
static void _addck(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* S0 */
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_a_tbl);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_a_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x1e00, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_A);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_a_tbl);

	/* S1 */
	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_b_tbl);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_b_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x3e00, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
	_check_addc(rtwdev, RF_PATH_B);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_b_tbl);
}

/* Verify the DAC DC calibration by sampling DC around it (setup/restore
 * tables bracket a _check_addc() measurement).
 */
static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_check_dadc_defs_f_a_tbl,
				 &rtw8852a_rfk_check_dadc_defs_f_b_tbl);

	_check_addc(rtwdev, path);

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_rfk_check_dadc_defs_r_a_tbl,
				 &rtw8852a_rfk_check_dadc_defs_r_b_tbl);
}

/* Run the full S0 (path A) DACK sequence: MSBK, then DADCK, then backup
 * and reload of the results.  Timeouts are recorded, not fatal.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_a_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x5e28, BIT(15));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x5e78, BIT(15));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
		dack->msbk_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_a_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x5e48, BIT(17));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x5e98, BIT(17));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADACK timeout\n");
		dack->dadck_timeout[0] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_a_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
	_check_dadc(rtwdev, RF_PATH_A);

	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);

	rtw89_phy_write32_clr(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);
}

/* Run the full S1 (path B) DACK sequence, mirroring _dack_s0(). */
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_b_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x7e28, BIT(15));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x7e78, BIT(15));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
		dack->msbk_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_b_tbl);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
				       false, rtwdev, 0x7e48, BIT(17));
	ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
					false, rtwdev, 0x7e98, BIT(17));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
		dack->dadck_timeout[1] = true;
	}
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);

	rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_b_tbl);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
	_check_dadc(rtwdev, RF_PATH_B);

	_dack_backup_s1(rtwdev);
	_dack_reload(rtwdev, RF_PATH_B);

	rtw89_phy_write32_clr(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);
}

/* Run DACK on both paths. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}

/* Full DAC calibration entry point: saves the RF mode, runs AFE init,
 * ADDCK and DACK with BTC notifications around each phase, then restores
 * the original RF state.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
		     enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 rf0_0, rf1_0;
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
	_afe_init(rtwdev);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x30001);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x30001);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_addck(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
	_addck_backup(rtwdev);
	_addck_reload(rtwdev);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x40001);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
	_dack(rtwdev);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
	_dack_dump(rtwdev);
	dack->dack_done = true;
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}

#define RTW8852A_NCTL_VER 0xd
#define RTW8852A_IQK_VER 0x2a
#define RTW8852A_IQK_SS 2
#define RTW8852A_IQK_THR_REK 8
#define RTW8852A_IQK_CFIR_GROUP_NR 4

enum rtw8852a_iqk_type {
	ID_TXAGC,
	ID_FLOK_COARSE,
	ID_FLOK_FINE,
	ID_TXK,
	ID_RXAGC,
	ID_RXK,
	ID_NBTXK,
	ID_NBRXK,
};

/* Dump six FFT report words for @path via the KIP report interface. */
static void _iqk_read_fft_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
	u8 i = 0x0;
	u32 fft[6] = {0x0};

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00160000);
	fft[0] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00170000);
	fft[1] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00180000);
	fft[2] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00190000);
	fft[3] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001a0000);
	fft[4] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001b0000);
	fft[5] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	for (i = 0; i < 6; i++)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x,fft[%x]= %x\n",
			    path, i, fft[i]);
}

/* Dump the 0x18 IQK X/Y/M report entries for @path, then restore defaults. */
static void _iqk_read_xym_dbcc0(struct rtw89_dev *rtwdev, u8 path)
{
	u8 i = 0x0;
	u32 tmp = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX, 0x1);

	for (i = 0x0; i < 0x18; i++) {
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x000000c0 + i);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N2, MASKDWORD);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = %x\n",
			    path, BIT(path), tmp);
		udelay(1);
	}
	rtw89_phy_write32_clr(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX);
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       0x40000000);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x80010100);
	udelay(1);
}

/* Dump the TX CFIR coefficient block for @path/@group, bounds-checked
 * against the base-address table dimensions.
 */
static void _iqk_read_txcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
				   u8 group)
{
	static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
		{0x8f20, 0x8f54, 0x8f88, 0x8fbc},
		{0x9320, 0x9354, 0x9388, 0x93bc},
	};
	u8 idx = 0x0;
	u32 tmp = 0x0;
	u32 base_addr;

	if (path >= RTW8852A_IQK_SS) {
		rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
		return;
	}
	if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
		rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

	base_addr = base_addrs[path][group];

	for (idx = 0; idx < 0x0d; idx++) {
		tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] %x = %x\n",
			    base_addr + (idx << 2), tmp);
	}

	if (path == 0x0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f50 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f84 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fb8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fec = %x\n", tmp);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9350 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9384 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93b8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93ec = %x\n", tmp);
	}
	rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xc);
	udelay(1);
	tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
		    BIT(path), tmp);
}

/* Dump the RX CFIR coefficient block for @path/@group, bounds-checked
 * against the base-address table dimensions.
 */
static void _iqk_read_rxcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
				   u8 group)
{
	static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
		{0x8d00, 0x8d44, 0x8d88, 0x8dcc},
		{0x9100, 0x9144, 0x9188, 0x91cc},
	};
	u8 idx = 0x0;
	u32 tmp = 0x0;
	u32 base_addr;

	if (path >= RTW8852A_IQK_SS) {
		rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
		return;
	}
	if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
		rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);

	base_addr = base_addrs[path][group];
	for (idx = 0; idx < 0x10; idx++) {
		tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]%x = %x\n",
			    base_addr + (idx << 2), tmp);
	}

	if (path == 0x0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d40 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d84 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8dc8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8e0c = %x\n", tmp);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C0, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9140 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C1, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9184 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C2, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x91c8 = %x\n", tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C3, MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x920c = %x\n", tmp);
	}
	rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xd);
	tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
		    BIT(path), tmp);
}

/* Dump the IQK RX SRAM contents (definition continues past this view). */
static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
{
	u32 tmp = 0x0;
	u32 i = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);

	for (i = 0; i <= 0x9f; i++) {
		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD,
				       0x00010000 + i);
		tmp =
rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp); } for (i = 0; i <= 0x9f; i++) { rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i); tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp); } rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX2, MASKDWORD); rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX, MASKDWORD); } static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u32 tmp = 0x0; rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG); rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x3); rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041); udelay(1); rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x3); rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0); udelay(1); rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1); rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x0); udelay(1); rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303); rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000); switch (iqk_info->iqk_band[path]) { case RTW89_BAND_2G: rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2); rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1); break; case RTW89_BAND_5G: rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2); rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x5); rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1); break; default: break; } tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp); rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1); fsleep(128); } static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 
path, u8 ktype)
{
	u32 tmp;
	u32 val;
	int ret;

	/* Poll 0xbff8[7:0] for the 0x55 "done" handshake, up to ~8.2ms.
	 * NOTE(review): the function always returns false (success) even on
	 * timeout - only debug output distinguishes the cases; this matches
	 * the vendor flow, where completion is judged via the 0x8008 report.
	 */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
	tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);

	return false;
}

/* Issue one IQK sub-command (@ktype) to the NCTL engine and wait for it.
 * Builds the per-type command word, kicks it via R_NCTL_CFG/R_DPK_CTL,
 * and optionally dumps XYM/FFT/SRAM/CFIR debug data depending on the
 * iqk_*_en flags. Returns the (always-false) result of _iqk_check_cal().
 */
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy_idx, u8 path, u8 ktype,
			  enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail = false;
	u32 iqk_cmd = 0x0;
	u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path, chanctx_idx);
	u32 addr_rfc_ctl = 0x0;

	/* Per-path RFC control register (0x5864 for A, 0x7864 for B). */
	if (path == RF_PATH_A)
		addr_rfc_ctl = 0x5864;
	else
		addr_rfc_ctl = 0x7864;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
	switch (ktype) {
	case ID_TXAGC:
		iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_FLOK_COARSE:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
		iqk_cmd = 0x108 | (1 << (4 + path));
		break;
	case ID_FLOK_FINE:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
		iqk_cmd = 0x208 | (1 << (4 + path));
		break;
	case ID_TXK:
		rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
		/* Wideband TXK: command encodes the channel bandwidth. */
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_RXAGC:
		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
		break;
	case ID_RXK:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		/* Wideband RXK: command encodes the channel bandwidth. */
		iqk_cmd = 0x008 | (1 << (path + 4)) |
			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
		break;
	case ID_NBTXK:
		rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
		iqk_cmd = 0x308 | (1 << (4 + path));
		break;
	case ID_NBRXK:
		rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
		iqk_cmd = 0x608 | (1 << (4 + path));
		break;
	default:
		return false;
	}

	/* +1 sets the trigger bit of the command word. */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
	rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
	udelay(1);
	fail = _iqk_check_cal(rtwdev, path, ktype);
	/* Optional debug dumps, gated by run-time flags. */
	if (iqk_info->iqk_xym_en)
		_iqk_read_xym_dbcc0(rtwdev, path);
	if (iqk_info->iqk_fft_en)
		_iqk_read_fft_dbcc0(rtwdev, path);
	if (iqk_info->iqk_sram_en)
		_iqk_sram(rtwdev, path);
	if (iqk_info->iqk_cfir_en) {
		if (ktype == ID_TXK) {
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x0);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x1);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x2);
			_iqk_read_txcfir_dbcc0(rtwdev, path, 0x3);
		} else {
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x0);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x1);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x2);
			_iqk_read_rxcfir_dbcc0(rtwdev, path, 0x3);
		}
	}
	rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);

	return fail;
}

/* Wideband RX IQK: run ID_RXK over all four gain groups on @path.
 * Per-group RF gain/attenuation tables are band-specific. Records the
 * per-group fail bit in R_IQKINF, then powers down the RX-K chain and
 * enables the wideband RX CFIR result. Always returns false (success).
 */
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path,
			   enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
	static const u32 attc2_a[4] = {0x0, 0x0, 0x07, 0x30};
	static const u32 attc1_a[4] = {0x7, 0x5, 0x1, 0x1};
	static const u32 rxgn_g[4] = {0x1CC, 0x1E0, 0x2CC, 0x2E0};
	static const u32 attc2_g[4] = {0x0, 0x15, 0x3, 0x1a};
	static const u32 attc1_g[4] = {0x1, 0x0, 0x1, 0x0};
	u8 gp = 0x0;
	bool fail = false;
	u32 rf0 = 0x0;

	for (gp = 0; gp < 0x4; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_g[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, attc2_g[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, attc1_g[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_a[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, attc2_a[gp]);
			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, attc1_a[gp]);
			break;
		default:
			break;
		}
		rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
		/* Pass the current RF mode word (plus SYN selector) to the PHY. */
		rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
				       rf0 | iqk_info->syn1to2);
		rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
		rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
		rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK, chanctx_idx);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(16 + gp + path * 4), fail);
	}

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
		break;
	default:
		break;
	}
	/* Wideband result: use hardware CFIR, narrowband default coefficient. */
	iqk_info->nb_rxcfir[path] = 0x40000000;
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
			       B_IQK_RES_RXCFIR, 0x5);
	iqk_info->is_wb_rxiqk[path] = true;
	return false;
}

/* Narrowband RX IQK: single fixed gain group on @path.
 * On success, stores the read-back RX IQC coefficient (with the enable
 * bits ORed in); on failure, falls back to the default 0x40000002.
 */
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path,
		       enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 group = 0x0;
	u32 rf0 = 0x0, tmp = 0x0;
	u32 idxrxgain_a = 0x1a0;
	u32 idxattc2_a = 0x00;
	u32 idxattc1_a = 0x5;
	u32 idxrxgain_g = 0x1E0;
	u32 idxattc2_g = 0x15;
	u32 idxattc1_g = 0x0;
	bool fail = false;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_g);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, idxattc2_g);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, idxattc1_g);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_a);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, idxattc2_a);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, idxattc1_a);
		break;
	default:
		break;
	}
	rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
	rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
			       rf0 | iqk_info->syn1to2);
	rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
			       B_CFIR_LUT_GP, group);
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK, chanctx_idx);

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
		rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
		break;
	default:
		break;
	}
	if (!fail) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
		iqk_info->nb_rxcfir[path] = tmp | 0x2;
	} else {
		iqk_info->nb_rxcfir[path] = 0x40000002;
	}
	return fail;
}
/* Select the RX ADC clock/CFIR system config for the current bandwidth.
 * 80MHz channels use a faster ADC clock setting than 20/40MHz.
 */
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
				       MASKDWORD, 0x4d000a08);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x2);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
				       MASKDWORD, 0x44000a08);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x1);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
		rtw89_phy_write32_clr(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL);
	}
}

/* Wideband TX IQK: run ID_TXK over all four TX gain groups on @path.
 * Band-specific gain/attenuation/ITQT tables per group; records each
 * group's fail bit in R_IQKINF and enables the wideband TX CFIR result.
 * Always returns false (success).
 */
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
			   enum rtw89_phy_idx phy_idx, u8 path,
			   enum rtw89_chanctx_idx chanctx_idx)
{
	static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
	static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
	static const u32 a_itqt[4] = {0x12, 0x12, 0x12, 0x1b};
	static const u32 g_itqt[4] = {0x09, 0x12, 0x12, 0x12};
	static const u32 g_attsmxr[4] = {0x0, 0x1, 0x1, 0x1};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool fail = false;
	u8 gp = 0x0;
	u32 tmp = 0x0;

	for (gp = 0x0; gp < 0x4; gp++) {
		switch (iqk_info->iqk_band[path]) {
		case RTW89_BAND_2G:
			rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
					       B_RFGAIN_BND, 0x08);
			rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
				       g_txgain[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1,
				       g_attsmxr[gp]);
			rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0,
				       g_attsmxr[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, g_itqt[gp]);
			break;
		case RTW89_BAND_5G:
			rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
					       B_RFGAIN_BND, 0x04);
			rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
				       a_txgain[gp]);
			rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
					       MASKDWORD, a_itqt[gp]);
			break;
		default:
			break;
		}
		rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
		rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_GP, gp);
		rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK, chanctx_idx);
		rtw89_phy_write32_mask(rtwdev, R_IQKINF,
				       BIT(8 + gp + path * 4), fail);
	}

	iqk_info->nb_txcfir[path] = 0x40000000;
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
			       B_IQK_RES_TXCFIR, 0x5);
	iqk_info->is_wb_txiqk[path] = true;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n",
		    path, BIT(path), tmp);
	return false;
}

/* Narrowband TX IQK: single fixed gain/group (group 2) on @path.
 * Stores the TX IQC read-back on success, default 0x40000002 on failure.
 */
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
		       enum rtw89_phy_idx phy_idx, u8 path,
		       enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 group = 0x2;
	u32 a_mode_txgain = 0x64e2;
	u32 g_mode_txgain = 0x61e8;
	u32 attsmxr = 0x1;
	u32 itqt = 0x12;
	u32 tmp = 0x0;
	bool fail = false;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
				       B_RFGAIN_BND, 0x08);
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, g_mode_txgain);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, attsmxr);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, attsmxr);
		break;
	case RTW89_BAND_5G:
		rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
				       B_RFGAIN_BND, 0x04);
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, a_mode_txgain);
		break;
	default:
		break;
	}
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK, chanctx_idx);
	if (!fail) {
		tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
		iqk_info->nb_txcfir[path] = tmp | 0x2;
	} else {
		iqk_info->nb_txcfir[path] = 0x40000002;
	}
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n",
		    path, BIT(path), tmp);
	return fail;
}

/* Program an LO-calibration bias value (@ibias) into the RF LUT.
 * LUT row 0 is used for 2GHz, row 1 for 5GHz; LUTWE gates write access.
 */
static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
	if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
	else
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
}

/* Check the fine-LOK result: the I/Q correction cores must land inside
 * (0x2, 0x1d) exclusive-boundary range or the calibration is deemed failed.
 * Returns true on failure.
 */
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
	bool is_fail = false;
	u32 tmp = 0x0;
	u32 core_i = 0x0;
	u32 core_q = 0x0;

	tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK][FineLOK] S%x, 0x58 = 0x%x\n",
		    path, tmp);
	core_i = FIELD_GET(RR_TXMO_COI, tmp);
	core_q = FIELD_GET(RR_TXMO_COQ, tmp);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, i = 0x%x\n", path, core_i);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, q = 0x%x\n", path, core_q);
	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
		is_fail = true;
	return is_fail;
}

/* LO leakage calibration: run coarse then fine FLOK one-shots on @path,
 * recording each stage's result, then validate via _lok_finetune_check().
 * Returns true when the fine-tune result is out of range.
 */
static bool _iqk_lok(struct rtw89_dev *rtwdev,
		     enum rtw89_phy_idx phy_idx, u8 path,
		     enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 rf0 = 0x0;
	u8 itqt = 0x12;
	bool fail = false;
	bool tmp = false;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe5e0);
		itqt = 0x09;
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe4e0);
		itqt = 0x12;
		break;
	default:
		break;
	}
	rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
	rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF1, B_IQK_DIF1_TXPI,
			       rf0 | iqk_info->syn1to2);
	rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, 0x0);
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE, chanctx_idx);
	iqk_info->lok_cor_fail[0][path] = tmp;
	fsleep(10);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE, chanctx_idx);
	iqk_info->lok_fin_fail[0][path] = tmp;
	fail = _lok_finetune_check(rtwdev, path);
	return fail;
}

/* Prepare analog front end and band-specific RF registers for TX IQK.
 * The ANAPAR writes with interleaved udelay()s are an ordered power-up
 * sequence; do not reorder.
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
	udelay(1);
	/* Pulse the ADC FIFO reset before band-specific RF setup. */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);
	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x000);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
		rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x7);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x100);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
			       0x403e0 | iqk_info->syn1to2);
		udelay(1);
		break;
	default:
		break;
	}
}

/* Select the TX CFIR system configuration used during TX IQK. */
static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD,
			       0xce000a08);
}

/* Log the per-path IQK results and mirror them into the R_IQKINF /
 * R_IQKINF2 status registers; also snapshots the IQC/result registers
 * into the iqk_info backup fields and maintains the failure counter.
 */
static void _iqk_info_iqk(struct rtw89_dev *rtwdev,
			  enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 tmp = 0x0;
	bool flag = 0x0;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %lu\n", path,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
		    iqk_info->lok_cor_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
		    iqk_info->lok_fin_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
		    iqk_info->iqk_tx_fail[0][path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
		    iqk_info->iqk_rx_fail[0][path]);

	/* Each path owns a 4-bit fail nibble in R_IQKINF:
	 * bit0=LOK coarse, bit1=LOK fine, bit2=TXK, bit3=RXK.
	 */
	flag = iqk_info->lok_cor_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(0) << (path * 4), flag);
	flag = iqk_info->lok_fin_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(1) << (path * 4), flag);
	flag = iqk_info->iqk_tx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(2) << (path * 4), flag);
	flag = iqk_info->iqk_rx_fail[0][path];
	rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(3) << (path * 4), flag);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
	iqk_info->bp_iqkenable[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_txkresult[path] = tmp;
	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
	iqk_info->bp_rxkresult[path] = tmp;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
			       (u8)iqk_info->iqk_times);

	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, 0x0000000f << (path * 4));
	if (tmp != 0x0)
		iqk_info->iqk_fail_cnt++;
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x00ff0000 << (path * 4),
			       iqk_info->iqk_fail_cnt);
}

/* Run the full IQK sequence on one path: LOK (with up to 3 bias retries),
 * TX IQK (narrow- or wideband), then RX IQK (narrowband forced for NB
 * mode, DBCC, or 2GHz), finally publish the results.
 */
static void _iqk_by_path(struct rtw89_dev *rtwdev,
			 enum rtw89_phy_idx phy_idx, u8 path,
			 enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool lok_is_fail = false;
	u8 ibias = 0x1;
	u8 i = 0;

	_iqk_txclk_setting(rtwdev, path);
	/* Retry LOK up to 3 times with increasing bias values. */
	for (i = 0; i < 3; i++) {
		_lok_res_table(rtwdev, path, ibias++);
		_iqk_txk_setting(rtwdev, path);
		lok_is_fail = _iqk_lok(rtwdev, phy_idx, path, chanctx_idx);
		if (!lok_is_fail)
			break;
	}
	if (iqk_info->is_nbiqk)
		iqk_info->iqk_tx_fail[0][path] =
			_iqk_nbtxk(rtwdev, phy_idx, path, chanctx_idx);
	else
		iqk_info->iqk_tx_fail[0][path] =
			_txk_group_sel(rtwdev, phy_idx, path, chanctx_idx);
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_rxk_setting(rtwdev, path);
	if (iqk_info->is_nbiqk || rtwdev->dbcc_en ||
	    iqk_info->iqk_band[path] == RTW89_BAND_2G)
		iqk_info->iqk_rx_fail[0][path] =
			_iqk_nbrxk(rtwdev, phy_idx, path, chanctx_idx);
	else
		iqk_info->iqk_rx_fail[0][path] =
			_rxk_group_sel(rtwdev, phy_idx, path, chanctx_idx);
	_iqk_info_iqk(rtwdev, phy_idx, path);
}

/* Capture the current band/bandwidth/channel for @path into iqk_info,
 * choosing a free (or round-robin recycled) calibration table slot,
 * and publish the channel info into the R_IQKCH/R_IQKINF2 registers.
 */
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	u32 reg_rf18 = 0x0, reg_35c = 0x0;
	u8 idx = 0;
	u8 get_empty_table = false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	/* Find an unused per-channel table entry... */
	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
		if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
			get_empty_table = true;
			break;
		}
	}
	/* ...or recycle the next slot round-robin when all are in use. */
	if (!get_empty_table) {
		idx = iqk_info->iqk_table_idx[path] + 1;
		if (idx > RTW89_IQK_CHS_NR - 1)
			idx = 0;
	}
	reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
	reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);

	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
		    iqk_info->iqk_band[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
		    path, iqk_info->iqk_bw[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
		    path, iqk_info->iqk_ch[path]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
		    rtwdev->dbcc_en ? "on" : "off",
		    iqk_info->iqk_band[path] == 0 ? "2G" :
		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
		    iqk_info->iqk_ch[path],
		    iqk_info->iqk_bw[path] == 0 ? "20M" :
		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
	/* 0x35c[11:10] selects whether SYN1 feeds path 2 (DBCC related). */
	if (reg_35c == 0x01)
		iqk_info->syn1to2 = 0x1;
	else
		iqk_info->syn1to2 = 0x0;

	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852A_IQK_VER);
	/* Each path owns a 16-bit field in R_IQKCH: band/bw/channel. */
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x000f << (path * 16),
			       (u8)iqk_info->iqk_band[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x00f0 << (path * 16),
			       (u8)iqk_info->iqk_bw[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0xff00 << (path * 16),
			       (u8)iqk_info->iqk_ch[path]);
	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x000000ff, RTW8852A_NCTL_VER);
}

/* Thin wrapper kicking off the per-path IQK sequence. */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
	_iqk_by_path(rtwdev, phy_idx, path, chanctx_idx);
}

/* Restore PHY/RF registers touched by the IQK flow on @path: write back
 * the narrowband CFIR results, reset the NCTL/KIP blocks to their idle
 * configuration, and return the RF front end to RX mode.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_txcfir[path]);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
			       iqk_info->nb_rxcfir[path]);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_MDPK_RX_DCK, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
	rtw89_phy_write32_clr(rtwdev, R_KPATH_CFG, MASKDWORD);
	rtw89_phy_write32_clr(rtwdev, R_GAPK, B_GAPK_ADR);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD,
			       0x10010000);
	rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_MAP + (path << 8), MASKDWORD,
			       0xe4e4e4e4);
	rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
	rtw89_phy_write32_clr(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD,
			       0x00000002);
	rtw89_write_rf(rtwdev, path,
RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x0);
	/* NOTE(review): RR_LUTWE LOK is cleared twice here - appears in the
	 * vendor sequence; kept as-is.
	 */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
	rtw89_write_rf(rtwdev, path, RR_TXRSV, RR_TXRSV_GAPK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BIAS, RR_BIAS_GAPK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
}

/* Restore AFE/BB registers after IQK via the path-appropriate parser
 * table (DBCC per-path tables or the combined non-DBCC table).
 */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_rfk_tbl *tbl;

	switch (_kpath(rtwdev, phy_idx)) {
	case RF_A:
		tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
		break;
	case RF_B:
		tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;
		break;
	default:
		tbl = &rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
		break;
	}
	rtw89_rfk_parser(rtwdev, tbl);
}

/* Pre-configure NCTL/KIP registers and select the IQC coefficient table
 * slot for @path before running IQK. In DBCC mode the slot follows the
 * path number; otherwise the stored per-path table index is used.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 idx = iqk_info->iqk_table_idx[path];

	if (rtwdev->dbcc_en) {
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_IQC, path & 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, path & 0x1);
	} else {
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
				       B_COEF_SEL_IQC, idx);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       B_CFIR_LUT_G2, idx);
	}
	/* Hold RF in reset while the NCTL engine is (re)configured. */
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_clr(rtwdev, R_NCTL_RW, MASKDWORD);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, MASKDWORD, 0x00200000);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
	rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD);
}

/* Apply the MAC/BB setup parser table for IQK, selected by kpath. */
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	const struct rtw89_rfk_tbl *tbl;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
	switch (_kpath(rtwdev, phy_idx)) {
	case RF_A:
		tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
		break;
	case RF_B:
		tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
		break;
	default:
		tbl = &rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
		break;
	}
	rtw89_rfk_parser(rtwdev, tbl);
}

/* Run the whole IQK flow for one path in DBCC mode (path 0 -> PHY0,
 * path 1 -> PHY1): channel info, MAC/BB setup, preset, calibrate, restore.
 */
static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path,
		      enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 phy_idx = 0x0;

	iqk_info->iqk_times++;

	if (path == 0x0)
		phy_idx = RTW89_PHY_0;
	else
		phy_idx = RTW89_PHY_1;

	_iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
}

/* RC calibration on @path: trigger RCK, poll for completion (RF 0x1c[3]),
 * latch the resulting CA code back into RR_RCKC, set the ADC offset and
 * pulse the RF clock enable; restores RR_RSV1 afterwards.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5, rck_val = 0;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
				       false, rtwdev, path, 0x1c, BIT(3));
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);

	/* RCK_ADC_OFFSET */
	rtw89_write_rf(rtwdev, path, RR_RCKO, RR_RCKO_OFF, 0x4);

	rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x1);
	rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RCK] RF 0x1b / 0x1c / 0x1d = 0x%x / 0x%x / 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK),
		    rtw89_read_rf(rtwdev, path, RR_RCKO, RFREG_MASK));
}

/* One-time initialization of the IQK bookkeeping: clears the status
 * register, the per-channel/per-path fail flags and the table indices.
 * Subsequent calls after the first are no-ops (beyond the register clear).
 */
static void _iqk_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 ch, path;

	rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
	if (iqk_info->is_iqk_init)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	iqk_info->is_iqk_init = true;
	iqk_info->is_nbiqk = false;
	iqk_info->iqk_fft_en = false;
	iqk_info->iqk_sram_en = false;
	iqk_info->iqk_cfir_en = false;
	iqk_info->iqk_xym_en = false;
	iqk_info->iqk_times = 0x0;

	for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
		iqk_info->iqk_channel[ch] = 0x0;
		for (path = 0; path < RTW8852A_IQK_SS; path++) {
			iqk_info->lok_cor_fail[ch][path] = false;
			iqk_info->lok_fin_fail[ch][path] = false;
			iqk_info->iqk_tx_fail[ch][path] = false;
			iqk_info->iqk_rx_fail[ch][path] = false;
			iqk_info->iqk_mcc_ch[ch][path] = 0x0;
			iqk_info->iqk_table_idx[path] = 0x0;
		}
	}
}

/* Execute one complete IQK run on @path: back up BB/RF registers, do the
 * channel-info capture, setup, calibration and restore, then put the
 * backed-up registers back. Bracketed by BTC one-shot notifications.
 * NOTE(review): @force is currently unused by the body.
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852A_IQK_VER;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
	_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
	_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}

/* Dispatch IQK over the path(s) covered by @phy_idx (A, B, or both). */
static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		 bool force, enum rtw89_chanctx_idx chanctx_idx)
{
	switch (_kpath(rtwdev, phy_idx)) {
	case RF_A:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
		break;
	case RF_B:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
		break;
	case RF_AB:
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
		break;
	default:
		break;
	}
}

#define RXDCK_VER_8852A 0xe

/* RX DC offset calibration on @path, driven either by the AFE or the RFC.
 * When @is_afe, the ADC/RXDC blocks are first switched into the AFE
 * measurement configuration (restored by the caller/flow elsewhere).
 */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, bool is_afe,
			enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
	u32 ori_val;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ==== S%d RX DCK (by %s)====\n", path,
		    is_afe ? "AFE" : "RFC");

	ori_val = rtw89_phy_read32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD);

	if (is_afe) {
		rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
		rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
		rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
				       B_P0_RXCK_VAL, 0x3);
		rtw89_phy_write32_set(rtwdev, R_S0_RXDC2 + (path << 13),
				      B_S0_RXDC2_MEN);
		rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2 + (path << 13),
				       B_S0_RXDC2_AVG, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
		rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK);
		rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
		rtw89_phy_write32_set(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
		rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_CRXBB, 0x1);
	}

	rtw89_write_rf(rtwdev, path, RR_DCK2, RR_DCK2_CYCLE, 0x3f);
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_SEL, is_afe);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_START);

	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path,
RR_DCK, RR_DCK_LV, 0x1); fsleep(600); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_STOP); rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); if (is_afe) { rtw89_phy_write32_clr(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG); rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD, ori_val); } } static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe, enum rtw89_chanctx_idx chanctx_idx) { u8 path, kpath, dck_tune; u32 rf_reg5; u32 addr; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n", RXDCK_VER_8852A, rtwdev->hal.cv); kpath = _kpath(rtwdev, phy); for (path = 0; path < 2; path++) { if (!(kpath & BIT(path))) continue; rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); dck_tune = (u8)rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE); if (rtwdev->is_tssi_mode[path]) { addr = 0x5818 + (path << 13); /* TSSI pause */ rtw89_phy_write32_set(rtwdev, addr, BIT(30)); } rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0); rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); _set_rx_dck(rtwdev, phy, path, is_afe, chanctx_idx); rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune); rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); if (rtwdev->is_tssi_mode[path]) { addr = 0x5818 + (path << 13); /* TSSI resume */ rtw89_phy_write32_clr(rtwdev, addr, BIT(30)); } } } #define RTW8852A_RF_REL_VERSION 34 #define RTW8852A_DPK_VER 0x10 #define RTW8852A_DPK_TH_AVG_NUM 4 #define RTW8852A_DPK_RF_PATH 2 #define RTW8852A_DPK_KIP_REG_NUM 2 enum rtw8852a_dpk_id { LBK_RXIQK = 0x06, SYNC = 0x10, MDPK_IDL = 0x11, MDPK_MPA = 0x12, GAIN_LOSS = 0x13, GAIN_CAL = 0x14, }; static void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb) { if (is_bybb) rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); else rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); } 
static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off); static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, u32 *reg, u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path) { u8 i; for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) { reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", reg[i] + (path << 8), reg_bkup[path][i]); } } static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg, u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path) { u8 i; for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) { rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD, reg_bkup[path][i]); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n", reg[i] + (path << 8), reg_bkup[path][i]); } } static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, enum rtw8852a_dpk_id id, enum rtw89_chanctx_idx chanctx_idx) { u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx); u16 dpk_cmd = 0x0; u32 val; int ret; dpk_cmd = (u16)((id << 8) | (0x19 + (path << 4))); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_START); rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN); ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0); rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_STOP); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot for %s = 0x%x (ret=%d)\n", id == 0x06 ? "LBK_RXIQK" : id == 0x10 ? "SYNC" : id == 0x11 ? "MDPK_IDL" : id == 0x12 ? "MDPK_MPA" : id == 0x13 ? 
"GAIN_LOSS" : "PWR_CAL", dpk_cmd, ret); if (ret) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n"); return 1; } return 0; } static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3); _set_rx_dck(rtwdev, phy, path, false, chanctx_idx); } static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 kidx = dpk->cur_idx[path]; dpk->bp[path][kidx].band = chan->band_type; dpk->bp[path][kidx].ch = chan->channel; dpk->bp[path][kidx].bw = chan->band_width; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", path, dpk->cur_idx[path], phy, rtwdev->is_tssi_mode[path] ? "on" : "off", rtwdev->dbcc_en ? "on" : "off", dpk->bp[path][kidx].band == 0 ? "2G" : dpk->bp[path][kidx].band == 1 ? "5G" : "6G", dpk->bp[path][kidx].ch, dpk->bp[path][kidx].bw == 0 ? "20M" : dpk->bp[path][kidx].bw == 1 ? 
"40M" : "80M"); } static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kpath) { switch (kpath) { case RF_A: rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl); if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x0) rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl); break; case RF_B: rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl); if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x1) rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl); break; case RF_AB: rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl); break; default: break; } rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath); } static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kpath) { switch (kpath) { case RF_A: rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl); break; case RF_B: rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl); break; case RF_AB: rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl); break; default: break; } rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath); } static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_pause) { rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), B_P0_TSSI_TRK_EN, is_pause); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, is_pause ? 
"pause" : "resume"); } static void _dpk_kip_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) { rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x00093f3f); rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a); rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08); rtw89_phy_write32_mask(rtwdev, R_DPK_CFG, B_DPK_CFG_IDX, 0x2); rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path); /*subpage_id*/ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8) + (kidx << 2), MASKDWORD, 0x003f2e2e); rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), MASKDWORD, 0x005b5b5b); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP setting for S%d[%d]!!\n", path, kidx); } static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) { rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD); rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000); rtw89_phy_write32_clr(rtwdev, R_KIP_CLK, MASKDWORD); if (rtwdev->hal.cv > CHIP_CBV) rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), BIT(15), 0x1); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); } static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { u8 cur_rxbb; cur_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl); rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1); rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x2); rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK)); rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13); rtw89_write_rf(rtwdev, path, RR_RXKPLL, 
RR_RXKPLL_POW, 0x0); rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1); fsleep(70); rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTL, 0x1f); if (cur_rxbb <= 0xa) rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x3); else if (cur_rxbb <= 0x10 && cur_rxbb >= 0xb) rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x1); else rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x0); rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK, chanctx_idx); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD)); rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0); rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x0); rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); /*POW IQKPLL*/ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_DPK); rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl); } static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; dpk->bp[path][kidx].ther_dpk = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n", dpk->bp[path][kidx].ther_dpk); } static u8 _dpk_set_tx_pwr(struct rtw89_dev *rtwdev, u8 gain, enum rtw89_rf_path path) { u8 txagc_ori = 0x38; rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc_ori); return txagc_ori; } static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain, enum rtw89_rf_path path, u8 kidx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x280b); rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0); rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4); rtw89_write_rf(rtwdev, path, RR_MIXER, RR_MIXER_GN, 0x0); } else { rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x282e); 
rtw89_write_rf(rtwdev, path, RR_BIASA2, RR_BIASA2_LB, 0x7); rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW, 0x3); rtw89_write_rf(rtwdev, path, RR_RXA, RR_RXA_DPK, 0x3); } rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1); rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1); rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RF 0x0/0x1/0x1a = 0x%x/ 0x%x/ 0x%x\n", rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK), rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK), rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK)); } static void _dpk_manual_txcfir(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_manual) { u8 tmp_pad, tmp_txbb; if (is_manual) { rtw89_phy_write32_mask(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN, 0x1); tmp_pad = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_PAD); rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8), B_RFGAIN_PAD, tmp_pad); tmp_txbb = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_BB); rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8), B_RFGAIN_TXBB, tmp_txbb); rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_CFIR, 0x1); rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_CFIR); rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), BIT(1), 0x1); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAD_man / TXBB_man = 0x%x / 0x%x\n", tmp_pad, tmp_txbb); } else { rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] disable manual switch TXCFIR\n"); } } static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bypass) { if (is_bypass) { rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2, 0x1); rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS, 0x1); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path, rtw89_phy_read32_mask(rtwdev, R_RXIQC + 
(path << 8), MASKDWORD)); } else { rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2); rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path, rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD)); } } static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F); else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); else rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M"); } static void _dpk_table_select(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx, u8 gain) { u8 val; val = 0x80 + kidx * 0x20 + gain * 0x10; rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx, gain, val); } static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) { #define DPK_SYNC_TH_DC_I 200 #define DPK_SYNC_TH_DC_Q 200 #define DPK_SYNC_TH_CORR 170 struct rtw89_dpk_info *dpk = &rtwdev->dpk; u16 dc_i, dc_q; u8 corr_val, corr_idx; rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL); corr_idx = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); corr_val = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx, corr_val); dpk->corr_idx[path][0] = corr_idx; dpk->corr_val[path][0] = corr_val; rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); dc_i = 
(u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); dc_q = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); dc_i = abs(sign_extend32(dc_i, 11)); dc_q = abs(sign_extend32(dc_q, 11)); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n", path, dc_i, dc_q); dpk->dc_i[path][0] = dc_i; dpk->dc_q[path][0] = dc_q; if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || corr_val < DPK_SYNC_TH_CORR) return true; else return false; } static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kidx, enum rtw89_chanctx_idx chanctx_idx) { _dpk_tpg_sel(rtwdev, path, kidx); _dpk_one_shot(rtwdev, phy, path, SYNC, chanctx_idx); return _dpk_sync_check(rtwdev, path); /*1= fail*/ } static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) { u16 dgain = 0x0; rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL); rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR); dgain = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain); return dgain; } static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain) { s8 offset; if (dgain >= 0x783) offset = 0x6; else if (dgain <= 0x782 && dgain >= 0x551) offset = 0x3; else if (dgain <= 0x550 && dgain >= 0x3c4) offset = 0x0; else if (dgain <= 0x3c3 && dgain >= 0x2aa) offset = -3; else if (dgain <= 0x2a9 && dgain >= 0x1e3) offset = -6; else if (dgain <= 0x1e2 && dgain >= 0x156) offset = -9; else if (dgain <= 0x155) offset = -12; else offset = 0x0; return offset; } static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) { rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); } static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kidx, enum rtw89_chanctx_idx chanctx_idx) { 
_dpk_table_select(rtwdev, path, kidx, 1); _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS, chanctx_idx); } #define DPK_TXAGC_LOWER 0x2e #define DPK_TXAGC_UPPER 0x3f #define DPK_TXAGC_INVAL 0xff static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, s8 gain_offset) { u8 txagc; txagc = (u8)rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK); if (txagc - gain_offset < DPK_TXAGC_LOWER) txagc = DPK_TXAGC_LOWER; else if (txagc - gain_offset > DPK_TXAGC_UPPER) txagc = DPK_TXAGC_UPPER; else txagc = txagc - gain_offset; rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n", gain_offset, txagc); return txagc; } enum dpk_agc_step { DPK_AGC_STEP_SYNC_DGAIN, DPK_AGC_STEP_GAIN_ADJ, DPK_AGC_STEP_GAIN_LOSS_IDX, DPK_AGC_STEP_GL_GT_CRITERION, DPK_AGC_STEP_GL_LT_CRITERION, DPK_AGC_STEP_SET_TX_GAIN, }; static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check) { u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; u8 i; rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_pas_read_defs_tbl); if (is_check) { rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); val1_i = abs(sign_extend32(val1_i, 11)); val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); val1_q = abs(sign_extend32(val1_q, 11)); rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); val2_i = abs(sign_extend32(val2_i, 11)); val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); val2_q = abs(sign_extend32(val2_q, 11)); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", phy_div(val1_i * val1_i + val1_q * val1_q, val2_i * val2_i + val2_q * val2_q)); } else { for (i = 0; i < 32; i++) { rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i, rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); } } if 
((val1_i * val1_i + val1_q * val1_q) >= ((val2_i * val2_i + val2_q * val2_q) * 8 / 5)) return 1; else return 0; } static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kidx, u8 init_txagc, bool loss_only, enum rtw89_chanctx_idx chanctx_idx) { #define DPK_AGC_ADJ_LMT 6 #define DPK_DGAIN_UPPER 1922 #define DPK_DGAIN_LOWER 342 #define DPK_RXBB_UPPER 0x1f #define DPK_RXBB_LOWER 0 #define DPK_GL_CRIT 7 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0; u8 agc_cnt = 0; bool limited_rxbb = false; s8 offset = 0; u16 dgain = 0; u8 step = DPK_AGC_STEP_SYNC_DGAIN; bool goout = false; tmp_txagc = init_txagc; do { switch (step) { case DPK_AGC_STEP_SYNC_DGAIN: if (_dpk_sync(rtwdev, phy, path, kidx, chanctx_idx)) { tmp_txagc = DPK_TXAGC_INVAL; goout = true; break; } dgain = _dpk_dgain_read(rtwdev); if (loss_only || limited_rxbb) step = DPK_AGC_STEP_GAIN_LOSS_IDX; else step = DPK_AGC_STEP_GAIN_ADJ; break; case DPK_AGC_STEP_GAIN_ADJ: tmp_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); offset = _dpk_dgain_mapping(rtwdev, dgain); if (tmp_rxbb + offset > DPK_RXBB_UPPER) { tmp_rxbb = DPK_RXBB_UPPER; limited_rxbb = true; } else if (tmp_rxbb + offset < DPK_RXBB_LOWER) { tmp_rxbb = DPK_RXBB_LOWER; limited_rxbb = true; } else { tmp_rxbb = tmp_rxbb + offset; } rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb); if (offset != 0 || agc_cnt == 0) { if (chan->band_width < RTW89_CHANNEL_WIDTH_80) _dpk_bypass_rxcfir(rtwdev, path, true); else _dpk_lbk_rxiqk(rtwdev, phy, path, chanctx_idx); } if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER) step = DPK_AGC_STEP_SYNC_DGAIN; else step = DPK_AGC_STEP_GAIN_LOSS_IDX; agc_cnt++; break; case DPK_AGC_STEP_GAIN_LOSS_IDX: _dpk_gainloss(rtwdev, phy, path, kidx, chanctx_idx); tmp_gl_idx = _dpk_gainloss_read(rtwdev); if ((tmp_gl_idx == 0 
&& _dpk_pas_read(rtwdev, true)) || tmp_gl_idx > DPK_GL_CRIT) step = DPK_AGC_STEP_GL_GT_CRITERION; else if (tmp_gl_idx == 0) step = DPK_AGC_STEP_GL_LT_CRITERION; else step = DPK_AGC_STEP_SET_TX_GAIN; break; case DPK_AGC_STEP_GL_GT_CRITERION: if (tmp_txagc == DPK_TXAGC_LOWER) { goout = true; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n"); } else { tmp_txagc = _dpk_set_offset(rtwdev, path, 3); } step = DPK_AGC_STEP_GAIN_LOSS_IDX; agc_cnt++; break; case DPK_AGC_STEP_GL_LT_CRITERION: if (tmp_txagc == DPK_TXAGC_UPPER) { goout = true; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n"); } else { tmp_txagc = _dpk_set_offset(rtwdev, path, -2); } step = DPK_AGC_STEP_GAIN_LOSS_IDX; agc_cnt++; break; case DPK_AGC_STEP_SET_TX_GAIN: tmp_txagc = _dpk_set_offset(rtwdev, path, tmp_gl_idx); goout = true; agc_cnt++; break; default: goout = true; break; } } while (!goout && (agc_cnt < DPK_AGC_ADJ_LMT)); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb); return tmp_txagc; } static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) { switch (order) { case 0: rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3); rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1); break; case 1: rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN); rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN); break; case 2: rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN); rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN); break; default: rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order); break; } rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set MDPD order to 0x%x for IDL\n", order); } static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum 
rtw89_phy_idx phy, enum rtw89_rf_path path, u8 kidx, u8 gain, enum rtw89_chanctx_idx chanctx_idx) { _dpk_set_mdpd_para(rtwdev, 0x0); _dpk_table_select(rtwdev, path, kidx, 1); _dpk_one_shot(rtwdev, phy, path, MDPK_IDL, chanctx_idx); } static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u16 pwsf = 0x78; u8 gs = 0x5b; rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc, pwsf, gs); dpk->bp[path][kidx].txagc_dpk = txagc; rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8), 0x3F << ((gain << 3) + (kidx << 4)), txagc); dpk->bp[path][kidx].pwsf = pwsf; rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), 0x1FF << (gain << 4), pwsf); rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD); dpk->bp[path][kidx].gs = gs; rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), MASKDWORD, 0x065b5b5b); rtw89_phy_write32_clr(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD); rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL); } static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); bool is_reload = false; u8 idx, cur_band, cur_ch; cur_band = chan->band_type; cur_ch = chan->channel; for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) { if (cur_band != dpk->bp[path][idx].band || cur_ch != dpk->bp[path][idx].ch) continue; rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, idx); dpk->cur_idx[path] = idx; is_reload = true; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] reload S%d[%d] success\n", path, idx); } return 
is_reload; } static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path, u8 gain, enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 txagc = 0, kidx = dpk->cur_idx[path]; bool is_fail = false; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx); _rf_direct_cntrl(rtwdev, path, false); txagc = _dpk_set_tx_pwr(rtwdev, gain, path); _dpk_rf_setting(rtwdev, gain, path, kidx); _dpk_rx_dck(rtwdev, phy, path, chanctx_idx); _dpk_kip_setting(rtwdev, path, kidx); _dpk_manual_txcfir(rtwdev, path, true); txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx); if (txagc == DPK_TXAGC_INVAL) is_fail = true; _dpk_get_thermal(rtwdev, kidx, path); _dpk_idl_mpa(rtwdev, phy, path, kidx, gain, chanctx_idx); rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); _dpk_fill_result(rtwdev, path, kidx, gain, txagc); _dpk_manual_txcfir(rtwdev, path, false); if (!is_fail) dpk->bp[path][kidx].path_ok = true; else dpk->bp[path][kidx].path_ok = false; rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx, is_fail ? 
"Check" : "Success"); return is_fail; } static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, enum rtw89_phy_idx phy, u8 kpath, enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u32 backup_bb_val[BACKUP_BB_REGS_NR]; u32 backup_rf_val[RTW8852A_DPK_RF_PATH][BACKUP_RF_REGS_NR]; u32 kip_bkup[RTW8852A_DPK_RF_PATH][RTW8852A_DPK_KIP_REG_NUM] = {{0}}; u32 kip_reg[] = {R_RXIQC, R_IQK_RES}; u8 path; bool is_fail = true, reloaded[RTW8852A_DPK_RF_PATH] = {false}; if (dpk->is_dpk_reload_en) { for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { if (!(kpath & BIT(path))) continue; reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx); if (!reloaded[path] && dpk->bp[path][0].ch != 0) dpk->cur_idx[path] = !dpk->cur_idx[path]; else _dpk_onoff(rtwdev, path, false); } } else { for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) dpk->cur_idx[path] = 0; } if ((kpath == RF_A && reloaded[RF_PATH_A]) || (kpath == RF_B && reloaded[RF_PATH_B]) || (kpath == RF_AB && reloaded[RF_PATH_A] && reloaded[RF_PATH_B])) return; _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]); for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { if (!(kpath & BIT(path)) || reloaded[path]) continue; if (rtwdev->is_tssi_mode[path]) _dpk_tssi_pause(rtwdev, path, true); _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path); _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path); _dpk_information(rtwdev, phy, path, chanctx_idx); } _dpk_bb_afe_setting(rtwdev, phy, path, kpath); for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { if (!(kpath & BIT(path)) || reloaded[path]) continue; is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx); _dpk_onoff(rtwdev, path, is_fail); } _dpk_bb_afe_restore(rtwdev, phy, path, kpath); _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]); for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { if (!(kpath & BIT(path)) || reloaded[path]) continue; _dpk_kip_restore(rtwdev, path); _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path); 
_rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path); if (rtwdev->is_tssi_mode[path]) _dpk_tssi_pause(rtwdev, path, false); } } static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_chanctx_idx chanctx_idx) { struct rtw89_fem_info *fem = &rtwdev->fem; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n"); return true; } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n"); return true; } return false; } static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) { u8 path, kpath; kpath = _kpath(rtwdev, phy); for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) { if (kpath & BIT(path)) _dpk_onoff(rtwdev, path, true); } } static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force, enum rtw89_chanctx_idx chanctx_idx) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", RTW8852A_DPK_VER, rtwdev->hal.cv, RTW8852A_RF_REL_VERSION); if (_dpk_bypass_check(rtwdev, phy, chanctx_idx)) _dpk_force_bypass(rtwdev, phy); else _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx); } static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off) { struct rtw89_dpk_info *dpk = &rtwdev->dpk; u8 val, kidx = dpk->cur_idx[path]; val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok; rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), MASKBYTE3, 0x6 | val); rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, kidx, dpk->is_dpk_enable && !off ? 
		    "enable" : "disable");
}

/*
 * DPK thermal tracking: for each path, derive a thermal delta relative to the
 * temperature recorded at calibration time, fold in the TXAGC offset when
 * TSSI mode is active, and program the resulting power scaling factors (pwsf)
 * into the DPD boundary registers unless tracking is disabled.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path, kidx;
	u8 trk_idx = 0, txagc_rf = 0;
	s8 txagc_bb = 0, txagc_bb_tp = 0, ini_diff = 0, txagc_ofst = 0;
	u16 pwsf[2];
	u8 cur_ther;
	s8 delta_ther[2] = {0};

	for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* only track when a calibration exists and the sensor reads */
		if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* band-dependent scaling of the thermal delta */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = (u8)rtw89_phy_read32_mask(rtwdev,
						     R_TXAGC_BB + (path << 13),
						     RR_MODOPT_M_TXPWR);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = (u8)rtw89_read_rf(rtwdev, path, RR_TXA,
						    RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb = (s8)rtw89_phy_read32_mask(rtwdev,
							     R_TXAGC_BB + (path << 13),
							     MASKBYTE2);
			txagc_bb_tp = (u8)rtw89_phy_read32_mask(rtwdev,
								R_TXAGC_TP + (path << 13),
								B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst = (s8)rtw89_phy_read32_mask(rtwdev,
							       R_TXAGC_BB + (path << 13),
							       MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);

			/* BIT(15) set in R_DPD_COM: ignore the TXAGC offset */
			if (rtw89_phy_read32_mask(rtwdev,
						  R_DPD_COM + (path << 8),
						  BIT(15)) == 0x1)
				txagc_ofst = 0;

			if (txagc_rf != 0 && cur_ther != 0)
				ini_diff = txagc_ofst + delta_ther[path];

			if (rtw89_phy_read32_mask(rtwdev,
						  R_P0_TXDPD + (path << 13),
						  B_P0_TXDPD) ==
			    0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
					  txagc_bb + ini_diff +
					  tssi_info->extra_ofst[path];
				pwsf[1] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
					  txagc_bb + ini_diff +
					  tssi_info->extra_ofst[path];
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff +
					  tssi_info->extra_ofst[path];
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff +
					  tssi_info->extra_ofst[path];
			}
		} else {
			/* non-TSSI mode: pwsf is a 9-bit field */
			pwsf[0] = (dpk->bp[path][kidx].pwsf +
				   delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf +
				   delta_ther[path]) & 0x1ff;
		}

		if (rtw89_phy_read32_mask(rtwdev, R_DPK_TRK,
					  B_DPK_TRK_DIS) == 0x0 &&
		    txagc_rf != 0) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       0x000001FF, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       0x01FF0000, pwsf[1]);
		}
	}
}

/* Select the RF TX path (2G or 5G power switch) for TSSI on @path. */
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path,
			     const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;

	if (band == RTW89_BAND_2G)
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
	else
		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}

/* Apply the TSSI system register tables (common + band-specific). */
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
	rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
				 &rtw8852a_tssi_sys_defs_2g_tbl,
				 &rtw8852a_tssi_sys_defs_5g_tbl);
}

/* Initialize the BB TX-power-control tables (per path, per band). */
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path,
				    const struct rtw89_chan *chan)
{
	enum rtw89_band band = chan->band_type;

	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl);
	rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl);
}

/* HE trigger-based TX power control table (per path). */
static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
				 &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
}

/* TSSI DCK register table (per path). */
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dck_defs_a_tbl,
				 &rtw8852a_tssi_dck_defs_b_tbl);
}

/*
 * Build and program the 64-entry thermal-offset table for @path. The table
 * is derived from the per-band delta-swing tables: entries 0..31 hold the
 * (negated) "down" deltas and entries 63..32 the "up" deltas; four s8
 * entries are packed per 32-bit register write (little-endian, via
 * __get_val). A thermal reading of 0xff means "no efuse value"; a neutral
 * midpoint (32) and a zeroed table are programmed instead.
 */
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path,
				 const struct rtw89_chan *chan)
{
	/* pack 4 consecutive s8 entries of @ptr (starting at @idx) into a u32 */
#define __get_val(ptr, idx)			\
({						\
	s8 *__ptr = (ptr);			\
	u8 __idx = (idx), __i, __v;		\
	u32 __val = 0;				\
	for (__i = 0; __i < 4; __i++) {		\
		__v = (__ptr[__idx + __i]);	\
		__val |= (__v << (8 * __i));	\
	}					\
	__val;					\
})
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	/* pick the delta-swing tables for the current subband */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_p;
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_n;
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[0];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[0];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[0];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[1];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[1];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[1];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[2];
		thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[2];
		thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[2];
		thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* no efuse thermal value: neutral midpoint, zero table */
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, 0x0);
			}
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* lower half: negated "down" deltas, clamped at end */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* upper half filled backwards with "up" deltas */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = __get_val(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}

		/* pulse the "table ready" strobe */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
	} else {
		/* path B: same procedure against the P1/THOF registers */
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}
		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = __get_val(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}

		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef __get_val
}

/* TSSI DAC gain table (per path). */
static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				   enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_dac_gain_tbl_defs_a_tbl,
				 &rtw8852a_tssi_dac_gain_tbl_defs_b_tbl);
}

/* TSSI slope-calibration origin table (per path). */
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_cal_org_defs_a_tbl,
				 &rtw8852a_tssi_slope_cal_org_defs_b_tbl);
}

/* TSSI RF gap table (per path). */
static void _tssi_set_rf_gap_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_rf_gap_tbl_defs_a_tbl,
				 &rtw8852a_tssi_rf_gap_tbl_defs_b_tbl);
}

/* TSSI slope table (per path). */
static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_slope_defs_a_tbl,
				 &rtw8852a_tssi_slope_defs_b_tbl);
}

/* TSSI tracking table (per path). */
static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_track_defs_a_tbl,
				 &rtw8852a_tssi_track_defs_b_tbl);
}

/* TXAGC offset / moving-average table (per path). */
static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl,
				 &rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl);
}

/* Apply the per-subband TSSI PAK tables for @path. */
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	u8 subband =
		    chan->subband_type;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_2g_tbl,
					 &rtw8852a_tssi_pak_defs_b_2g_tbl);
		break;
	case RTW89_CH_5G_BAND_1:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_1_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_1_tbl);
		break;
	case RTW89_CH_5G_BAND_3:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_3_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_3_tbl);
		break;
	case RTW89_CH_5G_BAND_4:
		rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
					 &rtw8852a_tssi_pak_defs_a_5g_4_tbl,
					 &rtw8852a_tssi_pak_defs_b_5g_4_tbl);
		break;
	}
}

/*
 * Enable TSSI on both paths: program tracking/averaging tables, latch the
 * current thermal reading as the tracking baseline, and mark TSSI mode on.
 */
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		_tssi_set_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
					 &rtw8852a_tssi_enable_defs_a_tbl,
					 &rtw8852a_tssi_enable_defs_b_tbl);

		tssi_info->base_thermal[i] =
			ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
		rtwdev->is_tssi_mode[i] = true;
	}
}

/* Disable TSSI on both paths and clear the mode flags. */
static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);
	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}

/* Map a 2G CCK channel number to its TSSI DE group index. */
static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 13:
		return 4;
	case 14:
		return 5;
	}

	return 0;
}

/*
 * "Extra" groups mark channels that sit between two calibration groups;
 * their DE is the average of the two neighbouring group values.
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)

/* Map an OFDM/MCS channel number to its TSSI DE group (or "extra" group). */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}

/* Map a channel number to its TSSI trim group index. */
static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ...
	     177:
		return 7;
	}

	return 0;
}

/*
 * Look up the OFDM/MCS DE (delta of efuse) for @path on the current channel;
 * channels in an "extra" group use the average of the two neighbouring
 * groups' values.
 */
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path,
			    const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u32 gidx, gidx_1st, gidx_2nd;
	s8 de_1st = 0;
	s8 de_2nd = 0;
	s8 val;

	gidx = _tssi_get_ofdm_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
		    path, gidx);

	if (IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}

/* Look up the TSSI trim DE for @path, averaging across trim-group edges. */
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path,
				 const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u32 tgidx, tgidx_1st, tgidx_2nd;
	s8 tde_1st = 0;
	s8 tde_2nd = 0;
	s8 val;

	tgidx = _tssi_get_trim_group(rtwdev, ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}

/*
 * Program the efuse-derived DE (CCK and OFDM/MCS, plus trim) into the
 * per-path, per-bandwidth TSSI DE registers (bits [21:12]).
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy,
				  const struct rtw89_chan *chan)
{
#define __DE_MASK 0x003ff000
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
	static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
	static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
	static const u32 r_mcs_40m[RF_PATH_NUM_8852A] = {0x5840, 0x7840};
	static const u32 r_mcs_80m[RF_PATH_NUM_8852A] = {0x5848, 0x7848};
	static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
	static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
	static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
	u8 ch = chan->channel;
	u8 i, gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = 0; i < RF_PATH_NUM_8852A; i++) {
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, r_cck_long[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_cck_short[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    r_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
						  __DE_MASK));

		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, r_mcs_20m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_40m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_80m_80m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_5m[i], __DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, r_mcs_10m[i], __DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    r_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, r_mcs_20m[i],
						  __DE_MASK));
	}
#undef __DE_MASK
}

/*
 * TSSI thermal tracking: convert the thermal delta since _tssi_enable()
 * into a TX gain offset (1.5 units per degree) split into an integer part
 * (clamped to [-16, 15]) and a 3-bit fractional part that indexes the gain
 * scale table.
 */
static void _tssi_track(struct rtw89_dev *rtwdev)
{
	static const u32 tx_gain_scale_table[] = {
		0x400, 0x40e, 0x41d, 0x427, 0x43c, 0x44c, 0x45c, 0x46c,
		0x400, 0x39d, 0x3ab, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f1
	};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 path;
	u8 cur_ther;
	s32 delta_ther = 0, gain_offset_int, gain_offset_float;
	s8 gain_offset;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] %s:\n",
		    __func__);

	if (!rtwdev->is_tssi_mode[RF_PATH_A])
		return;
	if (!rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	for (path = RF_PATH_A; path < RF_PATH_NUM_8852A; path++) {
		if (!tssi_info->tssi_tracking_check[path]) {
			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "[TSSI][TRK] return!!!\n");
			continue;
		}

		cur_ther = (u8)rtw89_phy_read32_mask(rtwdev,
						     R_TSSI_THER + (path << 13),
						     B_TSSI_THER);

		if (cur_ther == 0 || tssi_info->base_thermal[path] == 0)
			continue;

		delta_ther = cur_ther - tssi_info->base_thermal[path];

		gain_offset = (s8)delta_ther * 15 / 10;

		tssi_info->extra_ofst[path] = gain_offset;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRK] base_thermal=%d gain_offset=0x%x path=%d\n",
			    tssi_info->base_thermal[path], gain_offset, path);

		gain_offset_int = gain_offset >> 3;
		gain_offset_float = gain_offset & 7;

		/* integer part is a 5-bit signed hardware field */
		if (gain_offset_int > 15)
			gain_offset_int = 15;
		else if (gain_offset_int < -16)
			gain_offset_int = -16;

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN + (path << 13),
				       B_DPD_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_ADDR + (path << 13),
				       B_DPD_OFT_ADDR, gain_offset_int);

		rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
				       B_TXGAIN_SCALE_OFT,
				       tx_gain_scale_table[gain_offset_float]);
	}
}

/*
 * Enable TSSI tracking only for high-power 2G operation (> 18 dBm limit);
 * otherwise apply the tracking-off table and clear the extra offsets.
 */
static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     const struct rtw89_chan *chan)
{
	struct
rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel, ch_tmp;
	u8 bw = chan->band_width;
	u8 band = chan->band_type;
	u8 subband = chan->subband_type;
	s8 power;
	s32 xdbm;

	/* use the centre 20 MHz channel for the limit lookup */
	if (bw == RTW89_CHANNEL_WIDTH_40)
		ch_tmp = ch - 2;
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		ch_tmp = ch - 6;
	else
		ch_tmp = ch;

	power = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, RTW89_1TX,
					   RTW89_RS_MCS, RTW89_NONBF, ch_tmp);

	/* limit is in 0.25 dB units; xdbm is in 0.01 dBm units */
	xdbm = power * 100 / 4;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d xdbm=%d\n",
		    __func__, phy, xdbm);

	if (xdbm > 1800 && subband == RTW89_CH_2G) {
		tssi_info->tssi_tracking_check[RF_PATH_A] = true;
		tssi_info->tssi_tracking_check[RF_PATH_B] = true;
	} else {
		rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_tracking_defs_tbl);
		tssi_info->extra_ofst[RF_PATH_A] = 0;
		tssi_info->extra_ofst[RF_PATH_B] = 0;
		tssi_info->tssi_tracking_check[RF_PATH_A] = false;
		tssi_info->tssi_tracking_check[RF_PATH_B] = false;
	}
}

/* Fire (or stop) a PMAC packet TX burst used to settle the TXAGC readback. */
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			u8 path, s16 pwr_dbm, u8 enable,
			const struct rtw89_chan *chan)
{
	rtw8852a_bb_set_plcp_tx(rtwdev);
	rtw8852a_bb_cfg_tx_path(rtwdev, path);
	rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
	rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy, chan);
}

/*
 * Capture the default TXAGC offsets by transmitting a short PMAC burst with
 * scheduler TX stopped, then reading back the per-path TXAGC registers.
 * Bracketed by BTC DPK start/stop notifications.
 */
static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	const struct rtw89_chip_info *mac_reg = rtwdev->chip;
	u8 ch = chan->channel, ch_tmp;
	u8 bw = chan->band_width;
	u8 band = chan->band_type;
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0, chanctx_idx);
	s8 power;
	s16 xdbm;
	u32 i, tx_counter = 0;

	if (bw == RTW89_CHANNEL_WIDTH_40)
		ch_tmp = ch - 2;
	else if (bw == RTW89_CHANNEL_WIDTH_80)
		ch_tmp = ch - 6;
	else
		ch_tmp = ch;

	power = rtw89_phy_read_txpwr_limit(rtwdev, band, RTW89_CHANNEL_WIDTH_20,
					   RTW89_1TX, RTW89_RS_OFDM,
					   RTW89_NONBF, ch_tmp);

	xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;

	/* cap at 6.8 dBm (TX power units of 0.5 dBm below) */
	if (xdbm > 1800)
		xdbm = 68;
	else
		xdbm = power * 2;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: phy=%d org_power=%d xdbm=%d\n",
		    __func__, phy, power, xdbm);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true, chan);
	mdelay(15);
	_tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false, chan);

	tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
		     tx_counter;

	/* 0xc000 / 0x0 readback means the offset latch is not valid yet */
	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_A] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_A] != 0x0)
				break;
		}
	}

	if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0xc000 &&
	    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0x0) {
		for (i = 0; i < 6; i++) {
			tssi_info->default_txagc_offset[RF_PATH_B] =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
						      MASKBYTE3);

			if (tssi_info->default_txagc_offset[RF_PATH_B] != 0x0)
				break;
		}
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] %s: tx counter=%d\n",
		    __func__, tx_counter);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] Backup R_TXAGC_BB=0x%x R_TXAGC_BB_S1=0x%x\n",
		    tssi_info->default_txagc_offset[RF_PATH_A],
		    tssi_info->default_txagc_offset[RF_PATH_B]);

	rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);

	rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}

/* Public entry: RCK calibration on both RF paths. */
void rtw8852a_rck(struct rtw89_dev *rtwdev)
{
	u8 path;

	for (path = 0; path < 2; path++)
		_rck(rtwdev, path);
}

/* Public entry: DAC calibration, bracketed by BTC DACK notifications. */
void rtw8852a_dack(struct rtw89_dev *rtwdev,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false, chanctx_idx);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}

/*
 * Public entry: IQ calibration. Scheduler TX is stopped and RX mode drained
 * before calibrating; DBCC uses the dual-PHY variant.
 */
void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		  enum rtw89_chanctx_idx chanctx_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	if (rtwdev->dbcc_en)
		_iqk_dbcc(rtwdev, phy_idx, chanctx_idx);
	else
		_iqk(rtwdev, phy_idx, false, chanctx_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}

/* Public entry: RX DCK (DC offset calibration), same TX-stop bracketing. */
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		     bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx, is_afe, chanctx_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}

/* Public entry: DPK calibration (enables DPK, disables result reload). */
void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		  enum rtw89_chanctx_idx chanctx_idx)
{
	u32 tx_en;
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	rtwdev->dpk.is_dpk_enable = true;
	rtwdev->dpk.is_dpk_reload_en = false;
	_dpk(rtwdev, phy_idx, false, chanctx_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}

/* Public entry: periodic DPK thermal tracking. */
void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}

/* Public entry: full TSSI bring-up for @phy on the current channel. */
void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
		    __func__, phy);

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
		_tssi_rf_setting(rtwdev, phy, i, chan);
		_tssi_set_sys(rtwdev, phy, chan);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i, chan);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i);
		_tssi_set_rf_gap_tbl(rtwdev, phy, i);
		_tssi_set_slope(rtwdev, phy, i);
		_tssi_pak(rtwdev, phy, i, chan);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);
	_tssi_high_power(rtwdev, phy, chan);
	_tssi_pre_tx(rtwdev, phy, chanctx_idx);
}

/* Public entry: reduced TSSI reconfiguration used during channel scans. */
void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			const struct rtw89_chan *chan)
{
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
		    __func__, phy);

	if (!rtwdev->is_tssi_mode[RF_PATH_A])
		return;
	if (!rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
		_tssi_rf_setting(rtwdev, phy, i, chan);
		_tssi_set_sys(rtwdev, phy, chan);
		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
		_tssi_pak(rtwdev, phy, i, chan);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);
}

/* Public entry: periodic TSSI thermal tracking. */
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
{
	_tssi_track(rtwdev);
}

/*
 * Scan variant of TSSI averaging: disable TSSI, set both paths' averaging
 * windows to 0, then re-enable via the combined A+B table.
 */
static void _rtw8852a_tssi_avg_scan(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
B_P0_TSSI_MV_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x0);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}

/*
 * Normal-operation TSSI averaging: disable TSSI, restore the default
 * averaging windows (0x4 / 0x2), then re-enable via the combined table.
 */
static void _rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev,
				   enum rtw89_phy_idx phy)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	/* disable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x4);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	/* enable */
	rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
}

/* Switch TSSI averaging between scan (@enable) and normal configurations. */
static void rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy, bool enable)
{
	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		/* SCAN_START */
		_rtw8852a_tssi_avg_scan(rtwdev, phy);
	} else {
		/* SCAN_END */
		_rtw8852a_tssi_set_avg(rtwdev, phy);
	}
}

/*
 * On scan start (@enable), snapshot the current TXAGC offsets for both
 * paths; on scan end, write the snapshots back and pulse the offset-enable
 * bits to latch them.
 */
static void rtw8852a_tssi_default_txagc(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy, bool enable)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 i;

	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
		return;

	if (enable) {
		/* 0xc000 / 0x0 readback means the offset is not latched yet */
		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
					  B_TXAGC_BB_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
					  B_TXAGC_BB_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_A] =
					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
							      B_TXAGC_BB);
				if (tssi_info->default_txagc_offset[RF_PATH_A])
					break;
			}
		}

		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
					  B_TXAGC_BB_S1_OFT) != 0xc000 &&
		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
					  B_TXAGC_BB_S1_OFT) != 0x0) {
			for (i = 0; i < 6; i++) {
				tssi_info->default_txagc_offset[RF_PATH_B] =
					rtw89_phy_read32_mask(rtwdev,
							      R_TXAGC_BB_S1,
							      B_TXAGC_BB_S1);
				if (tssi_info->default_txagc_offset[RF_PATH_B])
					break;
			}
		}
	} else {
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_A]);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
				       tssi_info->default_txagc_offset[RF_PATH_B]);

		/* toggle the enable bits to latch the restored offsets */
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
	}
}

/* Public entry: TSSI scan-start/-end hook used by the scan notifier. */
void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
			       enum rtw89_phy_idx phy_idx)
{
	if (scan_start) {
		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, true);
		rtw8852a_tssi_set_avg(rtwdev, phy_idx, true);
	} else {
		rtw8852a_tssi_default_txagc(rtwdev, phy_idx, false);
		rtw8852a_tssi_set_avg(rtwdev, phy_idx, false);
	}
}
// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit helpers for clk providers and consumers
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <kunit/clk.h>
#include <kunit/resource.h>

/* Deferred-action wrapper so clk_disable_unprepare() can run at test teardown. */
KUNIT_DEFINE_ACTION_WRAPPER(clk_disable_unprepare_wrapper,
			    clk_disable_unprepare, struct clk *);

/**
 * clk_prepare_enable_kunit() - Test managed clk_prepare_enable()
 * @test: The test context
 * @clk: clk to prepare and enable
 *
 * The clk is automatically disabled and unprepared when the test case
 * concludes.
 *
 * Return: 0 on success, or negative errno on failure.
 */
int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* Undo the prepare+enable when the test finishes (or now, on failure). */
	return kunit_add_action_or_reset(test, clk_disable_unprepare_wrapper,
					 clk);
}
EXPORT_SYMBOL_GPL(clk_prepare_enable_kunit);

/* Deferred-action wrapper so clk_put() can run at test teardown. */
KUNIT_DEFINE_ACTION_WRAPPER(clk_put_wrapper, clk_put, struct clk *);

/*
 * Attach a teardown clk_put() to @clk.  ERR_PTR values pass through
 * untouched so callers can hand this the raw result of clk_get() & co.
 */
static struct clk *__clk_get_kunit(struct kunit *test, struct clk *clk)
{
	int ret;

	if (IS_ERR(clk))
		return clk;

	ret = kunit_add_action_or_reset(test, clk_put_wrapper, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}

/**
 * clk_get_kunit() - Test managed clk_get()
 * @test: The test context
 * @dev: device for clock "consumer"
 * @con_id: clock consumer ID
 *
 * Just like clk_get(), except the clk is managed by the test case and is
 * automatically put with clk_put() after the test case concludes.
 *
 * Return: new clk consumer or ERR_PTR on failure.
 */
struct clk *
clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id)
{
	struct clk *clk;

	clk = clk_get(dev, con_id);

	return __clk_get_kunit(test, clk);
}
EXPORT_SYMBOL_GPL(clk_get_kunit);

/**
 * of_clk_get_kunit() - Test managed of_clk_get()
 * @test: The test context
 * @np: device_node for clock "consumer"
 * @index: index in 'clocks' property of @np
 *
 * Just like of_clk_get(), except the clk is managed by the test case and is
 * automatically put with clk_put() after the test case concludes.
 *
 * Return: new clk consumer or ERR_PTR on failure.
 */
struct clk *
of_clk_get_kunit(struct kunit *test, struct device_node *np, int index)
{
	struct clk *clk;

	clk = of_clk_get(np, index);

	return __clk_get_kunit(test, clk);
}
EXPORT_SYMBOL_GPL(of_clk_get_kunit);

/**
 * clk_hw_get_clk_kunit() - Test managed clk_hw_get_clk()
 * @test: The test context
 * @hw: clk_hw associated with the clk being consumed
 * @con_id: connection ID string on device
 *
 * Just like clk_hw_get_clk(), except the clk is managed by the test case and
 * is automatically put with clk_put() after the test case concludes.
 *
 * Return: new clk consumer or ERR_PTR on failure.
 */
struct clk *
clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id)
{
	struct clk *clk;

	clk = clk_hw_get_clk(hw, con_id);

	return __clk_get_kunit(test, clk);
}
EXPORT_SYMBOL_GPL(clk_hw_get_clk_kunit);

/**
 * clk_hw_get_clk_prepared_enabled_kunit() - Test managed clk_hw_get_clk() + clk_prepare_enable()
 * @test: The test context
 * @hw: clk_hw associated with the clk being consumed
 * @con_id: connection ID string on device
 *
 * Just like
 *
 * .. code-block:: c
 *
 *	struct clk *clk = clk_hw_get_clk(...);
 *	clk_prepare_enable(clk);
 *
 * except the clk is managed by the test case and is automatically disabled and
 * unprepared with clk_disable_unprepare() and put with clk_put() after the
 * test case concludes.
 *
 * Return: new clk consumer that is prepared and enabled or ERR_PTR on failure.
 */
struct clk *
clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
				      const char *con_id)
{
	int ret;
	struct clk *clk;

	/* Both steps register their own teardown actions with @test. */
	clk = clk_hw_get_clk_kunit(test, hw, con_id);
	if (IS_ERR(clk))
		return clk;

	ret = clk_prepare_enable_kunit(test, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_hw_get_clk_prepared_enabled_kunit);

/* Deferred-action wrapper so clk_hw_unregister() can run at test teardown. */
KUNIT_DEFINE_ACTION_WRAPPER(clk_hw_unregister_wrapper,
			    clk_hw_unregister, struct clk_hw *);

/**
 * clk_hw_register_kunit() - Test managed clk_hw_register()
 * @test: The test context
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Just like clk_hw_register(), except the clk registration is managed by the
 * test case and is automatically unregistered after the test case concludes.
 *
 * Return: 0 on success or a negative errno value on failure.
 */
int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw)
{
	int ret;

	ret = clk_hw_register(dev, hw);
	if (ret)
		return ret;

	return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw);
}
EXPORT_SYMBOL_GPL(clk_hw_register_kunit);

/**
 * of_clk_hw_register_kunit() - Test managed of_clk_hw_register()
 * @test: The test context
 * @node: device_node of device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Just like of_clk_hw_register(), except the clk registration is managed by
 * the test case and is automatically unregistered after the test case
 * concludes.
 *
 * Return: 0 on success or a negative errno value on failure.
 */
int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node,
			     struct clk_hw *hw)
{
	int ret;

	ret = of_clk_hw_register(node, hw);
	if (ret)
		return ret;

	return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw);
}
EXPORT_SYMBOL_GPL(of_clk_hw_register_kunit);

/* Deferred-action wrapper so of_clk_del_provider() can run at test teardown. */
KUNIT_DEFINE_ACTION_WRAPPER(of_clk_del_provider_wrapper,
			    of_clk_del_provider, struct device_node *);

/**
 * of_clk_add_hw_provider_kunit() - Test managed of_clk_add_hw_provider()
 * @test: The test context
 * @np: Device node pointer associated with clock provider
 * @get: Callback for decoding clk_hw
 * @data: Context pointer for @get callback.
 *
 * Just like of_clk_add_hw_provider(), except the clk_hw provider is managed by
 * the test case and is automatically unregistered after the test case
 * concludes.
 *
 * Return: 0 on success or a negative errno value on failure.
 */
int of_clk_add_hw_provider_kunit(struct kunit *test, struct device_node *np,
				 struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data),
				 void *data)
{
	int ret;

	ret = of_clk_add_hw_provider(np, get, data);
	if (ret)
		return ret;

	return kunit_add_action_or_reset(test, of_clk_del_provider_wrapper, np);
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider_kunit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("KUnit helpers for clk providers and consumers");
/* SPDX-License-Identifier: GPL-2.0+ */ /* * NILFS B-tree node cache * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Seiji Kihara. * Revised by Ryusuke Konishi. */ #ifndef _NILFS_BTNODE_H #define _NILFS_BTNODE_H #include <linux/types.h> #include <linux/buffer_head.h> #include <linux/fs.h> #include <linux/backing-dev.h> /** * struct nilfs_btnode_chkey_ctxt - change key context * @oldkey: old key of block's moving content * @newkey: new key for block's content * @bh: buffer head of old buffer * @newbh: buffer head of new buffer */ struct nilfs_btnode_chkey_ctxt { __u64 oldkey; __u64 newkey; struct buffer_head *bh; struct buffer_head *newbh; }; void nilfs_init_btnc_inode(struct inode *btnc_inode); void nilfs_btnode_cache_clear(struct address_space *); struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr); int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, blk_opf_t, struct buffer_head **, sector_t *); void nilfs_btnode_delete(struct buffer_head *); int nilfs_btnode_prepare_change_key(struct address_space *, struct nilfs_btnode_chkey_ctxt *); void nilfs_btnode_commit_change_key(struct address_space *, struct nilfs_btnode_chkey_ctxt *); void nilfs_btnode_abort_change_key(struct address_space *, struct nilfs_btnode_chkey_ctxt *); #endif /* _NILFS_BTNODE_H */
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __SCU_EVENT_CODES_HEADER__
#define __SCU_EVENT_CODES_HEADER__

/*
 * Constants and macros for the SCU event codes.
 *
 * An event code packs a 4-bit type (bits 27:24) and a 6-bit specifier
 * (bits 23:18) into a u32.
 */

#define SCU_EVENT_TYPE_CODE_SHIFT      24
#define SCU_EVENT_TYPE_CODE_MASK       0x0F000000
#define SCU_EVENT_SPECIFIC_CODE_SHIFT  18
#define SCU_EVENT_SPECIFIC_CODE_MASK   0x00FC0000
#define SCU_EVENT_CODE_MASK	\
	(SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)

/**
 * SCU_EVENT_TYPE() -
 *
 * This macro constructs an SCU event type from the type value.
 */
#define SCU_EVENT_TYPE(type) \
	((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)

/**
 * SCU_EVENT_SPECIFIC() -
 *
 * This macro constructs an SCU event specifier from the code value.
 */
#define SCU_EVENT_SPECIFIC(code) \
	((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)

/**
 * SCU_EVENT_MESSAGE() -
 *
 * This macro combines an already-shifted SCU event type and an unshifted
 * SCU event specifier code into a full event code.
 */
#define SCU_EVENT_MESSAGE(type, code) \
	((type) | SCU_EVENT_SPECIFIC(code))

/* SCU event types (already shifted into the type field) */
#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR  SCU_EVENT_TYPE(0x08)
#define SCU_EVENT_TYPE_SMU_PCQ_ERROR      SCU_EVENT_TYPE(0x09)
#define SCU_EVENT_TYPE_SMU_ERROR          SCU_EVENT_TYPE(0x00)
#define SCU_EVENT_TYPE_TRANSPORT_ERROR    SCU_EVENT_TYPE(0x01)
#define SCU_EVENT_TYPE_BROADCAST_CHANGE   SCU_EVENT_TYPE(0x02)
#define SCU_EVENT_TYPE_OSSP_EVENT         SCU_EVENT_TYPE(0x03)
#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
#define SCU_EVENT_TYPE_RNC_SUSPEND_TX     SCU_EVENT_TYPE(0x04)
#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX  SCU_EVENT_TYPE(0x05)
#define SCU_EVENT_TYPE_RNC_OPS_MISC       SCU_EVENT_TYPE(0x06)
#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
#define SCU_EVENT_TYPE_ERR_CNT_EVENT      SCU_EVENT_TYPE(0x0A)

/* SCU event specifiers (unshifted code values) */
#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
#define SCU_EVENT_SPECIFIER_RNC_RELEASE    0x00

/* SMU command events */
#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)

/* SMU PCQ events */
#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)

/* SMU events */
#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
#define SCU_EVENT_PCIE_INTERFACE_ERROR \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
#define SCU_EVENT_FUNCTION_LEVEL_RESET \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)

/* Transport level errors */
#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)

/* Broadcast change events */
#define SCU_EVENT_BROADCAST_CHANGE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
#define SCU_EVENT_BROADCAST_RESERVED0 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
#define SCU_EVENT_BROADCAST_RESERVED1 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
#define SCU_EVENT_BROADCAST_SES \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
#define SCU_EVENT_BROADCAST_EXPANDER \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
#define SCU_EVENT_BROADCAST_AEN \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
#define SCU_EVENT_BROADCAST_RESERVED3 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
#define SCU_EVENT_BROADCAST_RESERVED4 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
#define SCU_EVENT_PE_SUSPENDED \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)

/* OSSP events */
#define SCU_EVENT_PORT_SELECTOR_DETECTED \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
#define SCU_EVENT_SENT_PORT_SELECTION \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
#define SCU_EVENT_HARD_RESET_TRANSMITTED \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
#define SCU_EVENT_HARD_RESET_RECEIVED \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
#define SCU_EVENT_LINK_FAILURE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
#define SCU_EVENT_SATA_SPINUP_HOLD \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
#define SCU_EVENT_SAS_15_SSC \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
#define SCU_EVENT_SAS_15 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
#define SCU_EVENT_SAS_30_SSC \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
#define SCU_EVENT_SAS_30 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
#define SCU_EVENT_SAS_60_SSC \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
#define SCU_EVENT_SAS_60 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
#define SCU_EVENT_SATA_15_SSC \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
#define SCU_EVENT_SATA_15 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
#define SCU_EVENT_SATA_30_SSC \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
#define SCU_EVENT_SATA_30 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
#define SCU_EVENT_SATA_60_SSC \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
#define SCU_EVENT_SATA_60 \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
#define SCU_EVENT_SAS_PHY_DETECTED \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
#define SCU_EVENT_SATA_PHY_DETECTED \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)

/* Fatal internal memory error events */
#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00)
#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01)
#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02)

/* Remote node suspend events */
#define SCU_EVENT_TL_RNC_SUSPEND_TX \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)

/* Remote node miscellaneous events */
#define SCU_EVENT_POST_RCN_RELEASE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
#define SCU_EVENT_POST_RNC_COMPLETE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)

/* Error count events */
#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)

/**
 * scu_get_event_type() -
 *
 * This macro returns the SCU event type from the event code.
 */
#define scu_get_event_type(event_code) \
	((event_code) & SCU_EVENT_TYPE_CODE_MASK)

/**
 * scu_get_event_specifier() -
 *
 * This macro returns the SCU event specifier from the event code.
 */
#define scu_get_event_specifier(event_code) \
	((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)

/**
 * scu_get_event_code() -
 *
 * This macro returns the combined SCU event type and SCU event specifier from
 * the event code.
 */
#define scu_get_event_code(event_code) \
	((event_code) & SCU_EVENT_CODE_MASK)

/* PTX schedule events (with pre-extracted specifier values) */
#define SCU_EVENT_SMP_RESPONSE_NO_PE \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
	scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
#define SCU_EVENT_TASK_TIMEOUT \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \
	scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
#define SCU_EVENT_IT_NEXUS_TIMEOUT \
	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
	scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)

#endif	/* __SCU_EVENT_CODES_HEADER__ */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>

#define DW9807_MAX_FOCUS_POS	1023
/*
 * This sets the minimum granularity for the focus positions.
 * A value of 1 gives maximum accuracy for a desired focus position.
 */
#define DW9807_FOCUS_STEPS	1
/*
 * This acts as the minimum granularity of lens movement.
 * Keep this value power of 2, so the control steps can be
 * uniformly adjusted for gradual lens movement, with desired
 * number of control steps.
 */
#define DW9807_CTRL_STEPS	16
#define DW9807_CTRL_DELAY_US	1000

#define DW9807_CTL_ADDR		0x02
/*
 * DW9807 separates two registers to control the VCM position.
 * One for MSB value, another is LSB value.
 */
#define DW9807_MSB_ADDR		0x03
#define DW9807_LSB_ADDR		0x04
#define DW9807_STATUS_ADDR	0x05
#define DW9807_MODE_ADDR	0x06
#define DW9807_RESONANCE_ADDR	0x07

#define MAX_RETRY		10

/* Per-device state: control handler, subdev, and last commanded position. */
struct dw9807_device {
	struct v4l2_ctrl_handler ctrls_vcm;
	struct v4l2_subdev sd;
	u16 current_val;	/* last DAC value written (focus position) */
};

static inline struct dw9807_device *sd_to_dw9807_vcm(
					struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct dw9807_device, sd);
}

/*
 * Read the STATUS register.  Returns the (non-negative) status byte on
 * success or a negative errno from the i2c transfer.  Used as the polled
 * function in readx_poll_timeout() below.
 */
static int dw9807_i2c_check(struct i2c_client *client)
{
	const char status_addr = DW9807_STATUS_ADDR;
	char status_result;
	int ret;

	ret = i2c_master_send(client, &status_addr, sizeof(status_addr));
	if (ret < 0) {
		dev_err(&client->dev, "I2C write STATUS address fail ret = %d\n",
			ret);
		return ret;
	}

	ret = i2c_master_recv(client, &status_result, sizeof(status_result));
	if (ret < 0) {
		dev_err(&client->dev, "I2C read STATUS value fail ret = %d\n",
			ret);
		return ret;
	}

	return status_result;
}

/*
 * Write a 10-bit focus position: MSB register gets the top two bits,
 * LSB register the low byte (single transfer starting at DW9807_MSB_ADDR).
 */
static int dw9807_set_dac(struct i2c_client *client, u16 data)
{
	const char tx_data[3] = {
		DW9807_MSB_ADDR, ((data >> 8) & 0x03), (data & 0xff)
	};
	int val, ret;

	/*
	 * According to the datasheet, need to check the bus status before we
	 * write VCM position. This ensure that we really write the value
	 * into the register
	 */
	ret = readx_poll_timeout(dw9807_i2c_check, client, val, val <= 0,
			DW9807_CTRL_DELAY_US, MAX_RETRY * DW9807_CTRL_DELAY_US);

	if (ret || val < 0) {
		if (ret) {
			dev_warn(&client->dev,
				"Cannot do the write operation because VCM is busy\n");
		}

		/* ret != 0 means poll timed out (busy); val < 0 is an i2c error */
		return ret ? -EBUSY : val;
	}

	/* Write VCM position to registers */
	ret = i2c_master_send(client, tx_data, sizeof(tx_data));
	if (ret < 0) {
		dev_err(&client->dev,
			"I2C write MSB fail ret=%d\n", ret);

		return ret;
	}

	return 0;
}

/* v4l2 control handler: only V4L2_CID_FOCUS_ABSOLUTE is supported. */
static int dw9807_set_ctrl(struct v4l2_ctrl *ctrl)
{
	struct dw9807_device *dev_vcm = container_of(ctrl->handler,
		struct dw9807_device, ctrls_vcm);

	if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE) {
		struct i2c_client *client = v4l2_get_subdevdata(&dev_vcm->sd);

		/* Remember the value so suspend/resume can replay it. */
		dev_vcm->current_val = ctrl->val;
		return dw9807_set_dac(client, ctrl->val);
	}

	return -EINVAL;
}

static const struct v4l2_ctrl_ops dw9807_vcm_ctrl_ops = {
	.s_ctrl = dw9807_set_ctrl,
};

/* Power up the device while the subdev node is open. */
static int dw9807_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	return pm_runtime_resume_and_get(sd->dev);
}

static int dw9807_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	pm_runtime_put(sd->dev);

	return 0;
}

static const struct v4l2_subdev_internal_ops dw9807_int_ops = {
	.open = dw9807_open,
	.close = dw9807_close,
};

static const struct v4l2_subdev_ops dw9807_ops = { };

static void dw9807_subdev_cleanup(struct dw9807_device *dw9807_dev)
{
	v4l2_async_unregister_subdev(&dw9807_dev->sd);
	v4l2_ctrl_handler_free(&dw9807_dev->ctrls_vcm);
	media_entity_cleanup(&dw9807_dev->sd.entity);
}

/* Register the single focus control; initial position is 0. */
static int dw9807_init_controls(struct dw9807_device *dev_vcm)
{
	struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
	const struct v4l2_ctrl_ops *ops = &dw9807_vcm_ctrl_ops;
	struct i2c_client *client = v4l2_get_subdevdata(&dev_vcm->sd);

	v4l2_ctrl_handler_init(hdl, 1);

	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
			  0, DW9807_MAX_FOCUS_POS, DW9807_FOCUS_STEPS, 0);

	dev_vcm->sd.ctrl_handler = hdl;
	if (hdl->error) {
		dev_err(&client->dev, "%s fail error: 0x%x\n",
			__func__, hdl->error);
		return hdl->error;
	}

	return 0;
}

static int dw9807_probe(struct i2c_client *client)
{
	struct dw9807_device *dw9807_dev;
	int rval;

	dw9807_dev = devm_kzalloc(&client->dev, sizeof(*dw9807_dev),
				  GFP_KERNEL);
	if (dw9807_dev == NULL)
		return -ENOMEM;

	v4l2_i2c_subdev_init(&dw9807_dev->sd, client, &dw9807_ops);
	dw9807_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	dw9807_dev->sd.internal_ops = &dw9807_int_ops;

	rval = dw9807_init_controls(dw9807_dev);
	if (rval)
		goto err_cleanup;

	/* Lens entity: no pads. */
	rval = media_entity_pads_init(&dw9807_dev->sd.entity, 0, NULL);
	if (rval < 0)
		goto err_cleanup;

	dw9807_dev->sd.entity.function = MEDIA_ENT_F_LENS;

	rval = v4l2_async_register_subdev(&dw9807_dev->sd);
	if (rval < 0)
		goto err_cleanup;

	pm_runtime_set_active(&client->dev);
	pm_runtime_enable(&client->dev);
	pm_runtime_idle(&client->dev);

	return 0;

err_cleanup:
	v4l2_ctrl_handler_free(&dw9807_dev->ctrls_vcm);
	media_entity_cleanup(&dw9807_dev->sd.entity);

	return rval;
}

static void dw9807_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);

	pm_runtime_disable(&client->dev);

	dw9807_subdev_cleanup(dw9807_dev);
}

/*
 * This function sets the vcm position, so it consumes least current
 * The lens position is gradually moved in units of DW9807_CTRL_STEPS,
 * to make the movements smoothly.
 */
static int  __maybe_unused dw9807_vcm_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
	const char tx_data[2] = { DW9807_CTL_ADDR, 0x01 };
	int ret, val;

	/* Step the lens down from the current position towards 0. */
	for (val = dw9807_dev->current_val & ~(DW9807_CTRL_STEPS - 1);
	     val >= 0; val -= DW9807_CTRL_STEPS) {
		ret = dw9807_set_dac(client, val);
		if (ret)
			dev_err_once(dev, "%s I2C failure: %d", __func__, ret);
		usleep_range(DW9807_CTRL_DELAY_US, DW9807_CTRL_DELAY_US + 10);
	}

	/* Power down */
	ret = i2c_master_send(client, tx_data, sizeof(tx_data));
	if (ret < 0) {
		dev_err(&client->dev, "I2C write CTL fail ret = %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * This function sets the vcm position to the value set by the user
 * through v4l2_ctrl_ops s_ctrl handler
 * The lens position is gradually moved in units of DW9807_CTRL_STEPS,
 * to make the movements smoothly.
 */
static int  __maybe_unused dw9807_vcm_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
	const char tx_data[2] = { DW9807_CTL_ADDR, 0x00 };
	int ret, val;

	/* Power on */
	ret = i2c_master_send(client, tx_data, sizeof(tx_data));
	if (ret < 0) {
		dev_err(&client->dev, "I2C write CTL fail ret = %d\n", ret);
		return ret;
	}

	/* Step the lens back up towards the last commanded position. */
	for (val = dw9807_dev->current_val % DW9807_CTRL_STEPS;
	     val < dw9807_dev->current_val + DW9807_CTRL_STEPS - 1;
	     val += DW9807_CTRL_STEPS) {
		ret = dw9807_set_dac(client, val);
		if (ret)
			dev_err_ratelimited(dev, "%s I2C failure: %d",
						__func__, ret);
		usleep_range(DW9807_CTRL_DELAY_US, DW9807_CTRL_DELAY_US + 10);
	}

	return 0;
}

static const struct of_device_id dw9807_of_table[] = {
	{ .compatible = "dongwoon,dw9807-vcm" },
	/* Compatibility for older firmware, NEVER USE THIS IN FIRMWARE! */
	{ .compatible = "dongwoon,dw9807" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dw9807_of_table);

static const struct dev_pm_ops dw9807_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dw9807_vcm_suspend, dw9807_vcm_resume)
	SET_RUNTIME_PM_OPS(dw9807_vcm_suspend, dw9807_vcm_resume, NULL)
};

static struct i2c_driver dw9807_i2c_driver = {
	.driver = {
		.name = "dw9807",
		.pm = &dw9807_pm_ops,
		.of_match_table = dw9807_of_table,
	},
	.probe = dw9807_probe,
	.remove = dw9807_remove,
};

module_i2c_driver(dw9807_i2c_driver);

MODULE_AUTHOR("Chiang, Alan");
MODULE_DESCRIPTION("DW9807 VCM driver");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018 MediaTek Inc. * Author: Owen Chen <[email protected]> */ #include <linux/clk-provider.h> #include <linux/platform_device.h> #include "clk-mtk.h" #include "clk-gate.h" #include <dt-bindings/clock/mt6765-clk.h> static const struct mtk_gate_regs venc_cg_regs = { .set_ofs = 0x4, .clr_ofs = 0x8, .sta_ofs = 0x0, }; #define GATE_VENC(_id, _name, _parent, _shift) \ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv) static const struct mtk_gate venc_clks[] = { GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "mm_ck", 0), GATE_VENC(CLK_VENC_SET1_VENC, "venc_set1_venc", "mm_ck", 4), GATE_VENC(CLK_VENC_SET2_JPGENC, "jpgenc", "mm_ck", 8), GATE_VENC(CLK_VENC_SET3_VDEC, "venc_set3_vdec", "mm_ck", 12), }; static const struct mtk_clk_desc venc_desc = { .clks = venc_clks, .num_clks = ARRAY_SIZE(venc_clks), }; static const struct of_device_id of_match_clk_mt6765_vcodec[] = { { .compatible = "mediatek,mt6765-vcodecsys", .data = &venc_desc, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_vcodec); static struct platform_driver clk_mt6765_vcodec_drv = { .probe = mtk_clk_simple_probe, .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6765-vcodec", .of_match_table = of_match_clk_mt6765_vcodec, }, }; module_platform_driver(clk_mt6765_vcodec_drv); MODULE_DESCRIPTION("MediaTek MT6765 Video Codec clocks driver"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
/* start node id of a node block dedicated to the given node id */
#define	START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define	NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

/* size of free nid batch when shrinking */
#define SHRINK_NID_BATCH_SIZE	8

#define DEF_RA_NID_PAGES	0	/* # of nid pages to be readaheaded */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD		10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD			100000

/* control total # of node writes used for roll-forward recovery */
#define DEF_RF_NODE_BLOCKS			0

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NAT_VEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* check pinned file's alignment status of physical blocks */
#define FILE_NOT_ALIGNED	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
	IS_PREALLOC,		/* nat entry is preallocated */
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	if (set)
		ne->ni.flag |= BIT(type);
	else
		ne->ni.flag &= ~BIT(type);
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	return ne->ni.flag & BIT(type);
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

/* Decode an on-disk NAT entry into the in-memory form (flag untouched). */
static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

/* Encode an in-memory node_info back into the on-disk NAT entry format. */
static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
}

enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	READ_EXTENT_CACHE,	/* indicates read extent cache */
	AGE_EXTENT_CACHE,	/* indicates age extent cache */
	DISCARD_CACHE,	/* indicates memory of cached discard cmds */
	COMPRESS_PAGE,	/* indicates memory of cached compressed pages */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: FREE_NID or PREALLOC_NID */
};

/* Peek (without removing) the first free nid; *nid is untouched if none. */
static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->nid_list_lock);
	if (nm_i->nid_cnt[FREE_NID] <= 0) {
		spin_unlock(&nm_i->nid_list_lock);
		return;
	}
	fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->nid_list_lock);
}

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	/* mirror bitmap must match the primary, or the fs state is corrupt */
	if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
						nm_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;

	/*
	 * block_off = segment_off * 512 + off_in_segment
	 * OLD = (segment_off * 512) * 2 + off_in_segment
	 * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
	 */
	block_off = NAT_BLOCK_OFFSET(start);

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(block_off << 1) -
		(block_off & (BLKS_PER_SEG(sbi) - 1)));

	/* the NAT bit selects which of the two copies is current */
	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += BLKS_PER_SEG(sbi);

	return block_addr;
}

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	/* flip to the other copy: toggle the per-segment bit of the offset */
	block_addr -= nm_i->nat_blkaddr;
	block_addr ^= BIT(sbi->log_blocks_per_seg);
	return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);
#endif
}

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned flag = le32_to_cpu(rn->footer.flag);

	return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.next_blkaddr);
}

/* Definition continues beyond this chunk. */
static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag
= le32_to_cpu(rn->footer.flag); rn->footer.nid = cpu_to_le32(nid); rn->footer.ino = cpu_to_le32(ino); /* should remain old flag bits such as COLD_BIT_SHIFT */ rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) | (old_flag & OFFSET_BIT_MASK)); } static inline void copy_node_footer(struct page *dst, struct page *src) { struct f2fs_node *src_rn = F2FS_NODE(src); struct f2fs_node *dst_rn = F2FS_NODE(dst); memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer)); } static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page)); struct f2fs_node *rn = F2FS_NODE(page); __u64 cp_ver = cur_cp_version(ckpt); if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) cp_ver |= (cur_cp_crc(ckpt) << 32); rn->footer.cp_ver = cpu_to_le64(cp_ver); rn->footer.next_blkaddr = cpu_to_le32(blkaddr); } static inline bool is_recoverable_dnode(struct page *page) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page)); __u64 cp_ver = cur_cp_version(ckpt); /* Don't care crc part, if fsck.f2fs sets it. */ if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG)) return (cp_ver << 32) == (cpver_of_node(page) << 32); if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) cp_ver |= (cur_cp_crc(ckpt) << 32); return cp_ver == cpver_of_node(page); } /* * f2fs assigns the following node offsets described as (num). * N = NIDS_PER_BLOCK * * Inode block (0) * |- direct node (1) * |- direct node (2) * |- indirect node (3) * | `- direct node (4 => 4 + N - 1) * |- indirect node (4 + N) * | `- direct node (5 + N => 5 + 2N - 1) * `- double indirect node (5 + 2N) * `- indirect node (6 + 2N) * `- direct node * ...... * `- indirect node ((6 + 2N) + x(N + 1)) * `- direct node * ...... 
* `- indirect node ((6 + 2N) + (N - 1)(N + 1)) * `- direct node */ static inline bool IS_DNODE(struct page *node_page) { unsigned int ofs = ofs_of_node(node_page); if (f2fs_has_xattr_block(ofs)) return true; if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK || ofs == 5 + 2 * NIDS_PER_BLOCK) return false; if (ofs >= 6 + 2 * NIDS_PER_BLOCK) { ofs -= 6 + 2 * NIDS_PER_BLOCK; if (!((long int)ofs % (NIDS_PER_BLOCK + 1))) return false; } return true; } static inline int set_nid(struct page *p, int off, nid_t nid, bool i) { struct f2fs_node *rn = F2FS_NODE(p); f2fs_wait_on_page_writeback(p, NODE, true, true); if (i) rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid); else rn->in.nid[off] = cpu_to_le32(nid); return set_page_dirty(p); } static inline nid_t get_nid(struct page *p, int off, bool i) { struct f2fs_node *rn = F2FS_NODE(p); if (i) return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]); return le32_to_cpu(rn->in.nid[off]); } /* * Coldness identification: * - Mark cold files in f2fs_inode_info * - Mark cold node blocks in their node footer * - Mark cold data pages in page cache */ static inline int is_node(struct page *page, int type) { struct f2fs_node *rn = F2FS_NODE(page); return le32_to_cpu(rn->footer.flag) & BIT(type); } #define is_cold_node(page) is_node(page, COLD_BIT_SHIFT) #define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT) #define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT) static inline void set_cold_node(struct page *page, bool is_dir) { struct f2fs_node *rn = F2FS_NODE(page); unsigned int flag = le32_to_cpu(rn->footer.flag); if (is_dir) flag &= ~BIT(COLD_BIT_SHIFT); else flag |= BIT(COLD_BIT_SHIFT); rn->footer.flag = cpu_to_le32(flag); } static inline void set_mark(struct page *page, int mark, int type) { struct f2fs_node *rn = F2FS_NODE(page); unsigned int flag = le32_to_cpu(rn->footer.flag); if (mark) flag |= BIT(type); else flag &= ~BIT(type); rn->footer.flag = cpu_to_le32(flag); #ifdef CONFIG_F2FS_CHECK_FS 
f2fs_inode_chksum_set(F2FS_P_SB(page), page); #endif } #define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT) #define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT)
/* SPDX-License-Identifier: GPL-2.0-only */
/* Generic definitions for Marvell Dove 88AP510 SoC */
#ifndef __ASM_ARCH_DOVE_H
#define __ASM_ARCH_DOVE_H

#include "irqs.h"

/*
 * Marvell Dove address maps.
 *
 * phys		virt		size
 * c8000000	fdb00000	1M	Cryptographic SRAM
 * e0000000	@runtime	128M	PCIe-0 Memory space
 * e8000000	@runtime	128M	PCIe-1 Memory space
 * f1000000	fec00000	1M	on-chip south-bridge registers
 * f1800000	fe400000	8M	on-chip north-bridge registers
 * f2000000	fee00000	1M	PCIe-0 I/O space
 * f2100000	fef00000	1M	PCIe-1 I/O space
 *
 * The *_VIRT_BASE macros below are the fixed kernel mappings listed in
 * this table; *_PHYS_BASE are bus/physical addresses.
 */

#define DOVE_CESA_PHYS_BASE		0xc8000000
#define DOVE_CESA_VIRT_BASE		IOMEM(0xfdb00000)
#define DOVE_CESA_SIZE			SZ_1M

#define DOVE_PCIE0_MEM_PHYS_BASE	0xe0000000
#define DOVE_PCIE0_MEM_SIZE		SZ_128M
#define DOVE_PCIE1_MEM_PHYS_BASE	0xe8000000
#define DOVE_PCIE1_MEM_SIZE		SZ_128M

#define DOVE_BOOTROM_PHYS_BASE		0xf8000000
#define DOVE_BOOTROM_SIZE		SZ_128M

#define DOVE_SCRATCHPAD_PHYS_BASE	0xf0000000
#define DOVE_SCRATCHPAD_VIRT_BASE	IOMEM(0xfdd00000)
#define DOVE_SCRATCHPAD_SIZE		SZ_1M

#define DOVE_SB_REGS_PHYS_BASE		0xf1000000
#define DOVE_SB_REGS_VIRT_BASE		IOMEM(0xfec00000)
#define DOVE_SB_REGS_SIZE		SZ_1M

#define DOVE_NB_REGS_PHYS_BASE		0xf1800000
#define DOVE_NB_REGS_VIRT_BASE		IOMEM(0xfe400000)
#define DOVE_NB_REGS_SIZE		SZ_8M

#define DOVE_PCIE0_IO_PHYS_BASE		0xf2000000
#define DOVE_PCIE0_IO_BUS_BASE		0x00000000
#define DOVE_PCIE0_IO_SIZE		SZ_64K

#define DOVE_PCIE1_IO_PHYS_BASE		0xf2100000
#define DOVE_PCIE1_IO_BUS_BASE		0x00010000
#define DOVE_PCIE1_IO_SIZE		SZ_64K

/*
 * Dove Core Registers Map
 * (peripheral blocks below are offsets into the south-bridge register
 * window unless noted otherwise)
 */

/* SPI, I2C, UART */
#define DOVE_I2C_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x11000)
#define DOVE_UART0_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x12000)
#define DOVE_UART0_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x12000)
#define DOVE_UART1_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x12100)
#define DOVE_UART1_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x12100)
#define DOVE_UART2_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x12200)
#define DOVE_UART2_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x12200)
#define DOVE_UART3_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x12300)
#define DOVE_UART3_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x12300)
#define DOVE_SPI0_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x10600)
#define DOVE_SPI1_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x14600)

/* North-South Bridge */
#define BRIDGE_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x20000)
#define BRIDGE_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x20000)
#define BRIDGE_WINS_BASE	(BRIDGE_PHYS_BASE)
#define BRIDGE_WINS_SZ		(0x80)

/* Cryptographic Engine */
#define DOVE_CRYPT_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x30000)

/* PCIe 0 */
#define DOVE_PCIE0_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x40000)

/* USB */
#define DOVE_USB0_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x50000)
#define DOVE_USB1_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x51000)

/* XOR 0 Engine */
#define DOVE_XOR0_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x60800)
#define DOVE_XOR0_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x60800)
#define DOVE_XOR0_HIGH_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x60A00)
#define DOVE_XOR0_HIGH_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x60A00)

/* XOR 1 Engine */
#define DOVE_XOR1_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x60900)
#define DOVE_XOR1_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x60900)
#define DOVE_XOR1_HIGH_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x60B00)
#define DOVE_XOR1_HIGH_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x60B00)

/* Gigabit Ethernet */
#define DOVE_GE00_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x70000)

/* PCIe 1 */
#define DOVE_PCIE1_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0x80000)

/* CAFE */
#define DOVE_SDIO0_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x92000)
#define DOVE_SDIO1_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x90000)
#define DOVE_CAM_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x94000)
#define DOVE_CAFE_WIN_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0x98000)

/* SATA */
#define DOVE_SATA_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0xa0000)

/* I2S/SPDIF */
#define DOVE_AUD0_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0xb0000)
#define DOVE_AUD1_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0xb4000)

/* NAND Flash Controller */
#define DOVE_NFC_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0xc0000)

/* MPP, GPIO, Reset Sampling */
#define DOVE_MPP_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0xd0200)
#define DOVE_PMU_MPP_GENERAL_CTRL	(DOVE_MPP_VIRT_BASE + 0x10)
#define DOVE_RESET_SAMPLE_LO	(DOVE_MPP_VIRT_BASE + 0x014)
#define DOVE_RESET_SAMPLE_HI	(DOVE_MPP_VIRT_BASE + 0x018)
#define DOVE_GPIO_LO_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0xd0400)
#define DOVE_GPIO_HI_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0xd0420)
#define DOVE_GPIO2_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0xe8400)
#define DOVE_MPP_GENERAL_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0xe803c)
#define  DOVE_AU1_SPDIFO_GPIO_EN	(1 << 1)
#define  DOVE_NAND_GPIO_EN		(1 << 0)
#define DOVE_MPP_CTRL4_VIRT_BASE	(DOVE_GPIO_LO_VIRT_BASE + 0x40)
#define  DOVE_SPI_GPIO_SEL		(1 << 5)
#define  DOVE_UART1_GPIO_SEL		(1 << 4)
#define  DOVE_AU1_GPIO_SEL		(1 << 3)
#define  DOVE_CAM_GPIO_SEL		(1 << 2)
#define  DOVE_SD1_GPIO_SEL		(1 << 1)
#define  DOVE_SD0_GPIO_SEL		(1 << 0)

/* Power Management */
#define DOVE_PMU_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0xd0000)
#define DOVE_PMU_SIG_CTRL	(DOVE_PMU_VIRT_BASE + 0x802c)

/* Real Time Clock */
#define DOVE_RTC_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0xd8500)

/* AC97 */
#define DOVE_AC97_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0xe0000)
#define DOVE_AC97_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0xe0000)

/* Peripheral DMA */
#define DOVE_PDMA_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0xe4000)
#define DOVE_PDMA_VIRT_BASE	(DOVE_SB_REGS_VIRT_BASE + 0xe4000)

#define DOVE_GLOBAL_CONFIG_1	(DOVE_SB_REGS_VIRT_BASE + 0xe802C)
#define  DOVE_TWSI_ENABLE_OPTION1	(1 << 7)
#define DOVE_GLOBAL_CONFIG_2	(DOVE_SB_REGS_VIRT_BASE + 0xe8030)
#define  DOVE_TWSI_ENABLE_OPTION2	(1 << 20)
#define  DOVE_TWSI_ENABLE_OPTION3	(1 << 21)
#define  DOVE_TWSI_OPTION3_GPIO		(1 << 22)
#define DOVE_SSP_PHYS_BASE	(DOVE_SB_REGS_PHYS_BASE + 0xec000)
#define DOVE_SSP_CTRL_STATUS_1	(DOVE_SB_REGS_VIRT_BASE + 0xe8034)
#define  DOVE_SSP_ON_AU1		(1 << 0)
#define  DOVE_SSP_CLOCK_ENABLE		(1 << 1)
#define  DOVE_SSP_BPB_CLOCK_SRC_SSP	(1 << 11)

/* Memory Controller (north-bridge window) */
#define DOVE_MC_PHYS_BASE	(DOVE_NB_REGS_PHYS_BASE + 0x00000)
#define DOVE_MC_WINS_BASE	(DOVE_MC_PHYS_BASE + 0x100)
#define DOVE_MC_WINS_SZ		(0x8)
#define DOVE_MC_VIRT_BASE	(DOVE_NB_REGS_VIRT_BASE + 0x00000)

/* LCD Controller */
#define DOVE_LCD_PHYS_BASE	(DOVE_NB_REGS_PHYS_BASE + 0x10000)
#define DOVE_LCD1_PHYS_BASE	(DOVE_NB_REGS_PHYS_BASE + 0x20000)
/* note: DOVE_LCD2 uses the same offset as DOVE_LCD */
#define DOVE_LCD2_PHYS_BASE	(DOVE_NB_REGS_PHYS_BASE + 0x10000)
#define DOVE_LCD_DCON_PHYS_BASE	(DOVE_NB_REGS_PHYS_BASE + 0x30000)

/* Graphic Engine */
#define DOVE_GPU_PHYS_BASE	(DOVE_NB_REGS_PHYS_BASE + 0x40000)

/* Video Engine */
#define DOVE_VPU_PHYS_BASE	(DOVE_NB_REGS_PHYS_BASE + 0x400000)

#endif
// SPDX-License-Identifier: GPL-2.0
//
// Freescale imx6ul pinctrl driver
//
// Author: Anson Huang <[email protected]>
// Copyright (C) 2015 Freescale Semiconductor, Inc.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>

#include "pinctrl-imx.h"

/*
 * Pad indices for the main i.MX6UL IOMUXC instance.
 *
 * The enum value doubles as the pinctrl pin number and as the index into
 * the IOMUXC register layout, so the ordering (including the RESERVE*
 * placeholder slots) must not change.  Note that index 11 is named
 * SNVS_TAMPER4 while the surrounding slots are reserved.
 */
enum imx6ul_pads {
	MX6UL_PAD_RESERVE0 = 0, MX6UL_PAD_RESERVE1 = 1, MX6UL_PAD_RESERVE2 = 2,
	MX6UL_PAD_RESERVE3 = 3, MX6UL_PAD_RESERVE4 = 4, MX6UL_PAD_RESERVE5 = 5,
	MX6UL_PAD_RESERVE6 = 6, MX6UL_PAD_RESERVE7 = 7, MX6UL_PAD_RESERVE8 = 8,
	MX6UL_PAD_RESERVE9 = 9, MX6UL_PAD_RESERVE10 = 10,
	MX6UL_PAD_SNVS_TAMPER4 = 11,
	MX6UL_PAD_RESERVE12 = 12, MX6UL_PAD_RESERVE13 = 13,
	MX6UL_PAD_RESERVE14 = 14, MX6UL_PAD_RESERVE15 = 15,
	MX6UL_PAD_RESERVE16 = 16,
	MX6UL_PAD_JTAG_MOD = 17, MX6UL_PAD_JTAG_TMS = 18,
	MX6UL_PAD_JTAG_TDO = 19, MX6UL_PAD_JTAG_TDI = 20,
	MX6UL_PAD_JTAG_TCK = 21, MX6UL_PAD_JTAG_TRST_B = 22,
	MX6UL_PAD_GPIO1_IO00 = 23, MX6UL_PAD_GPIO1_IO01 = 24,
	MX6UL_PAD_GPIO1_IO02 = 25, MX6UL_PAD_GPIO1_IO03 = 26,
	MX6UL_PAD_GPIO1_IO04 = 27, MX6UL_PAD_GPIO1_IO05 = 28,
	MX6UL_PAD_GPIO1_IO06 = 29, MX6UL_PAD_GPIO1_IO07 = 30,
	MX6UL_PAD_GPIO1_IO08 = 31, MX6UL_PAD_GPIO1_IO09 = 32,
	MX6UL_PAD_UART1_TX_DATA = 33, MX6UL_PAD_UART1_RX_DATA = 34,
	MX6UL_PAD_UART1_CTS_B = 35, MX6UL_PAD_UART1_RTS_B = 36,
	MX6UL_PAD_UART2_TX_DATA = 37, MX6UL_PAD_UART2_RX_DATA = 38,
	MX6UL_PAD_UART2_CTS_B = 39, MX6UL_PAD_UART2_RTS_B = 40,
	MX6UL_PAD_UART3_TX_DATA = 41, MX6UL_PAD_UART3_RX_DATA = 42,
	MX6UL_PAD_UART3_CTS_B = 43, MX6UL_PAD_UART3_RTS_B = 44,
	MX6UL_PAD_UART4_TX_DATA = 45, MX6UL_PAD_UART4_RX_DATA = 46,
	MX6UL_PAD_UART5_TX_DATA = 47, MX6UL_PAD_UART5_RX_DATA = 48,
	MX6UL_PAD_ENET1_RX_DATA0 = 49, MX6UL_PAD_ENET1_RX_DATA1 = 50,
	MX6UL_PAD_ENET1_RX_EN = 51, MX6UL_PAD_ENET1_TX_DATA0 = 52,
	MX6UL_PAD_ENET1_TX_DATA1 = 53, MX6UL_PAD_ENET1_TX_EN = 54,
	MX6UL_PAD_ENET1_TX_CLK = 55, MX6UL_PAD_ENET1_RX_ER = 56,
	MX6UL_PAD_ENET2_RX_DATA0 = 57, MX6UL_PAD_ENET2_RX_DATA1 = 58,
	MX6UL_PAD_ENET2_RX_EN = 59, MX6UL_PAD_ENET2_TX_DATA0 = 60,
	MX6UL_PAD_ENET2_TX_DATA1 = 61, MX6UL_PAD_ENET2_TX_EN = 62,
	MX6UL_PAD_ENET2_TX_CLK = 63, MX6UL_PAD_ENET2_RX_ER = 64,
	MX6UL_PAD_LCD_CLK = 65, MX6UL_PAD_LCD_ENABLE = 66,
	MX6UL_PAD_LCD_HSYNC = 67, MX6UL_PAD_LCD_VSYNC = 68,
	MX6UL_PAD_LCD_RESET = 69,
	MX6UL_PAD_LCD_DATA00 = 70, MX6UL_PAD_LCD_DATA01 = 71,
	MX6UL_PAD_LCD_DATA02 = 72, MX6UL_PAD_LCD_DATA03 = 73,
	MX6UL_PAD_LCD_DATA04 = 74, MX6UL_PAD_LCD_DATA05 = 75,
	MX6UL_PAD_LCD_DATA06 = 76, MX6UL_PAD_LCD_DATA07 = 77,
	MX6UL_PAD_LCD_DATA08 = 78, MX6UL_PAD_LCD_DATA09 = 79,
	MX6UL_PAD_LCD_DATA10 = 80, MX6UL_PAD_LCD_DATA11 = 81,
	MX6UL_PAD_LCD_DATA12 = 82, MX6UL_PAD_LCD_DATA13 = 83,
	MX6UL_PAD_LCD_DATA14 = 84, MX6UL_PAD_LCD_DATA15 = 85,
	MX6UL_PAD_LCD_DATA16 = 86, MX6UL_PAD_LCD_DATA17 = 87,
	MX6UL_PAD_LCD_DATA18 = 88, MX6UL_PAD_LCD_DATA19 = 89,
	MX6UL_PAD_LCD_DATA20 = 90, MX6UL_PAD_LCD_DATA21 = 91,
	MX6UL_PAD_LCD_DATA22 = 92, MX6UL_PAD_LCD_DATA23 = 93,
	MX6UL_PAD_NAND_RE_B = 94, MX6UL_PAD_NAND_WE_B = 95,
	MX6UL_PAD_NAND_DATA00 = 96, MX6UL_PAD_NAND_DATA01 = 97,
	MX6UL_PAD_NAND_DATA02 = 98, MX6UL_PAD_NAND_DATA03 = 99,
	MX6UL_PAD_NAND_DATA04 = 100, MX6UL_PAD_NAND_DATA05 = 101,
	MX6UL_PAD_NAND_DATA06 = 102, MX6UL_PAD_NAND_DATA07 = 103,
	MX6UL_PAD_NAND_ALE = 104, MX6UL_PAD_NAND_WP_B = 105,
	MX6UL_PAD_NAND_READY_B = 106, MX6UL_PAD_NAND_CE0_B = 107,
	MX6UL_PAD_NAND_CE1_B = 108, MX6UL_PAD_NAND_CLE = 109,
	MX6UL_PAD_NAND_DQS = 110,
	MX6UL_PAD_SD1_CMD = 111, MX6UL_PAD_SD1_CLK = 112,
	MX6UL_PAD_SD1_DATA0 = 113, MX6UL_PAD_SD1_DATA1 = 114,
	MX6UL_PAD_SD1_DATA2 = 115, MX6UL_PAD_SD1_DATA3 = 116,
	MX6UL_PAD_CSI_MCLK = 117, MX6UL_PAD_CSI_PIXCLK = 118,
	MX6UL_PAD_CSI_VSYNC = 119, MX6UL_PAD_CSI_HSYNC = 120,
	MX6UL_PAD_CSI_DATA00 = 121, MX6UL_PAD_CSI_DATA01 = 122,
	MX6UL_PAD_CSI_DATA02 = 123, MX6UL_PAD_CSI_DATA03 = 124,
	MX6UL_PAD_CSI_DATA04 = 125, MX6UL_PAD_CSI_DATA05 = 126,
	MX6UL_PAD_CSI_DATA06 = 127, MX6UL_PAD_CSI_DATA07 = 128,
};

/* Pad indices for the separate i.MX6ULL SNVS/LPSR IOMUXC instance. */
enum imx6ull_lpsr_pads {
	MX6ULL_PAD_BOOT_MODE0 = 0, MX6ULL_PAD_BOOT_MODE1 = 1,
	MX6ULL_PAD_SNVS_TAMPER0 = 2, MX6ULL_PAD_SNVS_TAMPER1 = 3,
	MX6ULL_PAD_SNVS_TAMPER2 = 4, MX6ULL_PAD_SNVS_TAMPER3 = 5,
	MX6ULL_PAD_SNVS_TAMPER4 = 6, MX6ULL_PAD_SNVS_TAMPER5 = 7,
	MX6ULL_PAD_SNVS_TAMPER6 = 8, MX6ULL_PAD_SNVS_TAMPER7 = 9,
	MX6ULL_PAD_SNVS_TAMPER8 = 10, MX6ULL_PAD_SNVS_TAMPER9 = 11,
};

/*
 * Pad names for the pinmux subsystem.
 * Must stay in the same order as enum imx6ul_pads above.
 */
static const struct pinctrl_pin_desc imx6ul_pinctrl_pads[] = {
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE0), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE1),
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE2), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE3),
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE4), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE5),
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE6), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE7),
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE8), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE9),
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE10),
	IMX_PINCTRL_PIN(MX6UL_PAD_SNVS_TAMPER4),
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE12), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE13),
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE14), IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE15),
	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE16),
	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_MOD), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TMS),
	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDO), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDI),
	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TCK), IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TRST_B),
	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO00), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO01),
	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO02), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO03),
	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO04), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO05),
	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO06), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO07),
	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO08), IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO09),
	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RX_DATA),
	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_CTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RTS_B),
	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RX_DATA),
	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_CTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RTS_B),
	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RX_DATA),
	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_CTS_B), IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RTS_B),
	IMX_PINCTRL_PIN(MX6UL_PAD_UART4_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART4_RX_DATA),
	IMX_PINCTRL_PIN(MX6UL_PAD_UART5_TX_DATA), IMX_PINCTRL_PIN(MX6UL_PAD_UART5_RX_DATA),
	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA0), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA1),
	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_EN), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA0),
	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA1), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_EN),
	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_CLK), IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_ER),
	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA0), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA1),
	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_EN), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA0),
	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA1), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_EN),
	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_CLK), IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_ER),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_CLK), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_ENABLE),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_HSYNC), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_VSYNC),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_RESET),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA00), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA01),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA02), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA03),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA04), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA05),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA06), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA07),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA08), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA09),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA10), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA11),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA12), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA13),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA14), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA15),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA16), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA17),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA18), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA19),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA20), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA21),
	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA22), IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA23),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_RE_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WE_B),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA00), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA01),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA02), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA03),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA04), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA05),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA06), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA07),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_ALE), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WP_B),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_READY_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE0_B),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE1_B), IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CLE),
	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DQS),
	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CMD), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CLK),
	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA0), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA1),
	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA2), IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA3),
	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_MCLK), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_PIXCLK),
	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_VSYNC), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_HSYNC),
	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA00), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA01),
	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA02), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA03),
	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA04), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA05),
	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA06), IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA07),
};

/* pad for i.MX6ULL lpsr pinmux; order matches enum imx6ull_lpsr_pads. */
static const struct pinctrl_pin_desc imx6ull_snvs_pinctrl_pads[] = {
	IMX_PINCTRL_PIN(MX6ULL_PAD_BOOT_MODE0), IMX_PINCTRL_PIN(MX6ULL_PAD_BOOT_MODE1),
	IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER0), IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER1),
	IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER2), IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER3),
	IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER4), IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER5),
	IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER6), IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER7),
	IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER8), IMX_PINCTRL_PIN(MX6ULL_PAD_SNVS_TAMPER9),
};

static const struct imx_pinctrl_soc_info imx6ul_pinctrl_info = {
	.pins = imx6ul_pinctrl_pads,
	.npins = ARRAY_SIZE(imx6ul_pinctrl_pads),
	.gpr_compatible = "fsl,imx6ul-iomuxc-gpr",
};

static const struct imx_pinctrl_soc_info imx6ull_snvs_pinctrl_info = {
	.pins = imx6ull_snvs_pinctrl_pads,
	.npins = ARRAY_SIZE(imx6ull_snvs_pinctrl_pads),
	.flags = ZERO_OFFSET_VALID,	/* SNVS block: register offset 0 is a real pad */
};

static const struct of_device_id imx6ul_pinctrl_of_match[] = {
	{ .compatible = "fsl,imx6ul-iomuxc", .data = &imx6ul_pinctrl_info, },
	{ .compatible = "fsl,imx6ull-iomuxc-snvs", .data = &imx6ull_snvs_pinctrl_info, },
	{ /* sentinel */ }
};

/* Select the SoC pad description by compatible and hand off to the core. */
static int imx6ul_pinctrl_probe(struct platform_device *pdev)
{
	const struct imx_pinctrl_soc_info *pinctrl_info;

	pinctrl_info = of_device_get_match_data(&pdev->dev);
	if (!pinctrl_info)
		return -ENODEV;

	return imx_pinctrl_probe(pdev, pinctrl_info);
}

static struct platform_driver imx6ul_pinctrl_driver = {
	.driver = {
		.name = "imx6ul-pinctrl",
		.of_match_table = imx6ul_pinctrl_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = imx6ul_pinctrl_probe,
};

/* arch_initcall: pinctrl must be up before most drivers probe */
static int __init imx6ul_pinctrl_init(void)
{
	return platform_driver_register(&imx6ul_pinctrl_driver);
}
arch_initcall(imx6ul_pinctrl_init);
// SPDX-License-Identifier: GPL-2.0 #include <linux/ftrace.h> #include <linux/tracepoint.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/rv.h> #include <rv/instrumentation.h> #include <rv/da_monitor.h> #define MODULE_NAME "MODEL_NAME" /* * XXX: include required tracepoint headers, e.g., * #include <linux/trace/events/sched.h> */ #include <trace/events/rv.h> /* * This is the self-generated part of the monitor. Generally, there is no need * to touch this section. */ #include "MODEL_NAME.h" /* * Declare the deterministic automata monitor. * * The rv monitor reference is needed for the monitor declaration. */ static struct rv_monitor rv_MODEL_NAME; DECLARE_DA_MON_PER_TASK(MODEL_NAME, MIN_TYPE); /* * This is the instrumentation part of the monitor. * * This is the section where manual work is required. Here the kernel events * are translated into model's event. * */ TRACEPOINT_HANDLERS_SKEL static int enable_MODEL_NAME(void) { int retval; retval = da_monitor_init_MODEL_NAME(); if (retval) return retval; TRACEPOINT_ATTACH return 0; } static void disable_MODEL_NAME(void) { rv_MODEL_NAME.enabled = 0; TRACEPOINT_DETACH da_monitor_destroy_MODEL_NAME(); } /* * This is the monitor register section. */ static struct rv_monitor rv_MODEL_NAME = { .name = "MODEL_NAME", .description = "auto-generated MODEL_NAME", .enable = enable_MODEL_NAME, .disable = disable_MODEL_NAME, .reset = da_monitor_reset_all_MODEL_NAME, .enabled = 0, }; static int __init register_MODEL_NAME(void) { rv_register_monitor(&rv_MODEL_NAME); return 0; } static void __exit unregister_MODEL_NAME(void) { rv_unregister_monitor(&rv_MODEL_NAME); } module_init(register_MODEL_NAME); module_exit(unregister_MODEL_NAME); MODULE_LICENSE("GPL"); MODULE_AUTHOR("dot2k: auto-generated"); MODULE_DESCRIPTION("MODEL_NAME");
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Namjae Jeon <[email protected]> * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ #include "glob.h" #include "nterr.h" #include "smb_common.h" #include "../common/smb2status.h" #include "mgmt/user_session.h" #include "connection.h" static int check_smb2_hdr(struct smb2_hdr *hdr) { /* * Make sure that this really is an SMB, that it is a response. */ if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) return 1; return 0; } /* * The following table defines the expected "StructureSize" of SMB2 requests * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests. * * Note that commands are defined in smb2pdu.h in le16 but the array below is * indexed by command in host byte order */ static const __le16 smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ cpu_to_le16(36), /* SMB2_SESSION_SETUP */ cpu_to_le16(25), /* SMB2_LOGOFF */ cpu_to_le16(4), /* SMB2_TREE_CONNECT */ cpu_to_le16(9), /* SMB2_TREE_DISCONNECT */ cpu_to_le16(4), /* SMB2_CREATE */ cpu_to_le16(57), /* SMB2_CLOSE */ cpu_to_le16(24), /* SMB2_FLUSH */ cpu_to_le16(24), /* SMB2_READ */ cpu_to_le16(49), /* SMB2_WRITE */ cpu_to_le16(49), /* SMB2_LOCK */ cpu_to_le16(48), /* SMB2_IOCTL */ cpu_to_le16(57), /* SMB2_CANCEL */ cpu_to_le16(4), /* SMB2_ECHO */ cpu_to_le16(4), /* SMB2_QUERY_DIRECTORY */ cpu_to_le16(33), /* SMB2_CHANGE_NOTIFY */ cpu_to_le16(32), /* SMB2_QUERY_INFO */ cpu_to_le16(41), /* SMB2_SET_INFO */ cpu_to_le16(33), /* use 44 for lease break */ /* SMB2_OPLOCK_BREAK */ cpu_to_le16(36) }; /* * The size of the variable area depends on the offset and length fields * located in different fields for various SMB2 requests. SMB2 requests * with no variable length info, show an offset of zero for the offset field. 
*/ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ true, /* SMB2_SESSION_SETUP */ true, /* SMB2_LOGOFF */ false, /* SMB2_TREE_CONNECT */ true, /* SMB2_TREE_DISCONNECT */ false, /* SMB2_CREATE */ true, /* SMB2_CLOSE */ false, /* SMB2_FLUSH */ false, /* SMB2_READ */ true, /* SMB2_WRITE */ true, /* SMB2_LOCK */ true, /* SMB2_IOCTL */ true, /* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */ /* SMB2_ECHO */ false, /* SMB2_QUERY_DIRECTORY */ true, /* SMB2_CHANGE_NOTIFY */ false, /* SMB2_QUERY_INFO */ true, /* SMB2_SET_INFO */ true, /* SMB2_OPLOCK_BREAK */ false }; /* * Set length of the data area and the offset to arguments. * if they are invalid, return error. */ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, struct smb2_hdr *hdr) { int ret = 0; *off = 0; *len = 0; /* * Following commands have data areas so we have to get the location * of the data buffer offset and data buffer length for the particular * command. 
*/ switch (hdr->Command) { case SMB2_SESSION_SETUP: *off = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferOffset); *len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength); break; case SMB2_TREE_CONNECT: *off = max_t(unsigned short int, le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset), offsetof(struct smb2_tree_connect_req, Buffer)); *len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength); break; case SMB2_CREATE: { unsigned short int name_off = max_t(unsigned short int, le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset), offsetof(struct smb2_create_req, Buffer)); unsigned short int name_len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength); if (((struct smb2_create_req *)hdr)->CreateContextsLength) { *off = le32_to_cpu(((struct smb2_create_req *) hdr)->CreateContextsOffset); *len = le32_to_cpu(((struct smb2_create_req *) hdr)->CreateContextsLength); if (!name_len) break; if (name_off + name_len < (u64)*off + *len) break; } *off = name_off; *len = name_len; break; } case SMB2_QUERY_INFO: *off = max_t(unsigned int, le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset), offsetof(struct smb2_query_info_req, Buffer)); *len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength); break; case SMB2_SET_INFO: *off = max_t(unsigned int, le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset), offsetof(struct smb2_set_info_req, Buffer)); *len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength); break; case SMB2_READ: *off = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoOffset); *len = le16_to_cpu(((struct smb2_read_req *)hdr)->ReadChannelInfoLength); break; case SMB2_WRITE: if (((struct smb2_write_req *)hdr)->DataOffset || ((struct smb2_write_req *)hdr)->Length) { *off = max_t(unsigned short int, le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset), offsetof(struct smb2_write_req, Buffer)); *len = le32_to_cpu(((struct smb2_write_req 
*)hdr)->Length); break; } *off = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoOffset); *len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength); break; case SMB2_QUERY_DIRECTORY: *off = max_t(unsigned short int, le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset), offsetof(struct smb2_query_directory_req, Buffer)); *len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength); break; case SMB2_LOCK: { unsigned short lock_count; lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount); if (lock_count > 0) { *off = offsetof(struct smb2_lock_req, locks); *len = sizeof(struct smb2_lock_element) * lock_count; } break; } case SMB2_IOCTL: *off = max_t(unsigned int, le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset), offsetof(struct smb2_ioctl_req, Buffer)); *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount); break; default: ksmbd_debug(SMB, "no length check for command\n"); break; } if (*off > 4096) { ksmbd_debug(SMB, "offset %d too large\n", *off); ret = -EINVAL; } else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) { ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n", MAX_STREAM_PROT_LEN, (u64)*off + *len); ret = -EINVAL; } return ret; } /* * Calculate the size of the SMB message based on the fixed header * portion, the number of word parameters and the data portion of the message. */ static int smb2_calc_size(void *buf, unsigned int *len) { struct smb2_pdu *pdu = (struct smb2_pdu *)buf; struct smb2_hdr *hdr = &pdu->hdr; unsigned int offset; /* the offset from the beginning of SMB to data area */ unsigned int data_length; /* the length of the variable length data area */ int ret; /* Structure Size has already been checked to make sure it is 64 */ *len = le16_to_cpu(hdr->StructureSize); /* * StructureSize2, ie length of fixed parameter area has already * been checked to make sure it is the correct length. 
 */
	*len += le16_to_cpu(pdu->StructureSize2);
	/*
	 * StructureSize2 of smb2_lock pdu is set to 48, indicating
	 * the size of smb2 lock request with single smb2_lock_element
	 * regardless of number of locks. Subtract single
	 * smb2_lock_element for correct buffer size check.
	 */
	if (hdr->Command == SMB2_LOCK)
		*len -= sizeof(struct smb2_lock_element);

	/* Commands with no variable-length data area need no further checks. */
	if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
		goto calc_size_exit;

	ret = smb2_get_data_area_len(&offset, &data_length, hdr);
	if (ret)
		return ret;
	ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
		    offset);

	if (data_length > 0) {
		/*
		 * Check to make sure that data area begins after fixed area,
		 * Note that last byte of the fixed area is part of data area
		 * for some commands, typically those with odd StructureSize,
		 * so we must add one to the calculation.
		 */
		if (offset + 1 < *len) {
			ksmbd_debug(SMB,
				    "data area offset %d overlaps SMB2 header %u\n",
				    offset + 1, *len);
			return -EINVAL;
		}

		*len = offset + data_length;
	}

calc_size_exit:
	ksmbd_debug(SMB, "SMB2 len %u\n", *len);
	return 0;
}

/*
 * Per-command helpers returning the variable-length payload size a request
 * implies, used for credit-charge validation below. All read little-endian
 * on-the-wire fields.
 */

/* QUERY_INFO: input buffer plus requested output buffer. */
static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
{
	return le32_to_cpu(h->InputBufferLength) +
		le32_to_cpu(h->OutputBufferLength);
}

/* SET_INFO: the info buffer being written. */
static inline int smb2_set_info_req_len(struct smb2_set_info_req *h)
{
	return le32_to_cpu(h->BufferLength);
}

/* READ: number of bytes the client wants to read. */
static inline int smb2_read_req_len(struct smb2_read_req *h)
{
	return le32_to_cpu(h->Length);
}

/* WRITE: number of bytes the client is writing. */
static inline int smb2_write_req_len(struct smb2_write_req *h)
{
	return le32_to_cpu(h->Length);
}

/* QUERY_DIRECTORY: requested output buffer size. */
static inline int smb2_query_dir_req_len(struct smb2_query_directory_req *h)
{
	return le32_to_cpu(h->OutputBufferLength);
}

/* IOCTL request: input plus output payload carried in the request. */
static inline int smb2_ioctl_req_len(struct smb2_ioctl_req *h)
{
	return le32_to_cpu(h->InputCount) +
		le32_to_cpu(h->OutputCount);
}

/* IOCTL response bound: maximum response sizes the client will accept. */
static inline int smb2_ioctl_resp_len(struct smb2_ioctl_req *h)
{
	return le32_to_cpu(h->MaxInputResponse) +
		le32_to_cpu(h->MaxOutputResponse);
}

/*
 * smb2_validate_credit_charge() - check that the request's CreditCharge
 * covers the larger of the request payload and the expected response,
 * and account it against the connection's granted credits.
 * Returns 0 when acceptable, 1 when the request must be rejected.
 */
static int smb2_validate_credit_charge(struct ksmbd_conn *conn, struct
				       smb2_hdr *hdr)
{
	unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
	unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge);
	void *__hdr = hdr;
	int ret = 0;

	/* Work out the payload size implied by each command type. */
	switch (hdr->Command) {
	case SMB2_QUERY_INFO:
		req_len = smb2_query_info_req_len(__hdr);
		break;
	case SMB2_SET_INFO:
		req_len = smb2_set_info_req_len(__hdr);
		break;
	case SMB2_READ:
		req_len = smb2_read_req_len(__hdr);
		break;
	case SMB2_WRITE:
		req_len = smb2_write_req_len(__hdr);
		break;
	case SMB2_QUERY_DIRECTORY:
		req_len = smb2_query_dir_req_len(__hdr);
		break;
	case SMB2_IOCTL:
		req_len = smb2_ioctl_req_len(__hdr);
		expect_resp_len = smb2_ioctl_resp_len(__hdr);
		break;
	case SMB2_CANCEL:
		/* CANCEL carries no payload and consumes no credits. */
		return 0;
	default:
		req_len = 1;
		break;
	}

	/* A charge of 0 counts as 1; size the charge by the larger side. */
	credit_charge = max_t(unsigned short, credit_charge, 1);
	max_len = max_t(unsigned int, req_len, expect_resp_len);
	calc_credit_num = DIV_ROUND_UP(max_len, SMB2_MAX_BUFFER_SIZE);

	if (credit_charge < calc_credit_num) {
		ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n",
			    credit_charge, calc_credit_num);
		return 1;
	} else if (credit_charge > conn->vals->max_credits) {
		ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge);
		return 1;
	}

	/* Account the charge against what this connection was granted. */
	spin_lock(&conn->credits_lock);
	if (credit_charge > conn->total_credits) {
		ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
			    credit_charge, conn->total_credits);
		ret = 1;
	}

	/* u64 sum avoids overflow of outstanding + charge before comparing. */
	if ((u64)conn->outstanding_credits + credit_charge > conn->total_credits) {
		ksmbd_debug(SMB, "Limits exceeding the maximum allowable outstanding requests, given : %u, pending : %u\n",
			    credit_charge, conn->outstanding_credits);
		ret = 1;
	} else
		conn->outstanding_credits += credit_charge;

	spin_unlock(&conn->credits_lock);

	return ret;
}

/*
 * ksmbd_smb2_check_message() - validate the current (possibly compounded)
 * SMB2 PDU in @work: header sanity, fixed structure size, calculated vs
 * declared length, then credit charge. Returns 0 if the PDU is acceptable,
 * 1 if it must be rejected.
 */
int ksmbd_smb2_check_message(struct ksmbd_work *work)
{
	struct smb2_pdu *pdu = ksmbd_req_buf_next(work);
	struct smb2_hdr *hdr = &pdu->hdr;
	int command;
	__u32 clc_len;  /* calculated length */
	__u32 len = get_rfc1002_len(work->request_buf);
	__u32 req_struct_size, next_cmd = le32_to_cpu(hdr->NextCommand);

	/* NextCommand must stay inside the received message. */
	if ((u64)work->next_smb2_rcv_hdr_off + next_cmd > len) {
		pr_err("next command(%u) offset exceeds smb msg size\n",
		       next_cmd);
		return 1;
	}

	/* Limit @len to this PDU only when compounding. */
	if (next_cmd > 0)
		len = next_cmd;
	else if (work->next_smb2_rcv_hdr_off)
		len -= work->next_smb2_rcv_hdr_off;

	if (check_smb2_hdr(hdr))
		return 1;

	if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
		ksmbd_debug(SMB, "Illegal structure size %u\n",
			    le16_to_cpu(hdr->StructureSize));
		return 1;
	}

	command = le16_to_cpu(hdr->Command);
	if (command >= NUMBER_OF_SMB2_COMMANDS) {
		ksmbd_debug(SMB, "Illegal SMB2 command %d\n", command);
		return 1;
	}

	if (smb2_req_struct_sizes[command] != pdu->StructureSize2) {
		if (!(command == SMB2_OPLOCK_BREAK_HE &&
		      (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 ||
		       le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) {
			/* special case for SMB2.1 lease break message */
			ksmbd_debug(SMB,
				    "Illegal request size %u for command %d\n",
				    le16_to_cpu(pdu->StructureSize2), command);
			return 1;
		}
	}

	req_struct_size = le16_to_cpu(pdu->StructureSize2) +
		__SMB2_HEADER_STRUCTURE_SIZE;
	/* LOCK declares one smb2_lock_element in its fixed size; drop it. */
	if (command == SMB2_LOCK_HE)
		req_struct_size -= sizeof(struct smb2_lock_element);

	if (req_struct_size > len + 1)
		return 1;

	if (smb2_calc_size(hdr, &clc_len))
		return 1;

	if (len != clc_len) {
		/* client can return one byte more due to implied bcc[0] */
		if (clc_len == len + 1)
			goto validate_credit;

		/*
		 * Some windows servers (win2016) will pad also the final
		 * PDU in a compound to 8 bytes.
		 */
		if (ALIGN(clc_len, 8) == len)
			goto validate_credit;

		/*
		 * SMB2 NEGOTIATE request will be validated when message
		 * handling proceeds.
		 */
		if (command == SMB2_NEGOTIATE_HE)
			goto validate_credit;

		/*
		 * Allow a message that padded to 8byte boundary.
		 * Linux 4.19.217 with smb 3.0.2 are sometimes
		 * sending messages where the cls_len is exactly
		 * 8 bytes less than len.
		 */
		if (clc_len < len && (len - clc_len) <= 8)
			goto validate_credit;

		pr_err_ratelimited(
			    "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
			    len, clc_len, command,
			    le64_to_cpu(hdr->MessageId));

		return 1;
	}

validate_credit:
	/* Credit charge is only enforced when large MTU was negotiated. */
	if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
	    smb2_validate_credit_charge(work->conn, hdr))
		return 1;

	return 0;
}

/* Entry point for SMB2 NEGOTIATE; defers to the common negotiate path. */
int smb2_negotiate_request(struct ksmbd_work *work)
{
	return ksmbd_smb_negotiate_common(work, SMB2_NEGOTIATE_HE);
}
// SPDX-License-Identifier: GPL-2.0
/*
 * ZynqMP Generic PM domain support
 *
 * Copyright (C) 2015-2019 Xilinx, Inc.
 *
 * Davorin Mista <[email protected]>
 * Jolly Shah <[email protected]>
 * Rajan Vaja <[email protected]>
 */

#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>

#include <linux/firmware/xlnx-zynqmp.h>

/* Fixed number of pre-allocated PM domains exposed by this provider. */
#define ZYNQMP_NUM_DOMAINS		(100)

/*
 * Capability requested for a powered-off domain; may be raised to
 * ZYNQMP_PM_CAPABILITY_UNUSABLE at probe time (see zynqmp_gpd_probe).
 */
static int min_capability;

/**
 * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
 * @gpd:		Generic power domain
 * @node_id:		PM node ID corresponding to device inside PM domain
 * @requested:		The PM node mapped to the PM domain has been requested
 */
struct zynqmp_pm_domain {
	struct generic_pm_domain gpd;
	u32 node_id;
	bool requested;
};

#define to_zynqmp_pm_domain(pm_domain) \
	container_of(pm_domain, struct zynqmp_pm_domain, gpd)

/**
 * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
 *					path
 * @dev:	Device to check for wakeup source path
 * @not_used:	Data member (not required)
 *
 * This function checks the device's child hierarchy recursively and reports
 * whether any device in it is set as a wakeup source.
 *
 * Return: 1 if device is in wakeup source path else 0
 */
static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
{
	int may_wakeup;

	may_wakeup = device_may_wakeup(dev);
	if (may_wakeup)
		return may_wakeup;

	/* Recurse into children; signature doubles as the iterator callback. */
	return device_for_each_child(dev, NULL,
			zynqmp_gpd_is_active_wakeup_path);
}

/**
 * zynqmp_gpd_power_on() - Power on PM domain
 * @domain:	Generic PM domain
 *
 * This function is called before devices inside a PM domain are resumed, to
 * power on PM domain.
 *
 * Return: 0 on success, error code otherwise
 */
static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
{
	struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
	int ret;

	/* Ask firmware for full access with maximum QoS, waiting for the ack. */
	ret = zynqmp_pm_set_requirement(pd->node_id,
					ZYNQMP_PM_CAPABILITY_ACCESS,
					ZYNQMP_PM_MAX_QOS,
					ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	if (ret) {
		dev_err(&domain->dev,
			"failed to set requirement to 0x%x for PM node id %d: %d\n",
			ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id, ret);
		return ret;
	}

	dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
		ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id);

	return 0;
}

/**
 * zynqmp_gpd_power_off() - Power off PM domain
 * @domain:	Generic PM domain
 *
 * This function is called after devices inside a PM domain are suspended, to
 * power off PM domain.
 *
 * Return: 0 on success, error code otherwise
 */
static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
{
	struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
	int ret;
	struct pm_domain_data *pdd, *tmp;
	u32 capabilities = min_capability;
	bool may_wakeup;

	/* If domain is already released there is nothing to be done */
	if (!pd->requested) {
		dev_dbg(&domain->dev, "PM node id %d is already released\n",
			pd->node_id);
		return 0;
	}

	list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) {
		/* If device is in wakeup path, set capability to WAKEUP */
		may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL);
		if (may_wakeup) {
			dev_dbg(pdd->dev, "device is in wakeup path in %s\n",
				domain->name);
			capabilities = ZYNQMP_PM_CAPABILITY_WAKEUP;
			break;
		}
	}

	/* Lower the requirement; no ack needed on the power-off path. */
	ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
					ZYNQMP_PM_REQUEST_ACK_NO);
	if (ret) {
		dev_err(&domain->dev,
			"failed to set requirement to 0x%x for PM node id %d: %d\n",
			capabilities, pd->node_id, ret);
		return ret;
	}

	dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
		capabilities, pd->node_id);

	return 0;
}

/**
 * zynqmp_gpd_attach_dev() - Attach device to the PM domain
 * @domain:	Generic PM domain
 * @dev:	Device to
 attach
 *
 * Return: 0 on success, error code otherwise
 */
static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
				 struct device *dev)
{
	struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
	struct device_link *link;
	int ret;

	/* Link consumer to the domain device for sync_state ordering. */
	link = device_link_add(dev, &domain->dev, DL_FLAG_SYNC_STATE_ONLY);
	if (!link)
		dev_dbg(&domain->dev, "failed to create device link for %s\n",
			dev_name(dev));

	/* If this is not the first device to attach there is nothing to do */
	if (domain->device_count)
		return 0;

	/* First attacher claims the PM node from firmware. */
	ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
				     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	if (ret) {
		dev_err(&domain->dev, "%s request failed for node %d: %d\n",
			domain->name, pd->node_id, ret);
		return ret;
	}

	pd->requested = true;

	dev_dbg(&domain->dev, "%s requested PM node id %d\n",
		dev_name(dev), pd->node_id);

	return 0;
}

/**
 * zynqmp_gpd_detach_dev() - Detach device from the PM domain
 * @domain:	Generic PM domain
 * @dev:	Device to detach
 */
static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
				  struct device *dev)
{
	struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
	int ret;

	/* If this is not the last device to detach there is nothing to do */
	if (domain->device_count)
		return;

	/* Last detacher releases the PM node back to firmware. */
	ret = zynqmp_pm_release_node(pd->node_id);
	if (ret) {
		dev_err(&domain->dev, "failed to release PM node id %d: %d\n",
			pd->node_id, ret);
		return;
	}

	pd->requested = false;

	dev_dbg(&domain->dev, "%s released PM node id %d\n",
		dev_name(dev), pd->node_id);
}

/*
 * zynqmp_gpd_xlate() - translate a one-cell phandle arg (a PM node id)
 * into the matching generic_pm_domain, lazily binding unused slots.
 */
static struct generic_pm_domain *zynqmp_gpd_xlate
				(const struct of_phandle_args *genpdspec, void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int i, idx = genpdspec->args[0];
	struct zynqmp_pm_domain *pd;

	/* domains[0] is the first element of the contiguous pd array. */
	pd = to_zynqmp_pm_domain(genpd_data->domains[0]);

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	/* Check for existing pm domains */
	for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
		if (pd[i].node_id == idx)
			goto done;
	}

	/*
	 * Add index in empty node_id of power domain list as no existing
	 * power domain found for
current index. */ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) { if (pd[i].node_id == 0) { pd[i].node_id = idx; break; } } done: if (!genpd_data->domains[i] || i == ZYNQMP_NUM_DOMAINS) return ERR_PTR(-ENOENT); return genpd_data->domains[i]; } static int zynqmp_gpd_probe(struct platform_device *pdev) { int i; struct genpd_onecell_data *zynqmp_pd_data; struct generic_pm_domain **domains; struct zynqmp_pm_domain *pd; struct device *dev = &pdev->dev; pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL); if (!pd) return -ENOMEM; zynqmp_pd_data = devm_kzalloc(dev, sizeof(*zynqmp_pd_data), GFP_KERNEL); if (!zynqmp_pd_data) return -ENOMEM; zynqmp_pd_data->xlate = zynqmp_gpd_xlate; domains = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*domains), GFP_KERNEL); if (!domains) return -ENOMEM; if (!of_device_is_compatible(dev->parent->of_node, "xlnx,zynqmp-firmware")) min_capability = ZYNQMP_PM_CAPABILITY_UNUSABLE; for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) { pd->node_id = 0; pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i); pd->gpd.power_off = zynqmp_gpd_power_off; pd->gpd.power_on = zynqmp_gpd_power_on; pd->gpd.attach_dev = zynqmp_gpd_attach_dev; pd->gpd.detach_dev = zynqmp_gpd_detach_dev; domains[i] = &pd->gpd; /* Mark all PM domains as initially powered off */ pm_genpd_init(&pd->gpd, NULL, true); } zynqmp_pd_data->domains = domains; zynqmp_pd_data->num_domains = ZYNQMP_NUM_DOMAINS; of_genpd_add_provider_onecell(dev->parent->of_node, zynqmp_pd_data); return 0; } static void zynqmp_gpd_remove(struct platform_device *pdev) { of_genpd_del_provider(pdev->dev.parent->of_node); } static void zynqmp_gpd_sync_state(struct device *dev) { int ret; ret = zynqmp_pm_init_finalize(); if (ret) dev_warn(dev, "failed to release power management to firmware\n"); } static struct platform_driver zynqmp_power_domain_driver = { .driver = { .name = "zynqmp_power_controller", .sync_state = zynqmp_gpd_sync_state, }, .probe = zynqmp_gpd_probe, .remove = zynqmp_gpd_remove, }; 
module_platform_driver(zynqmp_power_domain_driver); MODULE_ALIAS("platform:zynqmp_power_controller");
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
 */
/dts-v1/;

#include "omap4460.dtsi"
#include "omap4-panda-common.dtsi"

/ {
	model = "TI OMAP4 PandaBoard-ES";
	compatible = "ti,omap4-panda-es", "ti,omap4-panda", "ti,omap4460", "ti,omap4430", "ti,omap4";
};

/* Audio routing is different between PandaBoard4430 and PandaBoardES */
&sound {
	ti,model = "PandaBoardES";

	/* Audio routing */
	ti,audio-routing =
		"Headset Stereophone", "HSOL",
		"Headset Stereophone", "HSOR",
		"Ext Spk", "HFL",
		"Ext Spk", "HFR",
		"Line Out", "AUXL",
		"Line Out", "AUXR",
		"AFML", "Line In",
		"AFMR", "Line In";
};

/* PandaboardES has external pullups on SCL & SDA */
&dss_hdmi_pins {
	pinctrl-single,pins = <
		OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0)	/* hdmi_cec.hdmi_cec */
		OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0)	/* hdmi_scl.hdmi_scl */
		OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0)	/* hdmi_sda.hdmi_sda */
		>;
};

&omap4_pmx_core {
	led_gpio_pins: gpio-led-pmx-pins {
		pinctrl-single,pins = <
			OMAP4_IOPAD(0x0f6, PIN_OUTPUT | MUX_MODE3)	/* gpio_110 */
		>;
	};

	button_pins: button-pins {
		pinctrl-single,pins = <
			OMAP4_IOPAD(0x0fc, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
		>;
	};

	bt_pins: bt-pins {
		pinctrl-single,pins = <
			OMAP4_IOPAD(0x06c, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a22.gpio_46 - BTEN */
			OMAP4_IOPAD(0x072, PIN_OUTPUT_PULLUP | MUX_MODE3)	/* gpmc_a25.gpio_49 - BTWAKEUP */
		>;
	};

	uart2_pins: uart2-pins {
		pinctrl-single,pins = <
			OMAP4_IOPAD(0x118, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart2_cts.uart2_cts - HCI */
			OMAP4_IOPAD(0x11a, PIN_OUTPUT | MUX_MODE0)		/* uart2_rts.uart2_rts */
			OMAP4_IOPAD(0x11c, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart2_rx.uart2_rx */
			OMAP4_IOPAD(0x11e, PIN_OUTPUT | MUX_MODE0)		/* uart2_tx.uart2_tx */
		>;
	};
};

&led_wkgpio_pins {
	pinctrl-single,pins = <
		OMAP4_IOPAD(0x05c, PIN_OUTPUT | MUX_MODE3)	/* gpio_wk8 */
	>;
};

&leds {
	pinctrl-0 = <
		&led_gpio_pins
		&led_wkgpio_pins
	>;

	led-heartbeat {
		gpios = <&gpio4 14 GPIO_ACTIVE_HIGH>;
	};

	led-mmc {
		gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
	};
};

&gpio_keys {
	buttonS2 {
		gpios = <&gpio4 17 GPIO_ACTIVE_LOW>;	/* gpio_113 */
	};
};

&gpio1_target {
	ti,no-reset-on-init;
};

&wl12xx_gpio {
	pinctrl-single,pins = <
		OMAP4_IOPAD(0x066, PIN_OUTPUT | MUX_MODE3)		/* gpmc_a19.gpio_43 */
		OMAP4_IOPAD(0x070, PIN_OUTPUT_PULLUP | MUX_MODE3)	/* gpmc_a24.gpio_48 */
	>;
};

&uart2 {
	pinctrl-names = "default";
	pinctrl-0 = <&uart2_pins &bt_pins>;
	bluetooth: tiwi {
		compatible = "ti,wl1271-st";
		enable-gpios = <&gpio2 14 GPIO_ACTIVE_HIGH>;	/* GPIO_46 */
	};
};
// SPDX-License-Identifier: GPL-2.0-only /* * lib/debug_locks.c * * Generic place for common debugging facilities for various locks: * spinlocks, rwlocks, mutexes and rwsems. * * Started by Ingo Molnar: * * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <[email protected]> */ #include <linux/rwsem.h> #include <linux/mutex.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/debug_locks.h> /* * We want to turn all lock-debugging facilities on/off at once, * via a global flag. The reason is that once a single bug has been * detected and reported, there might be cascade of followup bugs * that would just muddy the log. So we report the first one and * shut up after that. */ int debug_locks __read_mostly = 1; EXPORT_SYMBOL_GPL(debug_locks); /* * The locking-testsuite uses <debug_locks_silent> to get a * 'silent failure': nothing is printed to the console when * a locking bug is detected. */ int debug_locks_silent __read_mostly; EXPORT_SYMBOL_GPL(debug_locks_silent); /* * Generic 'turn off all lock debugging' function: */ int debug_locks_off(void) { if (debug_locks && __debug_locks_off()) { if (!debug_locks_silent) { console_verbose(); return 1; } } return 0; } EXPORT_SYMBOL_GPL(debug_locks_off);
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015-2021, Linaro Limited */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/arm-smccc.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/tee_core.h> #include "optee_private.h" struct notif_entry { struct list_head link; struct completion c; u_int key; }; static bool have_key(struct optee *optee, u_int key) { struct notif_entry *entry; list_for_each_entry(entry, &optee->notif.db, link) if (entry->key == key) return true; return false; } int optee_notif_wait(struct optee *optee, u_int key, u32 timeout) { unsigned long flags; struct notif_entry *entry; int rc = 0; if (key > optee->notif.max_key) return -EINVAL; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; init_completion(&entry->c); entry->key = key; spin_lock_irqsave(&optee->notif.lock, flags); /* * If the bit is already set it means that the key has already * been posted and we must not wait. */ if (test_bit(key, optee->notif.bitmap)) { clear_bit(key, optee->notif.bitmap); goto out; } /* * Check if someone is already waiting for this key. If there is * it's a programming error. */ if (have_key(optee, key)) { rc = -EBUSY; goto out; } list_add_tail(&entry->link, &optee->notif.db); /* * Unlock temporarily and wait for completion. 
	 */
	spin_unlock_irqrestore(&optee->notif.lock, flags);

	/* NOTE(review): @timeout is passed straight through, so it appears
	 * to be in jiffies — confirm callers convert from ms.
	 */
	if (timeout != 0) {
		if (!wait_for_completion_timeout(&entry->c, timeout))
			rc = -ETIMEDOUT;
	} else {
		wait_for_completion(&entry->c);
	}

	spin_lock_irqsave(&optee->notif.lock, flags);

	list_del(&entry->link);
out:
	spin_unlock_irqrestore(&optee->notif.lock, flags);

	kfree(entry);

	return rc;
}

/*
 * optee_notif_send() - post @key: complete a matching waiter if one is
 * queued, otherwise latch the key in the bitmap so a future
 * optee_notif_wait() returns immediately. Always returns 0 for a valid
 * key, -EINVAL otherwise.
 */
int optee_notif_send(struct optee *optee, u_int key)
{
	unsigned long flags;
	struct notif_entry *entry;

	if (key > optee->notif.max_key)
		return -EINVAL;

	spin_lock_irqsave(&optee->notif.lock, flags);

	list_for_each_entry(entry, &optee->notif.db, link)
		if (entry->key == key) {
			complete(&entry->c);
			goto out;
		}

	/* Only set the bit in case there where nobody waiting */
	set_bit(key, optee->notif.bitmap);
out:
	spin_unlock_irqrestore(&optee->notif.lock, flags);

	return 0;
}

/*
 * optee_notif_init() - set up the waiter list and the posted-key bitmap.
 * Return: 0 on success, -ENOMEM on bitmap allocation failure.
 */
int optee_notif_init(struct optee *optee, u_int max_key)
{
	spin_lock_init(&optee->notif.lock);
	INIT_LIST_HEAD(&optee->notif.db);
	/*
	 * NOTE(review): bitmap_zalloc(max_key) provides bits 0..max_key-1,
	 * while the checks above accept key == max_key — confirm the
	 * intended key range (off-by-one suspicion).
	 */
	optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL);
	if (!optee->notif.bitmap)
		return -ENOMEM;

	optee->notif.max_key = max_key;

	return 0;
}

/* Free the posted-key bitmap allocated in optee_notif_init(). */
void optee_notif_uninit(struct optee *optee)
{
	bitmap_free(optee->notif.bitmap);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Daniel Hsiao <[email protected]>
 *	Jungchang Tsao <[email protected]>
 *	Tiffany Lin <[email protected]>
 */

#ifndef _VENC_DRV_BASE_
#define _VENC_DRV_BASE_

#include "mtk_vcodec_enc_drv.h"

#include "venc_drv_if.h"

/*
 * Per-codec video encoder backend operations. Each hardware codec
 * implementation provides one instance of this vtable.
 */
struct venc_common_if {
	/**
	 * (*init)() - initialize driver
	 * @ctx:	[in] mtk v4l2 context
	 *
	 * NOTE(review): the resulting driver handle is presumably stored
	 * in @ctx rather than returned — confirm against implementations.
	 */
	int (*init)(struct mtk_vcodec_enc_ctx *ctx);

	/**
	 * (*encode)() - trigger encode
	 * @handle: [in] driver handle
	 * @opt: [in] encode option
	 * @frm_buf: [in] frame buffer to store input frame
	 * @bs_buf: [in] bitstream buffer to store output bitstream
	 * @result: [out] encode result
	 */
	int (*encode)(void *handle, enum venc_start_opt opt,
		      struct venc_frm_buf *frm_buf,
		      struct mtk_vcodec_mem *bs_buf,
		      struct venc_done_result *result);

	/**
	 * (*set_param)() - set driver's parameter
	 * @handle: [in] driver handle
	 * @type: [in] parameter type
	 * @in: [in] buffer to store the parameter
	 */
	int (*set_param)(void *handle, enum venc_set_param_type type,
			 struct venc_enc_param *in);

	/**
	 * (*deinit)() - deinitialize driver.
	 * @handle: [in] driver handle
	 */
	int (*deinit)(void *handle);
};

#endif
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MMU_H #define __MMU_H #include <linux/const.h> #include <asm/page.h> #include <asm/hypervisor.h> #define CTX_NR_BITS 13 #define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL)) /* UltraSPARC-III+ and later have a feature whereby you can * select what page size the various Data-TLB instances in the * chip. In order to gracefully support this, we put the version * field in a spot outside of the areas of the context register * where this parameter is specified. */ #define CTX_VERSION_SHIFT 22 #define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT) #define CTX_PGSZ_8KB _AC(0x0,UL) #define CTX_PGSZ_64KB _AC(0x1,UL) #define CTX_PGSZ_512KB _AC(0x2,UL) #define CTX_PGSZ_4MB _AC(0x3,UL) #define CTX_PGSZ_BITS _AC(0x7,UL) #define CTX_PGSZ0_NUC_SHIFT 61 #define CTX_PGSZ1_NUC_SHIFT 58 #define CTX_PGSZ0_SHIFT 16 #define CTX_PGSZ1_SHIFT 19 #define CTX_PGSZ_MASK ((CTX_PGSZ_BITS << CTX_PGSZ0_SHIFT) | \ (CTX_PGSZ_BITS << CTX_PGSZ1_SHIFT)) #define CTX_PGSZ_BASE CTX_PGSZ_8KB #define CTX_PGSZ_HUGE CTX_PGSZ_4MB #define CTX_PGSZ_KERN CTX_PGSZ_4MB /* Thus, when running on UltraSPARC-III+ and later, we use the following * PRIMARY_CONTEXT register values for the kernel context. */ #define CTX_CHEETAH_PLUS_NUC \ ((CTX_PGSZ_KERN << CTX_PGSZ0_NUC_SHIFT) | \ (CTX_PGSZ_BASE << CTX_PGSZ1_NUC_SHIFT)) #define CTX_CHEETAH_PLUS_CTX0 \ ((CTX_PGSZ_KERN << CTX_PGSZ0_SHIFT) | \ (CTX_PGSZ_BASE << CTX_PGSZ1_SHIFT)) /* If you want "the TLB context number" use CTX_NR_MASK. If you * want "the bits I program into the context registers" use * CTX_HW_MASK. 
*/ #define CTX_NR_MASK TAG_CONTEXT_BITS #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK) #define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT) #define CTX_VALID(__ctx) \ (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK)) #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK) #define CTX_NRBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_NR_MASK) #ifndef __ASSEMBLY__ #define TSB_ENTRY_ALIGNMENT 16 struct tsb { unsigned long tag; unsigned long pte; } __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte); void tsb_flush(unsigned long ent, unsigned long tag); void tsb_init(struct tsb *tsb, unsigned long size); struct tsb_config { struct tsb *tsb; unsigned long tsb_rss_limit; unsigned long tsb_nentries; unsigned long tsb_reg_val; unsigned long tsb_map_vaddr; unsigned long tsb_map_pte; }; #define MM_TSB_BASE 0 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) #define MM_TSB_HUGE 1 #define MM_NUM_TSBS 2 #else #define MM_NUM_TSBS 1 #endif /* ADI tags are stored when a page is swapped out and the storage for * tags is allocated dynamically. There is a tag storage descriptor * associated with each set of tag storage pages. Tag storage descriptors * are allocated dynamically. Since kernel will allocate a full page for * each tag storage descriptor, we can store up to * PAGE_SIZE/sizeof(tag storage descriptor) descriptors on that page. 
 */
typedef struct {
	unsigned long	start;		/* Start address for this tag storage */
	unsigned long	end;		/* Last address for tag storage */
	unsigned char	*tags;		/* Where the tags are */
	unsigned long	tag_users;	/* number of references to descriptor */
} tag_storage_desc_t;

/* Per-address-space MMU state (sparc64). */
typedef struct {
	spinlock_t		lock;
	unsigned long		sparc64_ctx_val;	/* context register value; see CTX_* above */
	unsigned long		hugetlb_pte_count;
	unsigned long		thp_pte_count;
	struct tsb_config	tsb_block[MM_NUM_TSBS];
	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
	void			*vdso;
	bool			adi;		/* ADI tagging in use for this mm */
	tag_storage_desc_t	*tag_store;	/* ADI tag storage descriptors */
	spinlock_t		tag_lock;	/* NOTE(review): presumably guards tag_store — confirm */
} mm_context_t;

#endif /* !__ASSEMBLY__ */

/*
 * Byte offsets of the struct tsb_config members, for use from assembly.
 * NOTE(review): these must stay in sync with the field order of
 * struct tsb_config above — confirm before reordering its fields.
 */
#define TSB_CONFIG_TSB		0x00
#define TSB_CONFIG_RSS_LIMIT	0x08
#define TSB_CONFIG_NENTRIES	0x10
#define TSB_CONFIG_REG_VAL	0x18
#define TSB_CONFIG_MAP_VADDR	0x20
#define TSB_CONFIG_MAP_PTE	0x28

#endif /* __MMU_H */
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef INITVALS_9462_2P1_H #define INITVALS_9462_2P1_H /* AR9462 2.1 */ #define ar9462_2p1_mac_postamble ar9462_2p0_mac_postamble #define ar9462_2p1_baseband_core ar9462_2p0_baseband_core #define ar9462_2p1_radio_core ar9462_2p0_radio_core #define ar9462_2p1_radio_postamble ar9462_2p0_radio_postamble #define ar9462_2p1_soc_postamble ar9462_2p0_soc_postamble #define ar9462_2p1_radio_postamble_sys2ant ar9462_2p0_radio_postamble_sys2ant #define ar9462_2p1_common_rx_gain ar9462_2p0_common_rx_gain #define ar9462_2p1_common_mixed_rx_gain ar9462_2p0_common_mixed_rx_gain #define ar9462_2p1_common_5g_xlna_only_rxgain ar9462_2p0_common_5g_xlna_only_rxgain #define ar9462_2p1_baseband_core_mix_rxgain ar9462_2p0_baseband_core_mix_rxgain #define ar9462_2p1_baseband_postamble_mix_rxgain ar9462_2p0_baseband_postamble_mix_rxgain #define ar9462_2p1_baseband_postamble_5g_xlna ar9462_2p0_baseband_postamble_5g_xlna #define ar9462_2p1_common_wo_xlna_rx_gain ar9462_2p0_common_wo_xlna_rx_gain #define ar9462_2p1_modes_low_ob_db_tx_gain ar9462_2p0_modes_low_ob_db_tx_gain #define ar9462_2p1_modes_high_ob_db_tx_gain ar9462_2p0_modes_high_ob_db_tx_gain #define 
ar9462_2p1_modes_mix_ob_db_tx_gain ar9462_2p0_modes_mix_ob_db_tx_gain #define ar9462_2p1_modes_fast_clock ar9462_2p0_modes_fast_clock #define ar9462_2p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484 #define ar9462_2p1_pciephy_clkreq_disable_L1 ar9462_2p0_pciephy_clkreq_disable_L1 static const u32 ar9462_2p1_mac_core[][2] = { /* Addr allmodes */ {0x00000008, 0x00000000}, {0x00000030, 0x000e0085}, {0x00000034, 0x00000005}, {0x00000040, 0x00000000}, {0x00000044, 0x00000000}, {0x00000048, 0x00000008}, {0x0000004c, 0x00000010}, {0x00000050, 0x00000000}, {0x00001040, 0x002ffc0f}, {0x00001044, 0x002ffc0f}, {0x00001048, 0x002ffc0f}, {0x0000104c, 0x002ffc0f}, {0x00001050, 0x002ffc0f}, {0x00001054, 0x002ffc0f}, {0x00001058, 0x002ffc0f}, {0x0000105c, 0x002ffc0f}, {0x00001060, 0x002ffc0f}, {0x00001064, 0x002ffc0f}, {0x000010f0, 0x00000100}, {0x00001270, 0x00000000}, {0x000012b0, 0x00000000}, {0x000012f0, 0x00000000}, {0x0000143c, 0x00000000}, {0x0000147c, 0x00000000}, {0x00001810, 0x0f000003}, {0x00008000, 0x00000000}, {0x00008004, 0x00000000}, {0x00008008, 0x00000000}, {0x0000800c, 0x00000000}, {0x00008018, 0x00000000}, {0x00008020, 0x00000000}, {0x00008038, 0x00000000}, {0x0000803c, 0x00080000}, {0x00008040, 0x00000000}, {0x00008044, 0x00000000}, {0x00008048, 0x00000000}, {0x0000804c, 0xffffffff}, {0x00008054, 0x00000000}, {0x00008058, 0x00000000}, {0x0000805c, 0x000fc78f}, {0x00008060, 0x0000000f}, {0x00008064, 0x00000000}, {0x00008070, 0x00000310}, {0x00008074, 0x00000020}, {0x00008078, 0x00000000}, {0x0000809c, 0x0000000f}, {0x000080a0, 0x00000000}, {0x000080a4, 0x02ff0000}, {0x000080a8, 0x0e070605}, {0x000080ac, 0x0000000d}, {0x000080b0, 0x00000000}, {0x000080b4, 0x00000000}, {0x000080b8, 0x00000000}, {0x000080bc, 0x00000000}, {0x000080c0, 0x2a800000}, {0x000080c4, 0x06900168}, {0x000080c8, 0x13881c20}, {0x000080cc, 0x01f40000}, {0x000080d0, 0x00252500}, {0x000080d4, 0x00b00005}, {0x000080d8, 0x00400002}, {0x000080dc, 0x00000000}, 
{0x000080e0, 0xffffffff}, {0x000080e4, 0x0000ffff}, {0x000080e8, 0x3f3f3f3f}, {0x000080ec, 0x00000000}, {0x000080f0, 0x00000000}, {0x000080f4, 0x00000000}, {0x000080fc, 0x00020000}, {0x00008100, 0x00000000}, {0x00008108, 0x00000052}, {0x0000810c, 0x00000000}, {0x00008110, 0x00000000}, {0x00008114, 0x000007ff}, {0x00008118, 0x000000aa}, {0x0000811c, 0x00003210}, {0x00008124, 0x00000000}, {0x00008128, 0x00000000}, {0x0000812c, 0x00000000}, {0x00008130, 0x00000000}, {0x00008134, 0x00000000}, {0x00008138, 0x00000000}, {0x0000813c, 0x0000ffff}, {0x00008144, 0xffffffff}, {0x00008168, 0x00000000}, {0x0000816c, 0x00000000}, {0x00008170, 0x18486e00}, {0x00008174, 0x33332210}, {0x00008178, 0x00000000}, {0x0000817c, 0x00020000}, {0x000081c4, 0x33332210}, {0x000081c8, 0x00000000}, {0x000081cc, 0x00000000}, {0x000081d4, 0x00000000}, {0x000081ec, 0x00000000}, {0x000081f0, 0x00000000}, {0x000081f4, 0x00000000}, {0x000081f8, 0x00000000}, {0x000081fc, 0x00000000}, {0x00008240, 0x00100000}, {0x00008244, 0x0010f400}, {0x00008248, 0x00000800}, {0x0000824c, 0x0001e800}, {0x00008250, 0x00000000}, {0x00008254, 0x00000000}, {0x00008258, 0x00000000}, {0x0000825c, 0x40000000}, {0x00008260, 0x00080922}, {0x00008264, 0x99c00010}, {0x00008268, 0xffffffff}, {0x0000826c, 0x0000ffff}, {0x00008270, 0x00000000}, {0x00008274, 0x40000000}, {0x00008278, 0x003e4180}, {0x0000827c, 0x00000004}, {0x00008284, 0x0000002c}, {0x00008288, 0x0000002c}, {0x0000828c, 0x000000ff}, {0x00008294, 0x00000000}, {0x00008298, 0x00000000}, {0x0000829c, 0x00000000}, {0x00008300, 0x00000140}, {0x00008314, 0x00000000}, {0x0000831c, 0x0000010d}, {0x00008328, 0x00000000}, {0x0000832c, 0x0000001f}, {0x00008330, 0x00000302}, {0x00008334, 0x00000700}, {0x00008338, 0xffff0000}, {0x0000833c, 0x02400000}, {0x00008340, 0x000107ff}, {0x00008344, 0xaa48107b}, {0x00008348, 0x008f0000}, {0x0000835c, 0x00000000}, {0x00008360, 0xffffffff}, {0x00008364, 0xffffffff}, {0x00008368, 0x00000000}, {0x00008370, 0x00000000}, {0x00008374, 
0x000000ff}, {0x00008378, 0x00000000}, {0x0000837c, 0x00000000}, {0x00008380, 0xffffffff}, {0x00008384, 0xffffffff}, {0x00008390, 0xffffffff}, {0x00008394, 0xffffffff}, {0x00008398, 0x00000000}, {0x0000839c, 0x00000000}, {0x000083a4, 0x0000fa14}, {0x000083a8, 0x000f0c00}, {0x000083ac, 0x33332210}, {0x000083b0, 0x33332210}, {0x000083b4, 0x33332210}, {0x000083b8, 0x33332210}, {0x000083bc, 0x00000000}, {0x000083c0, 0x00000000}, {0x000083c4, 0x00000000}, {0x000083c8, 0x00000000}, {0x000083cc, 0x00000200}, {0x000083d0, 0x000301ff}, }; static const u32 ar9462_2p1_baseband_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d}, {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81}, {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2}, {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280}, {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0}, {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, 
{0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f}, {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b}, {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff}, {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018}, {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108}, {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898}, {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002}, {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501}, {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b}, {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000}, {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa}, {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00}, {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce}, {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce}, {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa}, {0x0000b284, 
	 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};

/* SoC register preamble: {address, value} pairs applied in all modes. */
static const u32 ar9462_2p1_soc_preamble[][2] = {
	/* Addr      allmodes  */
	{0x000040a4, 0x00a0c9c9},
	{0x00007020, 0x00000000},
	{0x00007034, 0x00000002},
	{0x00007038, 0x000004c2},
};

#endif /* INITVALS_9462_2P1_H */
// SPDX-License-Identifier: MIT // // Copyright 2024 Advanced Micro Devices, Inc. #ifndef __DC_SPL_SCL_FILTERS_H__ #define __DC_SPL_SCL_FILTERS_H__ #include "dc_spl_types.h" const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio); const uint16_t *spl_get_filter_2tap_16p(void); const uint16_t *spl_get_filter_2tap_64p(void); const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio); #endif /* __DC_SPL_SCL_FILTERS_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KXCJK-1013 3-axis accelerometer driver
 * Copyright (c) 2014, Intel Corporation.
 */

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/events.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/accel/kxcjk_1013.h>

#define KXCJK1013_DRV_NAME "kxcjk1013"
#define KXCJK1013_IRQ_NAME "kxcjk1013_event"

/* KXTF9 high-pass-filtered output registers. */
#define KXTF9_REG_HP_XOUT_L		0x00
#define KXTF9_REG_HP_XOUT_H		0x01
#define KXTF9_REG_HP_YOUT_L		0x02
#define KXTF9_REG_HP_YOUT_H		0x03
#define KXTF9_REG_HP_ZOUT_L		0x04
#define KXTF9_REG_HP_ZOUT_H		0x05

#define KXCJK1013_REG_XOUT_L		0x06
/*
 * From the low-byte X axis register, all the other addresses of Y and Z can
 * be obtained by just applying an axis offset. The following axis defines
 * are just provided for clarity, but not used.
 */
#define KXCJK1013_REG_XOUT_H		0x07
#define KXCJK1013_REG_YOUT_L		0x08
#define KXCJK1013_REG_YOUT_H		0x09
#define KXCJK1013_REG_ZOUT_L		0x0A
#define KXCJK1013_REG_ZOUT_H		0x0B

#define KXCJK1013_REG_DCST_RESP		0x0C
#define KXCJK1013_REG_WHO_AM_I		0x0F
#define KXTF9_REG_TILT_POS_CUR		0x10
#define KXTF9_REG_TILT_POS_PREV		0x11
#define KXTF9_REG_INT_SRC1		0x15
#define KXTF9_REG_INT_SRC2		0x16
#define KXCJK1013_REG_INT_SRC1		0x16
#define KXCJK1013_REG_INT_SRC2		0x17
#define KXCJK1013_REG_STATUS_REG	0x18
#define KXCJK1013_REG_INT_REL		0x1A
#define KXCJK1013_REG_CTRL1		0x1B
#define KXTF9_REG_CTRL2			0x1C
#define KXTF9_REG_CTRL3			0x1D
#define KXCJK1013_REG_CTRL2		0x1D
#define KXCJK1013_REG_INT_CTRL1		0x1E
#define KXCJK1013_REG_INT_CTRL2		0x1F
#define KXTF9_REG_INT_CTRL3		0x20
#define KXCJK1013_REG_DATA_CTRL		0x21
#define KXTF9_REG_TILT_TIMER		0x28
#define KXCJK1013_REG_WAKE_TIMER	0x29
/* KXTF9 tap/double-tap detection timing and threshold registers. */
#define KXTF9_REG_TDT_TIMER		0x2B
#define KXTF9_REG_TDT_THRESH_H		0x2C
#define KXTF9_REG_TDT_THRESH_L		0x2D
#define KXTF9_REG_TDT_TAP_TIMER		0x2E
#define KXTF9_REG_TDT_TOTAL_TIMER	0x2F
#define KXTF9_REG_TDT_LATENCY_TIMER	0x30
#define KXTF9_REG_TDT_WINDOW_TIMER	0x31
#define KXCJK1013_REG_SELF_TEST		0x3A
#define KXTF9_REG_WAKE_THRESH		0x5A
#define KXTF9_REG_TILT_ANGLE		0x5C
#define KXTF9_REG_HYST_SET		0x5F
#define KXCJK1013_REG_WAKE_THRES	0x6A

/* Everything up to 0x11 is equal to KXCJK1013/KXTF9 above */
#define KX023_REG_INS1			0x12
#define KX023_REG_INS2			0x13
#define KX023_REG_INS3			0x14
#define KX023_REG_STAT			0x15
#define KX023_REG_INT_REL		0x17
#define KX023_REG_CNTL1			0x18
#define KX023_REG_CNTL2			0x19
#define KX023_REG_CNTL3			0x1A
#define KX023_REG_ODCNTL		0x1B
#define KX023_REG_INC1			0x1C
#define KX023_REG_INC2			0x1D
#define KX023_REG_INC3			0x1E
#define KX023_REG_INC4			0x1F
#define KX023_REG_INC5			0x20
#define KX023_REG_INC6			0x21
#define KX023_REG_TILT_TIMER		0x22
#define KX023_REG_WUFC			0x23
#define KX023_REG_TDTRC			0x24
#define KX023_REG_TDTC			0x25
#define KX023_REG_TTH			0x26
#define KX023_REG_TTL			0x27
#define KX023_REG_FTD			0x28
#define KX023_REG_STD			0x29
#define KX023_REG_TLT			0x2A
#define KX023_REG_TWS			0x2B
#define KX023_REG_ATH			0x30
#define KX023_REG_TILT_ANGLE_LL		0x32
#define KX023_REG_TILT_ANGLE_HL		0x33
#define KX023_REG_HYST_SET		0x34
#define KX023_REG_LP_CNTL		0x35
#define KX023_REG_BUF_CNTL1		0x3A
#define KX023_REG_BUF_CNTL2		0x3B
#define KX023_REG_BUF_STATUS_1		0x3C
#define KX023_REG_BUF_STATUS_2		0x3D
#define KX023_REG_BUF_CLEAR		0x3E
#define KX023_REG_BUF_READ		0x3F
#define KX023_REG_SELF_TEST		0x60

/* CTRL1 bits: power, resolution, data-ready and range selection. */
#define KXCJK1013_REG_CTRL1_BIT_PC1	BIT(7)
#define KXCJK1013_REG_CTRL1_BIT_RES	BIT(6)
#define KXCJK1013_REG_CTRL1_BIT_DRDY	BIT(5)
#define KXCJK1013_REG_CTRL1_BIT_GSEL1	BIT(4)
#define KXCJK1013_REG_CTRL1_BIT_GSEL0	BIT(3)
#define KXCJK1013_REG_CTRL1_BIT_WUFE	BIT(1)

#define KXCJK1013_REG_INT_CTRL1_BIT_IEU	BIT(2)	/* KXTF9 */
#define KXCJK1013_REG_INT_CTRL1_BIT_IEL	BIT(3)
#define KXCJK1013_REG_INT_CTRL1_BIT_IEA	BIT(4)
#define KXCJK1013_REG_INT_CTRL1_BIT_IEN	BIT(5)

/* KXTF9 tilt-position bits. */
#define KXTF9_REG_TILT_BIT_LEFT_EDGE	BIT(5)
#define KXTF9_REG_TILT_BIT_RIGHT_EDGE	BIT(4)
#define KXTF9_REG_TILT_BIT_LOWER_EDGE	BIT(3)
#define KXTF9_REG_TILT_BIT_UPPER_EDGE	BIT(2)
#define KXTF9_REG_TILT_BIT_FACE_DOWN	BIT(1)
#define KXTF9_REG_TILT_BIT_FACE_UP	BIT(0)

#define KXCJK1013_DATA_MASK_12_BIT	0x0FFF
#define KXCJK1013_MAX_STARTUP_TIME_US	100000

#define KXCJK1013_SLEEP_DELAY_MS	2000

#define KXCJK1013_REG_INT_SRC1_BIT_TPS		BIT(0)	/* KXTF9 */
#define KXCJK1013_REG_INT_SRC1_BIT_WUFS		BIT(1)
#define KXCJK1013_REG_INT_SRC1_MASK_TDTS	(BIT(2) | BIT(3))	/* KXTF9 */
#define KXCJK1013_REG_INT_SRC1_TAP_NONE		0
#define KXCJK1013_REG_INT_SRC1_TAP_SINGLE	BIT(2)
#define KXCJK1013_REG_INT_SRC1_TAP_DOUBLE	BIT(3)
#define KXCJK1013_REG_INT_SRC1_BIT_DRDY		BIT(4)

/* KXCJK: INT_SOURCE2: motion detect, KXTF9: INT_SRC_REG1: tap detect */
#define KXCJK1013_REG_INT_SRC2_BIT_ZP	BIT(0)
#define KXCJK1013_REG_INT_SRC2_BIT_ZN	BIT(1)
#define KXCJK1013_REG_INT_SRC2_BIT_YP	BIT(2)
#define KXCJK1013_REG_INT_SRC2_BIT_YN	BIT(3)
#define KXCJK1013_REG_INT_SRC2_BIT_XP	BIT(4)
#define KXCJK1013_REG_INT_SRC2_BIT_XN BIT(5) /* KX023 interrupt routing to INT1. INT2 can be configured with INC6 */ #define KX023_REG_INC4_BFI1 BIT(6) #define KX023_REG_INC4_WMI1 BIT(5) #define KX023_REG_INC4_DRDY1 BIT(4) #define KX023_REG_INC4_TDTI1 BIT(2) #define KX023_REG_INC4_WUFI1 BIT(1) #define KX023_REG_INC4_TPI1 BIT(0) #define KXCJK1013_DEFAULT_WAKE_THRES 1 /* Refer to section 4 of the specification */ struct kx_odr_start_up_time { int odr_bits; int usec; }; /* KXCJK-1013 */ static const struct kx_odr_start_up_time kxcjk1013_odr_start_up_times[] = { { 0x08, 100000 }, { 0x09, 100000 }, { 0x0A, 100000 }, { 0x0B, 100000 }, { 0x00, 80000 }, { 0x01, 41000 }, { 0x02, 21000 }, { 0x03, 11000 }, { 0x04, 6400 }, { 0x05, 3900 }, { 0x06, 2700 }, { 0x07, 2100 }, { } }; /* KXCTJ2-1009 */ static const struct kx_odr_start_up_time kxtj21009_odr_start_up_times[] = { { 0x08, 1240000 }, { 0x09, 621000 }, { 0x0A, 309000 }, { 0x0B, 151000 }, { 0x00, 80000 }, { 0x01, 41000 }, { 0x02, 21000 }, { 0x03, 11000 }, { 0x04, 6000 }, { 0x05, 4000 }, { 0x06, 3000 }, { 0x07, 2000 }, { } }; /* KXTF9 */ static const struct kx_odr_start_up_time kxtf9_odr_start_up_times[] = { { 0x01, 81000 }, { 0x02, 41000 }, { 0x03, 21000 }, { 0x04, 11000 }, { 0x05, 5100 }, { 0x06, 2700 }, { } }; /* KX023-1025 */ static const struct kx_odr_start_up_time kx0231025_odr_start_up_times[] = { /* First 4 are not in datasheet, taken from KXCTJ2-1009 */ { 0x08, 1240000 }, { 0x09, 621000 }, { 0x0A, 309000 }, { 0x0B, 151000 }, { 0x00, 81000 }, { 0x01, 40000 }, { 0x02, 22000 }, { 0x03, 12000 }, { 0x04, 7000 }, { 0x05, 4400 }, { 0x06, 3000 }, { 0x07, 3000 }, { } }; enum kx_acpi_type { ACPI_GENERIC, ACPI_SMO8500, ACPI_KIOX010A, }; struct kx_chipset_regs { u8 int_src1; u8 int_src2; u8 int_rel; u8 ctrl1; u8 wuf_ctrl; u8 int_ctrl1; u8 data_ctrl; u8 wake_timer; u8 wake_thres; }; static const struct kx_chipset_regs kxcjk1013_regs = { .int_src1 = KXCJK1013_REG_INT_SRC1, .int_src2 = KXCJK1013_REG_INT_SRC2, .int_rel = 
KXCJK1013_REG_INT_REL,
	.ctrl1		= KXCJK1013_REG_CTRL1,
	.wuf_ctrl	= KXCJK1013_REG_CTRL2,
	.int_ctrl1	= KXCJK1013_REG_INT_CTRL1,
	.data_ctrl	= KXCJK1013_REG_DATA_CTRL,
	.wake_timer	= KXCJK1013_REG_WAKE_TIMER,
	.wake_thres	= KXCJK1013_REG_WAKE_THRES,
};

static const struct kx_chipset_regs kxtf9_regs = {
	/* .int_src1 was moved to INT_SRC2 on KXTF9 */
	.int_src1	= KXTF9_REG_INT_SRC2,
	/* .int_src2 is not available */
	.int_rel	= KXCJK1013_REG_INT_REL,
	.ctrl1		= KXCJK1013_REG_CTRL1,
	.wuf_ctrl	= KXTF9_REG_CTRL3,
	.int_ctrl1	= KXCJK1013_REG_INT_CTRL1,
	.data_ctrl	= KXCJK1013_REG_DATA_CTRL,
	.wake_timer	= KXCJK1013_REG_WAKE_TIMER,
	.wake_thres	= KXTF9_REG_WAKE_THRESH,
};

/* The registers have totally different names but the bits are compatible */
static const struct kx_chipset_regs kx0231025_regs = {
	.int_src1	= KX023_REG_INS2,
	.int_src2	= KX023_REG_INS3,
	.int_rel	= KX023_REG_INT_REL,
	.ctrl1		= KX023_REG_CNTL1,
	.wuf_ctrl	= KX023_REG_CNTL3,
	.int_ctrl1	= KX023_REG_INC1,
	.data_ctrl	= KX023_REG_ODCNTL,
	.wake_timer	= KX023_REG_WUFC,
	.wake_thres	= KX023_REG_ATH,
};

/* Static description of one supported chip variant / ACPI binding. */
struct kx_chipset_info {
	const struct kx_chipset_regs *regs;
	const struct kx_odr_start_up_time *times;
	enum kx_acpi_type acpi_type;
};

static const struct kx_chipset_info kxcjk1013_info = {
	.regs = &kxcjk1013_regs,
	.times = pm_ptr(kxcjk1013_odr_start_up_times),
};

static const struct kx_chipset_info kxcj91008_info = {
	.regs = &kxcjk1013_regs,
	.times = pm_ptr(kxcjk1013_odr_start_up_times),
};

static const struct kx_chipset_info kxcj91008_kiox010a_info = {
	.regs = &kxcjk1013_regs,
	.times = pm_ptr(kxcjk1013_odr_start_up_times),
	.acpi_type = ACPI_KIOX010A,
};

static const struct kx_chipset_info kxcj91008_kiox020a_info = {
	.regs = &kxcjk1013_regs,
	.times = pm_ptr(kxcjk1013_odr_start_up_times),
	.acpi_type = ACPI_GENERIC,
};

static const struct kx_chipset_info kxcj91008_smo8500_info = {
	.regs = &kxcjk1013_regs,
	.times = pm_ptr(kxcjk1013_odr_start_up_times),
	.acpi_type = ACPI_SMO8500,
};

static const struct kx_chipset_info kxtj21009_info = {
.regs = &kxcjk1013_regs,
	.times = pm_ptr(kxtj21009_odr_start_up_times),
};

static const struct kx_chipset_info kxtf9_info = {
	.regs = &kxtf9_regs,
	.times = pm_ptr(kxtf9_odr_start_up_times),
};

static const struct kx_chipset_info kx0231025_info = {
	.regs = &kx0231025_regs,
	.times = pm_ptr(kx0231025_odr_start_up_times),
};

enum kxcjk1013_axis {
	AXIS_X,
	AXIS_Y,
	AXIS_Z,
	AXIS_MAX
};

/* Per-device driver state; 'mutex' serializes register access and state. */
struct kxcjk1013_data {
	struct i2c_client *client;
	struct iio_trigger *dready_trig;
	struct iio_trigger *motion_trig;
	struct iio_mount_matrix orientation;
	struct mutex mutex;
	/* Ensure timestamp naturally aligned */
	struct {
		s16 chans[AXIS_MAX];
		aligned_s64 timestamp;
	} scan;
	u8 odr_bits;		/* cached copy of the data_ctrl register */
	u8 range;		/* index into KXCJK1013_scale_table */
	int wake_thres;
	int wake_dur;
	bool active_high_intr;
	bool dready_trigger_on;
	int ev_enable_state;
	bool motion_trigger_on;
	int64_t timestamp;
	const struct kx_chipset_info *info;
};

enum kxcjk1013_mode {
	STANDBY,
	OPERATION,
};

enum kxcjk1013_range {
	KXCJK1013_RANGE_2G,
	KXCJK1013_RANGE_4G,
	KXCJK1013_RANGE_8G,
};

/* Sampling frequency (val.val2 Hz) to ODR / wake-up-function bits. */
struct kx_odr_map {
	int val;
	int val2;
	int odr_bits;
	int wuf_bits;
};

static const struct kx_odr_map samp_freq_table[] = {
	{ 0, 781000, 0x08, 0x00 },
	{ 1, 563000, 0x09, 0x01 },
	{ 3, 125000, 0x0A, 0x02 },
	{ 6, 250000, 0x0B, 0x03 },
	{ 12, 500000, 0x00, 0x04 },
	{ 25, 0, 0x01, 0x05 },
	{ 50, 0, 0x02, 0x06 },
	{ 100, 0, 0x03, 0x06 },
	{ 200, 0, 0x04, 0x06 },
	{ 400, 0, 0x05, 0x06 },
	{ 800, 0, 0x06, 0x06 },
	{ 1600, 0, 0x07, 0x06 },
};

static const char *const kxcjk1013_samp_freq_avail =
	"0.781000 1.563000 3.125000 6.250000 12.500000 25 50 100 200 400 800 1600";

static const struct kx_odr_map kxtf9_samp_freq_table[] = {
	{ 25, 0, 0x01, 0x00 },
	{ 50, 0, 0x02, 0x01 },
	{ 100, 0, 0x03, 0x01 },
	{ 200, 0, 0x04, 0x01 },
	{ 400, 0, 0x05, 0x01 },
	{ 800, 0, 0x06, 0x01 },
};

static const char *const kxtf9_samp_freq_avail = "25 50 100 200 400 800";

/* Scale (micro-g per LSB) and matching CTRL1 GSEL bit values. */
static const struct {
	u16 scale;
	u8 gsel_0;
	u8 gsel_1;
} KXCJK1013_scale_table[] = {
	{9582, 0, 0},
	{19163, 1, 0},
	{38326, 0, 1}
};

#ifdef CONFIG_ACPI
enum kiox010a_fn_index
{ KIOX010A_SET_LAPTOP_MODE = 1, KIOX010A_SET_TABLET_MODE = 2, }; static int kiox010a_dsm(struct device *dev, int fn_index) { acpi_handle handle = ACPI_HANDLE(dev); guid_t kiox010a_dsm_guid; union acpi_object *obj; if (!handle) return -ENODEV; guid_parse("1f339696-d475-4e26-8cad-2e9f8e6d7a91", &kiox010a_dsm_guid); obj = acpi_evaluate_dsm(handle, &kiox010a_dsm_guid, 1, fn_index, NULL); if (!obj) return -EIO; ACPI_FREE(obj); return 0; } #endif static int kxcjk1013_set_mode(struct kxcjk1013_data *data, enum kxcjk1013_mode mode) { const struct kx_chipset_regs *regs = data->info->regs; int ret; ret = i2c_smbus_read_byte_data(data->client, regs->ctrl1); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_ctrl1\n"); return ret; } if (mode == STANDBY) ret &= ~KXCJK1013_REG_CTRL1_BIT_PC1; else ret |= KXCJK1013_REG_CTRL1_BIT_PC1; ret = i2c_smbus_write_byte_data(data->client, regs->ctrl1, ret); if (ret < 0) { dev_err(&data->client->dev, "Error writing reg_ctrl1\n"); return ret; } return 0; } static int kxcjk1013_get_mode(struct kxcjk1013_data *data, enum kxcjk1013_mode *mode) { const struct kx_chipset_regs *regs = data->info->regs; int ret; ret = i2c_smbus_read_byte_data(data->client, regs->ctrl1); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_ctrl1\n"); return ret; } if (ret & KXCJK1013_REG_CTRL1_BIT_PC1) *mode = OPERATION; else *mode = STANDBY; return 0; } static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index) { const struct kx_chipset_regs *regs = data->info->regs; int ret; ret = i2c_smbus_read_byte_data(data->client, regs->ctrl1); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_ctrl1\n"); return ret; } ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 | KXCJK1013_REG_CTRL1_BIT_GSEL1); ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3); ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4); ret = i2c_smbus_write_byte_data(data->client, regs->ctrl1, ret); if (ret < 0) { dev_err(&data->client->dev, "Error writing 
reg_ctrl1\n"); return ret; } data->range = range_index; return 0; } static int kxcjk1013_chip_init(struct kxcjk1013_data *data) { const struct kx_chipset_regs *regs = data->info->regs; int ret; #ifdef CONFIG_ACPI if (data->info->acpi_type == ACPI_KIOX010A) { /* Make sure the kbd and touchpad on 2-in-1s using 2 KXCJ91008-s work */ kiox010a_dsm(&data->client->dev, KIOX010A_SET_LAPTOP_MODE); } #endif ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_WHO_AM_I); if (ret < 0) { dev_err(&data->client->dev, "Error reading who_am_i\n"); return ret; } dev_dbg(&data->client->dev, "KXCJK1013 Chip Id %x\n", ret); ret = kxcjk1013_set_mode(data, STANDBY); if (ret < 0) return ret; ret = i2c_smbus_read_byte_data(data->client, regs->ctrl1); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_ctrl1\n"); return ret; } /* Set 12 bit mode */ ret |= KXCJK1013_REG_CTRL1_BIT_RES; ret = i2c_smbus_write_byte_data(data->client, regs->ctrl1, ret); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_ctrl\n"); return ret; } /* Setting range to 4G */ ret = kxcjk1013_set_range(data, KXCJK1013_RANGE_4G); if (ret < 0) return ret; ret = i2c_smbus_read_byte_data(data->client, regs->data_ctrl); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_data_ctrl\n"); return ret; } data->odr_bits = ret; /* Set up INT polarity */ ret = i2c_smbus_read_byte_data(data->client, regs->int_ctrl1); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n"); return ret; } if (data->active_high_intr) ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEA; else ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEA; ret = i2c_smbus_write_byte_data(data->client, regs->int_ctrl1, ret); if (ret < 0) { dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n"); return ret; } /* On KX023, route all used interrupts to INT1 for now */ if (data->info == &kx0231025_info && data->client->irq > 0) { ret = i2c_smbus_write_byte_data(data->client, KX023_REG_INC4, KX023_REG_INC4_DRDY1 | KX023_REG_INC4_WUFI1); 
if (ret < 0) {
			dev_err(&data->client->dev, "Error writing reg_inc4\n");
			return ret;
		}
	}

	ret = kxcjk1013_set_mode(data, OPERATION);
	if (ret < 0)
		return ret;

	data->wake_thres = KXCJK1013_DEFAULT_WAKE_THRES;

	return 0;
}

/* Return the start-up time (usec) for the cached ODR, or a safe maximum. */
static int kxcjk1013_get_startup_times(struct kxcjk1013_data *data)
{
	const struct kx_odr_start_up_time *times;

	for (times = data->info->times; times->usec; times++) {
		if (times->odr_bits == data->odr_bits)
			return times->usec;
	}

	return KXCJK1013_MAX_STARTUP_TIME_US;
}

/* Take (on=true) or release (on=false) a runtime-PM reference; no-op without CONFIG_PM. */
static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
{
#ifdef CONFIG_PM
	int ret;

	if (on)
		ret = pm_runtime_resume_and_get(&data->client->dev);
	else {
		pm_runtime_mark_last_busy(&data->client->dev);
		ret = pm_runtime_put_autosuspend(&data->client->dev);
	}
	if (ret < 0) {
		dev_err(&data->client->dev,
			"Failed: %s for %d\n", __func__, on);
		return ret;
	}
#endif

	return 0;
}

/* Write the cached wake duration and threshold to the chip. */
static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
{
	const struct kx_chipset_regs *regs = data->info->regs;
	int ret;

	ret = i2c_smbus_write_byte_data(data->client, regs->wake_timer,
					data->wake_dur);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error writing reg_wake_timer\n");
		return ret;
	}

	ret = i2c_smbus_write_byte_data(data->client, regs->wake_thres,
					data->wake_thres);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error writing reg_wake_thres\n");
		return ret;
	}

	return 0;
}

/* Enable/disable the wake-up-function (any-motion) interrupt. */
static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
						bool status)
{
	const struct kx_chipset_regs *regs = data->info->regs;
	int ret;
	enum kxcjk1013_mode store_mode;

	ret = kxcjk1013_get_mode(data, &store_mode);
	if (ret < 0)
		return ret;

	/* This is requirement by spec to change state to STANDBY */
	ret = kxcjk1013_set_mode(data, STANDBY);
	if (ret < 0)
		return ret;

	ret = kxcjk1013_chip_update_thresholds(data);
	if (ret < 0)
		return ret;

	ret = i2c_smbus_read_byte_data(data->client, regs->int_ctrl1);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
		return ret;
	}

	if (status)
		ret
|= KXCJK1013_REG_INT_CTRL1_BIT_IEN;
	else
		ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;

	ret = i2c_smbus_write_byte_data(data->client, regs->int_ctrl1, ret);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
		return ret;
	}

	ret = i2c_smbus_read_byte_data(data->client, regs->ctrl1);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
		return ret;
	}

	if (status)
		ret |= KXCJK1013_REG_CTRL1_BIT_WUFE;
	else
		ret &= ~KXCJK1013_REG_CTRL1_BIT_WUFE;

	ret = i2c_smbus_write_byte_data(data->client, regs->ctrl1, ret);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
		return ret;
	}

	/* Restore OPERATION only if the chip was running before. */
	if (store_mode == OPERATION) {
		ret = kxcjk1013_set_mode(data, OPERATION);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Enable/disable the data-ready interrupt (DRDY bit in CTRL1). */
static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
					      bool status)
{
	const struct kx_chipset_regs *regs = data->info->regs;
	int ret;
	enum kxcjk1013_mode store_mode;

	ret = kxcjk1013_get_mode(data, &store_mode);
	if (ret < 0)
		return ret;

	/* This is requirement by spec to change state to STANDBY */
	ret = kxcjk1013_set_mode(data, STANDBY);
	if (ret < 0)
		return ret;

	ret = i2c_smbus_read_byte_data(data->client, regs->int_ctrl1);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
		return ret;
	}

	if (status)
		ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEN;
	else
		ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;

	ret = i2c_smbus_write_byte_data(data->client, regs->int_ctrl1, ret);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
		return ret;
	}

	ret = i2c_smbus_read_byte_data(data->client, regs->ctrl1);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
		return ret;
	}

	if (status)
		ret |= KXCJK1013_REG_CTRL1_BIT_DRDY;
	else
		ret &= ~KXCJK1013_REG_CTRL1_BIT_DRDY;

	ret = i2c_smbus_write_byte_data(data->client, regs->ctrl1, ret);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
		return ret;
	}

	if (store_mode == OPERATION) {
		ret = kxcjk1013_set_mode(data,
OPERATION);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Find the ODR table entry matching frequency val.val2, or ERR_PTR(-EINVAL). */
static const struct kx_odr_map *kxcjk1013_find_odr_value(
	const struct kx_odr_map *map, size_t map_size, int val, int val2)
{
	int i;

	for (i = 0; i < map_size; ++i) {
		if (map[i].val == val && map[i].val2 == val2)
			return &map[i];
	}

	return ERR_PTR(-EINVAL);
}

/* Translate ODR register bits back into a val.val2 frequency pair. */
static int kxcjk1013_convert_odr_value(const struct kx_odr_map *map,
				       size_t map_size, int odr_bits,
				       int *val, int *val2)
{
	int i;

	for (i = 0; i < map_size; ++i) {
		if (map[i].odr_bits == odr_bits) {
			*val = map[i].val;
			*val2 = map[i].val2;
			return IIO_VAL_INT_PLUS_MICRO;
		}
	}

	return -EINVAL;
}

/* Program the output data rate and matching wake-up-function rate. */
static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
{
	const struct kx_chipset_regs *regs = data->info->regs;
	int ret;
	enum kxcjk1013_mode store_mode;
	const struct kx_odr_map *odr_setting;

	ret = kxcjk1013_get_mode(data, &store_mode);
	if (ret < 0)
		return ret;

	/* KXTF9 uses its own, smaller frequency table. */
	if (data->info == &kxtf9_info)
		odr_setting = kxcjk1013_find_odr_value(kxtf9_samp_freq_table,
						       ARRAY_SIZE(kxtf9_samp_freq_table),
						       val, val2);
	else
		odr_setting = kxcjk1013_find_odr_value(samp_freq_table,
						       ARRAY_SIZE(samp_freq_table),
						       val, val2);

	if (IS_ERR(odr_setting))
		return PTR_ERR(odr_setting);

	/* To change ODR, the chip must be set to STANDBY as per spec */
	ret = kxcjk1013_set_mode(data, STANDBY);
	if (ret < 0)
		return ret;

	ret = i2c_smbus_write_byte_data(data->client, regs->data_ctrl,
					odr_setting->odr_bits);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error writing data_ctrl\n");
		return ret;
	}

	data->odr_bits = odr_setting->odr_bits;

	ret = i2c_smbus_write_byte_data(data->client, regs->wuf_ctrl,
					odr_setting->wuf_bits);
	if (ret < 0) {
		dev_err(&data->client->dev, "Error writing reg_ctrl2\n");
		return ret;
	}

	if (store_mode == OPERATION) {
		ret = kxcjk1013_set_mode(data, OPERATION);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Report the current sampling frequency derived from the cached ODR bits. */
static int kxcjk1013_get_odr(struct kxcjk1013_data *data, int *val, int *val2)
{
	if (data->info == &kxtf9_info)
		return kxcjk1013_convert_odr_value(kxtf9_samp_freq_table,
ARRAY_SIZE(kxtf9_samp_freq_table),
						   data->odr_bits, val, val2);
	else
		return kxcjk1013_convert_odr_value(samp_freq_table,
						   ARRAY_SIZE(samp_freq_table),
						   data->odr_bits, val, val2);
}

/* Read one axis' 16-bit raw sample (axis offset from XOUT_L). */
static int kxcjk1013_get_acc_reg(struct kxcjk1013_data *data, int axis)
{
	u8 reg = KXCJK1013_REG_XOUT_L + axis * 2;
	int ret;

	ret = i2c_smbus_read_word_data(data->client, reg);
	if (ret < 0) {
		dev_err(&data->client->dev,
			"failed to read accel_%c registers\n", 'x' + axis);
		return ret;
	}

	return ret;
}

/* Select the G-range whose scale matches 'val' (micro-g per LSB). */
static int kxcjk1013_set_scale(struct kxcjk1013_data *data, int val)
{
	int ret, i;
	enum kxcjk1013_mode store_mode;

	for (i = 0; i < ARRAY_SIZE(KXCJK1013_scale_table); ++i) {
		if (KXCJK1013_scale_table[i].scale == val) {
			ret = kxcjk1013_get_mode(data, &store_mode);
			if (ret < 0)
				return ret;

			ret = kxcjk1013_set_mode(data, STANDBY);
			if (ret < 0)
				return ret;

			ret = kxcjk1013_set_range(data, i);
			if (ret < 0)
				return ret;

			if (store_mode == OPERATION) {
				ret = kxcjk1013_set_mode(data, OPERATION);
				if (ret)
					return ret;
			}

			return 0;
		}
	}

	return -EINVAL;
}

/* IIO read_raw: raw sample, scale, or sampling frequency. */
static int kxcjk1013_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan, int *val,
			      int *val2, long mask)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		mutex_lock(&data->mutex);
		if (iio_buffer_enabled(indio_dev))
			ret = -EBUSY;	/* buffered capture owns the device */
		else {
			ret = kxcjk1013_set_power_state(data, true);
			if (ret < 0) {
				mutex_unlock(&data->mutex);
				return ret;
			}
			ret = kxcjk1013_get_acc_reg(data, chan->scan_index);
			if (ret < 0) {
				kxcjk1013_set_power_state(data, false);
				mutex_unlock(&data->mutex);
				return ret;
			}
			*val = sign_extend32(ret >> chan->scan_type.shift,
					     chan->scan_type.realbits - 1);
			ret = kxcjk1013_set_power_state(data, false);
		}
		mutex_unlock(&data->mutex);

		if (ret < 0)
			return ret;

		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		*val = 0;
		*val2 = KXCJK1013_scale_table[data->range].scale;
		return IIO_VAL_INT_PLUS_MICRO;

	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&data->mutex);
		ret = kxcjk1013_get_odr(data, val, val2);
mutex_unlock(&data->mutex);
		return ret;

	default:
		return -EINVAL;
	}
}

/* IIO write_raw: set sampling frequency or scale. */
static int kxcjk1013_write_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan, int val,
			       int val2, long mask)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&data->mutex);
		ret = kxcjk1013_set_odr(data, val, val2);
		mutex_unlock(&data->mutex);
		break;
	case IIO_CHAN_INFO_SCALE:
		if (val)
			return -EINVAL;	/* scale integer part must be 0 */

		mutex_lock(&data->mutex);
		ret = kxcjk1013_set_scale(data, val2);
		mutex_unlock(&data->mutex);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/* Report the cached motion-event threshold or period. */
static int kxcjk1013_read_event(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info,
				int *val, int *val2)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);

	*val2 = 0;
	switch (info) {
	case IIO_EV_INFO_VALUE:
		*val = data->wake_thres;
		break;
	case IIO_EV_INFO_PERIOD:
		*val = data->wake_dur;
		break;
	default:
		return -EINVAL;
	}

	return IIO_VAL_INT;
}

/* Update the cached threshold/period; rejected while events are enabled. */
static int kxcjk1013_write_event(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info,
				 int val, int val2)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);

	if (data->ev_enable_state)
		return -EBUSY;

	switch (info) {
	case IIO_EV_INFO_VALUE:
		data->wake_thres = val;
		break;
	case IIO_EV_INFO_PERIOD:
		data->wake_dur = val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int kxcjk1013_read_event_config(struct iio_dev *indio_dev,
				       const struct iio_chan_spec *chan,
				       enum iio_event_type type,
				       enum iio_event_direction dir)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);

	return data->ev_enable_state;
}

/* Enable/disable the any-motion event, taking/dropping a PM reference. */
static int kxcjk1013_write_event_config(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan,
					enum iio_event_type type,
					enum iio_event_direction dir,
					bool state)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);
	int ret;

	if (state && data->ev_enable_state)
		return
0;

	mutex_lock(&data->mutex);

	/* If the motion trigger still needs the interrupt, only clear state. */
	if (!state && data->motion_trigger_on) {
		data->ev_enable_state = 0;
		mutex_unlock(&data->mutex);
		return 0;
	}

	/*
	 * We will expect the enable and disable to do operation in
	 * reverse order. This will happen here anyway as our
	 * resume operation uses sync mode runtime pm calls, the
	 * suspend operation will be delayed by autosuspend delay
	 * So the disable operation will still happen in reverse of
	 * enable operation. When runtime pm is disabled the mode
	 * is always on so sequence doesn't matter
	 */
	ret = kxcjk1013_set_power_state(data, state);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	ret = kxcjk1013_setup_any_motion_interrupt(data, state);
	if (ret < 0) {
		kxcjk1013_set_power_state(data, false);
		data->ev_enable_state = 0;
		mutex_unlock(&data->mutex);
		return ret;
	}

	data->ev_enable_state = state;
	mutex_unlock(&data->mutex);

	return 0;
}

/* Buffer hooks: hold a power reference while the buffer is enabled. */
static int kxcjk1013_buffer_preenable(struct iio_dev *indio_dev)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);

	return kxcjk1013_set_power_state(data, true);
}

static int kxcjk1013_buffer_postdisable(struct iio_dev *indio_dev)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);

	return kxcjk1013_set_power_state(data, false);
}

/* sysfs: list the frequency table matching the detected variant. */
static ssize_t kxcjk1013_get_samp_freq_avail(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct kxcjk1013_data *data = iio_priv(indio_dev);
	const char *str;

	if (data->info == &kxtf9_info)
		str = kxtf9_samp_freq_avail;
	else
		str = kxcjk1013_samp_freq_avail;

	return sprintf(buf, "%s\n", str);
}

static IIO_DEVICE_ATTR(in_accel_sampling_frequency_available, S_IRUGO,
		       kxcjk1013_get_samp_freq_avail, NULL, 0);

static IIO_CONST_ATTR(in_accel_scale_available, "0.009582 0.019163 0.038326");

static struct attribute *kxcjk1013_attributes[] = {
	&iio_dev_attr_in_accel_sampling_frequency_available.dev_attr.attr,
	&iio_const_attr_in_accel_scale_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group
kxcjk1013_attrs_group = {
	.attrs = kxcjk1013_attributes,
};

/* Single threshold event, either direction, per-axis enable/value/period. */
static const struct iio_event_spec kxcjk1013_event = {
	.type = IIO_EV_TYPE_THRESH,
	.dir = IIO_EV_DIR_EITHER,
	.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			 BIT(IIO_EV_INFO_ENABLE) |
			 BIT(IIO_EV_INFO_PERIOD)
};

static const struct iio_mount_matrix *
kxcjk1013_get_mount_matrix(const struct iio_dev *indio_dev,
			   const struct iio_chan_spec *chan)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);

	return &data->orientation;
}

static const struct iio_chan_spec_ext_info kxcjk1013_ext_info[] = {
	IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kxcjk1013_get_mount_matrix),
	{ }
};

/* 12-bit samples, left-justified in 16-bit little-endian words (shift 4). */
#define KXCJK1013_CHANNEL(_axis) {					\
	.type = IIO_ACCEL,						\
	.modified = 1,							\
	.channel2 = IIO_MOD_##_axis,					\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
				    BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
	.scan_index = AXIS_##_axis,					\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = 12,						\
		.storagebits = 16,					\
		.shift = 4,						\
		.endianness = IIO_LE,					\
	},								\
	.event_spec = &kxcjk1013_event,					\
	.ext_info = kxcjk1013_ext_info,					\
	.num_event_specs = 1						\
}

static const struct iio_chan_spec kxcjk1013_channels[] = {
	KXCJK1013_CHANNEL(X),
	KXCJK1013_CHANNEL(Y),
	KXCJK1013_CHANNEL(Z),
	IIO_CHAN_SOFT_TIMESTAMP(3),
};

static const struct iio_buffer_setup_ops kxcjk1013_buffer_setup_ops = {
	.preenable		= kxcjk1013_buffer_preenable,
	.postdisable		= kxcjk1013_buffer_postdisable,
};

static const struct iio_info kxcjk1013_iio_info = {
	.attrs			= &kxcjk1013_attrs_group,
	.read_raw		= kxcjk1013_read_raw,
	.write_raw		= kxcjk1013_write_raw,
	.read_event_value	= kxcjk1013_read_event,
	.write_event_value	= kxcjk1013_write_event,
	.write_event_config	= kxcjk1013_write_event_config,
	.read_event_config	= kxcjk1013_read_event_config,
};

static const unsigned long kxcjk1013_scan_masks[] = {0x7, 0};

/* Pollfunc bottom half: bulk-read X/Y/Z and push to the IIO buffer. */
static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct kxcjk1013_data *data =
iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client,
							KXCJK1013_REG_XOUT_L,
							AXIS_MAX * 2,
							(u8 *)data->scan.chans);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		goto err;

	iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
					   data->timestamp);
err:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

/* Trigger reenable: reading INT_REL acknowledges the chip's interrupt. */
static void kxcjk1013_trig_reen(struct iio_trigger *trig)
{
	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
	struct kxcjk1013_data *data = iio_priv(indio_dev);
	const struct kx_chipset_regs *regs = data->info->regs;
	int ret;

	ret = i2c_smbus_read_byte_data(data->client, regs->int_rel);
	if (ret < 0)
		dev_err(&data->client->dev, "Error reading reg_int_rel\n");
}

/* Enable/disable whichever of the two triggers (data-ready / motion) fired. */
static int kxcjk1013_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
	struct kxcjk1013_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);

	/* The event path still needs the interrupt; only clear trigger state. */
	if (!state && data->ev_enable_state && data->motion_trigger_on) {
		data->motion_trigger_on = false;
		mutex_unlock(&data->mutex);
		return 0;
	}

	ret = kxcjk1013_set_power_state(data, state);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}
	if (data->motion_trig == trig)
		ret = kxcjk1013_setup_any_motion_interrupt(data, state);
	else
		ret = kxcjk1013_setup_new_data_interrupt(data, state);
	if (ret < 0) {
		kxcjk1013_set_power_state(data, false);
		mutex_unlock(&data->mutex);
		return ret;
	}
	if (data->motion_trig == trig)
		data->motion_trigger_on = state;
	else
		data->dready_trigger_on = state;

	mutex_unlock(&data->mutex);

	return 0;
}

static const struct iio_trigger_ops kxcjk1013_trigger_ops = {
	.set_trigger_state = kxcjk1013_data_rdy_trigger_set_state,
	.reenable	   = kxcjk1013_trig_reen,
};

/* Decode INT_SRC2 per-axis motion bits into IIO threshold events. */
static void kxcjk1013_report_motion_event(struct iio_dev *indio_dev)
{
	struct kxcjk1013_data *data = iio_priv(indio_dev);
	const struct kx_chipset_regs *regs = data->info->regs;

	int ret = i2c_smbus_read_byte_data(data->client,
regs->int_src2); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_int_src2\n"); return; } if (ret & KXCJK1013_REG_INT_SRC2_BIT_XN) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), data->timestamp); if (ret & KXCJK1013_REG_INT_SRC2_BIT_XP) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), data->timestamp); if (ret & KXCJK1013_REG_INT_SRC2_BIT_YN) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), data->timestamp); if (ret & KXCJK1013_REG_INT_SRC2_BIT_YP) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), data->timestamp); if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZN) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), data->timestamp); if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZP) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), data->timestamp); } static irqreturn_t kxcjk1013_event_handler(int irq, void *private) { struct iio_dev *indio_dev = private; struct kxcjk1013_data *data = iio_priv(indio_dev); const struct kx_chipset_regs *regs = data->info->regs; int ret; ret = i2c_smbus_read_byte_data(data->client, regs->int_src1); if (ret < 0) { dev_err(&data->client->dev, "Error reading reg_int_src1\n"); goto ack_intr; } if (ret & KXCJK1013_REG_INT_SRC1_BIT_WUFS) { if (data->info == &kxtf9_info) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_AND_Y_AND_Z, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), data->timestamp); else kxcjk1013_report_motion_event(indio_dev); } ack_intr: if (data->dready_trigger_on) return IRQ_HANDLED; ret = i2c_smbus_read_byte_data(data->client, regs->int_rel); if (ret < 0) dev_err(&data->client->dev, "Error reading reg_int_rel\n"); return IRQ_HANDLED; } static irqreturn_t 
kxcjk1013_data_rdy_trig_poll(int irq, void *private) { struct iio_dev *indio_dev = private; struct kxcjk1013_data *data = iio_priv(indio_dev); data->timestamp = iio_get_time_ns(indio_dev); if (data->dready_trigger_on) iio_trigger_poll(data->dready_trig); else if (data->motion_trigger_on) iio_trigger_poll(data->motion_trig); if (data->ev_enable_state) return IRQ_WAKE_THREAD; else return IRQ_HANDLED; } static int kxcjk1013_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); static const char * const regulator_names[] = { "vdd", "vddio" }; struct kxcjk1013_data *data; struct iio_dev *indio_dev; struct kxcjk_1013_platform_data *pdata; const void *ddata = NULL; const char *name; int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); data->client = client; pdata = dev_get_platdata(&client->dev); if (pdata) { data->active_high_intr = pdata->active_high_intr; data->orientation = pdata->orientation; } else { data->active_high_intr = true; /* default polarity */ if (!iio_read_acpi_mount_matrix(&client->dev, &data->orientation, "ROTM")) { ret = iio_read_mount_matrix(&client->dev, &data->orientation); if (ret) return ret; } } ret = devm_regulator_bulk_get_enable(&client->dev, ARRAY_SIZE(regulator_names), regulator_names); if (ret) return dev_err_probe(&client->dev, ret, "Failed to get regulators\n"); /* * A typical delay of 10ms is required for powering up * according to the data sheets of supported chips. * Hence double that to play safe. 
*/ msleep(20); if (id) { name = id->name; data->info = (const struct kx_chipset_info *)(id->driver_data); } else { name = iio_get_acpi_device_name_and_data(&client->dev, &ddata); data->info = ddata; if (data->info == &kxcj91008_kiox010a_info) indio_dev->label = "accel-display"; else if (data->info == &kxcj91008_kiox020a_info) indio_dev->label = "accel-base"; } if (!name) return -ENODEV; ret = kxcjk1013_chip_init(data); if (ret < 0) return ret; mutex_init(&data->mutex); indio_dev->channels = kxcjk1013_channels; indio_dev->num_channels = ARRAY_SIZE(kxcjk1013_channels); indio_dev->available_scan_masks = kxcjk1013_scan_masks; indio_dev->name = name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &kxcjk1013_iio_info; if (client->irq > 0 && data->info->acpi_type != ACPI_SMO8500) { ret = devm_request_threaded_irq(&client->dev, client->irq, kxcjk1013_data_rdy_trig_poll, kxcjk1013_event_handler, IRQF_TRIGGER_RISING, KXCJK1013_IRQ_NAME, indio_dev); if (ret) goto err_poweroff; data->dready_trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d", indio_dev->name, iio_device_id(indio_dev)); if (!data->dready_trig) { ret = -ENOMEM; goto err_poweroff; } data->motion_trig = devm_iio_trigger_alloc(&client->dev, "%s-any-motion-dev%d", indio_dev->name, iio_device_id(indio_dev)); if (!data->motion_trig) { ret = -ENOMEM; goto err_poweroff; } data->dready_trig->ops = &kxcjk1013_trigger_ops; iio_trigger_set_drvdata(data->dready_trig, indio_dev); ret = iio_trigger_register(data->dready_trig); if (ret) goto err_poweroff; indio_dev->trig = iio_trigger_get(data->dready_trig); data->motion_trig->ops = &kxcjk1013_trigger_ops; iio_trigger_set_drvdata(data->motion_trig, indio_dev); ret = iio_trigger_register(data->motion_trig); if (ret) { data->motion_trig = NULL; goto err_trigger_unregister; } } ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, kxcjk1013_trigger_handler, &kxcjk1013_buffer_setup_ops); if (ret < 0) { dev_err(&client->dev, "iio triggered buffer setup 
failed\n"); goto err_trigger_unregister; } ret = pm_runtime_set_active(&client->dev); if (ret) goto err_buffer_cleanup; pm_runtime_enable(&client->dev); pm_runtime_set_autosuspend_delay(&client->dev, KXCJK1013_SLEEP_DELAY_MS); pm_runtime_use_autosuspend(&client->dev); ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); goto err_pm_cleanup; } return 0; err_pm_cleanup: pm_runtime_dont_use_autosuspend(&client->dev); pm_runtime_disable(&client->dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: if (data->dready_trig) iio_trigger_unregister(data->dready_trig); if (data->motion_trig) iio_trigger_unregister(data->motion_trig); err_poweroff: kxcjk1013_set_mode(data, STANDBY); return ret; } static void kxcjk1013_remove(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); struct kxcjk1013_data *data = iio_priv(indio_dev); iio_device_unregister(indio_dev); pm_runtime_disable(&client->dev); pm_runtime_set_suspended(&client->dev); iio_triggered_buffer_cleanup(indio_dev); if (data->dready_trig) { iio_trigger_unregister(data->dready_trig); iio_trigger_unregister(data->motion_trig); } mutex_lock(&data->mutex); kxcjk1013_set_mode(data, STANDBY); mutex_unlock(&data->mutex); } static int kxcjk1013_suspend(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct kxcjk1013_data *data = iio_priv(indio_dev); int ret; mutex_lock(&data->mutex); ret = kxcjk1013_set_mode(data, STANDBY); mutex_unlock(&data->mutex); return ret; } static int kxcjk1013_resume(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct kxcjk1013_data *data = iio_priv(indio_dev); int ret = 0; mutex_lock(&data->mutex); ret = kxcjk1013_set_mode(data, OPERATION); if (ret == 0) ret = kxcjk1013_set_range(data, data->range); mutex_unlock(&data->mutex); return ret; } static int kxcjk1013_runtime_suspend(struct device 
*dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct kxcjk1013_data *data = iio_priv(indio_dev); int ret; ret = kxcjk1013_set_mode(data, STANDBY); if (ret < 0) { dev_err(&data->client->dev, "powering off device failed\n"); return -EAGAIN; } return 0; } static int kxcjk1013_runtime_resume(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct kxcjk1013_data *data = iio_priv(indio_dev); int ret; int sleep_val; ret = kxcjk1013_set_mode(data, OPERATION); if (ret < 0) return ret; sleep_val = kxcjk1013_get_startup_times(data); if (sleep_val < 20000) usleep_range(sleep_val, 20000); else msleep_interruptible(sleep_val/1000); return 0; } static const struct dev_pm_ops kxcjk1013_pm_ops = { SYSTEM_SLEEP_PM_OPS(kxcjk1013_suspend, kxcjk1013_resume) RUNTIME_PM_OPS(kxcjk1013_runtime_suspend, kxcjk1013_runtime_resume, NULL) }; static const struct i2c_device_id kxcjk1013_id[] = { { "kxcjk1013", (kernel_ulong_t)&kxcjk1013_info }, { "kxcj91008", (kernel_ulong_t)&kxcj91008_info }, { "kxtj21009", (kernel_ulong_t)&kxtj21009_info }, { "kxtf9", (kernel_ulong_t)&kxtf9_info }, { "kx023-1025", (kernel_ulong_t)&kx0231025_info }, { } }; MODULE_DEVICE_TABLE(i2c, kxcjk1013_id); static const struct of_device_id kxcjk1013_of_match[] = { { .compatible = "kionix,kxcjk1013", &kxcjk1013_info }, { .compatible = "kionix,kxcj91008", &kxcj91008_info }, { .compatible = "kionix,kxtj21009", &kxtj21009_info }, { .compatible = "kionix,kxtf9", &kxtf9_info }, { .compatible = "kionix,kx023-1025", &kx0231025_info }, { } }; MODULE_DEVICE_TABLE(of, kxcjk1013_of_match); static const struct acpi_device_id kx_acpi_match[] = { { "KIOX0008", (kernel_ulong_t)&kxcj91008_info }, { "KIOX0009", (kernel_ulong_t)&kxtj21009_info }, { "KIOX000A", (kernel_ulong_t)&kxcj91008_info }, /* KXCJ91008 in the display of a yoga 2-in-1 */ { "KIOX010A", (kernel_ulong_t)&kxcj91008_kiox010a_info }, /* KXCJ91008 in the base of a yoga 2-in-1 */ { "KIOX020A", 
(kernel_ulong_t)&kxcj91008_kiox020a_info }, { "KXCJ1008", (kernel_ulong_t)&kxcj91008_info }, { "KXCJ1013", (kernel_ulong_t)&kxcjk1013_info }, { "KXCJ9000", (kernel_ulong_t)&kxcj91008_info }, { "KXJ2109", (kernel_ulong_t)&kxtj21009_info }, { "KXTJ1009", (kernel_ulong_t)&kxtj21009_info }, { "SMO8500", (kernel_ulong_t)&kxcj91008_smo8500_info }, { } }; MODULE_DEVICE_TABLE(acpi, kx_acpi_match); static struct i2c_driver kxcjk1013_driver = { .driver = { .name = KXCJK1013_DRV_NAME, .acpi_match_table = kx_acpi_match, .of_match_table = kxcjk1013_of_match, .pm = pm_ptr(&kxcjk1013_pm_ops), }, .probe = kxcjk1013_probe, .remove = kxcjk1013_remove, .id_table = kxcjk1013_id, }; module_i2c_driver(kxcjk1013_driver); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("KXCJK1013 accelerometer driver");
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * FriendlyElec NanoPC-T4 board device tree source
 *
 * Copyright (c) 2018 FriendlyElec Computer Tech. Co., Ltd.
 * (http://www.friendlyarm.com)
 *
 * Copyright (c) 2018 Collabora Ltd.
 */

/dts-v1/;
#include "rk3399-nanopi4.dtsi"

/ {
	model = "FriendlyElec NanoPC-T4";
	compatible = "friendlyarm,nanopc-t4", "rockchip,rk3399";

	/* 12 V barrel-jack input; root of the board's supply tree. */
	vcc12v0_sys: regulator-vcc12v0-sys {
		compatible = "regulator-fixed";
		regulator-always-on;
		regulator-boot-on;
		regulator-max-microvolt = <12000000>;
		regulator-min-microvolt = <12000000>;
		regulator-name = "vcc12v0_sys";
	};

	/* 5 V rail feeding both USB2 host PHYs. */
	vcc5v0_host0: regulator-vcc5v0-host0 {
		compatible = "regulator-fixed";
		regulator-always-on;
		regulator-boot-on;
		regulator-name = "vcc5v0_host0";
		vin-supply = <&vcc5v0_sys>;
	};

	/* Recovery button sensed through SARADC channel 1. */
	adc-keys {
		compatible = "adc-keys";
		io-channels = <&saradc 1>;
		io-channel-names = "buttons";
		keyup-threshold-microvolt = <1800000>;
		poll-interval = <100>;

		button-recovery {
			label = "Recovery";
			linux,code = <KEY_VENDOR>;
			press-threshold-microvolt = <18000>;
		};
	};

	ir-receiver {
		compatible = "gpio-ir-receiver";
		gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>;
		pinctrl-names = "default";
		pinctrl-0 = <&ir_rx>;
	};

	fan: pwm-fan {
		compatible = "pwm-fan";
		/*
		 * With 20KHz PWM and an EVERCOOL EC4007H12SA fan, these levels
		 * work out to 0, ~1200, ~3000, and 5000RPM respectively.
		 */
		cooling-levels = <0 12 18 255>;
		#cooling-cells = <2>;
		fan-supply = <&vcc12v0_sys>;
		pwms = <&pwm1 0 50000 0>;
	};
};

&cpu_thermal {
	trips {
		cpu_warm: cpu_warm {
			temperature = <55000>;
			hysteresis = <2000>;
			type = "active";
		};

		cpu_hot: cpu_hot {
			temperature = <65000>;
			hysteresis = <2000>;
			type = "active";
		};
	};

	cooling-maps {
		map2 {
			trip = <&cpu_warm>;
			cooling-device = <&fan THERMAL_NO_LIMIT 1>;
		};

		map3 {
			trip = <&cpu_hot>;
			cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
		};
	};
};

&pcie0 {
	ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
	num-lanes = <4>;
	vpcie3v3-supply = <&vcc3v3_sys>;
};

&pinctrl {
	ir {
		ir_rx: ir-rx {
			/* external pullup to VCC3V3_SYS, despite being 1.8V :/ */
			rockchip,pins = <0 RK_PA6 1 &pcfg_pull_none>;
		};
	};
};

&sdhci {
	mmc-hs400-1_8v;
	mmc-hs400-enhanced-strobe;
};

&u2phy0_host {
	phy-supply = <&vcc5v0_host0>;
};

&u2phy1_host {
	phy-supply = <&vcc5v0_host0>;
};

&vcc5v0_sys {
	vin-supply = <&vcc12v0_sys>;
};

&vcc3v3_sys {
	vin-supply = <&vcc12v0_sys>;
};

&vbus_typec {
	enable-active-high;
	gpios = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
	vin-supply = <&vcc5v0_sys>;
};
// SPDX-License-Identifier: GPL-2.0-only /* * FPDT support for exporting boot and suspend/resume performance data * * Copyright (C) 2021 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "ACPI FPDT: " fmt #include <linux/acpi.h> /* * FPDT contains ACPI table header and a number of fpdt_subtable_entries. * Each fpdt_subtable_entry points to a subtable: FBPT or S3PT. * Each FPDT subtable (FBPT/S3PT) is composed of a fpdt_subtable_header * and a number of fpdt performance records. * Each FPDT performance record is composed of a fpdt_record_header and * performance data fields, for boot or suspend or resume phase. */ enum fpdt_subtable_type { SUBTABLE_FBPT, SUBTABLE_S3PT, }; struct fpdt_subtable_entry { u16 type; /* refer to enum fpdt_subtable_type */ u8 length; u8 revision; u32 reserved; u64 address; /* physical address of the S3PT/FBPT table */ }; struct fpdt_subtable_header { u32 signature; u32 length; }; enum fpdt_record_type { RECORD_S3_RESUME, RECORD_S3_SUSPEND, RECORD_BOOT, }; struct fpdt_record_header { u16 type; /* refer to enum fpdt_record_type */ u8 length; u8 revision; }; struct resume_performance_record { struct fpdt_record_header header; u32 resume_count; u64 resume_prev; u64 resume_avg; } __attribute__((packed)); struct boot_performance_record { struct fpdt_record_header header; u32 reserved; u64 firmware_start; u64 bootloader_load; u64 bootloader_launch; u64 exitbootservice_start; u64 exitbootservice_end; } __attribute__((packed)); struct suspend_performance_record { struct fpdt_record_header header; u64 suspend_start; u64 suspend_end; } __attribute__((packed)); static struct resume_performance_record *record_resume; static struct suspend_performance_record *record_suspend; static struct boot_performance_record *record_boot; #define FPDT_ATTR(phase, name) \ static ssize_t name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%llu\n", record_##phase->name); \ } \ static struct kobj_attribute 
name##_attr = \ __ATTR(name##_ns, 0444, name##_show, NULL) FPDT_ATTR(resume, resume_prev); FPDT_ATTR(resume, resume_avg); FPDT_ATTR(suspend, suspend_start); FPDT_ATTR(suspend, suspend_end); FPDT_ATTR(boot, firmware_start); FPDT_ATTR(boot, bootloader_load); FPDT_ATTR(boot, bootloader_launch); FPDT_ATTR(boot, exitbootservice_start); FPDT_ATTR(boot, exitbootservice_end); static ssize_t resume_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", record_resume->resume_count); } static struct kobj_attribute resume_count_attr = __ATTR_RO(resume_count); static struct attribute *resume_attrs[] = { &resume_count_attr.attr, &resume_prev_attr.attr, &resume_avg_attr.attr, NULL }; static const struct attribute_group resume_attr_group = { .attrs = resume_attrs, .name = "resume", }; static struct attribute *suspend_attrs[] = { &suspend_start_attr.attr, &suspend_end_attr.attr, NULL }; static const struct attribute_group suspend_attr_group = { .attrs = suspend_attrs, .name = "suspend", }; static struct attribute *boot_attrs[] = { &firmware_start_attr.attr, &bootloader_load_attr.attr, &bootloader_launch_attr.attr, &exitbootservice_start_attr.attr, &exitbootservice_end_attr.attr, NULL }; static const struct attribute_group boot_attr_group = { .attrs = boot_attrs, .name = "boot", }; static struct kobject *fpdt_kobj; #if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT #include <linux/processor.h> static bool fpdt_address_valid(u64 address) { /* * On some systems the table contains invalid addresses * with unsuppored high address bits set, check for this. */ return !(address >> boot_cpu_data.x86_phys_bits); } #else static bool fpdt_address_valid(u64 address) { return true; } #endif static int fpdt_process_subtable(u64 address, u32 subtable_type) { struct fpdt_subtable_header *subtable_header; struct fpdt_record_header *record_header; char *signature = (subtable_type == SUBTABLE_FBPT ? 
"FBPT" : "S3PT"); u32 length, offset; int result; if (!fpdt_address_valid(address)) { pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address); return -EINVAL; } subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header)); if (!subtable_header) return -ENOMEM; if (strncmp((char *)&subtable_header->signature, signature, 4)) { pr_info(FW_BUG "subtable signature and type mismatch!\n"); return -EINVAL; } length = subtable_header->length; acpi_os_unmap_memory(subtable_header, sizeof(*subtable_header)); subtable_header = acpi_os_map_memory(address, length); if (!subtable_header) return -ENOMEM; offset = sizeof(*subtable_header); while (offset < length) { record_header = (void *)subtable_header + offset; offset += record_header->length; if (!record_header->length) { pr_err(FW_BUG "Zero-length record found in FPTD.\n"); result = -EINVAL; goto err; } switch (record_header->type) { case RECORD_S3_RESUME: if (subtable_type != SUBTABLE_S3PT) { pr_err(FW_BUG "Invalid record %d for subtable %s\n", record_header->type, signature); result = -EINVAL; goto err; } if (record_resume) { pr_err("Duplicate resume performance record found.\n"); continue; } record_resume = (struct resume_performance_record *)record_header; result = sysfs_create_group(fpdt_kobj, &resume_attr_group); if (result) goto err; break; case RECORD_S3_SUSPEND: if (subtable_type != SUBTABLE_S3PT) { pr_err(FW_BUG "Invalid %d for subtable %s\n", record_header->type, signature); continue; } if (record_suspend) { pr_err("Duplicate suspend performance record found.\n"); continue; } record_suspend = (struct suspend_performance_record *)record_header; result = sysfs_create_group(fpdt_kobj, &suspend_attr_group); if (result) goto err; break; case RECORD_BOOT: if (subtable_type != SUBTABLE_FBPT) { pr_err(FW_BUG "Invalid %d for subtable %s\n", record_header->type, signature); result = -EINVAL; goto err; } if (record_boot) { pr_err("Duplicate boot performance record found.\n"); continue; } record_boot = (struct 
boot_performance_record *)record_header; result = sysfs_create_group(fpdt_kobj, &boot_attr_group); if (result) goto err; break; default: /* Other types are reserved in ACPI 6.4 spec. */ break; } } return 0; err: if (record_boot) sysfs_remove_group(fpdt_kobj, &boot_attr_group); if (record_suspend) sysfs_remove_group(fpdt_kobj, &suspend_attr_group); if (record_resume) sysfs_remove_group(fpdt_kobj, &resume_attr_group); return result; } static int __init acpi_init_fpdt(void) { acpi_status status; struct acpi_table_header *header; struct fpdt_subtable_entry *subtable; u32 offset = sizeof(*header); int result; status = acpi_get_table(ACPI_SIG_FPDT, 0, &header); if (ACPI_FAILURE(status)) return 0; fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj); if (!fpdt_kobj) { result = -ENOMEM; goto err_nomem; } while (offset < header->length) { subtable = (void *)header + offset; switch (subtable->type) { case SUBTABLE_FBPT: case SUBTABLE_S3PT: result = fpdt_process_subtable(subtable->address, subtable->type); if (result) goto err_subtable; break; default: /* Other types are reserved in ACPI 6.4 spec. */ break; } offset += sizeof(*subtable); } return 0; err_subtable: kobject_put(fpdt_kobj); err_nomem: acpi_put_table(header); return result; } fs_initcall(acpi_init_fpdt);
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <[email protected]>
 */

#ifndef _CACHED_DIR_H
#define _CACHED_DIR_H

/* One directory entry captured from a server listing. */
struct cached_dirent {
	struct list_head entry;	/* link on cached_dirents.entries */
	char *name;
	int namelen;
	loff_t pos;		/* directory position of this entry */
	struct cifs_fattr fattr;
};

/* Accumulated listing of a directory, cached for re-reads. */
struct cached_dirents {
	bool is_valid:1;	/* cache holds a complete, usable listing */
	bool is_failed:1;	/* caching was aborted; do not serve from it */
	struct dir_context *ctx; /*
				  * Only used to make sure we only take entries
				  * from a single context. Never dereferenced.
				  */
	struct mutex de_mutex;	/* protects the fields below */
	int pos;		/* Expected ctx->pos */
	struct list_head entries; /* list of struct cached_dirent */
};

/* A cached, lease-protected open of a directory on the server. */
struct cached_fid {
	struct list_head entry;	/* link on cached_fids.entries (or .dying) */
	struct cached_fids *cfids;	/* owning per-tcon container */
	const char *path;
	bool has_lease:1;
	bool is_open:1;
	bool on_list:1;
	bool file_all_info_is_valid:1;	/* file_all_info below is populated */
	unsigned long time; /* jiffies of when lease was taken */
	struct kref refcount;
	struct cifs_fid fid;
	spinlock_t fid_lock;	/* protects fid */
	struct cifs_tcon *tcon;
	struct dentry *dentry;
	struct work_struct put_work;	/* deferred kref put */
	struct work_struct close_work;	/* deferred close on lease break */
	struct smb2_file_all_info file_all_info;
	struct cached_dirents dirents;
};

/* default MAX_CACHED_FIDS is 16 */
/* Per-tcon container of cached directory handles. */
struct cached_fids {
	/* Must be held when:
	 * - accessing the cfids->entries list
	 * - accessing the cfids->dying list
	 */
	spinlock_t cfid_list_lock;
	int num_entries;	/* length of the entries list */
	struct list_head entries;
	struct list_head dying;	/* cfids being torn down */
	struct work_struct invalidation_work;
	struct delayed_work laundromat_work;	/* periodic expiry of stale cfids */
};

extern struct cached_fids *init_cached_dirs(void);
extern void free_cached_dirs(struct cached_fids *cfids);
extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
			   const char *path,
			   struct cifs_sb_info *cifs_sb,
			   bool lookup_only, struct cached_fid **cfid);
extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
				     struct dentry *dentry,
				     struct cached_fid **cfid);
extern void close_cached_dir(struct cached_fid *cfid);
extern void drop_cached_dir_by_name(const unsigned int xid,
				    struct cifs_tcon *tcon,
				    const char *name,
				    struct cifs_sb_info *cifs_sb);
extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb);
extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon);
extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);

#endif			/* _CACHED_DIR_H */
// SPDX-License-Identifier: GPL-2.0+ /* * ipmi_si.c * * The interface to the IPMI driver for the system interfaces (KCS, SMIC, * BT). * * Author: MontaVista Software, Inc. * Corey Minyard <[email protected]> * [email protected] * * Copyright 2002 MontaVista Software Inc. * Copyright 2006 IBM Corp., Christian Krafft <[email protected]> */ /* * This file holds the "policy" for the interface to the SMI state * machine. It does the configuration, handles timers and interrupts, * and drives the real SMI state machine. */ #define pr_fmt(fmt) "ipmi_si: " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <asm/irq.h> #include <linux/interrupt.h> #include <linux/rcupdate.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> #include "ipmi_si.h" #include "ipmi_si_sm.h" #include <linux/string.h> #include <linux/ctype.h> /* Measure times between events in the driver. */ #undef DEBUG_TIMING /* Call every 10 ms. */ #define SI_TIMEOUT_TIME_USEC 10000 #define SI_USEC_PER_JIFFY (1000000/HZ) #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a short timeout */ enum si_intf_state { SI_NORMAL, SI_GETTING_FLAGS, SI_GETTING_EVENTS, SI_CLEARING_FLAGS, SI_GETTING_MESSAGES, SI_CHECKING_ENABLES, SI_SETTING_ENABLES /* FIXME - add watchdog stuff. */ }; /* Some BT-specific defines we need here. */ #define IPMI_BT_INTMASK_REG 2 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1 /* 'invalid' to allow a firmware-specified interface to be disabled */ const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL }; static bool initialized; /* * Indexes into stats[] in smi_info below. 
*/ enum si_stat_indexes { /* * Number of times the driver requested a timer while an operation * was in progress. */ SI_STAT_short_timeouts = 0, /* * Number of times the driver requested a timer while nothing was in * progress. */ SI_STAT_long_timeouts, /* Number of times the interface was idle while being polled. */ SI_STAT_idles, /* Number of interrupts the driver handled. */ SI_STAT_interrupts, /* Number of time the driver got an ATTN from the hardware. */ SI_STAT_attentions, /* Number of times the driver requested flags from the hardware. */ SI_STAT_flag_fetches, /* Number of times the hardware didn't follow the state machine. */ SI_STAT_hosed_count, /* Number of completed messages. */ SI_STAT_complete_transactions, /* Number of IPMI events received from the hardware. */ SI_STAT_events, /* Number of watchdog pretimeouts. */ SI_STAT_watchdog_pretimeouts, /* Number of asynchronous messages received. */ SI_STAT_incoming_messages, /* This *must* remain last, add new values above this. */ SI_NUM_STATS }; struct smi_info { int si_num; struct ipmi_smi *intf; struct si_sm_data *si_sm; const struct si_sm_handlers *handlers; spinlock_t si_lock; struct ipmi_smi_msg *waiting_msg; struct ipmi_smi_msg *curr_msg; enum si_intf_state si_state; /* * Used to handle the various types of I/O that can occur with * IPMI */ struct si_sm_io io; /* * Per-OEM handler, called from handle_flags(). Returns 1 * when handle_flags() needs to be re-run or 0 indicating it * set si_state itself. */ int (*oem_data_avail_handler)(struct smi_info *smi_info); /* * Flags from the last GET_MSG_FLAGS command, used when an ATTN * is set to hold the flags until we are done handling everything * from the flags. 
*/ #define RECEIVE_MSG_AVAIL 0x01 #define EVENT_MSG_BUFFER_FULL 0x02 #define WDT_PRE_TIMEOUT_INT 0x08 #define OEM0_DATA_AVAIL 0x20 #define OEM1_DATA_AVAIL 0x40 #define OEM2_DATA_AVAIL 0x80 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ OEM1_DATA_AVAIL | \ OEM2_DATA_AVAIL) unsigned char msg_flags; /* Does the BMC have an event buffer? */ bool has_event_buffer; /* * If set to true, this will request events the next time the * state machine is idle. */ atomic_t req_events; /* * If true, run the state machine to completion on every send * call. Generally used after a panic to make sure stuff goes * out. */ bool run_to_completion; /* The timer for this si. */ struct timer_list si_timer; /* This flag is set, if the timer can be set */ bool timer_can_start; /* This flag is set, if the timer is running (timer_pending() isn't enough) */ bool timer_running; /* The time (in jiffies) the last timeout occurred at. */ unsigned long last_timeout_jiffies; /* Are we waiting for the events, pretimeouts, received msgs? */ atomic_t need_watch; /* * The driver will disable interrupts when it gets into a * situation where it cannot handle messages due to lack of * memory. Once that situation clears up, it will re-enable * interrupts. */ bool interrupt_disabled; /* * Does the BMC support events? */ bool supports_event_msg_buff; /* * Can we disable interrupts the global enables receive irq * bit? There are currently two forms of brokenness, some * systems cannot disable the bit (which is technically within * the spec but a bad idea) and some systems have the bit * forced to zero even though interrupts work (which is * clearly outside the spec). The next bool tells which form * of brokenness is present. */ bool cannot_disable_irq; /* * Some systems are broken and cannot set the irq enable * bit, even if they support interrupts. */ bool irq_enable_broken; /* Is the driver in maintenance mode? */ bool in_maintenance_mode; /* * Did we get an attention that we did not handle? 
*/ bool got_attn; /* From the get device id response... */ struct ipmi_device_id device_id; /* Have we added the device group to the device? */ bool dev_group_added; /* Counters and things for the proc filesystem. */ atomic_t stats[SI_NUM_STATS]; struct task_struct *thread; struct list_head link; }; #define smi_inc_stat(smi, stat) \ atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) #define smi_get_stat(smi, stat) \ ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) #define IPMI_MAX_INTFS 4 static int force_kipmid[IPMI_MAX_INTFS]; static int num_force_kipmid; static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS]; static int num_max_busy_us; static bool unload_when_empty = true; static int try_smi_init(struct smi_info *smi); static void cleanup_one_si(struct smi_info *smi_info); static void cleanup_ipmi_si(void); #ifdef DEBUG_TIMING void debug_timestamp(struct smi_info *smi_info, char *msg) { struct timespec64 t; ktime_get_ts64(&t); dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec); } #else #define debug_timestamp(smi_info, x) #endif static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); static int register_xaction_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&xaction_notifier_list, nb); } static void deliver_recv_msg(struct smi_info *smi_info, struct ipmi_smi_msg *msg) { /* Deliver the message to the upper layer. 
*/ ipmi_smi_msg_received(smi_info->intf, msg); } static void return_hosed_msg(struct smi_info *smi_info, int cCode) { struct ipmi_smi_msg *msg = smi_info->curr_msg; if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED) cCode = IPMI_ERR_UNSPECIFIED; /* else use it as is */ /* Make it a response */ msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = cCode; msg->rsp_size = 3; smi_info->curr_msg = NULL; deliver_recv_msg(smi_info, msg); } static enum si_sm_result start_next_msg(struct smi_info *smi_info) { int rv; if (!smi_info->waiting_msg) { smi_info->curr_msg = NULL; rv = SI_SM_IDLE; } else { int err; smi_info->curr_msg = smi_info->waiting_msg; smi_info->waiting_msg = NULL; debug_timestamp(smi_info, "Start2"); err = atomic_notifier_call_chain(&xaction_notifier_list, 0, smi_info); if (err & NOTIFY_STOP_MASK) { rv = SI_SM_CALL_WITHOUT_DELAY; goto out; } err = smi_info->handlers->start_transaction( smi_info->si_sm, smi_info->curr_msg->data, smi_info->curr_msg->data_size); if (err) return_hosed_msg(smi_info, err); rv = SI_SM_CALL_WITHOUT_DELAY; } out: return rv; } static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) { if (!smi_info->timer_can_start) return; smi_info->last_timeout_jiffies = jiffies; mod_timer(&smi_info->si_timer, new_val); smi_info->timer_running = true; } /* * Start a new message and (re)start the timer and thread. 
*/ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, unsigned int size) { smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); if (smi_info->thread) wake_up_process(smi_info->thread); smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); } static void start_check_enables(struct smi_info *smi_info) { unsigned char msg[2]; msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; start_new_msg(smi_info, msg, 2); smi_info->si_state = SI_CHECKING_ENABLES; } static void start_clear_flags(struct smi_info *smi_info) { unsigned char msg[3]; /* Make sure the watchdog pre-timeout flag is not set at startup. */ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; start_new_msg(smi_info, msg, 3); smi_info->si_state = SI_CLEARING_FLAGS; } static void start_getting_msg_queue(struct smi_info *smi_info) { smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; smi_info->curr_msg->data_size = 2; start_new_msg(smi_info, smi_info->curr_msg->data, smi_info->curr_msg->data_size); smi_info->si_state = SI_GETTING_MESSAGES; } static void start_getting_events(struct smi_info *smi_info) { smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; smi_info->curr_msg->data_size = 2; start_new_msg(smi_info, smi_info->curr_msg->data, smi_info->curr_msg->data_size); smi_info->si_state = SI_GETTING_EVENTS; } /* * When we have a situtaion where we run out of memory and cannot * allocate messages, we just leave them in the BMC and run the system * polled until we can allocate some memory. Once we have some * memory, we will re-enable the interrupt. * * Note that we cannot just use disable_irq(), since the interrupt may * be shared. 
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		/* Kick off a global-enables update to turn the irq off. */
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		/* Kick off a global-enables update to turn the irq back on. */
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}
	return msg;
}

/*
 * Dispatch on the BMC flag bits, highest-priority condition first:
 * watchdog pre-timeout, then queued messages, then events, then any
 * OEM-specific data.  Falls back to SI_NORMAL when nothing is set.
 */
static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		/* Handler may rewrite msg_flags; re-evaluate if it says so. */
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}

/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)

/*
 * Compute the global-enable bits this driver wants set on the BMC,
 * based on event-buffer support and the current irq situation.
 * *irq_on is set true when either interrupt enable bit is requested.
 * (The "base" parameter is unused here; callers pass 0.)
 */
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}

/*
 * Make the BT interface's own interrupt-enable bit agree with irq_on;
 * a no-op if it already matches.
 */
static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if ((bool)irqstate == irq_on)
		return;

	if (irq_on)
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	else
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}

/*
 * A transaction finished in the low-level state machine; collect the
 * result and advance the driver state machine (si_state) accordingly.
 */
static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp(smi_info, "Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn_ratelimited(smi_info->io.dev,
					     "Error clearing flags: %2.2x\n",
					     msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn_ratelimited(smi_info->io.dev,
				"Couldn't get irq info: %x,\n"
				"Maybe ok, but ipmi might run very slowly.\n",
				msg[2]);
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn_ratelimited(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	}
}

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg(), because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		goto restart;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got an attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			start_new_msg(smi_info, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disabled and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* Ok if it fails, the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}

/*
 * If the interface is idle, restart the timer/thread and kick the
 * state machine so a freshly queued message starts immediately.
 */
static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		if (smi_info->thread)
			wake_up_process(smi_info->thread);

		start_next_msg(smi_info);
		smi_event_handler(smi_info, 0);
	}
}

/* Busy-poll the state machine until it goes idle (run-to-completion). */
static void flush_messages(void *send_info)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;

	/*
	 * Currently, this function is called only in run-to-completion
	 * mode.  This means we are single-threaded, no need for locks.
 */
	result = smi_event_handler(smi_info, 0);
	while (result != SI_SM_IDLE) {
		udelay(SI_SHORT_TIMEOUT_USEC);
		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
	}
}

/*
 * Queue a message from the upper layer for transmission.  Only one
 * message may be outstanding at a time (BUG_ON enforces this).
 */
static void sender(void *send_info, struct ipmi_smi_msg *msg)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	debug_timestamp(smi_info, "Enqueue");

	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it.  Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;
		return;
	}

	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problem.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
	struct smi_info *smi_info = send_info;

	smi_info->run_to_completion = i_run_to_completion;
	/* Drain anything pending before the polled-mode caller proceeds. */
	if (i_run_to_completion)
		flush_messages(smi_info);
}

/*
 * Use -1 as a special constant to tell that we are spinning in kipmid
 * looking for something and not delaying between checks
 */
#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)

/*
 * Decide whether kipmid should keep busy-waiting.  Tracks the spin
 * deadline in *busy_until; returns false once the per-interface
 * kipmid_max_busy_us budget (if configured) has been exceeded.
 */
static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
					 const struct smi_info *smi_info,
					 ktime_t *busy_until)
{
	unsigned int max_busy_us = 0;

	if (smi_info->si_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->si_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		*busy_until = IPMI_TIME_NOT_BUSY;
	else if (*busy_until == IPMI_TIME_NOT_BUSY) {
		*busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
	} else {
		if (unlikely(ktime_get() > *busy_until)) {
			*busy_until = IPMI_TIME_NOT_BUSY;
			return false;
		}
	}
	return true;
}

/*
 * A busy-waiting loop for speeding up IPMI
 * operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/driver-api/ipmi.rst for details.
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	ktime_t busy_until = IPMI_TIME_NOT_BUSY;

	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler sees idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			; /* do nothing */
		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
			/*
			 * In maintenance mode we run as fast as
			 * possible to allow firmware updates to
			 * complete as fast as possible, but normally
			 * don't bang on the scheduler.
			 */
			if (smi_info->in_maintenance_mode)
				schedule();
			else
				usleep_range(100, 200);
		} else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
		} else {
			schedule_timeout_interruptible(1);
		}
	}
	return 0;
}

static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool run_to_completion = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	if (!run_to_completion)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!run_to_completion)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

/* Upper layer asks us to fetch events; serviced from smi_event_handler(). */
static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (!smi_info->has_event_buffer)
		return;

	atomic_set(&smi_info->req_events, 1);
}

static void set_need_watch(void *send_info, unsigned int watch_mask)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;
	int enable;

	enable = !!watch_mask;

	atomic_set(&smi_info->need_watch, enable);
	spin_lock_irqsave(&smi_info->si_lock, flags);
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

/*
 * Periodic timer: drive the state machine forward with the elapsed
 * time, then re-arm with a short or long timeout depending on what
 * the state machine asked for.
 */
static void smi_timeout(struct timer_list *t)
{
	struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;
	long time_diff;
	long timeout;

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp(smi_info, "Timer");

	jiffies_now = jiffies;
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
		goto do_mod_timer;
	}

	/*
	 * If the state machine asks for a short delay, then shorten
	 * the timer timeout.
 */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}

do_mod_timer:
	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
	else
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;

	if (smi_info->io.si_type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

	debug_timestamp(smi_info, "Interrupt");

	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}

/*
 * Called by the message layer once registration is done: start the
 * timer, claim the irq, and decide whether to run the kipmid thread.
 */
static int smi_start_processing(void *send_info, struct ipmi_smi *intf)
{
	struct smi_info *new_smi = send_info;
	int enable = 0;

	new_smi->intf = intf;

	/* Set up the timer that drives the interface. */
	timer_setup(&new_smi->si_timer, smi_timeout, 0);
	new_smi->timer_can_start = true;
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->io.irq_setup) {
		new_smi->io.irq_handler_data = new_smi;
		new_smi->io.irq_setup(&new_smi->io);
	}

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->si_num < num_force_kipmid)
		enable = force_kipmid[new_smi->si_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
		enable = 1;

	if (enable) {
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->si_num);
		if (IS_ERR(new_smi->thread)) {
			dev_notice(new_smi->io.dev,
				   "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;
		}
	}

	return 0;
}

static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
	struct smi_info *smi = send_info;

	data->addr_src = smi->io.addr_source;
	data->dev = smi->io.dev;
	data->addr_info = smi->io.addr_info;
	/* Caller is expected to drop this reference when done. */
	get_device(smi->io.dev);

	return 0;
}

static void set_maintenance_mode(void *send_info, bool enable)
{
	struct smi_info *smi_info = send_info;

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
	smi_info->in_maintenance_mode = enable;
}

static void shutdown_smi(void *send_info);

/* Interface handlers registered with the IPMI message layer. */
static const struct ipmi_smi_handlers handlers = {
	.owner                  = THIS_MODULE,
	.start_processing       = smi_start_processing,
	.shutdown               = shutdown_smi,
	.get_smi_info		= get_smi_info,
	.sender			= sender,
	.request_events		= request_events,
	.set_need_watch		= set_need_watch,
	.set_maintenance_mode   = set_maintenance_mode,
	.set_run_to_completion  = set_run_to_completion,
	.flush_messages		= flush_messages,
	.poll			= poll,
};

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

static const char * const addr_space_to_str[] = { "i/o", "mem" };

module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid,
		 "Force the kipmi daemon to be enabled (1) or disabled(0). Normally the IPMI driver auto-detects this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty,
		 "Unload the module if no interfaces are specified or found, default is 1. Setting to 0 is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before sleeping. 0 (default) means to wait forever. Set to 100-500 if kipmid is using up a lot of CPU time.");

void ipmi_irq_finish_setup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Enable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG,
			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}

void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}

static void std_irq_cleanup(struct si_sm_io *io)
{
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);
}

/*
 * Claim io->irq (shared) for this interface.  On failure the irq is
 * cleared and the interface falls back to polled operation.
 */
int ipmi_std_irq_setup(struct si_sm_io *io)
{
	int rv;

	if (!io->irq)
		return 0;

	rv = request_irq(io->irq,
			 ipmi_si_irq_handler,
			 IRQF_SHARED,
			 SI_DEVICE_NAME,
			 io->irq_handler_data);
	if (rv) {
		dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
			 SI_DEVICE_NAME, io->irq);
		io->irq = 0;
	} else {
		io->irq_cleanup = std_irq_cleanup;
		ipmi_irq_finish_setup(io);
		dev_info(io->dev, "Using irq %d\n", io->irq);
	}

	return rv;
}

/*
 * Synchronously run the state machine until the current transaction
 * finishes.  Sleeps between polls; only usable from process context.
 */
static int wait_for_msg_done(struct smi_info *smi_info)
{
	enum si_sm_result smi_result;

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		} else
			break;
	}
	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
 */
		return -ENODEV;

	return 0;
}

/*
 * Issue a Get Device ID command and record the result in
 * smi_info->device_id, retrying a bounded number of times on
 * non-zero completion codes.
 */
static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char msg[2];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;
	unsigned int retry_count = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;

retry:
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv)
		goto out;

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Check and record info from the get device id, in case we need it. */
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
				     resp + 2, resp_len - 2,
				     &smi_info->device_id);
	if (rv) {
		/* record completion code */
		unsigned char cc = *(resp + 2);

		if (cc != IPMI_CC_NO_ERROR &&
		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
			dev_warn_ratelimited(smi_info->io.dev,
					     "BMC returned 0x%2.2x, retry get bmc device id\n",
					     cc);
			goto retry;
		}
	}

out:
	kfree(resp);
	return rv;
}

/*
 * Read the BMC's global enables byte into *enables.  Returns 0 on
 * success, -ENOMEM/-EINVAL or a transaction error otherwise.
 */
static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);
		rv = -EINVAL;
		goto out;
	} else {
		*enables = resp[3];
	}

out:
	kfree(resp);
	return rv;
}

/*
 * Returns 1 if it gets an error from the command.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = enables;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);
		rv = -EINVAL;
		goto out;
	}

	/* Non-zero completion code: command ran but the BMC refused it. */
	if (resp[2] != 0)
		rv = 1;

out:
	kfree(resp);
	return rv;
}

/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */
			return;

		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the event buffer bit means
		 * clearing the bit is not supported.
 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}

/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	if (!smi_info->io.irq)
		return;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the event buffer bit means
		 * setting the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}

/*
 * Try to turn on the BMC's event message buffer; sets
 * supports_event_msg_buff on success (or if already enabled).
 * Returns -ENOENT when the BMC does not support the buffer.
 */
static int try_enable_event_buffer(struct smi_info *smi_info)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;
		goto out;
	}

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from set global, enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		pr_warn("Invalid return from get global, enables command, not enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
		 */
		rv = -ENOENT;
	else
		smi_info->supports_event_msg_buff = true;

out:
	kfree(resp);
	return rv;
}

/* Generate a sysfs show function + DEVICE_ATTR_RO for one statistic. */
#define IPMI_SI_ATTR(name) \
static ssize_t name##_show(struct device *dev,			\
			   struct device_attribute *attr,	\
			   char *buf)				\
{								\
	struct smi_info *smi_info = dev_get_drvdata(dev);	\
								\
	return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
}								\
static DEVICE_ATTR_RO(name)

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t interrupts_enabled_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);
	int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;

	return sysfs_emit(buf, "%d\n", enabled);
}
static DEVICE_ATTR_RO(interrupts_enabled);

IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts); IPMI_SI_ATTR(incoming_messages); static ssize_t params_show(struct device *dev, struct device_attribute *attr, char *buf) { struct smi_info *smi_info = dev_get_drvdata(dev); return sysfs_emit(buf, "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", si_to_str[smi_info->io.si_type], addr_space_to_str[smi_info->io.addr_space], smi_info->io.addr_data, smi_info->io.regspacing, smi_info->io.regsize, smi_info->io.regshift, smi_info->io.irq, smi_info->io.slave_addr); } static DEVICE_ATTR_RO(params); static struct attribute *ipmi_si_dev_attrs[] = { &dev_attr_type.attr, &dev_attr_interrupts_enabled.attr, &dev_attr_short_timeouts.attr, &dev_attr_long_timeouts.attr, &dev_attr_idles.attr, &dev_attr_interrupts.attr, &dev_attr_attentions.attr, &dev_attr_flag_fetches.attr, &dev_attr_hosed_count.attr, &dev_attr_complete_transactions.attr, &dev_attr_events.attr, &dev_attr_watchdog_pretimeouts.attr, &dev_attr_incoming_messages.attr, &dev_attr_params.attr, NULL }; static const struct attribute_group ipmi_si_dev_attr_group = { .attrs = ipmi_si_dev_attrs, }; /* * oem_data_avail_to_receive_msg_avail * @info - smi_info structure with msg_flags set * * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL * Returns 1 indicating need to re-run handle_flags(). */ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) { smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | RECEIVE_MSG_AVAIL); return 1; } /* * setup_dell_poweredge_oem_data_handler * @info - smi_info.device_id must be populated * * Systems that match, but have firmware version < 1.40 may assert * OEM0_DATA_AVAIL on their own, without being told via Set Flags that * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags * as RECEIVE_MSG_AVAIL instead. 
* * As Dell has no plans to release IPMI 1.5 firmware that *ever* * assert the OEM[012] bits, and if it did, the driver would have to * change to handle that properly, we don't actually check for the * firmware version. * Device ID = 0x20 BMC on PowerEdge 8G servers * Device Revision = 0x80 * Firmware Revision1 = 0x01 BMC version 1.40 * Firmware Revision2 = 0x40 BCD encoded * IPMI Version = 0x51 IPMI 1.5 * Manufacturer ID = A2 02 00 Dell IANA * * Additionally, PowerEdge systems with IPMI < 1.5 may also assert * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL. * */ #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 #define DELL_IANA_MFR_ID 0x0002a2 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) { struct ipmi_device_id *id = &smi_info->device_id; if (id->manufacturer_id == DELL_IANA_MFR_ID) { if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { smi_info->oem_data_avail_handler = oem_data_avail_to_receive_msg_avail; } else if (ipmi_version_major(id) < 1 || (ipmi_version_major(id) == 1 && ipmi_version_minor(id) < 5)) { smi_info->oem_data_avail_handler = oem_data_avail_to_receive_msg_avail; } } } #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA static void return_hosed_msg_badsize(struct smi_info *smi_info) { struct ipmi_smi_msg *msg = smi_info->curr_msg; /* Make it a response */ msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; msg->rsp_size = 3; smi_info->curr_msg = NULL; deliver_recv_msg(smi_info, msg); } /* * dell_poweredge_bt_xaction_handler * @info - smi_info.device_id must be populated * * Dell PowerEdge servers with the BT interface (x6xx and 1750) will * not respond to a Get SDR command if the length of the data * requested is exactly 0x3A, which leads to command 
timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */

#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23

static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
                                             unsigned long unused, void *in)
{
        struct smi_info *smi_info = in;
        unsigned char *data = smi_info->curr_msg->data;
        unsigned int size = smi_info->curr_msg->data_size;

        /* Get SDR request asking for exactly 0x3A bytes?  Fail it locally. */
        if (size >= 8 &&
            (data[0]>>2) == STORAGE_NETFN &&
            data[1] == STORAGE_CMD_GET_SDR &&
            data[7] == 0x3A) {
                return_hosed_msg_badsize(smi_info);
                return NOTIFY_STOP;
        }
        return NOTIFY_DONE;
}

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
        .notifier_call = dell_poweredge_bt_xaction_handler,
};

/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.start_transaction_pre_hook
 * when we know what function to use there.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
        struct ipmi_device_id *id = &smi_info->device_id;

        if (id->manufacturer_id == DELL_IANA_MFR_ID &&
            smi_info->io.si_type == SI_BT)
                register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}

/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.oem_data_available_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
        setup_dell_poweredge_oem_data_handler(smi_info);
}

static void setup_xaction_handlers(struct smi_info *smi_info)
{
        setup_dell_poweredge_bt_xaction_handler(smi_info);
}

/* Run the per-interface checks for broken IRQ enable/disable behavior. */
static void check_for_broken_irqs(struct smi_info *smi_info)
{
        check_clr_rcv_irq(smi_info);
        check_set_rcv_irq(smi_info);
}

/* Stop the kernel thread (if any) and the SI timer for this interface. */
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
        if (smi_info->thread != NULL) {
                kthread_stop(smi_info->thread);
                smi_info->thread = NULL;
        }

        smi_info->timer_can_start = false;
        del_timer_sync(&smi_info->si_timer);
}

/*
 * Find an already-registered interface at the same address space and
 * address, or NULL if there is none.
 */
static struct smi_info *find_dup_si(struct smi_info *info)
{
        struct smi_info *e;

        list_for_each_entry(e, &smi_infos, link) {
                if (e->io.addr_space != info->io.addr_space)
                        continue;
                if (e->io.addr_data == info->io.addr_data) {
                        /*
                         * This is a cheap hack, ACPI doesn't have a defined
                         * slave address but SMBIOS does.  Pick it up from
                         * any source that has it available.
                         */
                        if (info->io.slave_addr && !e->io.slave_addr)
                                e->io.slave_addr = info->io.slave_addr;
                        return e;
                }
        }

        return NULL;
}

/*
 * Register a new SMI interface described by @io, handling duplicates
 * (preferring ACPI over SMBIOS) and hard-coded overrides.  Returns 0 on
 * success or a negative errno.
 */
int ipmi_si_add_smi(struct si_sm_io *io)
{
        int rv = 0;
        struct smi_info *new_smi, *dup;

        /*
         * If the user gave us a hard-coded device at the same
         * address, they presumably want us to use it and not what is
         * in the firmware.
         */
        if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
            ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
                dev_info(io->dev,
                         "Hard-coded device at this address already exists");
                return -ENODEV;
        }

        if (!io->io_setup) {
                if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
                    io->addr_space == IPMI_IO_ADDR_SPACE) {
                        io->io_setup = ipmi_si_port_setup;
                } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
                        io->io_setup = ipmi_si_mem_setup;
                } else {
                        return -EINVAL;
                }
        }

        new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
        if (!new_smi)
                return -ENOMEM;
        spin_lock_init(&new_smi->si_lock);

        new_smi->io = *io;

        mutex_lock(&smi_infos_lock);
        dup = find_dup_si(new_smi);
        if (dup) {
                if (new_smi->io.addr_source == SI_ACPI &&
                    dup->io.addr_source == SI_SMBIOS) {
                        /* We prefer ACPI over SMBIOS. */
                        dev_info(dup->io.dev,
                                 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
                                 si_to_str[new_smi->io.si_type]);
                        cleanup_one_si(dup);
                } else {
                        dev_info(new_smi->io.dev,
                                 "%s-specified %s state machine: duplicate\n",
                                 ipmi_addr_src_to_str(new_smi->io.addr_source),
                                 si_to_str[new_smi->io.si_type]);
                        /* Not a replaceable duplicate; drop the new one. */
                        rv = -EBUSY;
                        kfree(new_smi);
                        goto out_err;
                }
        }

        pr_info("Adding %s-specified %s state machine\n",
                ipmi_addr_src_to_str(new_smi->io.addr_source),
                si_to_str[new_smi->io.si_type]);

        list_add_tail(&new_smi->link, &smi_infos);

        if (initialized)
                rv = try_smi_init(new_smi);
out_err:
        mutex_unlock(&smi_infos_lock);
        return rv;
}

/*
 * Try to start up an interface.  Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent, we only want to do these
 * one at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
{
        int rv = 0;
        int i;

        pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
                ipmi_addr_src_to_str(new_smi->io.addr_source),
                si_to_str[new_smi->io.si_type],
                addr_space_to_str[new_smi->io.addr_space],
                new_smi->io.addr_data,
                new_smi->io.slave_addr, new_smi->io.irq);

        switch (new_smi->io.si_type) {
        case SI_KCS:
                new_smi->handlers = &kcs_smi_handlers;
                break;

        case SI_SMIC:
                new_smi->handlers = &smic_smi_handlers;
                break;

        case SI_BT:
                new_smi->handlers = &bt_smi_handlers;
                break;

        default:
                /* No support for anything else yet. */
                rv = -EIO;
                goto out_err;
        }

        new_smi->si_num = smi_num;

        /* Do this early so it's available for logs. */
        if (!new_smi->io.dev) {
                pr_err("IPMI interface added with no device\n");
                rv = -EIO;
                goto out_err;
        }

        /* Allocate the state machine's data and initialize it. */
        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
        if (!new_smi->si_sm) {
                rv = -ENOMEM;
                goto out_err;
        }
        new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
                                                           &new_smi->io);

        /* Now that we know the I/O size, we can set up the I/O. */
        rv = new_smi->io.io_setup(&new_smi->io);
        if (rv) {
                dev_err(new_smi->io.dev, "Could not set up I/O space\n");
                goto out_err;
        }

        /* Do low-level detection first. */
        if (new_smi->handlers->detect(new_smi->si_sm)) {
                if (new_smi->io.addr_source)
                        dev_err(new_smi->io.dev,
                                "Interface detection failed\n");
                rv = -ENODEV;
                goto out_err;
        }

        /*
         * Attempt a get device id command.  If it fails, we probably
         * don't have a BMC here.
         */
        rv = try_get_dev_id(new_smi);
        if (rv) {
                if (new_smi->io.addr_source)
                        dev_err(new_smi->io.dev,
                               "There appears to be no BMC at this location\n");
                goto out_err;
        }

        setup_oem_data_handler(new_smi);
        setup_xaction_handlers(new_smi);
        check_for_broken_irqs(new_smi);

        new_smi->waiting_msg = NULL;
        new_smi->curr_msg = NULL;
        atomic_set(&new_smi->req_events, 0);
        new_smi->run_to_completion = false;
        for (i = 0; i < SI_NUM_STATS; i++)
                atomic_set(&new_smi->stats[i], 0);

        new_smi->interrupt_disabled = true;
        atomic_set(&new_smi->need_watch, 0);

        rv = try_enable_event_buffer(new_smi);
        if (rv == 0)
                new_smi->has_event_buffer = true;

        /*
         * Start clearing the flags before we enable interrupts or the
         * timer to avoid racing with the timer.
         */
        start_clear_flags(new_smi);

        /*
         * IRQ is defined to be set when non-zero.  req_events will
         * cause a global flags check that will enable interrupts.
         */
        if (new_smi->io.irq) {
                new_smi->interrupt_disabled = false;
                atomic_set(&new_smi->req_events, 1);
        }

        dev_set_drvdata(new_smi->io.dev, new_smi);
        rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
        if (rv) {
                dev_err(new_smi->io.dev,
                        "Unable to add device attributes: error %d\n",
                        rv);
                goto out_err;
        }
        new_smi->dev_group_added = true;

        rv = ipmi_register_smi(&handlers, new_smi, new_smi->io.dev,
                               new_smi->io.slave_addr);
        if (rv) {
                dev_err(new_smi->io.dev,
                        "Unable to register device: error %d\n",
                        rv);
                goto out_err;
        }

        /* Don't increment till we know we have succeeded. */
        smi_num++;

        dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
                 si_to_str[new_smi->io.si_type]);

        WARN_ON(new_smi->io.dev->init_name != NULL);

out_err:
        /* On failure, release the I/O mapping and state machine memory. */
        if (rv && new_smi->io.io_cleanup) {
                new_smi->io.io_cleanup(&new_smi->io);
                new_smi->io.io_cleanup = NULL;
        }

        if (rv && new_smi->si_sm) {
                kfree(new_smi->si_sm);
                new_smi->si_sm = NULL;
        }

        return rv;
}

static int __init init_ipmi_si(void)
{
        struct smi_info *e;
        enum ipmi_addr_src type = SI_INVALID;

        if (initialized)
                return 0;

        ipmi_hardcode_init();

        pr_info("IPMI System Interface driver\n");

        ipmi_si_platform_init();

        ipmi_si_pci_init();

        ipmi_si_parisc_init();

        /* We prefer devices with interrupts, but in the case of a machine
           with multiple BMCs we assume that there will be several instances
           of a given type so if we succeed in registering a type then also
           try to register everything else of the same type */
        mutex_lock(&smi_infos_lock);
        list_for_each_entry(e, &smi_infos, link) {
                /* Try to register a device if it has an IRQ and we either
                   haven't successfully registered a device yet or this
                   device has the same type as one we successfully registered */
                if (e->io.irq && (!type || e->io.addr_source == type)) {
                        if (!try_smi_init(e)) {
                                type = e->io.addr_source;
                        }
                }
        }

        /* type will only have been set if we successfully registered an si */
        if (type)
                goto skip_fallback_noirq;

        /* Fall back to the preferred device */
        list_for_each_entry(e, &smi_infos, link) {
                if (!e->io.irq && (!type || e->io.addr_source == type)) {
                        if (!try_smi_init(e)) {
                                type = e->io.addr_source;
                        }
                }
        }

skip_fallback_noirq:
        initialized = true;
        mutex_unlock(&smi_infos_lock);

        if (type)
                return 0;

        mutex_lock(&smi_infos_lock);
        if (unload_when_empty && list_empty(&smi_infos)) {
                mutex_unlock(&smi_infos_lock);
                cleanup_ipmi_si();
                pr_warn("Unable to find any System Interface(s)\n");
                return -ENODEV;
        } else {
                mutex_unlock(&smi_infos_lock);
                return 0;
        }
}
module_init(init_ipmi_si);

/*
 * Busy-wait (with 1-jiffy sleeps) until the state machine is idle and
 * no message is in flight, driving the event handler each iteration.
 */
static void wait_msg_processed(struct smi_info *smi_info)
{
        unsigned long jiffies_now;
        long time_diff;

        while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
                jiffies_now = jiffies;
                time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
                smi_event_handler(smi_info, time_diff);
                schedule_timeout_uninterruptible(1);
        }
}

static void shutdown_smi(void *send_info)
{
        struct smi_info *smi_info = send_info;

        if (smi_info->dev_group_added) {
                device_remove_group(smi_info->io.dev,
                                    &ipmi_si_dev_attr_group);
                smi_info->dev_group_added = false;
        }
        if (smi_info->io.dev)
                dev_set_drvdata(smi_info->io.dev, NULL);

        /*
         * Make sure that interrupts, the timer and the thread are
         * stopped and will not run again.
         */
        smi_info->interrupt_disabled = true;
        if (smi_info->io.irq_cleanup) {
                smi_info->io.irq_cleanup(&smi_info->io);
                smi_info->io.irq_cleanup = NULL;
        }
        stop_timer_and_thread(smi_info);

        /*
         * Wait until we know that we are out of any interrupt
         * handlers that might have been running before we freed the
         * interrupt.
         */
        synchronize_rcu();

        /*
         * Timeouts are stopped, now make sure the interrupts are off
         * in the BMC.  Note that timers and CPU interrupts are off,
         * so no need for locks.
         */
        wait_msg_processed(smi_info);
        if (smi_info->handlers)
                disable_si_irq(smi_info);
        wait_msg_processed(smi_info);

        if (smi_info->handlers)
                smi_info->handlers->cleanup(smi_info->si_sm);

        if (smi_info->io.io_cleanup) {
                smi_info->io.io_cleanup(&smi_info->io);
                smi_info->io.io_cleanup = NULL;
        }

        kfree(smi_info->si_sm);
        smi_info->si_sm = NULL;

        smi_info->intf = NULL;
}

/*
 * Must be called with smi_infos_lock held, to serialize the
 * smi_info->intf check.
 */
static void cleanup_one_si(struct smi_info *smi_info)
{
        if (!smi_info)
                return;

        list_del(&smi_info->link);
        ipmi_unregister_smi(smi_info->intf);
        kfree(smi_info);
}

/* Remove the (at most one) interface registered against @dev. */
void ipmi_si_remove_by_dev(struct device *dev)
{
        struct smi_info *e;

        mutex_lock(&smi_infos_lock);
        list_for_each_entry(e, &smi_infos, link) {
                if (e->io.dev == dev) {
                        cleanup_one_si(e);
                        break;
                }
        }
        mutex_unlock(&smi_infos_lock);
}

/*
 * Remove interfaces matching the given address space, SI type and
 * address.  Returns the (referenced) device of the last interface
 * removed, or NULL; the caller owns the reference.
 */
struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
                                      unsigned long addr)
{
        /* remove */
        struct smi_info *e, *tmp_e;
        struct device *dev = NULL;

        mutex_lock(&smi_infos_lock);
        list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
                if (e->io.addr_space != addr_space)
                        continue;
                if (e->io.si_type != si_type)
                        continue;
                if (e->io.addr_data == addr) {
                        dev = get_device(e->io.dev);
                        cleanup_one_si(e);
                }
        }
        mutex_unlock(&smi_infos_lock);

        return dev;
}

static void cleanup_ipmi_si(void)
{
        struct smi_info *e, *tmp_e;

        if (!initialized)
                return;

        ipmi_si_pci_shutdown();

        ipmi_si_parisc_shutdown();

        ipmi_si_platform_shutdown();

        mutex_lock(&smi_infos_lock);
        list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
                cleanup_one_si(e);
        mutex_unlock(&smi_infos_lock);

        ipmi_si_hardcode_exit();
        ipmi_si_hotmod_exit();
}
module_exit(cleanup_ipmi_si);

MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <[email protected]>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * hcd.h - DesignWare HS OTG Controller host-mode declarations
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 */
#ifndef __DWC2_HCD_H__
#define __DWC2_HCD_H__

/*
 * This file contains the structures, constants, and interfaces for the
 * Host Controller Driver (HCD)
 *
 * The Host Controller Driver (HCD) is responsible for translating requests
 * from the USB Driver into the appropriate actions on the DWC_otg controller.
 * It isolates the USBD from the specifics of the controller by providing an
 * API to the USBD.
 */

struct dwc2_qh;

/**
 * struct dwc2_host_chan - Software host channel descriptor
 *
 * @hc_num:             Host channel number, used for register address lookup
 * @dev_addr:           Address of the device
 * @ep_num:             Endpoint of the device
 * @ep_is_in:           Endpoint direction
 * @speed:              Device speed. One of the following values:
 *                       - USB_SPEED_LOW
 *                       - USB_SPEED_FULL
 *                       - USB_SPEED_HIGH
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL: 0
 *                       - USB_ENDPOINT_XFER_ISOC:    1
 *                       - USB_ENDPOINT_XFER_BULK:    2
 *                       - USB_ENDPOINT_XFER_INTR:    3
 * @max_packet:         Max packet size in bytes
 * @data_pid_start:     PID for initial transaction.
 *                       0: DATA0
 *                       1: DATA2
 *                       2: DATA1
 *                       3: MDATA (non-Control EP),
 *                          SETUP (Control EP)
 * @multi_count:        Number of additional periodic transactions per
 *                      (micro)frame
 * @xfer_buf:           Pointer to current transfer buffer position
 * @xfer_dma:           DMA address of xfer_buf
 * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
 *                      DWORD aligned
 * @xfer_len:           Total number of bytes to transfer
 * @xfer_count:         Number of bytes transferred so far
 * @start_pkt_count:    Packet count at start of transfer
 * @xfer_started:       True if the transfer has been started
 * @do_ping:            True if a PING request should be issued on this channel
 * @error_state:        True if the error count for this transaction is
 *                      non-zero
 * @halt_on_queue:      True if this channel should be halted the next time a
 *                      request is queued for the channel. This is necessary in
 *                      slave mode if no request queue space is available when
 *                      an attempt is made to halt the channel.
 * @halt_pending:       True if the host channel has been halted, but the core
 *                      is not finished flushing queued requests
 * @do_split:           Enable split for the channel
 * @complete_split:     Enable complete split
 * @hub_addr:           Address of high speed hub for the split
 * @hub_port:           Port of the low/full speed device for the split
 * @xact_pos:           Split transaction position. One of the following values:
 *                       - DWC2_HCSPLT_XACTPOS_MID
 *                       - DWC2_HCSPLT_XACTPOS_BEGIN
 *                       - DWC2_HCSPLT_XACTPOS_END
 *                       - DWC2_HCSPLT_XACTPOS_ALL
 * @requests:           Number of requests issued for this channel since it was
 *                      assigned to the current transfer (not counting PINGs)
 * @schinfo:            Scheduling micro-frame bitmap
 * @ntd:                Number of transfer descriptors for the transfer
 * @halt_status:        Reason for halting the host channel
 * @hcint:              Contents of the HCINT register when the interrupt came
 * @qh:                 QH for the transfer being processed by this channel
 * @hc_list_entry:      For linking to list of host channels
 * @desc_list_addr:     Current QH's descriptor list DMA address
 * @desc_list_sz:       Current QH's descriptor list size
 * @split_order_list_entry: List entry for keeping track of the order of splits
 *
 * This structure represents the state of a single host channel when acting in
 * host mode. It contains the data items needed to transfer packets to an
 * endpoint via a host channel.
 */
struct dwc2_host_chan {
        u8 hc_num;

        unsigned dev_addr:7;
        unsigned ep_num:4;
        unsigned ep_is_in:1;
        unsigned speed:4;
        unsigned ep_type:2;
        unsigned max_packet:11;
        unsigned data_pid_start:2;
#define DWC2_HC_PID_DATA0       TSIZ_SC_MC_PID_DATA0
#define DWC2_HC_PID_DATA2       TSIZ_SC_MC_PID_DATA2
#define DWC2_HC_PID_DATA1       TSIZ_SC_MC_PID_DATA1
#define DWC2_HC_PID_MDATA       TSIZ_SC_MC_PID_MDATA
#define DWC2_HC_PID_SETUP       TSIZ_SC_MC_PID_SETUP

        unsigned multi_count:2;

        u8 *xfer_buf;
        dma_addr_t xfer_dma;
        dma_addr_t align_buf;
        u32 xfer_len;
        u32 xfer_count;
        u16 start_pkt_count;
        u8 xfer_started;
        u8 do_ping;
        u8 error_state;
        u8 halt_on_queue;
        u8 halt_pending;
        u8 do_split;
        u8 complete_split;
        u8 hub_addr;
        u8 hub_port;
        u8 xact_pos;
#define DWC2_HCSPLT_XACTPOS_MID HCSPLT_XACTPOS_MID
#define DWC2_HCSPLT_XACTPOS_END HCSPLT_XACTPOS_END
#define DWC2_HCSPLT_XACTPOS_BEGIN HCSPLT_XACTPOS_BEGIN
#define DWC2_HCSPLT_XACTPOS_ALL HCSPLT_XACTPOS_ALL

        u8 requests;
        u8 schinfo;
        u16 ntd;
        enum dwc2_halt_status halt_status;
        u32 hcint;
        struct dwc2_qh *qh;
        struct list_head hc_list_entry;
        dma_addr_t desc_list_addr;
        u32 desc_list_sz;
        struct list_head split_order_list_entry;
};

/* Static endpoint/pipe identification for an URB. */
struct dwc2_hcd_pipe_info {
        u8 dev_addr;
        u8 ep_num;
        u8 pipe_type;
        u8 pipe_dir;
        u16 maxp;
        u16 maxp_mult;
};

/* Per-packet bookkeeping for one isochronous frame descriptor. */
struct dwc2_hcd_iso_packet_desc {
        u32 offset;
        u32 length;
        u32 actual_length;
        u32 status;
};

struct dwc2_qtd;

/* Driver-internal representation of a URB submitted to the HCD. */
struct dwc2_hcd_urb {
        void *priv;
        struct dwc2_qtd *qtd;
        void *buf;
        dma_addr_t dma;
        void *setup_packet;
        dma_addr_t setup_dma;
        u32 length;
        u32 actual_length;
        u32 status;
        u32 error_count;
        u32 packet_count;
        u32 flags;
        u16 interval;
        struct dwc2_hcd_pipe_info pipe_info;
        struct dwc2_hcd_iso_packet_desc iso_descs[];
};

/* Phases for control transfers */
enum dwc2_control_phase {
        DWC2_CONTROL_SETUP,
        DWC2_CONTROL_DATA,
        DWC2_CONTROL_STATUS,
};

/* Transaction types */
enum dwc2_transaction_type {
        DWC2_TRANSACTION_NONE,
        DWC2_TRANSACTION_PERIODIC,
        DWC2_TRANSACTION_NON_PERIODIC,
        DWC2_TRANSACTION_ALL,
};

/* The number of elements per LS bitmap (per port on
multi_tt) */
#define DWC2_ELEMENTS_PER_LS_BITMAP     DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \
                                                     BITS_PER_LONG)

/**
 * struct dwc2_tt - dwc2 data associated with a usb_tt
 *
 * @refcount:           Number of Queue Heads (QHs) holding a reference.
 * @usb_tt:             Pointer back to the official usb_tt.
 * @periodic_bitmaps:   Bitmap for which parts of the 1ms frame are accounted
 *                      for already.  Each is DWC2_ELEMENTS_PER_LS_BITMAP
 *                      elements (so sizeof(long) times that in bytes).
 *
 * This structure is stored in the hcpriv of the official usb_tt.
 */
struct dwc2_tt {
        int refcount;
        struct usb_tt *usb_tt;
        unsigned long periodic_bitmaps[];
};

/**
 * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
 *
 * @start_schedule_us:  The start time on the main bus schedule.  Note that
 *                      the main bus schedule is tightly packed and this
 *                      time should be interpreted as tightly packed (so
 *                      uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
 *                      instead of 125 us).
 * @duration_us:        How long this transfer goes.
 */
struct dwc2_hs_transfer_time {
        u32 start_schedule_us;
        u16 duration_us;
};

/**
 * struct dwc2_qh - Software queue head structure
 *
 * @hsotg:              The HCD state structure for the DWC OTG controller
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL
 *                       - USB_ENDPOINT_XFER_BULK
 *                       - USB_ENDPOINT_XFER_INT
 *                       - USB_ENDPOINT_XFER_ISOC
 * @ep_is_in:           Endpoint direction
 * @maxp:               Value from wMaxPacketSize field of Endpoint Descriptor
 * @maxp_mult:          Multiplier for maxp
 * @dev_speed:          Device speed. One of the following values:
 *                       - USB_SPEED_LOW
 *                       - USB_SPEED_FULL
 *                       - USB_SPEED_HIGH
 * @data_toggle:        Determines the PID of the next data packet for
 *                      non-control transfers. Ignored for control transfers.
 *                      One of the following values:
 *                       - DWC2_HC_PID_DATA0
 *                       - DWC2_HC_PID_DATA1
 * @ping_state:         Ping state
 * @do_split:           Full/low speed endpoint on high-speed hub requires split
 * @td_first:           Index of first activated isochronous transfer descriptor
 * @td_last:            Index of last activated isochronous transfer descriptor
 * @host_us:            Bandwidth in microseconds per transfer as seen by host
 * @device_us:          Bandwidth in microseconds per transfer as seen by device
 * @host_interval:      Interval between transfers as seen by the host.  If
 *                      the host is high speed and the device is low speed this
 *                      will be 8 times device interval.
 * @device_interval:    Interval between transfers as seen by the device.
 * @next_active_frame:  (Micro)frame _before_ we next need to put something on
 *                      the bus.  We'll move the qh to active here.  If the
 *                      host is in high speed mode this will be a uframe.  If
 *                      the host is in low speed mode this will be a full frame.
 * @start_active_frame: If we are partway through a split transfer, this will be
 *                      what next_active_frame was when we started.  Otherwise
 *                      it should always be the same as next_active_frame.
 * @num_hs_transfers:   Number of transfers in hs_transfers.
 *                      Normally this is 1 but can be more than one for splits.
 *                      Always >= 1 unless the host is in low/full speed mode.
 * @hs_transfers:       Transfers that are scheduled as seen by the high speed
 *                      bus.  Not used if host is in low or full speed mode (but
 *                      note that it IS USED if the device is low or full speed
 *                      as long as the HOST is in high speed mode).
 * @ls_start_schedule_slice: Start time (in slices) on the low speed bus
 *                      schedule that's being used by this device.  This
 *                      will be on the periodic_bitmap in a
 *                      "struct dwc2_tt".  Not used if this device is high
 *                      speed.  Note that this is in "schedule slice" which
 *                      is tightly packed.
 * @ntd:                Actual number of transfer descriptors in a list
 * @dw_align_buf:       Used instead of original buffer if its physical address
 *                      is not dword-aligned
 * @dw_align_buf_dma:   DMA address for dw_align_buf
 * @qtd_list:           List of QTDs for this QH
 * @channel:            Host channel currently processing transfers for this QH
 * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
 *                      schedule
 * @desc_list:          List of transfer descriptors
 * @desc_list_dma:      Physical address of desc_list
 * @desc_list_sz:       Size of descriptors list
 * @n_bytes:            Xfer Bytes array.  Each element corresponds to a
 *                      transfer descriptor and indicates original XferSize
 *                      value for the descriptor
 * @unreserve_timer:    Timer for releasing periodic reservation.
 * @wait_timer:         Timer used to wait before re-queuing.
 * @dwc_tt:             Pointer to our tt info (or NULL if no tt).
 * @ttport:             Port number within our tt.
 * @tt_buffer_dirty:    True if clear_tt_buffer_complete is pending (the EP's
 *                      TT buffer is not clean).
 * @unreserve_pending:  True if we planned to unreserve but haven't yet.
 * @schedule_low_speed: True if we have a low/full speed component (either the
 *                      host is in low/full speed mode or do_split).
 * @want_wait:          We should wait before re-queuing; only matters for non-
 *                      periodic transfers and is ignored for periodic ones.
 * @wait_timer_cancel:  Set to true to cancel the wait_timer.
 *
 * A Queue Head (QH) holds the static characteristics of an endpoint and
 * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
 * be entered in either the non-periodic or periodic schedule.
 */
struct dwc2_qh {
        struct dwc2_hsotg *hsotg;
        u8 ep_type;
        u8 ep_is_in;
        u16 maxp;
        u16 maxp_mult;
        u8 dev_speed;
        u8 data_toggle;
        u8 ping_state;
        u8 do_split;
        u8 td_first;
        u8 td_last;
        u16 host_us;
        u16 device_us;
        u16 host_interval;
        u16 device_interval;
        u16 next_active_frame;
        u16 start_active_frame;
        s16 num_hs_transfers;
        struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
        u32 ls_start_schedule_slice;
        u16 ntd;
        u8 *dw_align_buf;
        dma_addr_t dw_align_buf_dma;
        struct list_head qtd_list;
        struct dwc2_host_chan *channel;
        struct list_head qh_list_entry;
        struct dwc2_dma_desc *desc_list;
        dma_addr_t desc_list_dma;
        u32 desc_list_sz;
        u32 *n_bytes;
        struct timer_list unreserve_timer;
        struct hrtimer wait_timer;
        struct dwc2_tt *dwc_tt;
        int ttport;
        unsigned tt_buffer_dirty:1;
        unsigned unreserve_pending:1;
        unsigned schedule_low_speed:1;
        unsigned want_wait:1;
        unsigned wait_timer_cancel:1;
};

/**
 * struct dwc2_qtd - Software queue transfer descriptor (QTD)
 *
 * @control_phase:      Current phase for control transfers (Setup, Data, or
 *                      Status)
 * @in_process:         Indicates if this QTD is currently processed by HW
 * @data_toggle:        Determines the PID of the next data packet for the
 *                      data phase of control transfers. Ignored for other
 *                      transfer types. One of the following values:
 *                       - DWC2_HC_PID_DATA0
 *                       - DWC2_HC_PID_DATA1
 * @complete_split:     Keeps track of the current split type for FS/LS
 *                      endpoints on a HS Hub
 * @isoc_split_pos:     Position of the ISOC split in full/low speed
 * @isoc_frame_index:   Index of the next frame descriptor for an isochronous
 *                      transfer. A frame descriptor describes the buffer
 *                      position and length of the data to be transferred in
 *                      the next scheduled (micro)frame of an isochronous
 *                      transfer. It also holds status for that transaction.
 *                      The frame index starts at 0.
 * @isoc_split_offset:  Position of the ISOC split in the buffer for the
 *                      current frame
 * @ssplit_out_xfer_count: How many bytes transferred during SSPLIT OUT
 * @error_count:        Holds the number of bus errors that have occurred for
 *                      a transaction within this transfer
 * @n_desc:             Number of DMA descriptors for this QTD
 * @isoc_frame_index_last: Last activated frame (packet) index, used in
 *                      descriptor DMA mode only
 * @num_naks:           Number of NAKs received on this QTD.
 * @urb:                URB for this transfer
 * @qh:                 Queue head for this QTD
 * @qtd_list_entry:     For linking to the QH's list of QTDs
 * @isoc_td_first:      Index of first activated isochronous transfer
 *                      descriptor in Descriptor DMA mode
 * @isoc_td_last:       Index of last activated isochronous transfer
 *                      descriptor in Descriptor DMA mode
 *
 * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
 * interrupt, or isochronous transfer. A single QTD is created for each URB
 * (of one of these types) submitted to the HCD. The transfer associated with
 * a QTD may require one or multiple transactions.
 *
 * A QTD is linked to a Queue Head, which is entered in either the
 * non-periodic or periodic schedule for execution. When a QTD is chosen for
 * execution, some or all of its transactions may be executed. After
 * execution, the state of the QTD is updated. The QTD may be retired if all
 * its transactions are complete or if an error occurred. Otherwise, it
 * remains in the schedule so more transactions can be executed later.
 */
struct dwc2_qtd {
        enum dwc2_control_phase control_phase;
        u8 in_process;
        u8 data_toggle;
        u8 complete_split;
        u8 isoc_split_pos;
        u16 isoc_frame_index;
        u16 isoc_split_offset;
        u16 isoc_td_last;
        u16 isoc_td_first;
        u32 ssplit_out_xfer_count;
        u8 error_count;
        u8 n_desc;
        u16 isoc_frame_index_last;
        u16 num_naks;
        struct dwc2_hcd_urb *urb;
        struct dwc2_qh *qh;
        struct list_head qtd_list_entry;
};

#ifdef DEBUG
struct hc_xfer_info {
        struct dwc2_hsotg *hsotg;
        struct dwc2_host_chan *chan;
};
#endif

u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);

/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
{
        return (struct usb_hcd *)hsotg->priv;
}

/*
 * Inline used to disable one channel interrupt. Channel interrupts are
 * disabled when the channel is halted or released by the interrupt handler.
 * There is no need to handle further interrupts of that type until the
 * channel is re-assigned. In fact, subsequent handling may cause crashes
 * because the channel structures are cleaned up when the channel is released.
 */
static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
{
        u32 mask = dwc2_readl(hsotg, HCINTMSK(chnum));

        mask &= ~intr;
        dwc2_writel(hsotg, mask, HCINTMSK(chnum));
}

void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
                  enum dwc2_halt_status halt_status);
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
                                 struct dwc2_host_chan *chan);

/*
 * Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they
 * are read as 1, they won't clear when written back.
 */
static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
{
        u32 hprt0 = dwc2_readl(hsotg, HPRT0);

        hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
        return hprt0;
}

/* Trivial accessors for the static pipe description of an URB. */
static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->ep_num;
}

static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type;
}

static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->maxp;
}

static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->maxp_mult;
}

static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->dev_addr;
}

static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
}

static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
}

static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
}

static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_dir == USB_DIR_IN;
}

static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
{
        return !dwc2_hcd_is_pipe_in(pipe);
}

int dwc2_hcd_init(struct dwc2_hsotg *hsotg);
void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);

/* Transaction Execution Functions */
enum dwc2_transaction_type dwc2_hcd_select_transactions(
                                                struct dwc2_hsotg *hsotg);
void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
                                 enum dwc2_transaction_type tr_type);

/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
                                   struct dwc2_hcd_urb *urb,
                                   gfp_t mem_flags);
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                            int sched_csplit);

void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
                     struct dwc2_qh *qh);

/* Unlinks and frees a QTD */
static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
                                                struct dwc2_qtd *qtd,
                                                struct dwc2_qh *qh)
{
        list_del(&qtd->qtd_list_entry);
        kfree(qtd);
}

/* Descriptor DMA support functions */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg,
                              struct dwc2_qh *qh);
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
                                 struct dwc2_host_chan *chan, int chnum,
                                 enum dwc2_halt_status halt_status);

int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                          gfp_t mem_flags);
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);

/* Check if QH is non-periodic */
#define dwc2_qh_is_non_per(_qh_ptr_) \
        ((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
         (_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)

#ifdef CONFIG_USB_DWC2_DEBUG_PERIODIC
static inline bool dbg_hc(struct dwc2_host_chan *hc) { return true; }
static inline bool dbg_qh(struct dwc2_qh *qh) { return true; }
static inline bool dbg_urb(struct urb *urb) { return true; }
static inline bool dbg_perio(void) { return true; }
#else /* !CONFIG_USB_DWC2_DEBUG_PERIODIC */
/* Without periodic debug, only bulk/control traffic is considered loggable. */
static inline bool dbg_hc(struct dwc2_host_chan *hc)
{
        return hc->ep_type == USB_ENDPOINT_XFER_BULK ||
               hc->ep_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline bool dbg_qh(struct dwc2_qh *qh)
{
        return qh->ep_type == USB_ENDPOINT_XFER_BULK ||
               qh->ep_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline bool dbg_urb(struct urb *urb)
{
        return usb_pipetype(urb->pipe) == PIPE_BULK ||
               usb_pipetype(urb->pipe) == PIPE_CONTROL;
}

static inline bool dbg_perio(void) { return false; }
#endif

/*
 * Returns true if frame1 index is greater than frame2 index. The comparison
 * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
 * frame number when the max index frame number is reached.
 */
static inline bool dwc2_frame_idx_num_gt(u16 fr_idx1, u16 fr_idx2)
{
        u16 diff = fr_idx1 - fr_idx2;
        u16 sign = diff & (FRLISTEN_64_SIZE >> 1);

        return diff && !sign;
}

/*
 * Returns true if frame1 is less than or equal to frame2. The comparison is
 * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
 * frame number when the max frame number is reached.
 */
static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
{
        return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
}

/*
 * Returns true if frame1 is greater than frame2. The comparison is done
 * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
 * number when the max frame number is reached.
 */
static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
{
        return (frame1 != frame2) &&
               ((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
}

/*
 * Increments frame by the amount specified by inc. The addition is done
 * modulo HFNUM_MAX_FRNUM. Returns the incremented value.
 */
static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
{
        return (frame + inc) & HFNUM_MAX_FRNUM;
}

/* Decrements frame by dec, modulo HFNUM_MAX_FRNUM. */
static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec)
{
        return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
}

/* Full (1 ms) frame number: strip the 3 microframe bits. */
static inline u16 dwc2_full_frame_num(u16 frame)
{
        return (frame & HFNUM_MAX_FRNUM) >> 3;
}

/* Microframe number within the current full frame (0..7). */
static inline u16 dwc2_micro_frame_num(u16 frame)
{
        return frame & 0x7;
}

/*
 * Returns the Core Interrupt Status register contents, ANDed with the Core
 * Interrupt Mask register contents
 */
static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
{
        return dwc2_readl(hsotg, GINTSTS) &
               dwc2_readl(hsotg, GINTMSK);
}

static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
{
        return dwc2_urb->status;
}

static inline u32 dwc2_hcd_urb_get_actual_length(
                struct dwc2_hcd_urb *dwc2_urb)
{
        return dwc2_urb->actual_length;
}

static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
{
        return dwc2_urb->error_count;
}

static inline void dwc2_hcd_urb_set_iso_desc_params(
                struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
                u32 length)
{
        dwc2_urb->iso_descs[desc_num].offset = offset;
        dwc2_urb->iso_descs[desc_num].length = length;
}

static inline u32 dwc2_hcd_urb_get_iso_desc_status(
                struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
        return dwc2_urb->iso_descs[desc_num].status;
}

static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
                struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
        return dwc2_urb->iso_descs[desc_num].actual_length;
}

/* An endpoint has bandwidth allocated iff its QH is on a schedule list. */
static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
                                                  struct usb_host_endpoint *ep)
{
        struct dwc2_qh *qh = ep->hcpriv;

        if (qh && !list_empty(&qh->qh_list_entry))
                return 1;

        return 0;
}

static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
                                            struct usb_host_endpoint *ep)
{
        struct dwc2_qh *qh = ep->hcpriv;

        if (!qh) {
                WARN_ON(1);
                return 0;
        }

        return qh->host_us;
}

void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
                               struct dwc2_host_chan *chan, int chnum,
                               struct
dwc2_qtd *qtd); /* HCD Core API */ /** * dwc2_handle_hcd_intr() - Called on every hardware interrupt * * @hsotg: The DWC2 HCD * * Returns IRQ_HANDLED if interrupt is handled * Return IRQ_NONE if interrupt is not handled */ irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg); /** * dwc2_hcd_stop() - Halts the DWC_otg host mode operation * * @hsotg: The DWC2 HCD */ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg); /** * dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host, * and 0 otherwise * * @hsotg: The DWC2 HCD */ int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg); /** * dwc2_hcd_dump_state() - Dumps hsotg state * * @hsotg: The DWC2 HCD * * NOTE: This function will be removed once the peripheral controller code * is integrated and the driver is stable */ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg); /* URB interface */ /* Transfer flags */ #define URB_GIVEBACK_ASAP 0x1 #define URB_SEND_ZERO_PACKET 0x2 /* Host driver callbacks */ struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context, gfp_t mem_flags, int *ttport); void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt); int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context); void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, int status); #endif /* __DWC2_HCD_H__ */
/* SPDX-License-Identifier: GPL-2.0+ */
/* keyinfo.h - key/function codes for the speakup screen review package.
 * (NOTE(review): original file comment said "spk_priv.h" — the include
 * guard below says _SPEAKUP_KEYINFO_H, so this appears to be keyinfo.h
 * with a header comment copied from spk_priv.h.)
 * originally written by: Kirk Reiser and Andy Berdan.
 *
 * extensively modified by David Borowski.
 *
 * Copyright (C) 1998  Kirk Reiser.
 * Copyright (C) 2003  David Borowski.
 */

#ifndef _SPEAKUP_KEYINFO_H
#define _SPEAKUP_KEYINFO_H

#define FIRST_SYNTH_VAR RATE
/* 0 is reserved for no remap */

/* Review/function codes dispatched by the keyboard handler. */
#define SPEAKUP_GOTO		0x01
#define SPEECH_KILL		0x02
#define SPEAKUP_QUIET		0x03
#define SPEAKUP_CUT		0x04
#define SPEAKUP_PASTE		0x05
#define SAY_FIRST_CHAR		0x06
#define SAY_LAST_CHAR		0x07
#define SAY_CHAR		0x08
#define SAY_PREV_CHAR		0x09
#define SAY_NEXT_CHAR		0x0a
#define SAY_WORD		0x0b
#define SAY_PREV_WORD		0x0c
#define SAY_NEXT_WORD		0x0d
#define SAY_LINE		0x0e
#define SAY_PREV_LINE		0x0f
#define SAY_NEXT_LINE		0x10
#define TOP_EDGE		0x11
#define BOTTOM_EDGE		0x12
#define LEFT_EDGE		0x13
#define RIGHT_EDGE		0x14
#define SPELL_PHONETIC		0x15
#define SPELL_WORD		0x16
#define SAY_SCREEN		0x17
#define SAY_POSITION		0x18
#define SAY_ATTRIBUTES		0x19
#define SPEAKUP_OFF		0x1a
#define SPEAKUP_PARKED		0x1b
#define SAY_LINE_INDENT		0x1c
#define SAY_FROM_TOP		0x1d
#define SAY_TO_BOTTOM		0x1e
#define SAY_FROM_LEFT		0x1f
#define SAY_TO_RIGHT		0x20
#define SAY_CHAR_NUM		0x21
#define EDIT_SOME		0x22
#define EDIT_MOST		0x23
#define SAY_PHONETIC_CHAR	0x24
#define EDIT_DELIM		0x25
#define EDIT_REPEAT		0x26
#define EDIT_EXNUM		0x27
#define SET_WIN			0x28
#define CLEAR_WIN		0x29
#define ENABLE_WIN		0x2a
#define SAY_WIN			0x2b
#define SPK_LOCK		0x2c
#define SPEAKUP_HELP		0x2d
#define TOGGLE_CURSORING	0x2e
#define READ_ALL_DOC		0x2f
/* one greater than the last func handler */
#define SPKUP_MAX_FUNC		0x30

#define SPK_KEY			0x80
#define FIRST_EDIT_BITS		0x22	/* same value as EDIT_SOME above */

#define FIRST_SET_VAR SPELL_DELAY
/* increase if adding more than 0x3f functions */
#define VAR_START		0x40

/* keys for setting variables, must be ordered same as the enum for var_ids */
/* with dec being even and inc being 1 greater */
#define SPELL_DELAY_DEC (VAR_START + 0)
#define SPELL_DELAY_INC (SPELL_DELAY_DEC + 1)
#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC + 2)
#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC + 1)
#define READING_PUNC_DEC (PUNC_LEVEL_DEC + 2)
#define READING_PUNC_INC (READING_PUNC_DEC + 1)
#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC + 2)
#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC + 1)
#define BLEEPS_DEC (ATTRIB_BLEEP_DEC + 2)
#define BLEEPS_INC (BLEEPS_DEC + 1)
#define RATE_DEC (BLEEPS_DEC + 2)
#define RATE_INC (RATE_DEC + 1)
#define PITCH_DEC (RATE_DEC + 2)
#define PITCH_INC (PITCH_DEC + 1)
#define VOL_DEC (PITCH_DEC + 2)
#define VOL_INC (VOL_DEC + 1)
#define TONE_DEC (VOL_DEC + 2)
#define TONE_INC (TONE_DEC + 1)
#define PUNCT_DEC (TONE_DEC + 2)
#define PUNCT_INC (PUNCT_DEC + 1)
#define VOICE_DEC (PUNCT_DEC + 2)
#define VOICE_INC (VOICE_DEC + 1)

#endif
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

/*
 * Shaper levels. Note the two namespaces deliberately overlap:
 * PRI/PG/PORT/QSET (0..3) index tick_array below, while VF/PF (0/1)
 * reuse the same first two values for vnet-base scheduling.
 */
enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

/* default bucket-size exponents for all shaper configs below */
#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: Rate to be config, its unit is Mbps
 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
 * @ir_para: parameters of IR shaper
 * @max_tm_rate: max tm rate is available to config
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	/* per-level Tick values from the hardware formula above */
	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/*
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		/* requested rate is exactly representable with defaults */
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		/* round-to-nearest: add half the divisor before dividing */
		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			/* overshot by one step; back ir_u off and re-derive ir_b */
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);

			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}

/* byte offsets of the per-priority PFC TX counters inside mac_stats */
static const u16 hclge_pfc_tx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
};

/* byte offsets of the per-priority PFC RX counters inside mac_stats */
static const u16 hclge_pfc_rx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
};

/* Copies one PFC counter per TC out of the cached mac_stats into stats[] */
static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
{
	const u16 *offset;
	int i;

	if (tx)
		offset = hclge_pfc_tx_stats_offset;
	else
		offset = hclge_pfc_rx_stats_offset;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
}

void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, false, stats);
}

void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, true, stats);
}

/* Enables/disables MAC (link-level) pause in each direction via firmware */
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Configures per-priority flow control (PFC) enable bitmaps via firmware */
int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
			   u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Writes pause MAC address + transmit gap/time parameters to firmware */
static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/*
 * Updates the pause MAC address while preserving the gap/time values
 * currently programmed in hardware (read-modify-write via two commands).
 */
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

/* Packs one priority's TC mapping into the nibble-packed pri[] array */
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];
	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/*
	 * the register for priority has four bytes, the first bytes includes
	 *  priority0 and priority1, the higher 4bit stands for priority1
	 *  while the lower 4bit stands for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

/* Programs the user-priority -> TC map for all 8 priorities */
int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Resets the PF's DSCP->priority map to "prio mode, no DSCP apps" */
static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
{
	u8 i;

	hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
	hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
	for (i = 0; i < HNAE3_MAX_DSCP; i++)
		hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
}

/* Programs the full 64-entry DSCP -> TC map (two buffer descriptors) */
int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
	u8 *req0 = (u8 *)desc[0].data;
	u8 *req1 = (u8 *)desc[1].data;
	u8 pri_id, tc_id, i, j;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);

	/* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
		/* unset DSCP entries fall back to priority 0 */
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		/* Each dscp setting has 4 bits, so each byte saves two dscp
		 * setting
		 */
		req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);

		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
	}

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
}

/* Links one priority group to the set of priorities in pri_bit_map */
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Links one qset to a priority; link_vld marks the mapping as active */
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
				      bool link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Links one NIC queue to a qset, re-encoding qs_id for >= 1024 qsets */
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);

	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /         / \         \
	 *           /         /   \         \
	 * qset_id: | 15 ~ 11 | 10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Sets the DWRR weight of one priority group */
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Sets the DWRR weight of one priority */
static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Sets the DWRR weight of one qset */
static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Packs the five shaper sub-fields into one 32-bit register value */
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

/* Writes a PG-level shaper (CIR or PIR bucket, chosen by 'bucket') */
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
			  HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Programs the port-level shaper from the current MAC speed */
int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Writes a priority-level shaper (CIR or PIR bucket, chosen by 'bucket') */
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
			  HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Selects SP vs DWRR scheduling mode for one priority group */
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Selects SP vs DWRR scheduling mode for one priority */
static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG,
				   false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Selects SP vs DWRR scheduling mode for one qset */
static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG,
				   false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Maps a TC to a group of qsets for back-pressure purposes */
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Applies a max TX-rate shaper to every qset of the vport (0 = unlimited) */
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

/*
 * Largest per-TC queue count usable as RSS size for this vport.
 * Without mqprio this is simply queues-per-TC; with mqprio it is the
 * largest tqp_count among the enabled TCs.
 */
static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}

/* Total number of queue pairs the vport actually uses across its TCs */
static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	int sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}

/* Recomputes TC count, qset offset and RSS size for a PF or VF vport */
static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.max_tc = 1;
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.max_tc = hdev->tc_max;
		kinfo->tc_info.num_tc = min_t(u16, vport->alloc_tqps,
					      hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}

/* Refreshes per-vport TC/queue layout after an RSS or TC change */
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;  /* 100 percent as init */
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	if (vport->vport_id == PF_VPORT_ID)
		hdev->rss_cfg.rss_size = kinfo->rss_size;

	/* when enable mqprio, the tc_info has been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to default queue if TC is disable */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}

/* Runs the TC-info update for every allocated vport */
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

/* Initializes per-TC scheduling info; unused TCs get SP mode and 0 bw */
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i, tc_sch_mode;
	u32 bw_limit;

	for (i = 0; i < hdev->tc_max; i++) {
		if (i < hdev->tm_info.num_tc) {
			tc_sch_mode = HCLGE_SCH_MODE_DWRR;
			bw_limit = hdev->tm_info.pg_info[0].bw_limit;
		} else {
			tc_sch_mode = HCLGE_SCH_MODE_SP;
			bw_limit = 0;
		}

		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit = bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
}

/* Initializes priority-group info; only PG0 carries the TC bitmap/weights */
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100
#define DEFAULT_BW_WEIGHT	1

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
		for (; k < HNAE3_MAX_TC; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
	}
}

/* Pre-V3 devices: derive fc_mode from TC count + pfc_en (DCB flag style) */
static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "Only 1 tc used, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time record the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

/* V3+ devices: derive fc_mode purely from whether any PFC bit is set */
static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}

/* One-stop initialization of all software-side scheduler state */
static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_tm_pfc_info_update(hdev);
}

/* Programs PG -> priority links; only meaningful in TC-base scheduling */
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

/* Programs both CIR and PIR shapers for every priority group */
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		/* CIR bucket gets zeroed IR fields; PIR carries the rate */
		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

/* Programs the DWRR weight of every priority group */
static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/* Links every queue of the vport to its per-TC qset */
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* TC-base mode: qset i of each vport maps to priority i (or 0 if unused) */
static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping, one by one mapping */
	for (k = 0; k < hdev->num_alloc_vport; k++) {
		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

		for (i = 0; i < kinfo->tc_info.max_tc; i++) {
			u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
			bool link_vld = i < kinfo->tc_info.num_tc;

			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 pri, link_vld);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Vnet-base mode: all of a vport's qsets map to the vport's own priority */
static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
	for (k = 0; k < hdev->num_alloc_vport; k++)
		for (i = 0; i < HNAE3_MAX_TC; i++) {
			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 k, true);
			if (ret)
				return ret;
		}

	return 0;
}

/* Dispatches to the TC-base or vnet-base qs->pri setup, then maps queues */
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
	else
		return -EINVAL;

	if (ret)
		return ret;

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

/* TC-base mode: programs CIR/PIR shapers for each TC priority */
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para_c, shaper_para_p;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tc_max; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		if (rate) {
			ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
						     &ir_para, max_tm_rate);
			if (ret)
				return ret;

			shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
			shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
								   ir_para.ir_u,
								   ir_para.ir_s,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
		} else {
			shaper_para_c = 0;
			shaper_para_p = 0;
		}

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para_c, rate);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para_p, rate);
		if (ret)
			return ret;
	}

	return 0;
}

/* Vnet-base mode: programs the vport-level priority shaper */
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}

/*
 * Vnet-base mode: validates the qset shaper parameters for each TC.
 * NOTE(review): ir_para is computed but never written to hardware here —
 * this loop only validates that each TC's bw_limit is representable.
 * Looks intentional (validation-only), but worth confirming upstream.
 */
static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}

/* Vnet-base mode: runs the pri + qset shaper setup for every vport */
static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
if (ret) return ret; } else { ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev); if (ret) return ret; } return 0; } static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; struct hclge_pg_info *pg_info; u8 dwrr; int ret; u32 i, k; for (i = 0; i < hdev->tc_max; i++) { pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; dwrr = pg_info->tc_dwrr[i]; ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr); if (ret) return ret; for (k = 0; k < hdev->num_alloc_vport; k++) { struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; if (i >= kinfo->tc_info.max_tc) continue; dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0; ret = hclge_tm_qs_weight_cfg( hdev, vport[k].qs_offset + i, dwrr); if (ret) return ret; } } return 0; } static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev) { #define DEFAULT_TC_OFFSET 14 struct hclge_ets_tc_weight_cmd *ets_weight; struct hclge_desc desc; unsigned int i; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false); ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; for (i = 0; i < HNAE3_MAX_TC; i++) { struct hclge_pg_info *pg_info; pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; } ets_weight->weight_offset = DEFAULT_TC_OFFSET; return hclge_cmd_send(&hdev->hw, &desc, 1); } static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; int ret; u8 i; /* Vf dwrr */ ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr); if (ret) return ret; /* Qset dwrr */ for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_tm_qs_weight_cfg( hdev, vport->qs_offset + i, hdev->tm_info.pg_info[0].tc_dwrr[i]); if (ret) return ret; } return 0; } static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; int ret; u32 i; for (i = 0; i < 
hdev->num_alloc_vport; i++) { ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport); if (ret) return ret; vport++; } return 0; } static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) { int ret; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); if (ret) return ret; if (!hnae3_dev_dcb_supported(hdev)) return 0; ret = hclge_tm_ets_tc_dwrr_cfg(hdev); if (ret == -EOPNOTSUPP) { dev_warn(&hdev->pdev->dev, "fw %08x doesn't support ets tc weight cmd\n", hdev->fw_version); ret = 0; } return ret; } else { ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); if (ret) return ret; } return 0; } static int hclge_tm_map_cfg(struct hclge_dev *hdev) { int ret; ret = hclge_up_to_tc_map(hdev); if (ret) return ret; if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) { ret = hclge_dscp_to_tc_map(hdev); if (ret) return ret; } ret = hclge_tm_pg_to_pri_map(hdev); if (ret) return ret; return hclge_tm_pri_q_qs_cfg(hdev); } static int hclge_tm_shaper_cfg(struct hclge_dev *hdev) { int ret; ret = hclge_tm_port_shaper_cfg(hdev); if (ret) return ret; ret = hclge_tm_pg_shaper_cfg(hdev); if (ret) return ret; return hclge_tm_pri_shaper_cfg(hdev); } int hclge_tm_dwrr_cfg(struct hclge_dev *hdev) { int ret; ret = hclge_tm_pg_dwrr_cfg(hdev); if (ret) return ret; return hclge_tm_pri_dwrr_cfg(hdev); } static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev) { int ret; u8 i; /* Only being config on TC-Based scheduler mode */ if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) return 0; for (i = 0; i < hdev->tm_info.num_pg; i++) { ret = hclge_tm_pg_schd_mode_cfg(hdev, i); if (ret) return ret; } return 0; } static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) { struct hclge_vport *vport = hdev->vport; int ret; u8 mode; u16 i; ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); if (ret) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo; if (pri_id >= 
kinfo->tc_info.max_tc) continue; mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR : HCLGE_SCH_MODE_SP; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport[i].qs_offset + pri_id, mode); if (ret) return ret; } return 0; } static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; int ret; u8 i; if (vport->vport_id >= HNAE3_MAX_TC) return -EINVAL; ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id); if (ret) return ret; for (i = 0; i < kinfo->tc_info.num_tc; i++) { u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, sch_mode); if (ret) return ret; } return 0; } static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; int ret; u8 i; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { for (i = 0; i < hdev->tc_max; i++) { ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); if (ret) return ret; } } else { for (i = 0; i < hdev->num_alloc_vport; i++) { ret = hclge_tm_schd_mode_vnet_base_cfg(vport); if (ret) return ret; vport++; } } return 0; } static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) { int ret; ret = hclge_tm_lvl2_schd_mode_cfg(hdev); if (ret) return ret; return hclge_tm_lvl34_schd_mode_cfg(hdev); } int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) { int ret; /* Cfg tm mapping */ ret = hclge_tm_map_cfg(hdev); if (ret) return ret; /* Cfg tm shaper */ ret = hclge_tm_shaper_cfg(hdev); if (ret) return ret; /* Cfg dwrr */ ret = hclge_tm_dwrr_cfg(hdev); if (ret) return ret; /* Cfg schd mode for each level schd */ ret = hclge_tm_schd_mode_hw(hdev); if (ret) return ret; return hclge_tm_flush_cfg(hdev, false); } static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; return hclge_pause_param_cfg(hdev, mac->mac_addr, HCLGE_DEFAULT_PAUSE_TRANS_GAP, HCLGE_DEFAULT_PAUSE_TRANS_TIME); } static int 
hclge_pfc_setup_hw(struct hclge_dev *hdev) { u8 enable_bitmap = 0; if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK | HCLGE_RX_MAC_PAUSE_EN_MSK; return hclge_pfc_pause_en_cfg(hdev, enable_bitmap, hdev->tm_info.pfc_en); } /* for the queues that use for backpress, divides to several groups, * each group contains 32 queue sets, which can be represented by u32 bitmap. */ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) { u16 grp_id_shift = HCLGE_BP_GRP_ID_S; u16 grp_id_mask = HCLGE_BP_GRP_ID_M; u8 grp_num = HCLGE_BP_GRP_NUM; int i; if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) { grp_num = HCLGE_BP_EXT_GRP_NUM; grp_id_mask = HCLGE_BP_EXT_GRP_ID_M; grp_id_shift = HCLGE_BP_EXT_GRP_ID_S; } for (i = 0; i < grp_num; i++) { u32 qs_bitmap = 0; int k, ret; for (k = 0; k < hdev->num_alloc_vport; k++) { struct hclge_vport *vport = &hdev->vport[k]; u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift); sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, HCLGE_BP_SUB_GRP_ID_S); if (i == grp) qs_bitmap |= (1 << sub_grp); } ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); if (ret) return ret; } return 0; } int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) { bool tx_en, rx_en; switch (hdev->tm_info.fc_mode) { case HCLGE_FC_NONE: tx_en = false; rx_en = false; break; case HCLGE_FC_RX_PAUSE: tx_en = false; rx_en = true; break; case HCLGE_FC_TX_PAUSE: tx_en = true; rx_en = false; break; case HCLGE_FC_FULL: tx_en = true; rx_en = true; break; case HCLGE_FC_PFC: tx_en = false; rx_en = false; break; default: tx_en = true; rx_en = true; } return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); } static int hclge_tm_bp_setup(struct hclge_dev *hdev) { int ret; int i; for (i = 0; i < hdev->tm_info.num_tc; i++) { ret = hclge_bp_setup_hw(hdev, i); if (ret) return ret; } return 0; } int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) { int ret; ret = hclge_pause_param_setup_hw(hdev); if 
(ret) return ret; ret = hclge_mac_pause_setup_hw(hdev); if (ret) return ret; /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) return 0; /* GE MAC does not support PFC, when driver is initializing and MAC * is in GE Mode, ignore the error here, otherwise initialization * will fail. */ ret = hclge_pfc_setup_hw(hdev); if (init && ret == -EOPNOTSUPP) dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); else if (ret) { dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n", ret); return ret; } return hclge_tm_bp_setup(hdev); } void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) { struct hclge_vport *vport = hdev->vport; struct hnae3_knic_private_info *kinfo; u32 i, k; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { hdev->tm_info.prio_tc[i] = prio_tc[i]; for (k = 0; k < hdev->num_alloc_vport; k++) { kinfo = &vport[k].nic.kinfo; kinfo->tc_info.prio_tc[i] = prio_tc[i]; } } } void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) { u8 bit_map = 0; u8 i; hdev->tm_info.num_tc = num_tc; for (i = 0; i < hdev->tm_info.num_tc; i++) bit_map |= BIT(i); if (!bit_map) { bit_map = 1; hdev->tm_info.num_tc = 1; } hdev->hw_tc_map = bit_map; hclge_tm_schd_info_init(hdev); } int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) { int ret; if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE)) return -ENOTSUPP; ret = hclge_tm_schd_setup_hw(hdev); if (ret) return ret; ret = hclge_pause_setup_hw(hdev, init); if (ret) return ret; return 0; } int hclge_tm_schd_init(struct hclge_dev *hdev) { /* fc_mode is HCLGE_FC_FULL on reset */ hdev->tm_info.fc_mode = HCLGE_FC_FULL; hdev->fc_mode_last_time = hdev->tm_info.fc_mode; if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE && hdev->tm_info.num_pg != 1) return -EINVAL; hclge_tm_schd_info_init(hdev); hclge_dscp_to_prio_map_init(hdev); return hclge_tm_init_hw(hdev, true); } int 
hclge_tm_vport_map_update(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; int ret; hclge_tm_vport_tc_info_update(vport); ret = hclge_vport_q_to_qs_map(hdev, vport); if (ret) return ret; if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) return 0; return hclge_tm_bp_setup(hdev); } int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num) { struct hclge_tm_nodes_cmd *nodes; struct hclge_desc desc; int ret; if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) { /* Each PF has 8 qsets and each VF has 1 qset */ *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev); return 0; } hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get qset num, ret = %d\n", ret); return ret; } nodes = (struct hclge_tm_nodes_cmd *)desc.data; *qset_num = le16_to_cpu(nodes->qset_num); return 0; } int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num) { struct hclge_tm_nodes_cmd *nodes; struct hclge_desc desc; int ret; if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) { *pri_num = HCLGE_TM_PF_MAX_PRI_NUM; return 0; } hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get pri num, ret = %d\n", ret); return ret; } nodes = (struct hclge_tm_nodes_cmd *)desc.data; *pri_num = nodes->pri_num; return 0; } int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, u8 *link_vld) { struct hclge_qs_to_pri_link_cmd *map; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true); map = (struct hclge_qs_to_pri_link_cmd *)desc.data; map->qs_id = cpu_to_le16(qset_id); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get qset map priority, ret = %d\n", ret); return ret; } *priority = map->priority; *link_vld = map->link_vld; return 0; } int 
hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode) { struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true); qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data; qs_sch_mode->qs_id = cpu_to_le16(qset_id); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get qset sch mode, ret = %d\n", ret); return ret; } *mode = qs_sch_mode->sch_mode; return 0; } int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight) { struct hclge_qs_weight_cmd *qs_weight; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true); qs_weight = (struct hclge_qs_weight_cmd *)desc.data; qs_weight->qs_id = cpu_to_le16(qset_id); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get qset weight, ret = %d\n", ret); return ret; } *weight = qs_weight->dwrr; return 0; } int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id, struct hclge_tm_shaper_para *para) { struct hclge_qs_shapping_cmd *shap_cfg_cmd; struct hclge_desc desc; u32 shapping_para; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; shap_cfg_cmd->qs_id = cpu_to_le16(qset_id); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get qset %u shaper, ret = %d\n", qset_id, ret); return ret; } shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); para->ir_b = hclge_tm_get_field(shapping_para, IR_B); para->ir_u = hclge_tm_get_field(shapping_para, IR_U); para->ir_s = hclge_tm_get_field(shapping_para, IR_S); para->bs_b = hclge_tm_get_field(shapping_para, BS_B); para->bs_s = hclge_tm_get_field(shapping_para, BS_S); para->flag = shap_cfg_cmd->flag; para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate); return 0; } int hclge_tm_get_pri_sch_mode(struct 
hclge_dev *hdev, u8 pri_id, u8 *mode) { struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true); pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data; pri_sch_mode->pri_id = pri_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get priority sch mode, ret = %d\n", ret); return ret; } *mode = pri_sch_mode->sch_mode; return 0; } int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight) { struct hclge_priority_weight_cmd *priority_weight; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true); priority_weight = (struct hclge_priority_weight_cmd *)desc.data; priority_weight->pri_id = pri_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get priority weight, ret = %d\n", ret); return ret; } *weight = priority_weight->dwrr; return 0; } int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id, enum hclge_opcode_type cmd, struct hclge_tm_shaper_para *para) { struct hclge_pri_shapping_cmd *shap_cfg_cmd; struct hclge_desc desc; u32 shapping_para; int ret; if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING && cmd != HCLGE_OPC_TM_PRI_P_SHAPPING) return -EINVAL; hclge_cmd_setup_basic_desc(&desc, cmd, true); shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; shap_cfg_cmd->pri_id = pri_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get priority shaper(%#x), ret = %d\n", cmd, ret); return ret; } shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para); para->ir_b = hclge_tm_get_field(shapping_para, IR_B); para->ir_u = hclge_tm_get_field(shapping_para, IR_U); para->ir_s = hclge_tm_get_field(shapping_para, IR_S); para->bs_b = hclge_tm_get_field(shapping_para, BS_B); para->bs_s = hclge_tm_get_field(shapping_para, BS_S); para->flag = shap_cfg_cmd->flag; para->rate = 
le32_to_cpu(shap_cfg_cmd->pri_rate); return 0; } int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id) { struct hclge_nq_to_qs_link_cmd *map; struct hclge_desc desc; u16 qs_id_l; u16 qs_id_h; int ret; map = (struct hclge_nq_to_qs_link_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true); map->nq_id = cpu_to_le16(q_id); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get queue to qset map, ret = %d\n", ret); return ret; } *qset_id = le16_to_cpu(map->qset_id); /* convert qset_id to the following format, drop the vld bit * | qs_id_h | vld | qs_id_l | * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 | * \ \ / / * \ \ / / * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 | */ qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S); qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S); *qset_id = 0; hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S, qs_id_l); hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S, qs_id_h); return 0; } int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id) { #define HCLGE_TM_TC_MASK 0x7 struct hclge_tqp_tx_queue_tc_cmd *tc; struct hclge_desc desc; int ret; tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true); tc->queue_id = cpu_to_le16(q_id); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get queue to tc map, ret = %d\n", ret); return ret; } *tc_id = tc->tc_id & HCLGE_TM_TC_MASK; return 0; } int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id, u8 *pri_bit_map) { struct hclge_pg_to_pri_link_cmd *map; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true); map = (struct hclge_pg_to_pri_link_cmd *)desc.data; map->pg_id = pg_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, 
"failed to get pg to pri map, ret = %d\n", ret); return ret; } *pri_bit_map = map->pri_bit_map; return 0; } int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight) { struct hclge_pg_weight_cmd *pg_weight_cmd; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true); pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data; pg_weight_cmd->pg_id = pg_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get pg weight, ret = %d\n", ret); return ret; } *weight = pg_weight_cmd->dwrr; return 0; } int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode) { struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true); desc.data[0] = cpu_to_le32(pg_id); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get pg sch mode, ret = %d\n", ret); return ret; } *mode = (u8)le32_to_cpu(desc.data[1]); return 0; } int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id, enum hclge_opcode_type cmd, struct hclge_tm_shaper_para *para) { struct hclge_pg_shapping_cmd *shap_cfg_cmd; struct hclge_desc desc; u32 shapping_para; int ret; if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING && cmd != HCLGE_OPC_TM_PG_P_SHAPPING) return -EINVAL; hclge_cmd_setup_basic_desc(&desc, cmd, true); shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; shap_cfg_cmd->pg_id = pg_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get pg shaper(%#x), ret = %d\n", cmd, ret); return ret; } shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para); para->ir_b = hclge_tm_get_field(shapping_para, IR_B); para->ir_u = hclge_tm_get_field(shapping_para, IR_U); para->ir_s = hclge_tm_get_field(shapping_para, IR_S); para->bs_b = hclge_tm_get_field(shapping_para, BS_B); para->bs_s = hclge_tm_get_field(shapping_para, BS_S); para->flag = shap_cfg_cmd->flag; para->rate = 
le32_to_cpu(shap_cfg_cmd->pg_rate); return 0; } int hclge_tm_get_port_shaper(struct hclge_dev *hdev, struct hclge_tm_shaper_para *para) { struct hclge_port_shapping_cmd *port_shap_cfg_cmd; struct hclge_desc desc; u32 shapping_para; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to get port shaper, ret = %d\n", ret); return ret; } port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para); para->ir_b = hclge_tm_get_field(shapping_para, IR_B); para->ir_u = hclge_tm_get_field(shapping_para, IR_U); para->ir_s = hclge_tm_get_field(shapping_para, IR_S); para->bs_b = hclge_tm_get_field(shapping_para, BS_B); para->bs_s = hclge_tm_get_field(shapping_para, BS_S); para->flag = port_shap_cfg_cmd->flag; para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate); return 0; } int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable) { struct hclge_desc desc; int ret; if (!hnae3_ae_dev_tm_flush_supported(hdev)) return 0; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_FLUSH, false); desc.data[0] = cpu_to_le32(enable ? HCLGE_TM_FLUSH_EN_MSK : 0); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, "failed to config tm flush, ret = %d\n", ret); return ret; } if (enable) msleep(HCLGE_TM_FLUSH_TIME_MS); return ret; } void hclge_reset_tc_config(struct hclge_dev *hdev) { struct hclge_vport *vport = &hdev->vport[0]; struct hnae3_knic_private_info *kinfo; kinfo = &vport->nic.kinfo; if (!kinfo->tc_info.mqprio_destroy) return; /* clear tc info, including mqprio_destroy and mqprio_active */ memset(&kinfo->tc_info, 0, sizeof(kinfo->tc_info)); hclge_tm_schd_info_update(hdev, 0); hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg); }
/* SPDX-License-Identifier: GPL-2.0-only */ /* include/video/samsung_fimd.h * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <[email protected]> * * S3C Platform - new-style fimd and framebuffer register definitions * * This is the register set for the fimd and new style framebuffer interface * found from the S3C2443 onwards into the S3C2416, S3C2450, the * S3C64XX series such as the S3C6400 and S3C6410, and Exynos series. */ /* VIDCON0 */ #define VIDCON0 0x00 #define VIDCON0_DSI_EN (1 << 30) #define VIDCON0_INTERLACE (1 << 29) #define VIDCON0_VIDOUT_MASK (0x7 << 26) #define VIDCON0_VIDOUT_SHIFT 26 #define VIDCON0_VIDOUT_RGB (0x0 << 26) #define VIDCON0_VIDOUT_TV (0x1 << 26) #define VIDCON0_VIDOUT_I80_LDI0 (0x2 << 26) #define VIDCON0_VIDOUT_I80_LDI1 (0x3 << 26) #define VIDCON0_VIDOUT_WB_RGB (0x4 << 26) #define VIDCON0_VIDOUT_WB_I80_LDI0 (0x6 << 26) #define VIDCON0_VIDOUT_WB_I80_LDI1 (0x7 << 26) #define VIDCON0_L1_DATA_MASK (0x7 << 23) #define VIDCON0_L1_DATA_SHIFT 23 #define VIDCON0_L1_DATA_16BPP (0x0 << 23) #define VIDCON0_L1_DATA_18BPP16 (0x1 << 23) #define VIDCON0_L1_DATA_18BPP9 (0x2 << 23) #define VIDCON0_L1_DATA_24BPP (0x3 << 23) #define VIDCON0_L1_DATA_18BPP (0x4 << 23) #define VIDCON0_L1_DATA_16BPP8 (0x5 << 23) #define VIDCON0_L0_DATA_MASK (0x7 << 20) #define VIDCON0_L0_DATA_SHIFT 20 #define VIDCON0_L0_DATA_16BPP (0x0 << 20) #define VIDCON0_L0_DATA_18BPP16 (0x1 << 20) #define VIDCON0_L0_DATA_18BPP9 (0x2 << 20) #define VIDCON0_L0_DATA_24BPP (0x3 << 20) #define VIDCON0_L0_DATA_18BPP (0x4 << 20) #define VIDCON0_L0_DATA_16BPP8 (0x5 << 20) #define VIDCON0_PNRMODE_MASK (0x3 << 17) #define VIDCON0_PNRMODE_SHIFT 17 #define VIDCON0_PNRMODE_RGB (0x0 << 17) #define VIDCON0_PNRMODE_BGR (0x1 << 17) #define VIDCON0_PNRMODE_SERIAL_RGB (0x2 << 17) #define VIDCON0_PNRMODE_SERIAL_BGR (0x3 << 17) #define VIDCON0_CLKVALUP (1 << 16) #define VIDCON0_CLKVAL_F_MASK (0xff << 6) #define VIDCON0_CLKVAL_F_SHIFT 6 #define 
VIDCON0_CLKVAL_F_LIMIT 0xff #define VIDCON0_CLKVAL_F(_x) ((_x) << 6) #define VIDCON0_VLCKFREE (1 << 5) #define VIDCON0_CLKDIR (1 << 4) #define VIDCON0_CLKSEL_MASK (0x3 << 2) #define VIDCON0_CLKSEL_SHIFT 2 #define VIDCON0_CLKSEL_HCLK (0x0 << 2) #define VIDCON0_CLKSEL_LCD (0x1 << 2) #define VIDCON0_CLKSEL_27M (0x3 << 2) #define VIDCON0_ENVID (1 << 1) #define VIDCON0_ENVID_F (1 << 0) #define VIDCON1 0x04 #define VIDCON1_LINECNT_MASK (0x7ff << 16) #define VIDCON1_LINECNT_SHIFT 16 #define VIDCON1_LINECNT_GET(_v) (((_v) >> 16) & 0x7ff) #define VIDCON1_FSTATUS_EVEN (1 << 15) #define VIDCON1_VSTATUS_MASK (0x3 << 13) #define VIDCON1_VSTATUS_SHIFT 13 #define VIDCON1_VSTATUS_VSYNC (0x0 << 13) #define VIDCON1_VSTATUS_BACKPORCH (0x1 << 13) #define VIDCON1_VSTATUS_ACTIVE (0x2 << 13) #define VIDCON1_VSTATUS_FRONTPORCH (0x3 << 13) #define VIDCON1_VCLK_MASK (0x3 << 9) #define VIDCON1_VCLK_HOLD (0x0 << 9) #define VIDCON1_VCLK_RUN (0x1 << 9) #define VIDCON1_INV_VCLK (1 << 7) #define VIDCON1_INV_HSYNC (1 << 6) #define VIDCON1_INV_VSYNC (1 << 5) #define VIDCON1_INV_VDEN (1 << 4) /* VIDCON2 */ #define VIDCON2 0x08 #define VIDCON2_EN601 (1 << 23) #define VIDCON2_TVFMTSEL_SW (1 << 14) #define VIDCON2_TVFMTSEL1_MASK (0x3 << 12) #define VIDCON2_TVFMTSEL1_SHIFT 12 #define VIDCON2_TVFMTSEL1_RGB (0x0 << 12) #define VIDCON2_TVFMTSEL1_YUV422 (0x1 << 12) #define VIDCON2_TVFMTSEL1_YUV444 (0x2 << 12) #define VIDCON2_ORGYCbCr (1 << 8) #define VIDCON2_YUVORDCrCb (1 << 7) /* PRTCON (S3C6410) * Might not be present in the S3C6410 documentation, * but tests prove it's there almost for sure; shouldn't hurt in any case. 
*/ #define PRTCON 0x0c #define PRTCON_PROTECT (1 << 11) /* VIDTCON0 */ #define VIDTCON0 0x10 #define VIDTCON0_VBPDE_MASK (0xff << 24) #define VIDTCON0_VBPDE_SHIFT 24 #define VIDTCON0_VBPDE_LIMIT 0xff #define VIDTCON0_VBPDE(_x) ((_x) << 24) #define VIDTCON0_VBPD_MASK (0xff << 16) #define VIDTCON0_VBPD_SHIFT 16 #define VIDTCON0_VBPD_LIMIT 0xff #define VIDTCON0_VBPD(_x) ((_x) << 16) #define VIDTCON0_VFPD_MASK (0xff << 8) #define VIDTCON0_VFPD_SHIFT 8 #define VIDTCON0_VFPD_LIMIT 0xff #define VIDTCON0_VFPD(_x) ((_x) << 8) #define VIDTCON0_VSPW_MASK (0xff << 0) #define VIDTCON0_VSPW_SHIFT 0 #define VIDTCON0_VSPW_LIMIT 0xff #define VIDTCON0_VSPW(_x) ((_x) << 0) /* VIDTCON1 */ #define VIDTCON1 0x14 #define VIDTCON1_VFPDE_MASK (0xff << 24) #define VIDTCON1_VFPDE_SHIFT 24 #define VIDTCON1_VFPDE_LIMIT 0xff #define VIDTCON1_VFPDE(_x) ((_x) << 24) #define VIDTCON1_HBPD_MASK (0xff << 16) #define VIDTCON1_HBPD_SHIFT 16 #define VIDTCON1_HBPD_LIMIT 0xff #define VIDTCON1_HBPD(_x) ((_x) << 16) #define VIDTCON1_HFPD_MASK (0xff << 8) #define VIDTCON1_HFPD_SHIFT 8 #define VIDTCON1_HFPD_LIMIT 0xff #define VIDTCON1_HFPD(_x) ((_x) << 8) #define VIDTCON1_HSPW_MASK (0xff << 0) #define VIDTCON1_HSPW_SHIFT 0 #define VIDTCON1_HSPW_LIMIT 0xff #define VIDTCON1_HSPW(_x) ((_x) << 0) #define VIDTCON2 0x18 #define VIDTCON2_LINEVAL_E(_x) ((((_x) & 0x800) >> 11) << 23) #define VIDTCON2_LINEVAL_MASK (0x7ff << 11) #define VIDTCON2_LINEVAL_SHIFT 11 #define VIDTCON2_LINEVAL_LIMIT 0x7ff #define VIDTCON2_LINEVAL(_x) (((_x) & 0x7ff) << 11) #define VIDTCON2_HOZVAL_E(_x) ((((_x) & 0x800) >> 11) << 22) #define VIDTCON2_HOZVAL_MASK (0x7ff << 0) #define VIDTCON2_HOZVAL_SHIFT 0 #define VIDTCON2_HOZVAL_LIMIT 0x7ff #define VIDTCON2_HOZVAL(_x) (((_x) & 0x7ff) << 0) /* WINCONx */ #define WINCON(_win) (0x20 + ((_win) * 4)) #define WINCONx_CSCCON_EQ601 (0x0 << 28) #define WINCONx_CSCCON_EQ709 (0x1 << 28) #define WINCONx_CSCWIDTH_MASK (0x3 << 26) #define WINCONx_CSCWIDTH_SHIFT 26 #define WINCONx_CSCWIDTH_WIDE (0x0 << 26) 
#define WINCONx_CSCWIDTH_NARROW (0x3 << 26) #define WINCONx_ENLOCAL (1 << 22) #define WINCONx_BUFSTATUS (1 << 21) #define WINCONx_BUFSEL (1 << 20) #define WINCONx_BUFAUTOEN (1 << 19) #define WINCONx_BITSWP (1 << 18) #define WINCONx_BYTSWP (1 << 17) #define WINCONx_HAWSWP (1 << 16) #define WINCONx_WSWP (1 << 15) #define WINCONx_YCbCr (1 << 13) #define WINCONx_BURSTLEN_MASK (0x3 << 9) #define WINCONx_BURSTLEN_SHIFT 9 #define WINCONx_BURSTLEN_16WORD (0x0 << 9) #define WINCONx_BURSTLEN_8WORD (0x1 << 9) #define WINCONx_BURSTLEN_4WORD (0x2 << 9) #define WINCONx_ENWIN (1 << 0) #define WINCONx_BLEND_MODE_MASK (0xc2) #define WINCON0_BPPMODE_MASK (0xf << 2) #define WINCON0_BPPMODE_SHIFT 2 #define WINCON0_BPPMODE_1BPP (0x0 << 2) #define WINCON0_BPPMODE_2BPP (0x1 << 2) #define WINCON0_BPPMODE_4BPP (0x2 << 2) #define WINCON0_BPPMODE_8BPP_PALETTE (0x3 << 2) #define WINCON0_BPPMODE_16BPP_565 (0x5 << 2) #define WINCON0_BPPMODE_16BPP_1555 (0x7 << 2) #define WINCON0_BPPMODE_18BPP_666 (0x8 << 2) #define WINCON0_BPPMODE_24BPP_888 (0xb << 2) #define WINCON1_LOCALSEL_CAMIF (1 << 23) #define WINCON1_ALPHA_MUL (1 << 7) #define WINCON1_BLD_PIX (1 << 6) #define WINCON1_BPPMODE_MASK (0xf << 2) #define WINCON1_BPPMODE_SHIFT 2 #define WINCON1_BPPMODE_1BPP (0x0 << 2) #define WINCON1_BPPMODE_2BPP (0x1 << 2) #define WINCON1_BPPMODE_4BPP (0x2 << 2) #define WINCON1_BPPMODE_8BPP_PALETTE (0x3 << 2) #define WINCON1_BPPMODE_8BPP_1232 (0x4 << 2) #define WINCON1_BPPMODE_16BPP_565 (0x5 << 2) #define WINCON1_BPPMODE_16BPP_A1555 (0x6 << 2) #define WINCON1_BPPMODE_16BPP_I1555 (0x7 << 2) #define WINCON1_BPPMODE_18BPP_666 (0x8 << 2) #define WINCON1_BPPMODE_18BPP_A1665 (0x9 << 2) #define WINCON1_BPPMODE_19BPP_A1666 (0xa << 2) #define WINCON1_BPPMODE_24BPP_888 (0xb << 2) #define WINCON1_BPPMODE_24BPP_A1887 (0xc << 2) #define WINCON1_BPPMODE_25BPP_A1888 (0xd << 2) #define WINCON1_BPPMODE_28BPP_A4888 (0xd << 2) #define WINCON1_ALPHA_SEL (1 << 1) /* S5PV210 */ #define SHADOWCON 0x34 #define 
SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win))) /* DMA channels (all windows) */ #define SHADOWCON_CHx_ENABLE(_win) (1 << (_win)) /* Local input channels (windows 0-2) */ #define SHADOWCON_CHx_LOCAL_ENABLE(_win) (1 << (5 + (_win))) /* VIDOSDx */ #define VIDOSD_BASE 0x40 #define VIDOSDxA_TOPLEFT_X_E(_x) ((((_x) & 0x800) >> 11) << 23) #define VIDOSDxA_TOPLEFT_X_MASK (0x7ff << 11) #define VIDOSDxA_TOPLEFT_X_SHIFT 11 #define VIDOSDxA_TOPLEFT_X_LIMIT 0x7ff #define VIDOSDxA_TOPLEFT_X(_x) (((_x) & 0x7ff) << 11) #define VIDOSDxA_TOPLEFT_Y_E(_x) ((((_x) & 0x800) >> 11) << 22) #define VIDOSDxA_TOPLEFT_Y_MASK (0x7ff << 0) #define VIDOSDxA_TOPLEFT_Y_SHIFT 0 #define VIDOSDxA_TOPLEFT_Y_LIMIT 0x7ff #define VIDOSDxA_TOPLEFT_Y(_x) (((_x) & 0x7ff) << 0) #define VIDOSDxB_BOTRIGHT_X_E(_x) ((((_x) & 0x800) >> 11) << 23) #define VIDOSDxB_BOTRIGHT_X_MASK (0x7ff << 11) #define VIDOSDxB_BOTRIGHT_X_SHIFT 11 #define VIDOSDxB_BOTRIGHT_X_LIMIT 0x7ff #define VIDOSDxB_BOTRIGHT_X(_x) (((_x) & 0x7ff) << 11) #define VIDOSDxB_BOTRIGHT_Y_E(_x) ((((_x) & 0x800) >> 11) << 22) #define VIDOSDxB_BOTRIGHT_Y_MASK (0x7ff << 0) #define VIDOSDxB_BOTRIGHT_Y_SHIFT 0 #define VIDOSDxB_BOTRIGHT_Y_LIMIT 0x7ff #define VIDOSDxB_BOTRIGHT_Y(_x) (((_x) & 0x7ff) << 0) /* For VIDOSD[1..4]C */ #define VIDISD14C_ALPHA0_R(_x) ((_x) << 20) #define VIDISD14C_ALPHA0_G_MASK (0xf << 16) #define VIDISD14C_ALPHA0_G_SHIFT 16 #define VIDISD14C_ALPHA0_G_LIMIT 0xf #define VIDISD14C_ALPHA0_G(_x) ((_x) << 16) #define VIDISD14C_ALPHA0_B_MASK (0xf << 12) #define VIDISD14C_ALPHA0_B_SHIFT 12 #define VIDISD14C_ALPHA0_B_LIMIT 0xf #define VIDISD14C_ALPHA0_B(_x) ((_x) << 12) #define VIDISD14C_ALPHA1_R_MASK (0xf << 8) #define VIDISD14C_ALPHA1_R_SHIFT 8 #define VIDISD14C_ALPHA1_R_LIMIT 0xf #define VIDISD14C_ALPHA1_R(_x) ((_x) << 8) #define VIDISD14C_ALPHA1_G_MASK (0xf << 4) #define VIDISD14C_ALPHA1_G_SHIFT 4 #define VIDISD14C_ALPHA1_G_LIMIT 0xf #define VIDISD14C_ALPHA1_G(_x) ((_x) << 4) #define VIDISD14C_ALPHA1_B_MASK (0xf << 0) #define 
VIDISD14C_ALPHA1_B_SHIFT 0 #define VIDISD14C_ALPHA1_B_LIMIT 0xf #define VIDISD14C_ALPHA1_B(_x) ((_x) << 0) #define VIDW_ALPHA 0x021c #define VIDW_ALPHA_R(_x) ((_x) << 16) #define VIDW_ALPHA_G(_x) ((_x) << 8) #define VIDW_ALPHA_B(_x) ((_x) << 0) /* Video buffer addresses */ #define VIDW_BUF_START(_buff) (0xA0 + ((_buff) * 8)) #define VIDW_BUF_START_S(_buff) (0x40A0 + ((_buff) * 8)) #define VIDW_BUF_START1(_buff) (0xA4 + ((_buff) * 8)) #define VIDW_BUF_END(_buff) (0xD0 + ((_buff) * 8)) #define VIDW_BUF_END1(_buff) (0xD4 + ((_buff) * 8)) #define VIDW_BUF_SIZE(_buff) (0x100 + ((_buff) * 4)) #define VIDW_BUF_SIZE_OFFSET_E(_x) ((((_x) & 0x2000) >> 13) << 27) #define VIDW_BUF_SIZE_OFFSET_MASK (0x1fff << 13) #define VIDW_BUF_SIZE_OFFSET_SHIFT 13 #define VIDW_BUF_SIZE_OFFSET_LIMIT 0x1fff #define VIDW_BUF_SIZE_OFFSET(_x) (((_x) & 0x1fff) << 13) #define VIDW_BUF_SIZE_PAGEWIDTH_E(_x) ((((_x) & 0x2000) >> 13) << 26) #define VIDW_BUF_SIZE_PAGEWIDTH_MASK (0x1fff << 0) #define VIDW_BUF_SIZE_PAGEWIDTH_SHIFT 0 #define VIDW_BUF_SIZE_PAGEWIDTH_LIMIT 0x1fff #define VIDW_BUF_SIZE_PAGEWIDTH(_x) (((_x) & 0x1fff) << 0) /* Interrupt controls and status */ #define VIDINTCON0 0x130 #define VIDINTCON0_FIFOINTERVAL_MASK (0x3f << 20) #define VIDINTCON0_FIFOINTERVAL_SHIFT 20 #define VIDINTCON0_FIFOINTERVAL_LIMIT 0x3f #define VIDINTCON0_FIFOINTERVAL(_x) ((_x) << 20) #define VIDINTCON0_INT_SYSMAINCON (1 << 19) #define VIDINTCON0_INT_SYSSUBCON (1 << 18) #define VIDINTCON0_INT_I80IFDONE (1 << 17) #define VIDINTCON0_FRAMESEL0_MASK (0x3 << 15) #define VIDINTCON0_FRAMESEL0_SHIFT 15 #define VIDINTCON0_FRAMESEL0_BACKPORCH (0x0 << 15) #define VIDINTCON0_FRAMESEL0_VSYNC (0x1 << 15) #define VIDINTCON0_FRAMESEL0_ACTIVE (0x2 << 15) #define VIDINTCON0_FRAMESEL0_FRONTPORCH (0x3 << 15) #define VIDINTCON0_FRAMESEL1 (1 << 13) #define VIDINTCON0_FRAMESEL1_MASK (0x3 << 13) #define VIDINTCON0_FRAMESEL1_NONE (0x0 << 13) #define VIDINTCON0_FRAMESEL1_BACKPORCH (0x1 << 13) #define VIDINTCON0_FRAMESEL1_VSYNC (0x2 << 13) 
#define VIDINTCON0_FRAMESEL1_FRONTPORCH (0x3 << 13) #define VIDINTCON0_INT_FRAME (1 << 12) #define VIDINTCON0_FIFIOSEL_MASK (0x7f << 5) #define VIDINTCON0_FIFIOSEL_SHIFT 5 #define VIDINTCON0_FIFIOSEL_WINDOW0 (0x1 << 5) #define VIDINTCON0_FIFIOSEL_WINDOW1 (0x2 << 5) #define VIDINTCON0_FIFIOSEL_WINDOW2 (0x10 << 5) #define VIDINTCON0_FIFIOSEL_WINDOW3 (0x20 << 5) #define VIDINTCON0_FIFIOSEL_WINDOW4 (0x40 << 5) #define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 2) #define VIDINTCON0_FIFOLEVEL_SHIFT 2 #define VIDINTCON0_FIFOLEVEL_TO25PC (0x0 << 2) #define VIDINTCON0_FIFOLEVEL_TO50PC (0x1 << 2) #define VIDINTCON0_FIFOLEVEL_TO75PC (0x2 << 2) #define VIDINTCON0_FIFOLEVEL_EMPTY (0x3 << 2) #define VIDINTCON0_FIFOLEVEL_FULL (0x4 << 2) #define VIDINTCON0_INT_FIFO_MASK (0x3 << 0) #define VIDINTCON0_INT_FIFO_SHIFT 0 #define VIDINTCON0_INT_ENABLE (1 << 0) #define VIDINTCON1 0x134 #define VIDINTCON1_INT_I80 (1 << 2) #define VIDINTCON1_INT_FRAME (1 << 1) #define VIDINTCON1_INT_FIFO (1 << 0) /* Window colour-key control registers */ #define WKEYCON 0x140 #define WKEYCON0 0x00 #define WKEYCON1 0x04 #define WxKEYCON0_KEYBL_EN (1 << 26) #define WxKEYCON0_KEYEN_F (1 << 25) #define WxKEYCON0_DIRCON (1 << 24) #define WxKEYCON0_COMPKEY_MASK (0xffffff << 0) #define WxKEYCON0_COMPKEY_SHIFT 0 #define WxKEYCON0_COMPKEY_LIMIT 0xffffff #define WxKEYCON0_COMPKEY(_x) ((_x) << 0) #define WxKEYCON1_COLVAL_MASK (0xffffff << 0) #define WxKEYCON1_COLVAL_SHIFT 0 #define WxKEYCON1_COLVAL_LIMIT 0xffffff #define WxKEYCON1_COLVAL(_x) ((_x) << 0) /* Dithering control */ #define DITHMODE 0x170 #define DITHMODE_R_POS_MASK (0x3 << 5) #define DITHMODE_R_POS_SHIFT 5 #define DITHMODE_R_POS_8BIT (0x0 << 5) #define DITHMODE_R_POS_6BIT (0x1 << 5) #define DITHMODE_R_POS_5BIT (0x2 << 5) #define DITHMODE_G_POS_MASK (0x3 << 3) #define DITHMODE_G_POS_SHIFT 3 #define DITHMODE_G_POS_8BIT (0x0 << 3) #define DITHMODE_G_POS_6BIT (0x1 << 3) #define DITHMODE_G_POS_5BIT (0x2 << 3) #define DITHMODE_B_POS_MASK (0x3 << 1) #define 
DITHMODE_B_POS_SHIFT			1
#define DITHMODE_B_POS_8BIT		(0x0 << 1)
#define DITHMODE_B_POS_6BIT		(0x1 << 1)
#define DITHMODE_B_POS_5BIT		(0x2 << 1)
#define DITHMODE_DITH_EN		(1 << 0)

/* Window blanking (MAP) */
#define WINxMAP(_win)			(0x180 + ((_win) * 4))
#define WINxMAP_MAP			(1 << 24)
#define WINxMAP_MAP_COLOUR_MASK		(0xffffff << 0)
#define WINxMAP_MAP_COLOUR_SHIFT	0
#define WINxMAP_MAP_COLOUR_LIMIT	0xffffff
#define WINxMAP_MAP_COLOUR(_x)		((_x) << 0)

/* Window palette control */
#define WPALCON				0x1A0
#define WPALCON_PAL_UPDATE		(1 << 9)
#define WPALCON_W4PAL_16BPP_A555	(1 << 8)
#define WPALCON_W3PAL_16BPP_A555	(1 << 7)
#define WPALCON_W2PAL_16BPP_A555	(1 << 6)
#define WPALCON_W1PAL_MASK		(0x7 << 3)
#define WPALCON_W1PAL_SHIFT		3
#define WPALCON_W1PAL_25BPP_A888	(0x0 << 3)
#define WPALCON_W1PAL_24BPP		(0x1 << 3)
#define WPALCON_W1PAL_19BPP_A666	(0x2 << 3)
#define WPALCON_W1PAL_18BPP_A665	(0x3 << 3)
#define WPALCON_W1PAL_18BPP		(0x4 << 3)
#define WPALCON_W1PAL_16BPP_A555	(0x5 << 3)
#define WPALCON_W1PAL_16BPP_565		(0x6 << 3)
#define WPALCON_W0PAL_MASK		(0x7 << 0)
#define WPALCON_W0PAL_SHIFT		0
#define WPALCON_W0PAL_25BPP_A888	(0x0 << 0)
#define WPALCON_W0PAL_24BPP		(0x1 << 0)
#define WPALCON_W0PAL_19BPP_A666	(0x2 << 0)
#define WPALCON_W0PAL_18BPP_A665	(0x3 << 0)
#define WPALCON_W0PAL_18BPP		(0x4 << 0)
#define WPALCON_W0PAL_16BPP_A555	(0x5 << 0)
#define WPALCON_W0PAL_16BPP_565		(0x6 << 0)

/* Blending equation control */
/*
 * NOTE(review): BLENDEQx() and BLENDEQ_*_FUNC_F() do not parenthesize their
 * macro argument (`_win - 1`, `_x << 6`); callers must pass simple
 * expressions only — consider adding parentheses in a follow-up.
 */
#define BLENDEQx(_win)			(0x244 + ((_win - 1) * 4))
#define BLENDEQ_ZERO			0x0
#define BLENDEQ_ONE			0x1
#define BLENDEQ_ALPHA_A			0x2
#define BLENDEQ_ONE_MINUS_ALPHA_A	0x3
#define BLENDEQ_ALPHA0			0x6
#define BLENDEQ_B_FUNC_F(_x)		(_x << 6)
#define BLENDEQ_A_FUNC_F(_x)		(_x << 0)
#define BLENDCON			0x260
#define BLENDCON_NEW_MASK		(1 << 0)
#define BLENDCON_NEW_8BIT_ALPHA_VALUE	(1 << 0)
#define BLENDCON_NEW_4BIT_ALPHA_VALUE	(0 << 0)

/* Display port clock control */
#define DP_MIE_CLKCON			0x27c
#define DP_MIE_CLK_DISABLE		0x0
#define DP_MIE_CLK_DP_ENABLE		0x2
#define DP_MIE_CLK_MIE_ENABLE		0x3

/* Notes on per-window bpp settings
 *
 * Value	Win0	 Win1	  Win2	   Win3	    Win 4
 * 0000		1(P)	 1(P)	  1(P)	   1(P)	    1(P)
 * 0001		2(P)	 2(P)	  2(P)	   2(P)	    2(P)
 * 0010		4(P)	 4(P)	  4(P)	   4(P)	    -none-
 * 0011		8(P)	 8(P)	  -none-   -none-   -none-
 * 0100		-none-	 8(A232)  8(A232)  -none-   -none-
 * 0101		16(565)	 16(565)  16(565)  16(565)  16(565)
 * 0110		-none-	 16(A555) 16(A555) 16(A555) 16(A555)
 * 0111		16(I555) 16(I565) 16(I555) 16(I555) 16(I555)
 * 1000		18(666)	 18(666)  18(666)  18(666)  18(666)
 * 1001		-none-	 18(A665) 18(A665) 18(A665) 18(A665)
 * 1010		-none-	 19(A666) 19(A666) 19(A666) 19(A666)
 * 1011		24(888)	 24(888)  24(888)  24(888)  24(888)
 * 1100		-none-	 24(A887) 24(A887) 24(A887) 24(A887)
 * 1101		-none-	 25(A888) 25(A888) 25(A888) 25(A888)
 * 1110		-none-	 -none-	  -none-   -none-   -none-
 * 1111		-none-	 -none-	  -none-   -none-   -none-
 */

#define WIN_RGB_ORDER(_win)		(0x2020 + ((_win) * 4))
#define WIN_RGB_ORDER_FORWARD		(0 << 11)
#define WIN_RGB_ORDER_REVERSE		(1 << 11)

/* FIMD Version 8 register offset definitions */
#define FIMD_V8_VIDTCON0		0x20010
#define FIMD_V8_VIDTCON1		0x20014
#define FIMD_V8_VIDTCON2		0x20018
#define FIMD_V8_VIDTCON3		0x2001C
#define FIMD_V8_VIDCON1			0x20004
// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car V3H System Controller
 *
 * Copyright (C) 2018 Renesas Electronics Corp.
 * Copyright (C) 2018 Cogent Embedded, Inc.
 */

#include <linux/bits.h>
#include <linux/kernel.h>

#include <dt-bindings/power/r8a77980-sysc.h>

#include "rcar-sysc.h"

/*
 * Power-domain hierarchy for the R-Car V3H (r8a77980).
 *
 * Each entry gives the domain's name, its SYSC channel register offset and
 * bit position, its power-domain identifier, its parent domain, and optional
 * flags (field order per struct rcar_sysc_area in rcar-sysc.h — not visible
 * here, confirm against that header).  Entries without flags are plain
 * power-gated areas.
 */
static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
	/* Root domain: always powered while the SoC is up */
	{ "always-on",	    0, 0, R8A77980_PD_ALWAYS_ON, -1,
	  PD_ALWAYS_ON },
	/* CA53 cluster: SCU parents the four CPU cores */
	{ "ca53-scu",	0x140, 0, R8A77980_PD_CA53_SCU,	R8A77980_PD_ALWAYS_ON,
	  PD_SCU },
	{ "ca53-cpu0",	0x200, 0, R8A77980_PD_CA53_CPU0, R8A77980_PD_CA53_SCU,
	  PD_CPU_NOCR },
	{ "ca53-cpu1",	0x200, 1, R8A77980_PD_CA53_CPU1, R8A77980_PD_CA53_SCU,
	  PD_CPU_NOCR },
	{ "ca53-cpu2",	0x200, 2, R8A77980_PD_CA53_CPU2, R8A77980_PD_CA53_SCU,
	  PD_CPU_NOCR },
	{ "ca53-cpu3",	0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
	  PD_CPU_NOCR },
	{ "cr7",	0x240, 0, R8A77980_PD_CR7,	R8A77980_PD_ALWAYS_ON,
	  PD_CPU_NOCR },
	/* A3IR parents the IMP/vision accelerator sub-domains below */
	{ "a3ir",	0x180, 0, R8A77980_PD_A3IR,	R8A77980_PD_ALWAYS_ON },
	{ "a2ir0",	0x400, 0, R8A77980_PD_A2IR0,	R8A77980_PD_A3IR },
	{ "a2ir1",	0x400, 1, R8A77980_PD_A2IR1,	R8A77980_PD_A3IR },
	{ "a2ir2",	0x400, 2, R8A77980_PD_A2IR2,	R8A77980_PD_A3IR },
	{ "a2ir3",	0x400, 3, R8A77980_PD_A2IR3,	R8A77980_PD_A3IR },
	{ "a2ir4",	0x400, 4, R8A77980_PD_A2IR4,	R8A77980_PD_A3IR },
	{ "a2ir5",	0x400, 5, R8A77980_PD_A2IR5,	R8A77980_PD_A3IR },
	{ "a2sc0",	0x400, 6, R8A77980_PD_A2SC0,	R8A77980_PD_A3IR },
	{ "a2sc1",	0x400, 7, R8A77980_PD_A2SC1,	R8A77980_PD_A3IR },
	{ "a2sc2",	0x400, 8, R8A77980_PD_A2SC2,	R8A77980_PD_A3IR },
	{ "a2sc3",	0x400, 9, R8A77980_PD_A2SC3,	R8A77980_PD_A3IR },
	{ "a2sc4",	0x400, 10, R8A77980_PD_A2SC4,	R8A77980_PD_A3IR },
	{ "a2dp0",	0x400, 11, R8A77980_PD_A2DP0,	R8A77980_PD_A3IR },
	{ "a2dp1",	0x400, 12, R8A77980_PD_A2DP1,	R8A77980_PD_A3IR },
	{ "a2cn",	0x400, 13, R8A77980_PD_A2CN,	R8A77980_PD_A3IR },
	/* Video-input pipeline domains */
	{ "a3vip0",	0x2c0, 0, R8A77980_PD_A3VIP0,	R8A77980_PD_ALWAYS_ON },
	{ "a3vip1",	0x300, 0, R8A77980_PD_A3VIP1,	R8A77980_PD_ALWAYS_ON },
	{ "a3vip2",	0x280, 0, R8A77980_PD_A3VIP2,	R8A77980_PD_ALWAYS_ON },
};

/* SoC description consumed by the generic rcar-sysc driver core */
const struct rcar_sysc_info r8a77980_sysc_info __initconst = {
	.areas = r8a77980_areas,
	.num_areas = ARRAY_SIZE(r8a77980_areas),
	/* External-request mask register offset/bit (per SoC documentation) */
	.extmask_offs = 0x138,
	.extmask_val = BIT(0),
};
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2015 Linaro Limited
 */

/*
 * Device-tree binding constants for the MSM8916 Global Clock Controller
 * block-control resets (BCR).  Values are consecutive indices consumed by
 * the GCC reset controller; do not renumber — they are DT ABI.
 */

#ifndef _DT_BINDINGS_RESET_MSM_GCC_8916_H
#define _DT_BINDINGS_RESET_MSM_GCC_8916_H

#define GCC_BLSP1_BCR			0
#define GCC_BLSP1_QUP1_BCR		1
#define GCC_BLSP1_UART1_BCR		2
#define GCC_BLSP1_QUP2_BCR		3
#define GCC_BLSP1_UART2_BCR		4
#define GCC_BLSP1_QUP3_BCR		5
#define GCC_BLSP1_QUP4_BCR		6
#define GCC_BLSP1_QUP5_BCR		7
#define GCC_BLSP1_QUP6_BCR		8
#define GCC_IMEM_BCR			9
#define GCC_SMMU_BCR			10
#define GCC_APSS_TCU_BCR		11
#define GCC_SMMU_XPU_BCR		12
#define GCC_PCNOC_TBU_BCR		13
#define GCC_PRNG_BCR			14
#define GCC_BOOT_ROM_BCR		15
#define GCC_CRYPTO_BCR			16
#define GCC_SEC_CTRL_BCR		17
#define GCC_AUDIO_CORE_BCR		18
#define GCC_ULT_AUDIO_BCR		19
#define GCC_DEHR_BCR			20
#define GCC_SYSTEM_NOC_BCR		21
#define GCC_PCNOC_BCR			22
#define GCC_TCSR_BCR			23
#define GCC_QDSS_BCR			24
#define GCC_DCD_BCR			25
#define GCC_MSG_RAM_BCR			26
#define GCC_MPM_BCR			27
#define GCC_SPMI_BCR			28
#define GCC_SPDM_BCR			29
#define GCC_MM_SPDM_BCR			30
#define GCC_BIMC_BCR			31
#define GCC_RBCPR_BCR			32
#define GCC_TLMM_BCR			33
#define GCC_USB_HS_BCR			34
#define GCC_USB2A_PHY_BCR		35
#define GCC_SDCC1_BCR			36
#define GCC_SDCC2_BCR			37
#define GCC_PDM_BCR			38
#define GCC_SNOC_BUS_TIMEOUT0_BCR	39
#define GCC_PCNOC_BUS_TIMEOUT0_BCR	40
#define GCC_PCNOC_BUS_TIMEOUT1_BCR	41
#define GCC_PCNOC_BUS_TIMEOUT2_BCR	42
#define GCC_PCNOC_BUS_TIMEOUT3_BCR	43
#define GCC_PCNOC_BUS_TIMEOUT4_BCR	44
#define GCC_PCNOC_BUS_TIMEOUT5_BCR	45
#define GCC_PCNOC_BUS_TIMEOUT6_BCR	46
#define GCC_PCNOC_BUS_TIMEOUT7_BCR	47
#define GCC_PCNOC_BUS_TIMEOUT8_BCR	48
#define GCC_PCNOC_BUS_TIMEOUT9_BCR	49
#define GCC_MMSS_BCR			50
#define GCC_VENUS0_BCR			51
#define GCC_MDSS_BCR			52
#define GCC_CAMSS_PHY0_BCR		53
#define GCC_CAMSS_CSI0_BCR		54
#define GCC_CAMSS_CSI0PHY_BCR		55
#define GCC_CAMSS_CSI0RDI_BCR		56
#define GCC_CAMSS_CSI0PIX_BCR		57
#define GCC_CAMSS_PHY1_BCR		58
#define GCC_CAMSS_CSI1_BCR		59
#define GCC_CAMSS_CSI1PHY_BCR		60
#define GCC_CAMSS_CSI1RDI_BCR		61
#define GCC_CAMSS_CSI1PIX_BCR		62
#define GCC_CAMSS_ISPIF_BCR		63
#define GCC_CAMSS_CCI_BCR		64
#define GCC_CAMSS_MCLK0_BCR		65
#define GCC_CAMSS_MCLK1_BCR		66
#define GCC_CAMSS_GP0_BCR		67
#define GCC_CAMSS_GP1_BCR		68
#define GCC_CAMSS_TOP_BCR		69
#define GCC_CAMSS_MICRO_BCR		70
#define GCC_CAMSS_JPEG_BCR		71
#define GCC_CAMSS_VFE_BCR		72
#define GCC_CAMSS_CSI_VFE0_BCR		73
#define GCC_OXILI_BCR			74
#define GCC_GMEM_BCR			75
#define GCC_CAMSS_AHB_BCR		76
#define GCC_MDP_TBU_BCR			77
#define GCC_GFX_TBU_BCR			78
#define GCC_GFX_TCU_BCR			79
#define GCC_MSS_TBU_AXI_BCR		80
#define GCC_MSS_TBU_GSS_AXI_BCR		81
#define GCC_MSS_TBU_Q6_AXI_BCR		82
#define GCC_GTCU_AHB_BCR		83
#define GCC_SMMU_CFG_BCR		84
#define GCC_VFE_TBU_BCR			85
#define GCC_VENUS_TBU_BCR		86
#define GCC_JPEG_TBU_BCR		87
#define GCC_PRONTO_TBU_BCR		88
#define GCC_SMMU_CATS_BCR		89

#endif
// SPDX-License-Identifier: GPL-2.0
/*
 * Check Point L-50 Board Description
 * Copyright 2020 Pawel Dembicki <[email protected]>
 */

/dts-v1/;

#include "kirkwood.dtsi"
#include "kirkwood-6281.dtsi"

/ {
	model = "Check Point L-50";
	compatible = "checkpoint,l-50", "marvell,kirkwood-88f6281", "marvell,kirkwood";

	/* 512 MiB DRAM at the start of the address space */
	memory {
		device_type = "memory";
		reg = <0x00000000 0x20000000>;
	};

	chosen {
		bootargs = "console=ttyS0,115200n8";
		stdout-path = &uart0;
	};

	ocp@f1000000 {
		pinctrl: pin-controller@10000 {
			pinctrl-0 = <&pmx_led38 &pmx_sysrst &pmx_button29>;
			pinctrl-names = "default";

			pmx_sysrst: pmx-sysrst {
				marvell,pins = "mpp6";
				marvell,function = "sysrst";
			};

			pmx_button29: pmx_button29 {
				marvell,pins = "mpp29";
				marvell,function = "gpio";
			};

			pmx_led38: pmx_led38 {
				marvell,pins = "mpp38";
				marvell,function = "gpio";
			};

			pmx_sdio_cd: pmx-sdio-cd {
				marvell,pins = "mpp46";
				marvell,function = "gpio";
			};
		};

		serial@12000 {
			status = "okay";
		};

		mvsdio@90000 {
			status = "okay";
			/*
			 * NOTE(review): raw flags value 9 — presumably meant as a
			 * GPIO_ACTIVE_* combination from <dt-bindings/gpio/gpio.h>;
			 * confirm and replace with the named macros.
			 */
			cd-gpios = <&gpio1 14 9>;
		};

		i2c@11000 {
			status = "okay";
			clock-frequency = <400000>;

			/* 8-bit I2C GPIO expander driving LEDs (see leds node) */
			gpio2: gpio-expander@20 {
				#gpio-cells = <2>;
				#interrupt-cells = <2>;
				interrupt-controller;
				compatible = "semtech,sx1505q";
				reg = <0x20>;
				gpio-controller;
			};

			/* Three GPIOs from 0x21 exp. are undescribed in dts:
			 * 1: DSL module reset (active low)
			 * 5: mPCIE reset (active low)
			 * 6: Express card reset (active low)
			 */
			gpio3: gpio-expander@21 {
				#gpio-cells = <2>;
				#interrupt-cells = <2>;
				interrupt-controller;
				compatible = "semtech,sx1505q";
				reg = <0x21>;
				gpio-controller;
			};

			/*
			 * NOTE(review): compatible lacks a vendor prefix —
			 * the Seiko binding is usually "sii,s35390a"; verify.
			 */
			rtc@30 {
				compatible = "s35390a";
				reg = <0x30>;
			};
		};
	};

	leds {
		compatible = "gpio-leds";

		led-status-green {
			label = "l-50:green:status";
			gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
		};

		led-status-red {
			label = "l-50:red:status";
			gpios = <&gpio3 2 GPIO_ACTIVE_LOW>;
		};

		led-wifi {
			label = "l-50:green:wifi";
			gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
			linux,default-trigger = "phy0tpt";
		};

		led-internet-green {
			label = "l-50:green:internet";
			gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
		};

		led-internet-red {
			label = "l-50:red:internet";
			gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
		};

		led-usb1-green {
			label = "l-50:green:usb1";
			gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
			linux,default-trigger = "usbport";
			trigger-sources = <&hub_port3>;
		};

		led-usb1-red {
			label = "l-50:red:usb1";
			gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
		};

		led-usb2-green {
			label = "l-50:green:usb2";
			gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
			linux,default-trigger = "usbport";
			trigger-sources = <&hub_port1>;
		};

		led-usb2-red {
			label = "l-50:red:usb2";
			gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
		};
	};

	/* Fixed regulators switched via the 0x21 expander (gpio3) */
	usb2_pwr {
		compatible = "regulator-fixed";
		regulator-name = "usb2_pwr";
		regulator-min-microvolt = <5000000>;
		regulator-max-microvolt = <5000000>;
		gpio = <&gpio3 3 GPIO_ACTIVE_LOW>;
		regulator-always-on;
	};

	usb1_pwr {
		compatible = "regulator-fixed";
		regulator-name = "usb1_pwr";
		regulator-min-microvolt = <5000000>;
		regulator-max-microvolt = <5000000>;
		gpio = <&gpio3 4 GPIO_ACTIVE_LOW>;
		regulator-always-on;
	};

	mpcie_pwr {
		compatible = "regulator-fixed";
		regulator-name = "mpcie_pwr";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
		enable-active-high;
		regulator-always-on;
	};

	express_card_pwr {
		compatible = "regulator-fixed";
		regulator-name = "express_card_pwr";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
		enable-active-high;
		regulator-always-on;
	};

	keys {
		compatible = "gpio-keys";

		button-factory-defaults {
			label = "factory_defaults";
			gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
			linux,code = <KEY_RESTART>;
		};
	};
};

&mdio {
	status = "okay";

	ethphy8: ethernet-phy@8 {
		reg = <0x08>;
	};

	/*
	 * Two MV88E6085-family switches in one DSA tree (dsa,member <0 0>
	 * and <0 1>), cross-linked via their port 5 fixed links.
	 */
	switch0: switch@10 {
		compatible = "marvell,mv88e6085";
		#address-cells = <1>;
		#size-cells = <0>;
		reg = <0x10>;
		dsa,member = <0 0>;

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			port@0 {
				reg = <0>;
				label = "lan5";
			};

			port@1 {
				reg = <1>;
				label = "lan1";
			};

			port@2 {
				reg = <2>;
				label = "lan6";
			};

			port@3 {
				reg = <3>;
				label = "lan2";
			};

			port@4 {
				reg = <4>;
				label = "lan7";
			};

			switch0port5: port@5 {
				reg = <5>;
				phy-mode = "rgmii-txid";
				link = <&switch1port5>;

				fixed-link {
					speed = <1000>;
					full-duplex;
				};
			};

			port@6 {
				reg = <6>;
				phy-mode = "rgmii-id";
				ethernet = <&eth1port>;

				fixed-link {
					speed = <1000>;
					full-duplex;
				};
			};
		};
	};

	switch@11 {
		compatible = "marvell,mv88e6085";
		#address-cells = <1>;
		#size-cells = <0>;
		reg = <0x11>;
		dsa,member = <0 1>;

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			port@0 {
				reg = <0>;
				label = "lan3";
			};

			port@1 {
				reg = <1>;
				label = "lan8";
			};

			port@2 {
				reg = <2>;
				label = "lan4";
			};

			port@3 {
				reg = <3>;
				label = "dmz";
			};

			switch1port5: port@5 {
				reg = <5>;
				phy-mode = "rgmii-txid";
				link = <&switch0port5>;

				fixed-link {
					speed = <1000>;
					full-duplex;
				};
			};

			port@6 {
				reg = <6>;
				label = "dsl";

				fixed-link {
					speed = <100>;
					full-duplex;
				};
			};
		};
	};
};

&eth0 {
	status = "okay";

	ethernet0-port@0 {
		phy-handle = <&ethphy8>;
	};
};

&eth1 {
	status = "okay";

	ethernet1-port@0 {
		speed = <1000>;
		duplex = <1>;
		phy-mode = "rgmii";
	};
};

&nand {
	status = "okay";
	pinctrl-0 = <&pmx_nand>;
	pinctrl-names = "default";

	partition@0 {
		label = "u-boot";
		reg = <0x00000000 0x000c0000>;
	};

	/*
	 * NOTE(review): unit-address "a0000" does not match reg 0xc0000 —
	 * node name should presumably be partition@c0000; verify.
	 */
	partition@a0000 {
		label = "bootldr-env";
		reg = <0x000c0000 0x00040000>;
	};

	partition@100000 {
		label = "kernel-1";
		reg = <0x00100000 0x00800000>;
	};

	partition@900000 {
		label = "rootfs-1";
		reg = <0x00900000 0x07100000>;
	};

	partition@7a00000 {
		label = "kernel-2";
		reg = <0x07a00000 0x00800000>;
	};

	partition@8200000 {
		label = "rootfs-2";
		reg = <0x08200000 0x07100000>;
	};

	partition@f300000 {
		label = "default_sw";
		reg = <0x0f300000 0x07900000>;
	};

	partition@16c00000 {
		label = "logs";
		reg = <0x16c00000 0x01800000>;
	};

	partition@18400000 {
		label = "preset_cfg";
		reg = <0x18400000 0x00100000>;
	};

	partition@18500000 {
		label = "adsl";
		reg = <0x18500000 0x00100000>;
	};

	partition@18600000 {
		label = "storage";
		reg = <0x18600000 0x07a00000>;
	};
};

/* Internal SoC RTC unused; the board uses the I2C rtc@30 instead */
&rtc {
	status = "disabled";
};

&pciec {
	status = "okay";
};

&pcie0 {
	status = "okay";
};

&sata_phy0 {
	status = "disabled";
};

&sata_phy1 {
	status = "disabled";
};

/* USB hub topology exported so LED triggers can reference the ports */
&usb0 {
	#address-cells = <1>;
	#size-cells = <0>;
	status = "okay";

	port@1 {
		#address-cells = <1>;
		#size-cells = <0>;
		reg = <1>;
		#trigger-source-cells = <0>;

		hub_port1: port@1 {
			reg = <1>;
			#trigger-source-cells = <0>;
		};

		hub_port3: port@3 {
			reg = <3>;
			#trigger-source-cells = <0>;
		};
	};
};
/* SPDX-License-Identifier: GPL-2.0 */ #ifdef CONFIG_RTC_INTF_DEV extern void __init rtc_dev_init(void); extern void rtc_dev_prepare(struct rtc_device *rtc); #else static inline void rtc_dev_init(void) { } static inline void rtc_dev_prepare(struct rtc_device *rtc) { } #endif #ifdef CONFIG_RTC_INTF_PROC extern void rtc_proc_add_device(struct rtc_device *rtc); extern void rtc_proc_del_device(struct rtc_device *rtc); #else static inline void rtc_proc_add_device(struct rtc_device *rtc) { } static inline void rtc_proc_del_device(struct rtc_device *rtc) { } #endif #ifdef CONFIG_RTC_INTF_SYSFS const struct attribute_group **rtc_get_dev_attribute_groups(void); #else static inline const struct attribute_group **rtc_get_dev_attribute_groups(void) { return NULL; } #endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
 */

#ifndef K3_EVENT_ROUTER_
#define K3_EVENT_ROUTER_

#include <linux/types.h>

/*
 * Callback bundle handed to clients of a K3 event router.
 *
 * @priv:      opaque router context, passed back verbatim as the first
 *             argument of @set_event
 * @set_event: program/route the given event id; returns 0 on success or a
 *             negative error code (convention assumed from kernel style —
 *             confirm against the router implementation)
 */
struct k3_event_route_data {
	void *priv;
	int (*set_event)(void *priv, u32 event);
};

#endif /* K3_EVENT_ROUTER_ */
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
 * Author: Alexandre Torgue <[email protected]> for STMicroelectronics.
 */

/dts-v1/;

/* Board-level file: all hardware description lives in the shared .dtsi's */
#include "stm32mp157.dtsi"
#include "stm32mp15-pinctrl.dtsi"
#include "stm32mp15xxac-pinctrl.dtsi"
#include "stm32mp15xx-dkx.dtsi"

/ {
	model = "STMicroelectronics STM32MP157A-DK1 Discovery Board";
	compatible = "st,stm32mp157a-dk1", "st,stm32mp157";

	aliases {
		ethernet0 = &ethernet0;
	};

	chosen {
		stdout-path = "serial0:115200n8";
	};
};
/* SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef _INTEL_DSB_H
#define _INTEL_DSB_H

#include <linux/types.h>

#include "i915_reg_defs.h"

struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dsb;
enum pipe;

/* Per-pipe DSB engine instances; I915_MAX_DSBS is the count sentinel */
enum intel_dsb_id {
	INTEL_DSB_0,
	INTEL_DSB_1,
	INTEL_DSB_2,
	I915_MAX_DSBS,
};

/* Lifecycle: prepare -> emit commands -> finish -> commit -> wait -> cleanup */
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    enum intel_dsb_id dsb_id,
				    unsigned int max_cmds);
void intel_dsb_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);

/* Command emission into the buffer */
void intel_dsb_reg_write(struct intel_dsb *dsb,
			 i915_reg_t reg, u32 val);
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
				i915_reg_t reg, u32 mask, u32 val);
void intel_dsb_noop(struct intel_dsb *dsb, int count);
void intel_dsb_nonpost_start(struct intel_dsb *dsb);
void intel_dsb_nonpost_end(struct intel_dsb *dsb);
void intel_dsb_interrupt(struct intel_dsb *dsb);

/* Wait/synchronization commands (executed by the DSB itself) */
void intel_dsb_wait_usec(struct intel_dsb *dsb, int count);
void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count);
void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
				 struct intel_dsb *dsb);
void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
				struct intel_dsb *dsb,
				int lower, int upper);
void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
				 struct intel_dsb *dsb,
				 int lower, int upper);
void intel_dsb_vblank_evade(struct intel_atomic_state *state,
			    struct intel_dsb *dsb);
void intel_dsb_chain(struct intel_atomic_state *state,
		     struct intel_dsb *dsb,
		     struct intel_dsb *chained_dsb,
		     bool wait_for_vblank);

/* Execution control and completion handling on the CPU side */
void intel_dsb_commit(struct intel_dsb *dsb,
		      bool wait_for_vblank);
void intel_dsb_wait(struct intel_dsb *dsb);
void intel_dsb_irq_handler(struct intel_display *display,
			   enum pipe pipe, enum intel_dsb_id dsb_id);

#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * X-macro list of the SysV IPC syscalls (semaphores, message queues, shared
 * memory).  The including file is expected to define DO_TEST(name, nr)
 * before #including this header; each entry is guarded so that only the
 * syscalls present on the target architecture are emitted.
 */
#ifdef __NR_semop
DO_TEST(semop, __NR_semop)
#endif

#ifdef __NR_semget
DO_TEST(semget, __NR_semget)
#endif

#ifdef __NR_semctl
DO_TEST(semctl, __NR_semctl)
#endif

#ifdef __NR_semtimedop
DO_TEST(semtimedop, __NR_semtimedop)
#endif

#ifdef __NR_msgsnd
DO_TEST(msgsnd, __NR_msgsnd)
#endif

#ifdef __NR_msgrcv
DO_TEST(msgrcv, __NR_msgrcv)
#endif

#ifdef __NR_msgget
DO_TEST(msgget, __NR_msgget)
#endif

#ifdef __NR_msgctl
DO_TEST(msgctl, __NR_msgctl)
#endif

#ifdef __NR_shmat
DO_TEST(shmat, __NR_shmat)
#endif

#ifdef __NR_shmdt
DO_TEST(shmdt, __NR_shmdt)
#endif

#ifdef __NR_shmget
DO_TEST(shmget, __NR_shmget)
#endif

#ifdef __NR_shmctl
DO_TEST(shmctl, __NR_shmctl)
#endif
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __GFXHUB_V1_2_H__
#define __GFXHUB_V1_2_H__

/* Function tables exported by gfxhub_v1_2.c for the amdgpu core */
extern const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs;
extern struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs;

#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016-2024 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <[email protected]>
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/mem_encrypt.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>
#include <asm/sev.h>
#include <asm/ia32.h>

#include "mm_internal.h"

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * SNP-specific routine which needs to additionally change the page state from
 * private to shared before copying the data from the source to destination and
 * restore after the copy.
 */
static inline void __init snp_memcpy(void *dst, void *src, size_t sz,
				     unsigned long paddr, bool decrypt)
{
	unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (decrypt) {
		/*
		 * @paddr needs to be accessed decrypted, mark the page shared in
		 * the RMP table before copying it.
		 */
		early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);

		memcpy(dst, src, sz);

		/* Restore the page state after the memcpy. */
		early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
	} else {
		/*
		 * @paddr need to be accessed encrypted, no need for the page state
		 * change.
		 */
		memcpy(dst, src, sz);
	}
}

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are limited number of early mapping slots, so map (at most)
	 * one page at time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
			snp_memcpy(sme_early_buffer, src, len, paddr, enc);
			snp_memcpy(dst, sme_early_buffer, len, paddr, !enc);
		} else {
			memcpy(sme_early_buffer, src, len);
			memcpy(dst, sme_early_buffer, len);
		}

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

/* Encrypt-in-place: contents become readable through an encrypted mapping */
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

/* Decrypt-in-place: contents become readable through a decrypted mapping */
void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

/*
 * Map (or unmap, when @map is false) @vaddr..@vaddr+@size via PMD-sized
 * early page-table entries WITHOUT the encryption bit, one PMD at a time.
 */
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

/* Tear down the decrypted mappings of the boot_params and kernel cmdline */
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

/* Map boot_params and the kernel cmdline decrypted (firmware wrote them unencrypted) */
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

/*
 * Extract the pfn (and optionally the protection bits) from a page-table
 * entry at the given level.  Returns 0 (with a WARN) for an invalid level.
 */
static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
	unsigned long pfn = 0;
	pgprot_t prot;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		WARN_ONCE(1, "Invalid level for kpte\n");
		return 0;
	}

	if (ret_prot)
		*ret_prot = prot;

	return pfn;
}

/* A TLB flush is always required around a C-bit change on AMD */
static bool amd_enc_tlb_flush_required(bool enc)
{
	return true;
}

/* Cache flushes may be skipped when the CPU advertises SME coherency */
static bool amd_enc_cache_flush_required(void)
{
	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
}

/*
 * Notify the hypervisor (page by page, at the mapping's page-table level)
 * that the encryption status of the given range changed.  No-op without
 * CONFIG_PARAVIRT.
 */
static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
{
#ifdef CONFIG_PARAVIRT
	unsigned long vaddr_end = vaddr + size;

	while (vaddr < vaddr_end) {
		int psize, pmask, level;
		unsigned long pfn;
		pte_t *kpte;

		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			WARN_ONCE(1, "kpte lookup for vaddr\n");
			return;
		}

		pfn = pg_level_to_pfn(level, kpte, NULL);
		/*
		 * NOTE(review): this `continue` does not advance vaddr, so a
		 * zero pfn (only produced on an invalid level, which already
		 * WARNs) would loop forever here — worth confirming upstream.
		 */
		if (!pfn)
			continue;

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);

		vaddr = (vaddr & pmask) + psize;
	}
#endif
}

static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * To maintain the security guarantees of SEV-SNP guests, make sure
	 * to invalidate the memory before encryption attribute is cleared.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
		snp_set_memory_shared(vaddr, npages);

	return 0;
}

/* Return true unconditionally: return value doesn't matter for the SEV side */
static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * After memory is mapped encrypted in the page table, validate it
	 * so that it is consistent with the page table updates.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
		snp_set_memory_private(vaddr, npages);

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);

	return 0;
}

/*
 * Fill in @d (pfn, new protections, physical address, size) for a pending
 * C-bit change and flush the affected cache range.  Returns 1 when there is
 * nothing to do (invalid pfn, or the protections are already as requested),
 * 0 when the caller should proceed with the page-table update.
 */
int prepare_pte_enc(struct pte_enc_desc *d)
{
	pgprot_t old_prot;

	d->pfn = pg_level_to_pfn(d->pte_level, d->kpte, &old_prot);
	if (!d->pfn)
		return 1;

	d->new_pgprot = old_prot;
	if (d->encrypt)
		pgprot_val(d->new_pgprot) |= _PAGE_ENC;
	else
		pgprot_val(d->new_pgprot) &= ~_PAGE_ENC;

	/* If prot is same then do nothing. */
	if (pgprot_val(old_prot) == pgprot_val(d->new_pgprot))
		return 1;

	d->pa = d->pfn << PAGE_SHIFT;
	d->size = page_level_size(d->pte_level);

	/*
	 * In-place en-/decryption and physical page attribute change
	 * from C=1 to C=0 or vice versa will be performed. Flush the
	 * caches to ensure that data gets accessed with the correct
	 * C-bit.
	 */
	if (d->va)
		clflush_cache_range(d->va, d->size);
	else
		clflush_cache_range(__va(d->pa), d->size);

	return 0;
}

/* Atomically install a PTE for @pfn with the given protections */
void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot)
{
	pte_t new_pte;

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}

/*
 * Early (boot-time) C-bit flip for one page-table entry: converts the page
 * contents in place and updates the PTE and, on SNP, the RMP page state in
 * the required order.
 */
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pte_enc_desc d = { .kpte	     = kpte,
			    .pte_level	     = level,
			    .encrypt	     = enc
	};

	if (prepare_pte_enc(&d))
		return;

	/* Encrypt/decrypt the contents in-place */
	if (enc) {
		sme_early_encrypt(d.pa, d.size);
	} else {
		sme_early_decrypt(d.pa, d.size);

		/*
		 * ON SNP, the page state in the RMP table must happen
		 * before the page table updates.
		 */
		early_snp_set_memory_shared((unsigned long)__va(d.pa), d.pa, 1);
	}

	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);

	/*
	 * If page is set encrypted in the page table, then update the RMP table to
	 * add this page as private.
	 */
	if (enc)
		early_snp_set_memory_private((unsigned long)__va(d.pa), d.pa, 1);
}

/*
 * Walk @vaddr..@vaddr+@size flipping the C-bit on each mapping; large pages
 * are handled whole when fully covered, otherwise split via
 * kernel_physical_mapping_change().  Returns 0 on success, 1 on a failed
 * page-table lookup.
 */
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next, start;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	start = vaddr;
	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned and
		 * the number of pages to set/clear encryption bit is smaller
		 * than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, create the next
		 * level page table mapping (4K or 2M). If it is part of a 2M
		 * page then we request a split of the large page into 4K
		 * chunks. A 1GB large page is split into 2M pages, resp.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

	early_set_mem_enc_dec_hypercall(start, size, enc);
out:
	__flush_tlb_all();
	return ret;
}

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

/* Early wrapper so callers need not know about CONFIG_PARAVIRT */
void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr,
					    unsigned long size, bool enc)
{
	enc_dec_hypercall(vaddr, size, enc);
}

/*
 * One-time SME/SEV setup: propagate the encryption mask into the early page
 * tables, install the enc-status platform hooks, and apply SEV-ES/SNP
 * specific workarounds.  No-op when SME is not active.
 */
void __init sme_early_init(void)
{
	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	add_encrypt_protection_map();

	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = amd_enc_status_change_finish;
	x86_platform.guest.enc_tlb_flush_required    = amd_enc_tlb_flush_required;
	x86_platform.guest.enc_cache_flush_required  = amd_enc_cache_flush_required;
	x86_platform.guest.enc_kexec_begin	     = snp_kexec_begin;
	x86_platform.guest.enc_kexec_finish	     = snp_kexec_finish;

	/*
	 * AMD-SEV-ES intercepts the RDMSR to read the X2APIC ID in the
	 * parallel bringup low level code. That raises #VC which cannot be
	 * handled there.
	 * It does not provide a RDMSR GHCB protocol so the early startup
	 * code cannot directly communicate with the secure firmware. The
	 * alternative solution to retrieve the APIC ID via CPUID(0xb),
	 * which is covered by the GHCB protocol, is not viable either
	 * because there is no enforcement of the CPUID(0xb) provided
	 * "initial" APIC ID to be the same as the real APIC ID.
	 * Disable parallel bootup.
	 */
	if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
		x86_cpuinit.parallel_bringup = false;

	/*
	 * The VMM is capable of injecting interrupt 0x80 and triggering the
	 * compatibility syscall path.
	 *
	 * By default, the 32-bit emulation is disabled in order to ensure
	 * the safety of the VM.
	 */
	if (sev_status & MSR_AMD64_SEV_ENABLED)
		ia32_disable();

	/*
	 * Override init functions that scan the ROM region in SEV-SNP guests,
	 * as this memory is not pre-validated and would thus cause a crash.
	 */
	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
		x86_init.mpparse.find_mptable = x86_init_noop;
		x86_init.pci.init_irq = x86_init_noop;
		x86_init.resources.probe_roms = x86_init_noop;

		/*
		 * DMI setup behavior for SEV-SNP guests depends on
		 * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
		 * parsed yet. snp_dmi_setup() will run after that
		 * parsing has happened.
		 */
		x86_init.resources.dmi_setup = snp_dmi_setup;
	}

	/*
	 * Switch the SVSM CA mapping (if active) from identity mapped to
	 * kernel mapped.
	 */
	snp_update_svsm_ca();
}

/* Free the unused tail of the .bss..decrypted section, re-encrypting it first */
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * If the unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it. Base the
	 * re-encryption on the same condition used for the decryption in
	 * sme_postprocess_startup(). Higher level abstractions, such as
	 * CC_ATTR_MEM_ENCRYPT, aren't necessarily equivalent in a Hyper-V VM
	 * using vTOM, where sme_me_mask is always zero.
	 */
	if (sme_me_mask) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}
// SPDX-License-Identifier: GPL-2.0-only // // rt5682s.c -- RT5682I-VS ALSA SoC audio component driver // // Copyright 2021 Realtek Semiconductor Corp. // Author: Derek Fang <[email protected]> // #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/acpi.h> #include <linux/gpio/consumer.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/jack.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <sound/tlv.h> #include <sound/rt5682s.h> #include "rt5682s.h" #define DEVICE_ID 0x6749 static const struct rt5682s_platform_data i2s_default_platform_data = { .dmic1_data_pin = RT5682S_DMIC1_DATA_GPIO2, .dmic1_clk_pin = RT5682S_DMIC1_CLK_GPIO3, .jd_src = RT5682S_JD1, .dai_clk_names[RT5682S_DAI_WCLK_IDX] = "rt5682-dai-wclk", .dai_clk_names[RT5682S_DAI_BCLK_IDX] = "rt5682-dai-bclk", }; static const char *rt5682s_supply_names[RT5682S_NUM_SUPPLIES] = { [RT5682S_SUPPLY_AVDD] = "AVDD", [RT5682S_SUPPLY_MICVDD] = "MICVDD", [RT5682S_SUPPLY_DBVDD] = "DBVDD", [RT5682S_SUPPLY_LDO1_IN] = "LDO1-IN", }; static const struct reg_sequence patch_list[] = { {RT5682S_I2C_CTRL, 0x0007}, {RT5682S_DIG_IN_CTRL_1, 0x0000}, {RT5682S_CHOP_DAC_2, 0x2020}, {RT5682S_VREF_REC_OP_FB_CAP_CTRL_2, 0x0101}, {RT5682S_VREF_REC_OP_FB_CAP_CTRL_1, 0x80c0}, {RT5682S_HP_CALIB_CTRL_9, 0x0002}, {RT5682S_DEPOP_1, 0x0000}, {RT5682S_HP_CHARGE_PUMP_2, 0x3c15}, {RT5682S_DAC1_DIG_VOL, 0xfefe}, {RT5682S_SAR_IL_CMD_2, 0xac00}, {RT5682S_SAR_IL_CMD_3, 0x024c}, {RT5682S_CBJ_CTRL_6, 0x0804}, }; static void rt5682s_apply_patch_list(struct rt5682s_priv *rt5682s, struct device *dev) { int ret; ret = regmap_multi_reg_write(rt5682s->regmap, patch_list, ARRAY_SIZE(patch_list)); if (ret) dev_warn(dev, "Failed to apply regmap patch: %d\n", ret); } static const 
struct reg_default rt5682s_reg[] = {
	{0x0002, 0x8080}, {0x0003, 0x0001}, {0x0005, 0x0000}, {0x0006, 0x0000},
	{0x0008, 0x8007}, {0x000b, 0x0000}, {0x000f, 0x4000}, {0x0010, 0x4040},
	{0x0011, 0x0000}, {0x0012, 0x0000}, {0x0013, 0x1200}, {0x0014, 0x200a},
	{0x0015, 0x0404}, {0x0016, 0x0404}, {0x0017, 0x05a4}, {0x0019, 0xffff},
	{0x001c, 0x2f2f}, {0x001f, 0x0000}, {0x0022, 0x5757}, {0x0023, 0x0039},
	{0x0024, 0x000b}, {0x0026, 0xc0c4}, {0x0029, 0x8080}, {0x002a, 0xa0a0},
	{0x002b, 0x0300}, {0x0030, 0x0000}, {0x003c, 0x08c0}, {0x0044, 0x1818},
	{0x004b, 0x00c0}, {0x004c, 0x0000}, {0x004d, 0x0000}, {0x0061, 0x00c0},
	{0x0062, 0x008a}, {0x0063, 0x0800}, {0x0064, 0x0000}, {0x0065, 0x0000},
	{0x0066, 0x0030}, {0x0067, 0x000c}, {0x0068, 0x0000}, {0x0069, 0x0000},
	{0x006a, 0x0000}, {0x006b, 0x0000}, {0x006c, 0x0000}, {0x006d, 0x2200},
	{0x006e, 0x0810}, {0x006f, 0xe4de}, {0x0070, 0x3320}, {0x0071, 0x0000},
	{0x0073, 0x0000}, {0x0074, 0x0000}, {0x0075, 0x0002}, {0x0076, 0x0001},
	{0x0079, 0x0000}, {0x007a, 0x0000}, {0x007b, 0x0000}, {0x007c, 0x0100},
	{0x007e, 0x0000}, {0x007f, 0x0000}, {0x0080, 0x0000}, {0x0083, 0x0000},
	{0x0084, 0x0000}, {0x0085, 0x0000}, {0x0086, 0x0005}, {0x0087, 0x0000},
	{0x0088, 0x0000}, {0x008c, 0x0003}, {0x008e, 0x0060}, {0x008f, 0x4da1},
	{0x0091, 0x1c15}, {0x0092, 0x0425}, {0x0093, 0x0000}, {0x0094, 0x0080},
	{0x0095, 0x008f}, {0x0096, 0x0000}, {0x0097, 0x0000}, {0x0098, 0x0000},
	{0x0099, 0x0000}, {0x009a, 0x0000}, {0x009b, 0x0000}, {0x009c, 0x0000},
	{0x009d, 0x0000}, {0x009e, 0x0000}, {0x009f, 0x0009}, {0x00a0, 0x0000},
	{0x00a3, 0x0002}, {0x00a4, 0x0001}, {0x00b6, 0x0000}, {0x00b7, 0x0000},
	{0x00b8, 0x0000}, {0x00b9, 0x0002}, {0x00be, 0x0000}, {0x00c0, 0x0160},
	{0x00c1, 0x82a0}, {0x00c2, 0x0000}, {0x00d0, 0x0000}, {0x00d2, 0x3300},
	{0x00d3, 0x2200}, {0x00d4, 0x0000}, {0x00d9, 0x0000}, {0x00da, 0x0000},
	{0x00db, 0x0000}, {0x00dc, 0x00c0}, {0x00dd, 0x2220}, {0x00de, 0x3131},
	{0x00df, 0x3131}, {0x00e0, 0x3131}, {0x00e2, 0x0000}, {0x00e3, 0x4000},
	{0x00e4, 0x0aa0}, {0x00e5, 0x3131}, {0x00e6, 0x3131}, {0x00e7, 0x3131},
	{0x00e8, 0x3131}, {0x00ea, 0xb320}, {0x00eb, 0x0000}, {0x00f0, 0x0000},
	{0x00f6, 0x0000}, {0x00fa, 0x0000}, {0x00fb, 0x0000}, {0x00fc, 0x0000},
	{0x00fd, 0x0000}, {0x00fe, 0x10ec}, {0x00ff, 0x6749}, {0x0100, 0xa000},
	{0x010b, 0x0066}, {0x010c, 0x6666}, {0x010d, 0x2202}, {0x010e, 0x6666},
	{0x010f, 0xa800}, {0x0110, 0x0006}, {0x0111, 0x0460}, {0x0112, 0x2000},
	{0x0113, 0x0200}, {0x0117, 0x8000}, {0x0118, 0x0303}, {0x0125, 0x0020},
	{0x0132, 0x5026}, {0x0136, 0x8000}, {0x0139, 0x0005}, {0x013a, 0x3030},
	{0x013b, 0xa000}, {0x013c, 0x4110}, {0x013f, 0x0000}, {0x0145, 0x0022},
	{0x0146, 0x0000}, {0x0147, 0x0000}, {0x0148, 0x0000}, {0x0156, 0x0022},
	{0x0157, 0x0303}, {0x0158, 0x2222}, {0x0159, 0x0000}, {0x0160, 0x4ec0},
	{0x0161, 0x0080}, {0x0162, 0x0200}, {0x0163, 0x0800}, {0x0164, 0x0000},
	{0x0165, 0x0000}, {0x0166, 0x0000}, {0x0167, 0x000f}, {0x0168, 0x000f},
	{0x0169, 0x0001}, {0x0190, 0x4131}, {0x0194, 0x0000}, {0x0195, 0x0000},
	{0x0197, 0x0022}, {0x0198, 0x0000}, {0x0199, 0x0000}, {0x01ac, 0x0000},
	{0x01ad, 0x0000}, {0x01ae, 0x0000}, {0x01af, 0x2000}, {0x01b0, 0x0000},
	{0x01b1, 0x0000}, {0x01b2, 0x0000}, {0x01b3, 0x0017}, {0x01b4, 0x004b},
	{0x01b5, 0x0000}, {0x01b6, 0x03e8}, {0x01b7, 0x0000}, {0x01b8, 0x0000},
	{0x01b9, 0x0400}, {0x01ba, 0xb5b6}, {0x01bb, 0x9124}, {0x01bc, 0x4924},
	{0x01bd, 0x0009}, {0x01be, 0x0018}, {0x01bf, 0x002a}, {0x01c0, 0x004c},
	{0x01c1, 0x0097}, {0x01c2, 0x01c3}, {0x01c3, 0x03e9}, {0x01c4, 0x1389},
	{0x01c5, 0xc351}, {0x01c6, 0x02a0}, {0x01c7, 0x0b0f}, {0x01c8, 0x402f},
	{0x01c9, 0x0702}, {0x01ca, 0x0000}, {0x01cb, 0x0000}, {0x01cc, 0x5757},
	{0x01cd, 0x5757}, {0x01ce, 0x5757}, {0x01cf, 0x5757}, {0x01d0, 0x5757},
	{0x01d1, 0x5757}, {0x01d2, 0x5757}, {0x01d3, 0x5757}, {0x01d4, 0x5757},
	{0x01d5, 0x5757}, {0x01d6, 0x0000}, {0x01d7, 0x0000}, {0x01d8, 0x0162},
	{0x01d9, 0x0007}, {0x01da, 0x0000}, {0x01db, 0x0004}, {0x01dc, 0x0000},
	{0x01de, 0x7c00}, {0x01df, 0x0020}, {0x01e0, 0x04c1}, {0x01e1, 0x0000},
	{0x01e2, 0x0000}, {0x01e3, 0x0000}, {0x01e4, 0x0000}, {0x01e5, 0x0000},
	{0x01e6, 0x0001}, {0x01e7, 0x0000}, {0x01e8, 0x0000}, {0x01eb, 0x0000},
	{0x01ec, 0x0000}, {0x01ed, 0x0000}, {0x01ee, 0x0000}, {0x01ef, 0x0000},
	{0x01f0, 0x0000}, {0x01f1, 0x0000}, {0x01f2, 0x0000}, {0x01f3, 0x0000},
	{0x01f4, 0x0000}, {0x0210, 0x6297}, {0x0211, 0xa004}, {0x0212, 0x0365},
	{0x0213, 0xf7ff}, {0x0214, 0xf24c}, {0x0215, 0x0102}, {0x0216, 0x00a3},
	{0x0217, 0x0048}, {0x0218, 0xa2c0}, {0x0219, 0x0400}, {0x021a, 0x00c8},
	{0x021b, 0x00c0}, {0x021c, 0x0000}, {0x021d, 0x024c}, {0x02fa, 0x0000},
	{0x02fb, 0x0000}, {0x02fc, 0x0000}, {0x03fe, 0x0000}, {0x03ff, 0x0000},
	{0x0500, 0x0000}, {0x0600, 0x0000}, {0x0610, 0x6666}, {0x0611, 0xa9aa},
	{0x0620, 0x6666}, {0x0621, 0xa9aa}, {0x0630, 0x6666}, {0x0631, 0xa9aa},
	{0x0640, 0x6666}, {0x0641, 0xa9aa}, {0x07fa, 0x0000}, {0x08fa, 0x0000},
	{0x08fb, 0x0000}, {0x0d00, 0x0000}, {0x1100, 0x0000}, {0x1101, 0x0000},
	{0x1102, 0x0000}, {0x1103, 0x0000}, {0x1104, 0x0000}, {0x1105, 0x0000},
	{0x1106, 0x0000}, {0x1107, 0x0000}, {0x1108, 0x0000}, {0x1109, 0x0000},
	{0x110a, 0x0000}, {0x110b, 0x0000}, {0x110c, 0x0000}, {0x1111, 0x0000},
	{0x1112, 0x0000}, {0x1113, 0x0000}, {0x1114, 0x0000}, {0x1115, 0x0000},
	{0x1116, 0x0000}, {0x1117, 0x0000}, {0x1118, 0x0000}, {0x1119, 0x0000},
	{0x111a, 0x0000}, {0x111b, 0x0000}, {0x111c, 0x0000}, {0x1401, 0x0404},
	{0x1402, 0x0007}, {0x1403, 0x0365}, {0x1404, 0x0210}, {0x1405, 0x0365},
	{0x1406, 0x0210}, {0x1407, 0x0000}, {0x1408, 0x0000}, {0x1409, 0x0000},
	{0x140a, 0x0000}, {0x140b, 0x0000}, {0x140c, 0x0000}, {0x140d, 0x0000},
	{0x140e, 0x0000}, {0x140f, 0x0000}, {0x1410, 0x0000}, {0x1411, 0x0000},
	{0x1801, 0x0004}, {0x1802, 0x0000}, {0x1803, 0x0000}, {0x1804, 0x0000},
	{0x1805, 0x00ff}, {0x2c00, 0x0000}, {0x3400, 0x0200}, {0x3404, 0x0000},
	{0x3405, 0x0000}, {0x3406, 0x0000}, {0x3407, 0x0000}, {0x3408, 0x0000},
	{0x3409, 0x0000}, {0x340a, 0x0000}, {0x340b, 0x0000}, {0x340c, 0x0000},
	{0x340d, 0x0000}, {0x340e, 0x0000}, {0x340f, 0x0000}, {0x3410, 0x0000},
	{0x3411, 0x0000}, {0x3412, 0x0000}, {0x3413, 0x0000}, {0x3414, 0x0000},
	{0x3415, 0x0000}, {0x3424, 0x0000}, {0x3425, 0x0000}, {0x3426, 0x0000},
	{0x3427, 0x0000}, {0x3428, 0x0000}, {0x3429, 0x0000}, {0x342a, 0x0000},
	{0x342b, 0x0000}, {0x342c, 0x0000}, {0x342d, 0x0000}, {0x342e, 0x0000},
	{0x342f, 0x0000}, {0x3430, 0x0000}, {0x3431, 0x0000}, {0x3432, 0x0000},
	{0x3433, 0x0000}, {0x3434, 0x0000}, {0x3435, 0x0000}, {0x3440, 0x6319},
	{0x3441, 0x3771}, {0x3500, 0x0002}, {0x3501, 0x5728}, {0x3b00, 0x3010},
	{0x3b01, 0x3300}, {0x3b02, 0x2200}, {0x3b03, 0x0100},
};

/*
 * Registers whose value the hardware changes by itself (status, detection
 * and calibration results); these must bypass the regmap cache.
 */
static bool rt5682s_volatile_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case RT5682S_RESET:
	case RT5682S_CBJ_CTRL_2:
	case RT5682S_I2S1_F_DIV_CTRL_2:
	case RT5682S_I2S2_F_DIV_CTRL_2:
	case RT5682S_INT_ST_1:
	case RT5682S_GPIO_ST:
	case RT5682S_IL_CMD_1:
	case RT5682S_4BTN_IL_CMD_1:
	case RT5682S_AJD1_CTRL:
	case RT5682S_VERSION_ID ... RT5682S_DEVICE_ID:
	case RT5682S_STO_NG2_CTRL_1:
	case RT5682S_STO_NG2_CTRL_5 ... RT5682S_STO_NG2_CTRL_7:
	case RT5682S_STO1_DAC_SIL_DET:
	case RT5682S_HP_IMP_SENS_CTRL_1 ... RT5682S_HP_IMP_SENS_CTRL_4:
	case RT5682S_HP_IMP_SENS_CTRL_13:
	case RT5682S_HP_IMP_SENS_CTRL_14:
	case RT5682S_HP_IMP_SENS_CTRL_43 ... RT5682S_HP_IMP_SENS_CTRL_46:
	case RT5682S_HP_CALIB_CTRL_1:
	case RT5682S_HP_CALIB_CTRL_10:
	case RT5682S_HP_CALIB_ST_1 ... RT5682S_HP_CALIB_ST_11:
	case RT5682S_SAR_IL_CMD_2 ... RT5682S_SAR_IL_CMD_5:
	case RT5682S_SAR_IL_CMD_10:
	case RT5682S_SAR_IL_CMD_11:
	case RT5682S_VERSION_ID_HIDE:
	case RT5682S_VERSION_ID_CUS:
	case RT5682S_I2C_TRANS_CTRL:
	case RT5682S_DMIC_FLOAT_DET:
	case RT5682S_HA_CMP_OP_1:
	case RT5682S_NEW_CBJ_DET_CTL_10 ... RT5682S_NEW_CBJ_DET_CTL_16:
	case RT5682S_CLK_SW_TEST_1:
	case RT5682S_CLK_SW_TEST_2:
	case RT5682S_EFUSE_READ_1 ... RT5682S_EFUSE_READ_18:
	case RT5682S_PILOT_DIG_CTL_1:
		return true;
	default:
		return false;
	}
}

/* Registers that may be read at all (readable range of the register map). */
static bool rt5682s_readable_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case RT5682S_RESET:
	case RT5682S_VERSION_ID:
	case
	     RT5682S_VENDOR_ID:
	case RT5682S_DEVICE_ID:
	case RT5682S_HP_CTRL_1:
	case RT5682S_HP_CTRL_2:
	case RT5682S_HPL_GAIN:
	case RT5682S_HPR_GAIN:
	case RT5682S_I2C_CTRL:
	case RT5682S_CBJ_BST_CTRL:
	case RT5682S_CBJ_DET_CTRL:
	case RT5682S_CBJ_CTRL_1 ... RT5682S_CBJ_CTRL_8:
	case RT5682S_DAC1_DIG_VOL:
	case RT5682S_STO1_ADC_DIG_VOL:
	case RT5682S_STO1_ADC_BOOST:
	case RT5682S_HP_IMP_GAIN_1:
	case RT5682S_HP_IMP_GAIN_2:
	case RT5682S_SIDETONE_CTRL:
	case RT5682S_STO1_ADC_MIXER:
	case RT5682S_AD_DA_MIXER:
	case RT5682S_STO1_DAC_MIXER:
	case RT5682S_A_DAC1_MUX:
	case RT5682S_DIG_INF2_DATA:
	case RT5682S_REC_MIXER:
	case RT5682S_CAL_REC:
	case RT5682S_HP_ANA_OST_CTRL_1 ... RT5682S_HP_ANA_OST_CTRL_3:
	case RT5682S_PWR_DIG_1 ... RT5682S_PWR_MIXER:
	case RT5682S_MB_CTRL:
	case RT5682S_CLK_GATE_TCON_1 ... RT5682S_CLK_GATE_TCON_3:
	case RT5682S_CLK_DET ... RT5682S_LPF_AD_DMIC:
	case RT5682S_I2S1_SDP:
	case RT5682S_I2S2_SDP:
	case RT5682S_ADDA_CLK_1:
	case RT5682S_ADDA_CLK_2:
	case RT5682S_I2S1_F_DIV_CTRL_1:
	case RT5682S_I2S1_F_DIV_CTRL_2:
	case RT5682S_TDM_CTRL:
	case RT5682S_TDM_ADDA_CTRL_1:
	case RT5682S_TDM_ADDA_CTRL_2:
	case RT5682S_DATA_SEL_CTRL_1:
	case RT5682S_TDM_TCON_CTRL_1:
	case RT5682S_TDM_TCON_CTRL_2:
	case RT5682S_GLB_CLK:
	case RT5682S_PLL_TRACK_1 ... RT5682S_PLL_TRACK_6:
	case RT5682S_PLL_TRACK_11:
	case RT5682S_DEPOP_1:
	case RT5682S_HP_CHARGE_PUMP_1:
	case RT5682S_HP_CHARGE_PUMP_2:
	case RT5682S_HP_CHARGE_PUMP_3:
	case RT5682S_MICBIAS_1 ... RT5682S_MICBIAS_3:
	case RT5682S_PLL_TRACK_12 ... RT5682S_PLL_CTRL_7:
	case RT5682S_RC_CLK_CTRL:
	case RT5682S_I2S2_M_CLK_CTRL_1:
	case RT5682S_I2S2_F_DIV_CTRL_1:
	case RT5682S_I2S2_F_DIV_CTRL_2:
	case RT5682S_IRQ_CTRL_1 ... RT5682S_IRQ_CTRL_4:
	case RT5682S_INT_ST_1:
	case RT5682S_GPIO_CTRL_1:
	case RT5682S_GPIO_CTRL_2:
	case RT5682S_GPIO_ST:
	case RT5682S_HP_AMP_DET_CTRL_1:
	case RT5682S_MID_HP_AMP_DET:
	case RT5682S_LOW_HP_AMP_DET:
	case RT5682S_DELAY_BUF_CTRL:
	case RT5682S_SV_ZCD_1:
	case RT5682S_SV_ZCD_2:
	case RT5682S_IL_CMD_1 ... RT5682S_IL_CMD_6:
	case RT5682S_4BTN_IL_CMD_1 ... RT5682S_4BTN_IL_CMD_7:
	case RT5682S_ADC_STO1_HP_CTRL_1:
	case RT5682S_ADC_STO1_HP_CTRL_2:
	case RT5682S_AJD1_CTRL:
	case RT5682S_JD_CTRL_1:
	case RT5682S_DUMMY_1 ... RT5682S_DUMMY_3:
	case RT5682S_DAC_ADC_DIG_VOL1:
	case RT5682S_BIAS_CUR_CTRL_2 ... RT5682S_BIAS_CUR_CTRL_10:
	case RT5682S_VREF_REC_OP_FB_CAP_CTRL_1:
	case RT5682S_VREF_REC_OP_FB_CAP_CTRL_2:
	case RT5682S_CHARGE_PUMP_1:
	case RT5682S_DIG_IN_CTRL_1:
	case RT5682S_PAD_DRIVING_CTRL:
	case RT5682S_CHOP_DAC_1:
	case RT5682S_CHOP_DAC_2:
	case RT5682S_CHOP_ADC:
	case RT5682S_CALIB_ADC_CTRL:
	case RT5682S_VOL_TEST:
	case RT5682S_SPKVDD_DET_ST:
	case RT5682S_TEST_MODE_CTRL_1 ... RT5682S_TEST_MODE_CTRL_4:
	case RT5682S_PLL_INTERNAL_1 ... RT5682S_PLL_INTERNAL_4:
	case RT5682S_STO_NG2_CTRL_1 ... RT5682S_STO_NG2_CTRL_10:
	case RT5682S_STO1_DAC_SIL_DET:
	case RT5682S_SIL_PSV_CTRL1:
	case RT5682S_SIL_PSV_CTRL2:
	case RT5682S_SIL_PSV_CTRL3:
	case RT5682S_SIL_PSV_CTRL4:
	case RT5682S_SIL_PSV_CTRL5:
	case RT5682S_HP_IMP_SENS_CTRL_1 ... RT5682S_HP_IMP_SENS_CTRL_46:
	case RT5682S_HP_LOGIC_CTRL_1 ... RT5682S_HP_LOGIC_CTRL_3:
	case RT5682S_HP_CALIB_CTRL_1 ... RT5682S_HP_CALIB_CTRL_11:
	case RT5682S_HP_CALIB_ST_1 ... RT5682S_HP_CALIB_ST_11:
	case RT5682S_SAR_IL_CMD_1 ... RT5682S_SAR_IL_CMD_14:
	case RT5682S_DUMMY_4 ... RT5682S_DUMMY_6:
	case RT5682S_VERSION_ID_HIDE:
	case RT5682S_VERSION_ID_CUS:
	case RT5682S_SCAN_CTL:
	case RT5682S_HP_AMP_DET:
	case RT5682S_BIAS_CUR_CTRL_11:
	case RT5682S_BIAS_CUR_CTRL_12:
	case RT5682S_BIAS_CUR_CTRL_13:
	case RT5682S_BIAS_CUR_CTRL_14:
	case RT5682S_BIAS_CUR_CTRL_15:
	case RT5682S_BIAS_CUR_CTRL_16:
	case RT5682S_BIAS_CUR_CTRL_17:
	case RT5682S_BIAS_CUR_CTRL_18:
	case RT5682S_I2C_TRANS_CTRL:
	case RT5682S_DUMMY_7:
	case RT5682S_DUMMY_8:
	case RT5682S_DMIC_FLOAT_DET:
	case RT5682S_HA_CMP_OP_1 ... RT5682S_HA_CMP_OP_13:
	case RT5682S_HA_CMP_OP_14 ... RT5682S_HA_CMP_OP_25:
	case RT5682S_NEW_CBJ_DET_CTL_1 ... RT5682S_NEW_CBJ_DET_CTL_16:
	case RT5682S_DA_FILTER_1 ... RT5682S_DA_FILTER_5:
	case RT5682S_CLK_SW_TEST_1:
	case RT5682S_CLK_SW_TEST_2:
	case RT5682S_CLK_SW_TEST_3 ... RT5682S_CLK_SW_TEST_14:
	case RT5682S_EFUSE_MANU_WRITE_1 ... RT5682S_EFUSE_MANU_WRITE_6:
	case RT5682S_EFUSE_READ_1 ... RT5682S_EFUSE_READ_18:
	case RT5682S_EFUSE_TIMING_CTL_1:
	case RT5682S_EFUSE_TIMING_CTL_2:
	case RT5682S_PILOT_DIG_CTL_1:
	case RT5682S_PILOT_DIG_CTL_2:
	case RT5682S_HP_AMP_DET_CTL_1 ... RT5682S_HP_AMP_DET_CTL_4:
		return true;
	default:
		return false;
	}
}

/* Soft-reset the codec by writing 0 to the reset register. */
static void rt5682s_reset(struct rt5682s_priv *rt5682s)
{
	regmap_write(rt5682s->regmap, RT5682S_RESET, 0);
}

/*
 * Read the inline-command button status, write the value back (which, per
 * the surrounding usage, acknowledges the event), re-enter SAR ADC power
 * saving, and return the raw button-type bits (upper 12 bits of the status).
 */
static int rt5682s_button_detect(struct snd_soc_component *component)
{
	int btn_type, val;

	val = snd_soc_component_read(component, RT5682S_4BTN_IL_CMD_1);
	btn_type = val & 0xfff0;
	snd_soc_component_write(component, RT5682S_4BTN_IL_CMD_1, val);
	dev_dbg(component->dev, "%s btn_type=%x\n", __func__, btn_type);
	snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_2,
		RT5682S_SAR_ADC_PSV_MASK, RT5682S_SAR_ADC_PSV_ENTRY);

	return btn_type;
}

/* SAR ADC power states used by rt5682s_sar_power_mode() */
enum {
	SAR_PWR_OFF,
	SAR_PWR_NORMAL,
	SAR_PWR_SAVING,
};

/*
 * Switch the SAR button-detection ADC between off, normal and power-saving
 * modes. Serialized by sar_mutex; the usleep_range() settling delays between
 * register writes are part of the hardware sequence.
 */
static void rt5682s_sar_power_mode(struct snd_soc_component *component, int mode)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);

	mutex_lock(&rt5682s->sar_mutex);

	switch (mode) {
	case SAR_PWR_SAVING:
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_3,
			RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_DIS);
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
			RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK,
			RT5682S_CTRL_MB1_REG | RT5682S_CTRL_MB2_REG);
		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
			RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK |
			RT5682S_SAR_SEL_MB1_2_CTL_MASK,
			RT5682S_SAR_BUTDET_DIS | RT5682S_SAR_BUTDET_POW_SAV |
			RT5682S_SAR_SEL_MB1_2_MANU);
		usleep_range(5000, 5500);
		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
			RT5682S_SAR_BUTDET_MASK, RT5682S_SAR_BUTDET_EN);
		usleep_range(5000, 5500);
		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_2,
			RT5682S_SAR_ADC_PSV_MASK,
			RT5682S_SAR_ADC_PSV_ENTRY);
		break;
	case SAR_PWR_NORMAL:
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_3,
			RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_EN);
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
			RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK,
			RT5682S_CTRL_MB1_FSM | RT5682S_CTRL_MB2_FSM);
		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
			RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_SEL_MB1_2_AUTO);
		usleep_range(5000, 5500);
		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
			RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK,
			RT5682S_SAR_BUTDET_EN | RT5682S_SAR_BUTDET_POW_NORM);
		break;
	case SAR_PWR_OFF:
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
			RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK,
			RT5682S_CTRL_MB1_FSM | RT5682S_CTRL_MB2_FSM);
		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
			RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK |
			RT5682S_SAR_SEL_MB1_2_CTL_MASK,
			RT5682S_SAR_BUTDET_DIS | RT5682S_SAR_BUTDET_POW_SAV |
			RT5682S_SAR_SEL_MB1_2_MANU);
		break;
	default:
		dev_err(component->dev, "Invalid SAR Power mode: %d\n", mode);
		break;
	}

	mutex_unlock(&rt5682s->sar_mutex);
}

/* Arm the 4-button inline-command detection and unmask its interrupt. */
static void rt5682s_enable_push_button_irq(struct snd_soc_component *component)
{
	snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_13,
		RT5682S_SAR_SOUR_MASK, RT5682S_SAR_SOUR_BTN);
	snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
		RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK |
		RT5682S_SAR_SEL_MB1_2_CTL_MASK,
		RT5682S_SAR_BUTDET_EN | RT5682S_SAR_BUTDET_POW_NORM |
		RT5682S_SAR_SEL_MB1_2_AUTO);
	snd_soc_component_write(component, RT5682S_IL_CMD_1, 0x0040);
	snd_soc_component_update_bits(component, RT5682S_4BTN_IL_CMD_2,
		RT5682S_4BTN_IL_MASK | RT5682S_4BTN_IL_RST_MASK,
		RT5682S_4BTN_IL_EN | RT5682S_4BTN_IL_NOR);
	snd_soc_component_update_bits(component, RT5682S_IRQ_CTRL_3,
		RT5682S_IL_IRQ_MASK, RT5682S_IL_IRQ_EN);
}

/* Mask the button interrupt and power the button-detection path down. */
static void rt5682s_disable_push_button_irq(struct snd_soc_component *component)
{
	snd_soc_component_update_bits(component, RT5682S_IRQ_CTRL_3,
		RT5682S_IL_IRQ_MASK, RT5682S_IL_IRQ_DIS);
	snd_soc_component_update_bits(component, RT5682S_4BTN_IL_CMD_2,
		RT5682S_4BTN_IL_MASK, RT5682S_4BTN_IL_DIS);
	snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_13,
		RT5682S_SAR_SOUR_MASK, RT5682S_SAR_SOUR_TYPE);
	snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
		RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK |
		RT5682S_SAR_SEL_MB1_2_CTL_MASK,
		RT5682S_SAR_BUTDET_DIS | RT5682S_SAR_BUTDET_POW_SAV |
		RT5682S_SAR_SEL_MB1_2_MANU);
}

/**
 * rt5682s_headset_detect - Detect headset.
 * @component: SoC audio component device.
 * @jack_insert: Jack insert or not.
 *
 * Detect whether is headset or not when jack inserted.
 *
 * Returns detect status.
 */
static int rt5682s_headset_detect(struct snd_soc_component *component, int jack_insert)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);
	unsigned int val, count;
	int jack_type = 0;

	if (jack_insert) {
		rt5682s_disable_push_button_irq(component);

		/* Bring up references and mic bias before probing the jack. */
		snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1,
			RT5682S_PWR_VREF1 | RT5682S_PWR_VREF2 | RT5682S_PWR_MB,
			RT5682S_PWR_VREF1 | RT5682S_PWR_VREF2 | RT5682S_PWR_MB);
		snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1,
			RT5682S_PWR_FV1 | RT5682S_PWR_FV2, 0);
		usleep_range(15000, 20000);
		snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1,
			RT5682S_PWR_FV1 | RT5682S_PWR_FV2,
			RT5682S_PWR_FV1 | RT5682S_PWR_FV2);
		snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_3,
			RT5682S_PWR_CBJ, RT5682S_PWR_CBJ);
		snd_soc_component_write(component, RT5682S_SAR_IL_CMD_3, 0x0365);
		snd_soc_component_update_bits(component, RT5682S_HP_CHARGE_PUMP_2,
			RT5682S_OSW_L_MASK | RT5682S_OSW_R_MASK,
			RT5682S_OSW_L_DIS | RT5682S_OSW_R_DIS);
		snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_13,
			RT5682S_SAR_SOUR_MASK, RT5682S_SAR_SOUR_TYPE);
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_3,
			RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_EN);

		/* Pulse the combo-jack trigger and poll for a type result. */
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
			RT5682S_TRIG_JD_MASK, RT5682S_TRIG_JD_LOW);
		usleep_range(45000, 50000);
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
			RT5682S_TRIG_JD_MASK, RT5682S_TRIG_JD_HIGH);

		count = 0;
		do {
			usleep_range(10000, 15000);
			val = snd_soc_component_read(component, RT5682S_CBJ_CTRL_2)
				& RT5682S_JACK_TYPE_MASK;
			count++;
		} while (val == 0 && count < 50);

		dev_dbg(component->dev, "%s, val=%d, count=%d\n", __func__, val, count);

		switch (val) {
		case 0x1:
		case 0x2:
			jack_type = SND_JACK_HEADSET;
			snd_soc_component_write(component, RT5682S_SAR_IL_CMD_3, 0x024c);
			snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
				RT5682S_FAST_OFF_MASK, RT5682S_FAST_OFF_EN);
			snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1,
				RT5682S_SAR_SEL_MB1_2_MASK, val << RT5682S_SAR_SEL_MB1_2_SFT);
			rt5682s_enable_push_button_irq(component);
			rt5682s_sar_power_mode(component, SAR_PWR_SAVING);
			break;
		default:
			jack_type = SND_JACK_HEADPHONE;
			break;
		}

		snd_soc_component_update_bits(component, RT5682S_HP_CHARGE_PUMP_2,
			RT5682S_OSW_L_MASK | RT5682S_OSW_R_MASK,
			RT5682S_OSW_L_EN | RT5682S_OSW_R_EN);
		usleep_range(35000, 40000);
	} else {
		rt5682s_sar_power_mode(component, SAR_PWR_OFF);
		rt5682s_disable_push_button_irq(component);
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
			RT5682S_TRIG_JD_MASK, RT5682S_TRIG_JD_LOW);

		/* Keep VREF2/MB powered while the DAI wclk is still in use. */
		if (!rt5682s->wclk_enabled) {
			snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1,
				RT5682S_PWR_VREF2 | RT5682S_PWR_MB, 0);
		}

		snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_3,
			RT5682S_PWR_CBJ, 0);
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1,
			RT5682S_FAST_OFF_MASK, RT5682S_FAST_OFF_DIS);
		snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_3,
			RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_DIS);

		jack_type = 0;
	}

	dev_dbg(component->dev, "jack_type = %d\n", jack_type);

	return jack_type;
}

/* Deferred work: classify the jack state and report jack/button events. */
static void rt5682s_jack_detect_handler(struct work_struct *work)
{
	struct rt5682s_priv *rt5682s =
		container_of(work, struct rt5682s_priv, jack_detect_work.work);
	struct snd_soc_dapm_context *dapm;
	int val, btn_type;

	if (!rt5682s->component ||
	    !snd_soc_card_is_instantiated(rt5682s->component->card)) {
		/* card not yet ready, try later */
		mod_delayed_work(system_power_efficient_wq,
			&rt5682s->jack_detect_work, msecs_to_jiffies(15));
		return;
	}

	dapm = snd_soc_component_get_dapm(rt5682s->component);

	/* Lock order here matches the rest of the driver: dapm, calibrate, wclk. */
	snd_soc_dapm_mutex_lock(dapm);
	mutex_lock(&rt5682s->calibrate_mutex);
	mutex_lock(&rt5682s->wclk_mutex);

	val = snd_soc_component_read(rt5682s->component, RT5682S_AJD1_CTRL)
		& RT5682S_JDH_RS_MASK;

	if (!val) {
		/* jack in */
		if (rt5682s->jack_type == 0) {
			/* jack was out, report jack type */
			rt5682s->jack_type = rt5682s_headset_detect(rt5682s->component, 1);
			rt5682s->irq_work_delay_time = 0;
		} else if ((rt5682s->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
			/* jack is already in, report button event */
			rt5682s->jack_type = SND_JACK_HEADSET;
			btn_type = rt5682s_button_detect(rt5682s->component);
			/*
			 * rt5682s can report three kinds of button behavior,
			 * one click, double click and hold. However,
			 * currently we will report button pressed/released
			 * event. So all the three button behaviors are
			 * treated as button pressed.
			 */
			switch (btn_type) {
			case 0x8000:
			case 0x4000:
			case 0x2000:
				rt5682s->jack_type |= SND_JACK_BTN_0;
				break;
			case 0x1000:
			case 0x0800:
			case 0x0400:
				rt5682s->jack_type |= SND_JACK_BTN_1;
				break;
			case 0x0200:
			case 0x0100:
			case 0x0080:
				rt5682s->jack_type |= SND_JACK_BTN_2;
				break;
			case 0x0040:
			case 0x0020:
			case 0x0010:
				rt5682s->jack_type |= SND_JACK_BTN_3;
				break;
			case 0x0000: /* unpressed */
				break;
			default:
				dev_err(rt5682s->component->dev,
					"Unexpected button code 0x%04x\n", btn_type);
				break;
			}
		}
	} else {
		/* jack out */
		rt5682s->jack_type = rt5682s_headset_detect(rt5682s->component, 0);
		rt5682s->irq_work_delay_time = 50;
	}

	mutex_unlock(&rt5682s->wclk_mutex);
	mutex_unlock(&rt5682s->calibrate_mutex);
	snd_soc_dapm_mutex_unlock(dapm);

	snd_soc_jack_report(rt5682s->hs_jack, rt5682s->jack_type,
		SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
		SND_JACK_BTN_2 | SND_JACK_BTN_3);

	/* While a button is down, poll for the matching release via jd_check. */
	if (rt5682s->jack_type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
		SND_JACK_BTN_2 | SND_JACK_BTN_3))
		schedule_delayed_work(&rt5682s->jd_check_work, 0);
	else
		cancel_delayed_work_sync(&rt5682s->jd_check_work);
}

/* Periodic poll that catches a jack removal while buttons are pressed. */
static void rt5682s_jd_check_handler(struct work_struct *work)
{
	struct rt5682s_priv *rt5682s =
		container_of(work, struct rt5682s_priv, jd_check_work.work);

	if (snd_soc_component_read(rt5682s->component, RT5682S_AJD1_CTRL)
		& RT5682S_JDH_RS_MASK) {
		/* jack out */
		schedule_delayed_work(&rt5682s->jack_detect_work, 0);
	} else {
		schedule_delayed_work(&rt5682s->jd_check_work, 500);
	}
}

/* IRQ handler: defer all work to rt5682s_jack_detect_handler(). */
static irqreturn_t rt5682s_irq(int irq, void *data)
{
	struct rt5682s_priv *rt5682s = data;

	mod_delayed_work(system_power_efficient_wq, &rt5682s->jack_detect_work,
		msecs_to_jiffies(rt5682s->irq_work_delay_time));

	return IRQ_HANDLED;
}

/*
 * ASoC set_jack_detect callback: bind @hs_jack and configure (or tear down,
 * when @hs_jack is NULL) the jack-detection hardware per pdata.jd_src.
 */
static int rt5682s_set_jack_detect(struct snd_soc_component *component,
		struct snd_soc_jack *hs_jack, void *data)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);
	int btndet_delay = 16;

	rt5682s->hs_jack = hs_jack;

	if (!hs_jack) {
		regmap_update_bits(rt5682s->regmap, RT5682S_IRQ_CTRL_2,
			RT5682S_JD1_EN_MASK, RT5682S_JD1_DIS);
		regmap_update_bits(rt5682s->regmap, RT5682S_RC_CLK_CTRL,
			RT5682S_POW_JDH, 0);
		cancel_delayed_work_sync(&rt5682s->jack_detect_work);

		return 0;
	}

	switch (rt5682s->pdata.jd_src) {
	case RT5682S_JD1:
		regmap_update_bits(rt5682s->regmap, RT5682S_CBJ_CTRL_5,
			RT5682S_JD_FAST_OFF_SRC_MASK, RT5682S_JD_FAST_OFF_SRC_JDH);
		regmap_update_bits(rt5682s->regmap, RT5682S_CBJ_CTRL_2,
			RT5682S_EXT_JD_SRC, RT5682S_EXT_JD_SRC_MANUAL);
		regmap_update_bits(rt5682s->regmap, RT5682S_CBJ_CTRL_1,
			RT5682S_EMB_JD_MASK | RT5682S_DET_TYPE |
			RT5682S_POL_FAST_OFF_MASK | RT5682S_MIC_CAP_MASK,
			RT5682S_EMB_JD_EN | RT5682S_DET_TYPE |
			RT5682S_POL_FAST_OFF_HIGH | RT5682S_MIC_CAP_HS);
		regmap_update_bits(rt5682s->regmap, RT5682S_SAR_IL_CMD_1,
			RT5682S_SAR_POW_MASK, RT5682S_SAR_POW_EN);
		regmap_update_bits(rt5682s->regmap, RT5682S_GPIO_CTRL_1,
			RT5682S_GP1_PIN_MASK, RT5682S_GP1_PIN_IRQ);
		regmap_update_bits(rt5682s->regmap, RT5682S_PWR_ANLG_3,
			RT5682S_PWR_BGLDO, RT5682S_PWR_BGLDO);
		regmap_update_bits(rt5682s->regmap, RT5682S_PWR_ANLG_2,
			RT5682S_PWR_JD_MASK, RT5682S_PWR_JD_ENABLE);
		regmap_update_bits(rt5682s->regmap, RT5682S_RC_CLK_CTRL,
			RT5682S_POW_IRQ | RT5682S_POW_JDH,
			RT5682S_POW_IRQ | RT5682S_POW_JDH);
		regmap_update_bits(rt5682s->regmap, RT5682S_IRQ_CTRL_2,
			RT5682S_JD1_EN_MASK | RT5682S_JD1_POL_MASK,
			RT5682S_JD1_EN | RT5682S_JD1_POL_NOR);
		/* Same hold/click windows for all four button-detect slots. */
		regmap_update_bits(rt5682s->regmap, RT5682S_4BTN_IL_CMD_4,
			RT5682S_4BTN_IL_HOLD_WIN_MASK | RT5682S_4BTN_IL_CLICK_WIN_MASK,
			(btndet_delay << RT5682S_4BTN_IL_HOLD_WIN_SFT | btndet_delay));
		regmap_update_bits(rt5682s->regmap, RT5682S_4BTN_IL_CMD_5,
			RT5682S_4BTN_IL_HOLD_WIN_MASK | RT5682S_4BTN_IL_CLICK_WIN_MASK,
			(btndet_delay << RT5682S_4BTN_IL_HOLD_WIN_SFT | btndet_delay));
		regmap_update_bits(rt5682s->regmap, RT5682S_4BTN_IL_CMD_6,
			RT5682S_4BTN_IL_HOLD_WIN_MASK | RT5682S_4BTN_IL_CLICK_WIN_MASK,
			(btndet_delay << RT5682S_4BTN_IL_HOLD_WIN_SFT | btndet_delay));
		regmap_update_bits(rt5682s->regmap, RT5682S_4BTN_IL_CMD_7,
			RT5682S_4BTN_IL_HOLD_WIN_MASK | RT5682S_4BTN_IL_CLICK_WIN_MASK,
			(btndet_delay << RT5682S_4BTN_IL_HOLD_WIN_SFT | btndet_delay));

		mod_delayed_work(system_power_efficient_wq,
			&rt5682s->jack_detect_work, msecs_to_jiffies(250));
		break;

	case RT5682S_JD_NULL:
		regmap_update_bits(rt5682s->regmap, RT5682S_IRQ_CTRL_2,
			RT5682S_JD1_EN_MASK, RT5682S_JD1_DIS);
		regmap_update_bits(rt5682s->regmap, RT5682S_RC_CLK_CTRL,
			RT5682S_POW_JDH, 0);
		break;

	default:
		dev_warn(component->dev, "Wrong JD source\n");
		break;
	}

	return 0;
}

static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -9562, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
static const DECLARE_TLV_DB_SCALE(cbj_bst_tlv, -1200, 150, 0);

static const struct snd_kcontrol_new rt5682s_snd_controls[] = {
	/* DAC Digital Volume */
	SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682S_DAC1_DIG_VOL,
		RT5682S_L_VOL_SFT + 1, RT5682S_R_VOL_SFT + 1, 127, 0, dac_vol_tlv),

	/* CBJ Boost Volume */
	SOC_SINGLE_TLV("CBJ Boost Volume", RT5682S_REC_MIXER,
		RT5682S_BST_CBJ_SFT, 35, 0, cbj_bst_tlv),

	/* ADC Digital Volume Control */
	SOC_DOUBLE("STO1 ADC Capture Switch", RT5682S_STO1_ADC_DIG_VOL,
		RT5682S_L_MUTE_SFT, RT5682S_R_MUTE_SFT, 1, 1),
	SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682S_STO1_ADC_DIG_VOL,
		RT5682S_L_VOL_SFT + 1, RT5682S_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),

	/* ADC Boost Volume Control */
	SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682S_STO1_ADC_BOOST,
		RT5682S_STO1_ADC_L_BST_SFT, RT5682S_STO1_ADC_R_BST_SFT,
		3, 0, adc_bst_tlv),
};

/**
 * rt5682s_sel_asrc_clk_src - select ASRC clock source for a set of filters
 * @component: SoC audio component device.
 * @filter_mask: mask of filters.
 * @clk_src: clock source
 *
 * The ASRC function is for asynchronous MCLK and LRCK. Also, since RT5682S can
 * only support standard 32fs or 64fs i2s format, ASRC should be enabled to
 * support special i2s clock format such as Intel's 100fs(100 * sampling rate).
* ASRC function will track i2s clock and generate a corresponding system clock * for codec. This function provides an API to select the clock source for a * set of filters specified by the mask. And the component driver will turn on * ASRC for these filters if ASRC is selected as their clock source. */ int rt5682s_sel_asrc_clk_src(struct snd_soc_component *component, unsigned int filter_mask, unsigned int clk_src) { switch (clk_src) { case RT5682S_CLK_SEL_SYS: case RT5682S_CLK_SEL_I2S1_ASRC: case RT5682S_CLK_SEL_I2S2_ASRC: break; default: return -EINVAL; } if (filter_mask & RT5682S_DA_STEREO1_FILTER) { snd_soc_component_update_bits(component, RT5682S_PLL_TRACK_2, RT5682S_FILTER_CLK_SEL_MASK, clk_src << RT5682S_FILTER_CLK_SEL_SFT); } if (filter_mask & RT5682S_AD_STEREO1_FILTER) { snd_soc_component_update_bits(component, RT5682S_PLL_TRACK_3, RT5682S_FILTER_CLK_SEL_MASK, clk_src << RT5682S_FILTER_CLK_SEL_SFT); } snd_soc_component_update_bits(component, RT5682S_PLL_TRACK_11, RT5682S_ASRCIN_AUTO_CLKOUT_MASK, RT5682S_ASRCIN_AUTO_CLKOUT_EN); return 0; } EXPORT_SYMBOL_GPL(rt5682s_sel_asrc_clk_src); static int rt5682s_div_sel(struct rt5682s_priv *rt5682s, int target, const int div[], int size) { int i; if (rt5682s->sysclk < target) { dev_err(rt5682s->component->dev, "sysclk rate %d is too low\n", rt5682s->sysclk); return 0; } for (i = 0; i < size - 1; i++) { dev_dbg(rt5682s->component->dev, "div[%d]=%d\n", i, div[i]); if (target * div[i] == rt5682s->sysclk) return i; if (target * div[i + 1] > rt5682s->sysclk) { dev_dbg(rt5682s->component->dev, "can't find div for sysclk %d\n", rt5682s->sysclk); return i; } } if (target * div[i] < rt5682s->sysclk) dev_err(rt5682s->component->dev, "sysclk rate %d is too high\n", rt5682s->sysclk); return size - 1; } static int get_clk_info(int sclk, int rate) { int i; static const int pd[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48}; if (sclk <= 0 || rate <= 0) return -EINVAL; rate = rate << 8; for (i = 0; i < ARRAY_SIZE(pd); i++) if (sclk == 
rate * pd[i]) return i; return -EINVAL; } /** * set_dmic_clk - Set parameter of dmic. * * @w: DAPM widget. * @kcontrol: The kcontrol of this widget. * @event: Event id. * * Choose dmic clock between 1MHz and 3MHz. * It is better for clock to approximate 3MHz. */ static int set_dmic_clk(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); int idx, dmic_clk_rate = 3072000; static const int div[] = {2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128}; if (rt5682s->pdata.dmic_clk_rate) dmic_clk_rate = rt5682s->pdata.dmic_clk_rate; idx = rt5682s_div_sel(rt5682s, dmic_clk_rate, div, ARRAY_SIZE(div)); snd_soc_component_update_bits(component, RT5682S_DMIC_CTRL_1, RT5682S_DMIC_CLK_MASK, idx << RT5682S_DMIC_CLK_SFT); return 0; } static int rt5682s_set_pllb_power(struct rt5682s_priv *rt5682s, int on) { struct snd_soc_component *component = rt5682s->component; if (on) { snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_3, RT5682S_PWR_LDO_PLLB | RT5682S_PWR_BIAS_PLLB | RT5682S_PWR_PLLB, RT5682S_PWR_LDO_PLLB | RT5682S_PWR_BIAS_PLLB | RT5682S_PWR_PLLB); snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_3, RT5682S_RSTB_PLLB, RT5682S_RSTB_PLLB); } else { snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_3, RT5682S_PWR_LDO_PLLB | RT5682S_PWR_BIAS_PLLB | RT5682S_RSTB_PLLB | RT5682S_PWR_PLLB, 0); } return 0; } static int set_pllb_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); int on = 0; if (rt5682s->wclk_enabled) return 0; if (SND_SOC_DAPM_EVENT_ON(event)) on = 1; rt5682s_set_pllb_power(rt5682s, on); return 0; } static void rt5682s_set_filter_clk(struct rt5682s_priv *rt5682s, int reg, int ref) { struct snd_soc_component 
*component = rt5682s->component; int idx; static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48}; static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48}; idx = rt5682s_div_sel(rt5682s, ref, div_f, ARRAY_SIZE(div_f)); snd_soc_component_update_bits(component, reg, RT5682S_FILTER_CLK_DIV_MASK, idx << RT5682S_FILTER_CLK_DIV_SFT); /* select over sample rate */ for (idx = 0; idx < ARRAY_SIZE(div_o); idx++) { if (rt5682s->sysclk <= 12288000 * div_o[idx]) break; } snd_soc_component_update_bits(component, RT5682S_ADDA_CLK_1, RT5682S_ADC_OSR_MASK | RT5682S_DAC_OSR_MASK, (idx << RT5682S_ADC_OSR_SFT) | (idx << RT5682S_DAC_OSR_SFT)); } static int set_filter_clk(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); int ref, reg, val; val = snd_soc_component_read(component, RT5682S_GPIO_CTRL_1) & RT5682S_GP4_PIN_MASK; if (w->shift == RT5682S_PWR_ADC_S1F_BIT && val == RT5682S_GP4_PIN_ADCDAT2) ref = 256 * rt5682s->lrck[RT5682S_AIF2]; else ref = 256 * rt5682s->lrck[RT5682S_AIF1]; if (w->shift == RT5682S_PWR_ADC_S1F_BIT) reg = RT5682S_PLL_TRACK_3; else reg = RT5682S_PLL_TRACK_2; rt5682s_set_filter_clk(rt5682s, reg, ref); return 0; } static int set_dmic_power(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); unsigned int delay = 50, val; if (rt5682s->pdata.dmic_delay) delay = rt5682s->pdata.dmic_delay; switch (event) { case SND_SOC_DAPM_POST_PMU: val = (snd_soc_component_read(component, RT5682S_GLB_CLK) & RT5682S_SCLK_SRC_MASK) >> RT5682S_SCLK_SRC_SFT; if (val == RT5682S_CLK_SRC_PLL1 || val == RT5682S_CLK_SRC_PLL2) snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1, RT5682S_PWR_VREF2 | RT5682S_PWR_MB, RT5682S_PWR_VREF2 | 
RT5682S_PWR_MB); /*Add delay to avoid pop noise*/ msleep(delay); break; case SND_SOC_DAPM_POST_PMD: if (!rt5682s->jack_type && !rt5682s->wclk_enabled) { snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1, RT5682S_PWR_VREF2 | RT5682S_PWR_MB, 0); } break; } return 0; } static void rt5682s_set_i2s(struct rt5682s_priv *rt5682s, int id, int on) { struct snd_soc_component *component = rt5682s->component; int pre_div; unsigned int p_reg, p_mask, p_sft; unsigned int c_reg, c_mask, c_sft; if (id == RT5682S_AIF1) { c_reg = RT5682S_ADDA_CLK_1; c_mask = RT5682S_I2S_M_D_MASK; c_sft = RT5682S_I2S_M_D_SFT; p_reg = RT5682S_PWR_DIG_1; p_mask = RT5682S_PWR_I2S1; p_sft = RT5682S_PWR_I2S1_BIT; } else { c_reg = RT5682S_I2S2_M_CLK_CTRL_1; c_mask = RT5682S_I2S2_M_D_MASK; c_sft = RT5682S_I2S2_M_D_SFT; p_reg = RT5682S_PWR_DIG_1; p_mask = RT5682S_PWR_I2S2; p_sft = RT5682S_PWR_I2S2_BIT; } if (on && rt5682s->master[id]) { pre_div = get_clk_info(rt5682s->sysclk, rt5682s->lrck[id]); if (pre_div < 0) { dev_err(component->dev, "get pre_div failed\n"); return; } dev_dbg(component->dev, "lrck is %dHz and pre_div is %d for iis %d master\n", rt5682s->lrck[id], pre_div, id); snd_soc_component_update_bits(component, c_reg, c_mask, pre_div << c_sft); } snd_soc_component_update_bits(component, p_reg, p_mask, on << p_sft); } static int set_i2s_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); int on = 0; if (SND_SOC_DAPM_EVENT_ON(event)) on = 1; if (!snd_soc_dapm_widget_name_cmp(w, "I2S1") && !rt5682s->wclk_enabled) rt5682s_set_i2s(rt5682s, RT5682S_AIF1, on); else if (!snd_soc_dapm_widget_name_cmp(w, "I2S2")) rt5682s_set_i2s(rt5682s, RT5682S_AIF2, on); return 0; } static int is_sys_clk_from_plla(struct snd_soc_dapm_widget *w, struct snd_soc_dapm_widget *sink) { struct snd_soc_component *component = 
snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); if ((rt5682s->sysclk_src == RT5682S_CLK_SRC_PLL1) || (rt5682s->sysclk_src == RT5682S_CLK_SRC_PLL2 && rt5682s->pll_comb == USE_PLLAB)) return 1; return 0; } static int is_sys_clk_from_pllb(struct snd_soc_dapm_widget *w, struct snd_soc_dapm_widget *sink) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); if (rt5682s->sysclk_src == RT5682S_CLK_SRC_PLL2) return 1; return 0; } static int is_using_asrc(struct snd_soc_dapm_widget *w, struct snd_soc_dapm_widget *sink) { unsigned int reg, sft, val; struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); switch (w->shift) { case RT5682S_ADC_STO1_ASRC_SFT: reg = RT5682S_PLL_TRACK_3; sft = RT5682S_FILTER_CLK_SEL_SFT; break; case RT5682S_DAC_STO1_ASRC_SFT: reg = RT5682S_PLL_TRACK_2; sft = RT5682S_FILTER_CLK_SEL_SFT; break; default: return 0; } val = (snd_soc_component_read(component, reg) >> sft) & 0xf; switch (val) { case RT5682S_CLK_SEL_I2S1_ASRC: case RT5682S_CLK_SEL_I2S2_ASRC: return 1; default: return 0; } } static int rt5682s_hp_amp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); switch (event) { case SND_SOC_DAPM_POST_PMU: snd_soc_component_update_bits(component, RT5682S_DEPOP_1, RT5682S_OUT_HP_L_EN | RT5682S_OUT_HP_R_EN, RT5682S_OUT_HP_L_EN | RT5682S_OUT_HP_R_EN); usleep_range(15000, 20000); snd_soc_component_update_bits(component, RT5682S_DEPOP_1, RT5682S_LDO_PUMP_EN | RT5682S_PUMP_EN | RT5682S_CAPLESS_L_EN | RT5682S_CAPLESS_R_EN, RT5682S_LDO_PUMP_EN | RT5682S_PUMP_EN | RT5682S_CAPLESS_L_EN | RT5682S_CAPLESS_R_EN); snd_soc_component_write(component, RT5682S_BIAS_CUR_CTRL_11, 0x6666); snd_soc_component_write(component, RT5682S_BIAS_CUR_CTRL_12, 0xa82a); snd_soc_component_update_bits(component, 
RT5682S_HP_CTRL_2, RT5682S_HPO_L_PATH_MASK | RT5682S_HPO_R_PATH_MASK | RT5682S_HPO_SEL_IP_EN_SW, RT5682S_HPO_L_PATH_EN | RT5682S_HPO_R_PATH_EN | RT5682S_HPO_IP_EN_GATING); usleep_range(5000, 10000); snd_soc_component_update_bits(component, RT5682S_HP_AMP_DET_CTL_1, RT5682S_CP_SW_SIZE_MASK, RT5682S_CP_SW_SIZE_L | RT5682S_CP_SW_SIZE_S); break; case SND_SOC_DAPM_POST_PMD: snd_soc_component_update_bits(component, RT5682S_HP_CTRL_2, RT5682S_HPO_L_PATH_MASK | RT5682S_HPO_R_PATH_MASK | RT5682S_HPO_SEL_IP_EN_SW, 0); snd_soc_component_update_bits(component, RT5682S_HP_AMP_DET_CTL_1, RT5682S_CP_SW_SIZE_MASK, RT5682S_CP_SW_SIZE_M); snd_soc_component_update_bits(component, RT5682S_DEPOP_1, RT5682S_LDO_PUMP_EN | RT5682S_PUMP_EN | RT5682S_CAPLESS_L_EN | RT5682S_CAPLESS_R_EN, 0); snd_soc_component_update_bits(component, RT5682S_DEPOP_1, RT5682S_OUT_HP_L_EN | RT5682S_OUT_HP_R_EN, 0); break; } return 0; } static int rt5682s_stereo1_adc_mixl_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); unsigned int delay = 0; if (rt5682s->pdata.amic_delay) delay = rt5682s->pdata.amic_delay; switch (event) { case SND_SOC_DAPM_POST_PMU: msleep(delay); snd_soc_component_update_bits(component, RT5682S_STO1_ADC_DIG_VOL, RT5682S_L_MUTE, 0); break; case SND_SOC_DAPM_PRE_PMD: snd_soc_component_update_bits(component, RT5682S_STO1_ADC_DIG_VOL, RT5682S_L_MUTE, RT5682S_L_MUTE); break; } return 0; } static int sar_power_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); if ((rt5682s->jack_type & SND_JACK_HEADSET) != SND_JACK_HEADSET) return 0; switch (event) { case SND_SOC_DAPM_PRE_PMU: rt5682s_sar_power_mode(component, SAR_PWR_NORMAL); break; case 
SND_SOC_DAPM_POST_PMD: rt5682s_sar_power_mode(component, SAR_PWR_SAVING); break; } return 0; } /* Interface data select */ static const char * const rt5682s_data_select[] = { "L/R", "R/L", "L/L", "R/R" }; static SOC_ENUM_SINGLE_DECL(rt5682s_if2_adc_enum, RT5682S_DIG_INF2_DATA, RT5682S_IF2_ADC_SEL_SFT, rt5682s_data_select); static SOC_ENUM_SINGLE_DECL(rt5682s_if1_01_adc_enum, RT5682S_TDM_ADDA_CTRL_1, RT5682S_IF1_ADC1_SEL_SFT, rt5682s_data_select); static SOC_ENUM_SINGLE_DECL(rt5682s_if1_23_adc_enum, RT5682S_TDM_ADDA_CTRL_1, RT5682S_IF1_ADC2_SEL_SFT, rt5682s_data_select); static SOC_ENUM_SINGLE_DECL(rt5682s_if1_45_adc_enum, RT5682S_TDM_ADDA_CTRL_1, RT5682S_IF1_ADC3_SEL_SFT, rt5682s_data_select); static SOC_ENUM_SINGLE_DECL(rt5682s_if1_67_adc_enum, RT5682S_TDM_ADDA_CTRL_1, RT5682S_IF1_ADC4_SEL_SFT, rt5682s_data_select); static const struct snd_kcontrol_new rt5682s_if2_adc_swap_mux = SOC_DAPM_ENUM("IF2 ADC Swap Mux", rt5682s_if2_adc_enum); static const struct snd_kcontrol_new rt5682s_if1_01_adc_swap_mux = SOC_DAPM_ENUM("IF1 01 ADC Swap Mux", rt5682s_if1_01_adc_enum); static const struct snd_kcontrol_new rt5682s_if1_23_adc_swap_mux = SOC_DAPM_ENUM("IF1 23 ADC Swap Mux", rt5682s_if1_23_adc_enum); static const struct snd_kcontrol_new rt5682s_if1_45_adc_swap_mux = SOC_DAPM_ENUM("IF1 45 ADC Swap Mux", rt5682s_if1_45_adc_enum); static const struct snd_kcontrol_new rt5682s_if1_67_adc_swap_mux = SOC_DAPM_ENUM("IF1 67 ADC Swap Mux", rt5682s_if1_67_adc_enum); /* Digital Mixer */ static const struct snd_kcontrol_new rt5682s_sto1_adc_l_mix[] = { SOC_DAPM_SINGLE("ADC1 Switch", RT5682S_STO1_ADC_MIXER, RT5682S_M_STO1_ADC_L1_SFT, 1, 1), SOC_DAPM_SINGLE("ADC2 Switch", RT5682S_STO1_ADC_MIXER, RT5682S_M_STO1_ADC_L2_SFT, 1, 1), }; static const struct snd_kcontrol_new rt5682s_sto1_adc_r_mix[] = { SOC_DAPM_SINGLE("ADC1 Switch", RT5682S_STO1_ADC_MIXER, RT5682S_M_STO1_ADC_R1_SFT, 1, 1), SOC_DAPM_SINGLE("ADC2 Switch", RT5682S_STO1_ADC_MIXER, RT5682S_M_STO1_ADC_R2_SFT, 1, 1), }; static const 
struct snd_kcontrol_new rt5682s_dac_l_mix[] = { SOC_DAPM_SINGLE("Stereo ADC Switch", RT5682S_AD_DA_MIXER, RT5682S_M_ADCMIX_L_SFT, 1, 1), SOC_DAPM_SINGLE("DAC1 Switch", RT5682S_AD_DA_MIXER, RT5682S_M_DAC1_L_SFT, 1, 1), }; static const struct snd_kcontrol_new rt5682s_dac_r_mix[] = { SOC_DAPM_SINGLE("Stereo ADC Switch", RT5682S_AD_DA_MIXER, RT5682S_M_ADCMIX_R_SFT, 1, 1), SOC_DAPM_SINGLE("DAC1 Switch", RT5682S_AD_DA_MIXER, RT5682S_M_DAC1_R_SFT, 1, 1), }; static const struct snd_kcontrol_new rt5682s_sto1_dac_l_mix[] = { SOC_DAPM_SINGLE("DAC L1 Switch", RT5682S_STO1_DAC_MIXER, RT5682S_M_DAC_L1_STO_L_SFT, 1, 1), SOC_DAPM_SINGLE("DAC R1 Switch", RT5682S_STO1_DAC_MIXER, RT5682S_M_DAC_R1_STO_L_SFT, 1, 1), }; static const struct snd_kcontrol_new rt5682s_sto1_dac_r_mix[] = { SOC_DAPM_SINGLE("DAC L1 Switch", RT5682S_STO1_DAC_MIXER, RT5682S_M_DAC_L1_STO_R_SFT, 1, 1), SOC_DAPM_SINGLE("DAC R1 Switch", RT5682S_STO1_DAC_MIXER, RT5682S_M_DAC_R1_STO_R_SFT, 1, 1), }; /* Analog Input Mixer */ static const struct snd_kcontrol_new rt5682s_rec1_l_mix[] = { SOC_DAPM_SINGLE("CBJ Switch", RT5682S_REC_MIXER, RT5682S_M_CBJ_RM1_L_SFT, 1, 1), }; static const struct snd_kcontrol_new rt5682s_rec1_r_mix[] = { SOC_DAPM_SINGLE("CBJ Switch", RT5682S_REC_MIXER, RT5682S_M_CBJ_RM1_R_SFT, 1, 1), }; /* STO1 ADC1 Source */ /* MX-26 [13] [5] */ static const char * const rt5682s_sto1_adc1_src[] = { "DAC MIX", "ADC" }; static SOC_ENUM_SINGLE_DECL(rt5682s_sto1_adc1l_enum, RT5682S_STO1_ADC_MIXER, RT5682S_STO1_ADC1L_SRC_SFT, rt5682s_sto1_adc1_src); static const struct snd_kcontrol_new rt5682s_sto1_adc1l_mux = SOC_DAPM_ENUM("Stereo1 ADC1L Source", rt5682s_sto1_adc1l_enum); static SOC_ENUM_SINGLE_DECL(rt5682s_sto1_adc1r_enum, RT5682S_STO1_ADC_MIXER, RT5682S_STO1_ADC1R_SRC_SFT, rt5682s_sto1_adc1_src); static const struct snd_kcontrol_new rt5682s_sto1_adc1r_mux = SOC_DAPM_ENUM("Stereo1 ADC1L Source", rt5682s_sto1_adc1r_enum); /* STO1 ADC Source */ /* MX-26 [11:10] [3:2] */ static const char * const 
rt5682s_sto1_adc_src[] = { "ADC1 L", "ADC1 R" }; static SOC_ENUM_SINGLE_DECL(rt5682s_sto1_adcl_enum, RT5682S_STO1_ADC_MIXER, RT5682S_STO1_ADCL_SRC_SFT, rt5682s_sto1_adc_src); static const struct snd_kcontrol_new rt5682s_sto1_adcl_mux = SOC_DAPM_ENUM("Stereo1 ADCL Source", rt5682s_sto1_adcl_enum); static SOC_ENUM_SINGLE_DECL(rt5682s_sto1_adcr_enum, RT5682S_STO1_ADC_MIXER, RT5682S_STO1_ADCR_SRC_SFT, rt5682s_sto1_adc_src); static const struct snd_kcontrol_new rt5682s_sto1_adcr_mux = SOC_DAPM_ENUM("Stereo1 ADCR Source", rt5682s_sto1_adcr_enum); /* STO1 ADC2 Source */ /* MX-26 [12] [4] */ static const char * const rt5682s_sto1_adc2_src[] = { "DAC MIX", "DMIC" }; static SOC_ENUM_SINGLE_DECL(rt5682s_sto1_adc2l_enum, RT5682S_STO1_ADC_MIXER, RT5682S_STO1_ADC2L_SRC_SFT, rt5682s_sto1_adc2_src); static const struct snd_kcontrol_new rt5682s_sto1_adc2l_mux = SOC_DAPM_ENUM("Stereo1 ADC2L Source", rt5682s_sto1_adc2l_enum); static SOC_ENUM_SINGLE_DECL(rt5682s_sto1_adc2r_enum, RT5682S_STO1_ADC_MIXER, RT5682S_STO1_ADC2R_SRC_SFT, rt5682s_sto1_adc2_src); static const struct snd_kcontrol_new rt5682s_sto1_adc2r_mux = SOC_DAPM_ENUM("Stereo1 ADC2R Source", rt5682s_sto1_adc2r_enum); /* MX-79 [6:4] I2S1 ADC data location */ static const unsigned int rt5682s_if1_adc_slot_values[] = { 0, 2, 4, 6, }; static const char * const rt5682s_if1_adc_slot_src[] = { "Slot 0", "Slot 2", "Slot 4", "Slot 6" }; static SOC_VALUE_ENUM_SINGLE_DECL(rt5682s_if1_adc_slot_enum, RT5682S_TDM_CTRL, RT5682S_TDM_ADC_LCA_SFT, RT5682S_TDM_ADC_LCA_MASK, rt5682s_if1_adc_slot_src, rt5682s_if1_adc_slot_values); static const struct snd_kcontrol_new rt5682s_if1_adc_slot_mux = SOC_DAPM_ENUM("IF1 ADC Slot location", rt5682s_if1_adc_slot_enum); /* Analog DAC L1 Source, Analog DAC R1 Source*/ /* MX-2B [4], MX-2B [0]*/ static const char * const rt5682s_alg_dac1_src[] = { "Stereo1 DAC Mixer", "DAC1" }; static SOC_ENUM_SINGLE_DECL(rt5682s_alg_dac_l1_enum, RT5682S_A_DAC1_MUX, RT5682S_A_DACL1_SFT, rt5682s_alg_dac1_src); static const 
struct snd_kcontrol_new rt5682s_alg_dac_l1_mux = SOC_DAPM_ENUM("Analog DAC L1 Source", rt5682s_alg_dac_l1_enum); static SOC_ENUM_SINGLE_DECL(rt5682s_alg_dac_r1_enum, RT5682S_A_DAC1_MUX, RT5682S_A_DACR1_SFT, rt5682s_alg_dac1_src); static const struct snd_kcontrol_new rt5682s_alg_dac_r1_mux = SOC_DAPM_ENUM("Analog DAC R1 Source", rt5682s_alg_dac_r1_enum); static const unsigned int rt5682s_adcdat_pin_values[] = { 1, 3, }; static const char * const rt5682s_adcdat_pin_select[] = { "ADCDAT1", "ADCDAT2", }; static SOC_VALUE_ENUM_SINGLE_DECL(rt5682s_adcdat_pin_enum, RT5682S_GPIO_CTRL_1, RT5682S_GP4_PIN_SFT, RT5682S_GP4_PIN_MASK, rt5682s_adcdat_pin_select, rt5682s_adcdat_pin_values); static const struct snd_kcontrol_new rt5682s_adcdat_pin_ctrl = SOC_DAPM_ENUM("ADCDAT", rt5682s_adcdat_pin_enum); static const struct snd_soc_dapm_widget rt5682s_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("LDO MB1", RT5682S_PWR_ANLG_3, RT5682S_PWR_LDO_MB1_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("LDO MB2", RT5682S_PWR_ANLG_3, RT5682S_PWR_LDO_MB2_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("LDO", RT5682S_PWR_ANLG_3, RT5682S_PWR_LDO_BIT, 0, NULL, 0), /* PLL Powers */ SND_SOC_DAPM_SUPPLY_S("PLLA_LDO", 0, RT5682S_PWR_ANLG_3, RT5682S_PWR_LDO_PLLA_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("PLLA_BIAS", 0, RT5682S_PWR_ANLG_3, RT5682S_PWR_BIAS_PLLA_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("PLLA", 0, RT5682S_PWR_ANLG_3, RT5682S_PWR_PLLA_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("PLLA_RST", 1, RT5682S_PWR_ANLG_3, RT5682S_RSTB_PLLA_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("PLLB", SND_SOC_NOPM, 0, 0, set_pllb_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), /* ASRC */ SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682S_PLL_TRACK_1, RT5682S_DAC_STO1_ASRC_SFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("ADC STO1 ASRC", 1, RT5682S_PLL_TRACK_1, RT5682S_ADC_STO1_ASRC_SFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("AD ASRC", 1, RT5682S_PLL_TRACK_1, RT5682S_AD_ASRC_SFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("DA ASRC", 1, RT5682S_PLL_TRACK_1, 
RT5682S_DA_ASRC_SFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("DMIC ASRC", 1, RT5682S_PLL_TRACK_1, RT5682S_DMIC_ASRC_SFT, 0, NULL, 0), /* Input Side */ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5682S_PWR_ANLG_2, RT5682S_PWR_MB1_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5682S_PWR_ANLG_2, RT5682S_PWR_MB2_BIT, 0, NULL, 0), /* Input Lines */ SND_SOC_DAPM_INPUT("DMIC L1"), SND_SOC_DAPM_INPUT("DMIC R1"), SND_SOC_DAPM_INPUT("IN1P"), SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0, set_dmic_clk, SND_SOC_DAPM_PRE_PMU), SND_SOC_DAPM_SUPPLY("DMIC1 Power", RT5682S_DMIC_CTRL_1, RT5682S_DMIC_1_EN_SFT, 0, set_dmic_power, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), /* Boost */ SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM, 0, 0, NULL, 0), /* REC Mixer */ SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5682s_rec1_l_mix, ARRAY_SIZE(rt5682s_rec1_l_mix)), SND_SOC_DAPM_MIXER("RECMIX1R", SND_SOC_NOPM, 0, 0, rt5682s_rec1_r_mix, ARRAY_SIZE(rt5682s_rec1_r_mix)), SND_SOC_DAPM_SUPPLY("RECMIX1L Power", RT5682S_CAL_REC, RT5682S_PWR_RM1_L_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("RECMIX1R Power", RT5682S_CAL_REC, RT5682S_PWR_RM1_R_BIT, 0, NULL, 0), /* ADCs */ SND_SOC_DAPM_ADC("ADC1 L", NULL, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_ADC("ADC1 R", NULL, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_SUPPLY("ADC1 L Power", RT5682S_PWR_DIG_1, RT5682S_PWR_ADC_L1_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("ADC1 R Power", RT5682S_PWR_DIG_1, RT5682S_PWR_ADC_R1_BIT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("ADC1 clock", RT5682S_CHOP_ADC, RT5682S_CKGEN_ADC1_SFT, 0, NULL, 0), /* ADC Mux */ SND_SOC_DAPM_MUX("Stereo1 ADC L1 Mux", SND_SOC_NOPM, 0, 0, &rt5682s_sto1_adc1l_mux), SND_SOC_DAPM_MUX("Stereo1 ADC R1 Mux", SND_SOC_NOPM, 0, 0, &rt5682s_sto1_adc1r_mux), SND_SOC_DAPM_MUX("Stereo1 ADC L2 Mux", SND_SOC_NOPM, 0, 0, &rt5682s_sto1_adc2l_mux), SND_SOC_DAPM_MUX("Stereo1 ADC R2 Mux", SND_SOC_NOPM, 0, 0, &rt5682s_sto1_adc2r_mux), SND_SOC_DAPM_MUX("Stereo1 ADC L Mux", SND_SOC_NOPM, 0, 0, &rt5682s_sto1_adcl_mux), SND_SOC_DAPM_MUX("Stereo1 ADC R 
Mux", SND_SOC_NOPM, 0, 0, &rt5682s_sto1_adcr_mux), SND_SOC_DAPM_MUX("IF1_ADC Mux", SND_SOC_NOPM, 0, 0, &rt5682s_if1_adc_slot_mux), /* ADC Mixer */ SND_SOC_DAPM_SUPPLY("ADC Stereo1 Filter", RT5682S_PWR_DIG_2, RT5682S_PWR_ADC_S1F_BIT, 0, set_filter_clk, SND_SOC_DAPM_PRE_PMU), SND_SOC_DAPM_MIXER_E("Stereo1 ADC MIXL", SND_SOC_NOPM, 0, 0, rt5682s_sto1_adc_l_mix, ARRAY_SIZE(rt5682s_sto1_adc_l_mix), rt5682s_stereo1_adc_mixl_event, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_MIXER("Stereo1 ADC MIXR", RT5682S_STO1_ADC_DIG_VOL, RT5682S_R_MUTE_SFT, 1, rt5682s_sto1_adc_r_mix, ARRAY_SIZE(rt5682s_sto1_adc_r_mix)), /* ADC PGA */ SND_SOC_DAPM_PGA("Stereo1 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0), /* Digital Interface */ SND_SOC_DAPM_SUPPLY("I2S1", SND_SOC_NOPM, 0, 0, set_i2s_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_SUPPLY("I2S2", SND_SOC_NOPM, 0, 0, set_i2s_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("IF1 DAC1 R", SND_SOC_NOPM, 0, 0, NULL, 0), /* Digital Interface Select */ SND_SOC_DAPM_MUX("IF1 01 ADC Swap Mux", SND_SOC_NOPM, 0, 0, &rt5682s_if1_01_adc_swap_mux), SND_SOC_DAPM_MUX("IF1 23 ADC Swap Mux", SND_SOC_NOPM, 0, 0, &rt5682s_if1_23_adc_swap_mux), SND_SOC_DAPM_MUX("IF1 45 ADC Swap Mux", SND_SOC_NOPM, 0, 0, &rt5682s_if1_45_adc_swap_mux), SND_SOC_DAPM_MUX("IF1 67 ADC Swap Mux", SND_SOC_NOPM, 0, 0, &rt5682s_if1_67_adc_swap_mux), SND_SOC_DAPM_MUX("IF2 ADC Swap Mux", SND_SOC_NOPM, 0, 0, &rt5682s_if2_adc_swap_mux), SND_SOC_DAPM_MUX("ADCDAT Mux", SND_SOC_NOPM, 0, 0, &rt5682s_adcdat_pin_ctrl), /* Audio Interface */ SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, RT5682S_I2S1_SDP, RT5682S_SEL_ADCDAT_SFT, 1), SND_SOC_DAPM_AIF_OUT("AIF2TX", "AIF2 Capture", 0, RT5682S_I2S2_SDP, RT5682S_I2S2_PIN_CFG_SFT, 1), SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), /* Output Side */ /* DAC mixer before sound effect */ SND_SOC_DAPM_MIXER("DAC1 
MIXL", SND_SOC_NOPM, 0, 0, rt5682s_dac_l_mix, ARRAY_SIZE(rt5682s_dac_l_mix)), SND_SOC_DAPM_MIXER("DAC1 MIXR", SND_SOC_NOPM, 0, 0, rt5682s_dac_r_mix, ARRAY_SIZE(rt5682s_dac_r_mix)), /* DAC channel Mux */ SND_SOC_DAPM_MUX("DAC L1 Source", SND_SOC_NOPM, 0, 0, &rt5682s_alg_dac_l1_mux), SND_SOC_DAPM_MUX("DAC R1 Source", SND_SOC_NOPM, 0, 0, &rt5682s_alg_dac_r1_mux), /* DAC Mixer */ SND_SOC_DAPM_SUPPLY("DAC Stereo1 Filter", RT5682S_PWR_DIG_2, RT5682S_PWR_DAC_S1F_BIT, 0, set_filter_clk, SND_SOC_DAPM_PRE_PMU), SND_SOC_DAPM_MIXER("Stereo1 DAC MIXL", SND_SOC_NOPM, 0, 0, rt5682s_sto1_dac_l_mix, ARRAY_SIZE(rt5682s_sto1_dac_l_mix)), SND_SOC_DAPM_MIXER("Stereo1 DAC MIXR", SND_SOC_NOPM, 0, 0, rt5682s_sto1_dac_r_mix, ARRAY_SIZE(rt5682s_sto1_dac_r_mix)), /* DACs */ SND_SOC_DAPM_DAC("DAC L1", NULL, RT5682S_PWR_DIG_1, RT5682S_PWR_DAC_L1_BIT, 0), SND_SOC_DAPM_DAC("DAC R1", NULL, RT5682S_PWR_DIG_1, RT5682S_PWR_DAC_R1_BIT, 0), /* HPO */ SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5682s_hp_amp_event, SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU), /* CLK DET */ SND_SOC_DAPM_SUPPLY("CLKDET SYS", RT5682S_CLK_DET, RT5682S_SYS_CLK_DET_SFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("CLKDET PLL1", RT5682S_CLK_DET, RT5682S_PLL1_CLK_DET_SFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MCLK0 DET PWR", RT5682S_PWR_ANLG_2, RT5682S_PWR_MCLK0_WD_BIT, 0, NULL, 0), /* SAR */ SND_SOC_DAPM_SUPPLY("SAR", SND_SOC_NOPM, 0, 0, sar_power_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), /* Output Lines */ SND_SOC_DAPM_OUTPUT("HPOL"), SND_SOC_DAPM_OUTPUT("HPOR"), }; static const struct snd_soc_dapm_route rt5682s_dapm_routes[] = { /*PLL*/ {"ADC Stereo1 Filter", NULL, "PLLA", is_sys_clk_from_plla}, {"ADC Stereo1 Filter", NULL, "PLLB", is_sys_clk_from_pllb}, {"DAC Stereo1 Filter", NULL, "PLLA", is_sys_clk_from_plla}, {"DAC Stereo1 Filter", NULL, "PLLB", is_sys_clk_from_pllb}, {"PLLA", NULL, "PLLA_LDO"}, {"PLLA", NULL, "PLLA_BIAS"}, {"PLLA", NULL, "PLLA_RST"}, /*ASRC*/ {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", 
is_using_asrc}, {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, {"ADC STO1 ASRC", NULL, "AD ASRC"}, {"ADC STO1 ASRC", NULL, "DA ASRC"}, {"DAC STO1 ASRC", NULL, "AD ASRC"}, {"DAC STO1 ASRC", NULL, "DA ASRC"}, {"CLKDET SYS", NULL, "MCLK0 DET PWR"}, {"BST1 CBJ", NULL, "IN1P"}, {"BST1 CBJ", NULL, "SAR"}, {"RECMIX1L", "CBJ Switch", "BST1 CBJ"}, {"RECMIX1L", NULL, "RECMIX1L Power"}, {"RECMIX1R", "CBJ Switch", "BST1 CBJ"}, {"RECMIX1R", NULL, "RECMIX1R Power"}, {"ADC1 L", NULL, "RECMIX1L"}, {"ADC1 L", NULL, "ADC1 L Power"}, {"ADC1 L", NULL, "ADC1 clock"}, {"ADC1 R", NULL, "RECMIX1R"}, {"ADC1 R", NULL, "ADC1 R Power"}, {"ADC1 R", NULL, "ADC1 clock"}, {"DMIC L1", NULL, "DMIC CLK"}, {"DMIC L1", NULL, "DMIC1 Power"}, {"DMIC R1", NULL, "DMIC CLK"}, {"DMIC R1", NULL, "DMIC1 Power"}, {"DMIC CLK", NULL, "DMIC ASRC"}, {"Stereo1 ADC L Mux", "ADC1 L", "ADC1 L"}, {"Stereo1 ADC L Mux", "ADC1 R", "ADC1 R"}, {"Stereo1 ADC R Mux", "ADC1 L", "ADC1 L"}, {"Stereo1 ADC R Mux", "ADC1 R", "ADC1 R"}, {"Stereo1 ADC L1 Mux", "ADC", "Stereo1 ADC L Mux"}, {"Stereo1 ADC L1 Mux", "DAC MIX", "Stereo1 DAC MIXL"}, {"Stereo1 ADC L2 Mux", "DMIC", "DMIC L1"}, {"Stereo1 ADC L2 Mux", "DAC MIX", "Stereo1 DAC MIXL"}, {"Stereo1 ADC R1 Mux", "ADC", "Stereo1 ADC R Mux"}, {"Stereo1 ADC R1 Mux", "DAC MIX", "Stereo1 DAC MIXR"}, {"Stereo1 ADC R2 Mux", "DMIC", "DMIC R1"}, {"Stereo1 ADC R2 Mux", "DAC MIX", "Stereo1 DAC MIXR"}, {"Stereo1 ADC MIXL", "ADC1 Switch", "Stereo1 ADC L1 Mux"}, {"Stereo1 ADC MIXL", "ADC2 Switch", "Stereo1 ADC L2 Mux"}, {"Stereo1 ADC MIXL", NULL, "ADC Stereo1 Filter"}, {"Stereo1 ADC MIXR", "ADC1 Switch", "Stereo1 ADC R1 Mux"}, {"Stereo1 ADC MIXR", "ADC2 Switch", "Stereo1 ADC R2 Mux"}, {"Stereo1 ADC MIXR", NULL, "ADC Stereo1 Filter"}, {"Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXL"}, {"Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXR"}, {"IF1 01 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"}, {"IF1 01 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"}, {"IF1 01 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"}, {"IF1 
01 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"}, {"IF1 23 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"}, {"IF1 23 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"}, {"IF1 23 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"}, {"IF1 23 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"}, {"IF1 45 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"}, {"IF1 45 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"}, {"IF1 45 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"}, {"IF1 45 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"}, {"IF1 67 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"}, {"IF1 67 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"}, {"IF1 67 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"}, {"IF1 67 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"}, {"IF1_ADC Mux", "Slot 0", "IF1 01 ADC Swap Mux"}, {"IF1_ADC Mux", "Slot 2", "IF1 23 ADC Swap Mux"}, {"IF1_ADC Mux", "Slot 4", "IF1 45 ADC Swap Mux"}, {"IF1_ADC Mux", "Slot 6", "IF1 67 ADC Swap Mux"}, {"ADCDAT Mux", "ADCDAT1", "IF1_ADC Mux"}, {"AIF1TX", NULL, "I2S1"}, {"AIF1TX", NULL, "ADCDAT Mux"}, {"IF2 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"}, {"IF2 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"}, {"IF2 ADC Swap Mux", "L/L", "Stereo1 ADC MIX"}, {"IF2 ADC Swap Mux", "R/R", "Stereo1 ADC MIX"}, {"ADCDAT Mux", "ADCDAT2", "IF2 ADC Swap Mux"}, {"AIF2TX", NULL, "ADCDAT Mux"}, {"IF1 DAC1 L", NULL, "AIF1RX"}, {"IF1 DAC1 L", NULL, "I2S1"}, {"IF1 DAC1 L", NULL, "DAC Stereo1 Filter"}, {"IF1 DAC1 R", NULL, "AIF1RX"}, {"IF1 DAC1 R", NULL, "I2S1"}, {"IF1 DAC1 R", NULL, "DAC Stereo1 Filter"}, {"DAC1 MIXL", "Stereo ADC Switch", "Stereo1 ADC MIXL"}, {"DAC1 MIXL", "DAC1 Switch", "IF1 DAC1 L"}, {"DAC1 MIXR", "Stereo ADC Switch", "Stereo1 ADC MIXR"}, {"DAC1 MIXR", "DAC1 Switch", "IF1 DAC1 R"}, {"Stereo1 DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"}, {"Stereo1 DAC MIXL", "DAC R1 Switch", "DAC1 MIXR"}, {"Stereo1 DAC MIXR", "DAC R1 Switch", "DAC1 MIXR"}, {"Stereo1 DAC MIXR", "DAC L1 Switch", "DAC1 MIXL"}, {"DAC L1 Source", "DAC1", "DAC1 MIXL"}, {"DAC L1 Source", "Stereo1 DAC Mixer", "Stereo1 DAC MIXL"}, {"DAC R1 Source", "DAC1", "DAC1 MIXR"}, {"DAC R1 
Source", "Stereo1 DAC Mixer", "Stereo1 DAC MIXR"}, {"DAC L1", NULL, "DAC L1 Source"}, {"DAC R1", NULL, "DAC R1 Source"}, {"HP Amp", NULL, "DAC L1"}, {"HP Amp", NULL, "DAC R1"}, {"HP Amp", NULL, "CLKDET SYS"}, {"HP Amp", NULL, "SAR"}, {"HPOL", NULL, "HP Amp"}, {"HPOR", NULL, "HP Amp"}, }; static int rt5682s_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct snd_soc_component *component = dai->component; unsigned int cl, val = 0, tx_slotnum; if (tx_mask || rx_mask) snd_soc_component_update_bits(component, RT5682S_TDM_ADDA_CTRL_2, RT5682S_TDM_EN, RT5682S_TDM_EN); else snd_soc_component_update_bits(component, RT5682S_TDM_ADDA_CTRL_2, RT5682S_TDM_EN, 0); /* Tx slot configuration */ tx_slotnum = hweight_long(tx_mask); if (tx_slotnum) { if (tx_slotnum > slots) { dev_err(component->dev, "Invalid or oversized Tx slots.\n"); return -EINVAL; } val |= (tx_slotnum - 1) << RT5682S_TDM_ADC_DL_SFT; } switch (slots) { case 4: val |= RT5682S_TDM_TX_CH_4; val |= RT5682S_TDM_RX_CH_4; break; case 6: val |= RT5682S_TDM_TX_CH_6; val |= RT5682S_TDM_RX_CH_6; break; case 8: val |= RT5682S_TDM_TX_CH_8; val |= RT5682S_TDM_RX_CH_8; break; case 2: break; default: return -EINVAL; } snd_soc_component_update_bits(component, RT5682S_TDM_CTRL, RT5682S_TDM_TX_CH_MASK | RT5682S_TDM_RX_CH_MASK | RT5682S_TDM_ADC_DL_MASK, val); switch (slot_width) { case 8: if (tx_mask || rx_mask) return -EINVAL; cl = RT5682S_I2S1_TX_CHL_8 | RT5682S_I2S1_RX_CHL_8; break; case 16: val = RT5682S_TDM_CL_16; cl = RT5682S_I2S1_TX_CHL_16 | RT5682S_I2S1_RX_CHL_16; break; case 20: val = RT5682S_TDM_CL_20; cl = RT5682S_I2S1_TX_CHL_20 | RT5682S_I2S1_RX_CHL_20; break; case 24: val = RT5682S_TDM_CL_24; cl = RT5682S_I2S1_TX_CHL_24 | RT5682S_I2S1_RX_CHL_24; break; case 32: val = RT5682S_TDM_CL_32; cl = RT5682S_I2S1_TX_CHL_32 | RT5682S_I2S1_RX_CHL_32; break; default: return -EINVAL; } snd_soc_component_update_bits(component, RT5682S_TDM_TCON_CTRL_1, RT5682S_TDM_CL_MASK, 
val); snd_soc_component_update_bits(component, RT5682S_I2S1_SDP, RT5682S_I2S1_TX_CHL_MASK | RT5682S_I2S1_RX_CHL_MASK, cl); return 0; } static int rt5682s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component); unsigned int len_1 = 0, len_2 = 0; int frame_size; rt5682s->lrck[dai->id] = params_rate(params); frame_size = snd_soc_params_to_frame_size(params); if (frame_size < 0) { dev_err(component->dev, "Unsupported frame size: %d\n", frame_size); return -EINVAL; } switch (params_width(params)) { case 16: break; case 20: len_1 |= RT5682S_I2S1_DL_20; len_2 |= RT5682S_I2S2_DL_20; break; case 24: len_1 |= RT5682S_I2S1_DL_24; len_2 |= RT5682S_I2S2_DL_24; break; case 32: len_1 |= RT5682S_I2S1_DL_32; len_2 |= RT5682S_I2S2_DL_24; break; case 8: len_1 |= RT5682S_I2S2_DL_8; len_2 |= RT5682S_I2S2_DL_8; break; default: return -EINVAL; } switch (dai->id) { case RT5682S_AIF1: snd_soc_component_update_bits(component, RT5682S_I2S1_SDP, RT5682S_I2S1_DL_MASK, len_1); if (params_channels(params) == 1) /* mono mode */ snd_soc_component_update_bits(component, RT5682S_I2S1_SDP, RT5682S_I2S1_MONO_MASK, RT5682S_I2S1_MONO_EN); else snd_soc_component_update_bits(component, RT5682S_I2S1_SDP, RT5682S_I2S1_MONO_MASK, RT5682S_I2S1_MONO_DIS); break; case RT5682S_AIF2: snd_soc_component_update_bits(component, RT5682S_I2S2_SDP, RT5682S_I2S2_DL_MASK, len_2); if (params_channels(params) == 1) /* mono mode */ snd_soc_component_update_bits(component, RT5682S_I2S2_SDP, RT5682S_I2S2_MONO_MASK, RT5682S_I2S2_MONO_EN); else snd_soc_component_update_bits(component, RT5682S_I2S2_SDP, RT5682S_I2S2_MONO_MASK, RT5682S_I2S2_MONO_DIS); break; default: dev_err(component->dev, "Invalid dai->id: %d\n", dai->id); return -EINVAL; } return 0; } static int rt5682s_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_component 
*component = dai->component;
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);
	unsigned int reg_val = 0, tdm_ctrl = 0;

	/* Clock provider/consumer role; cached for the CCF clk_check() */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		rt5682s->master[dai->id] = 1;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		rt5682s->master[dai->id] = 0;
		break;
	default:
		return -EINVAL;
	}

	/* BCLK/LRCK polarity; LRCK inversion is only supported on AIF1 */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_NF:
		reg_val |= RT5682S_I2S_BP_INV;
		tdm_ctrl |= RT5682S_TDM_S_BP_INV;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		if (dai->id == RT5682S_AIF1)
			tdm_ctrl |= RT5682S_TDM_S_LP_INV | RT5682S_TDM_M_BP_INV;
		else
			return -EINVAL;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		if (dai->id == RT5682S_AIF1)
			tdm_ctrl |= RT5682S_TDM_S_BP_INV | RT5682S_TDM_S_LP_INV |
				RT5682S_TDM_M_BP_INV | RT5682S_TDM_M_LP_INV;
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Data format (I2S is the register default) */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		reg_val |= RT5682S_I2S_DF_LEFT;
		tdm_ctrl |= RT5682S_TDM_DF_LEFT;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		reg_val |= RT5682S_I2S_DF_PCM_A;
		tdm_ctrl |= RT5682S_TDM_DF_PCM_A;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		reg_val |= RT5682S_I2S_DF_PCM_B;
		tdm_ctrl |= RT5682S_TDM_DF_PCM_B;
		break;
	default:
		return -EINVAL;
	}

	switch (dai->id) {
	case RT5682S_AIF1:
		snd_soc_component_update_bits(component, RT5682S_I2S1_SDP,
			RT5682S_I2S_DF_MASK, reg_val);
		/* master[] is 0/1 and lands in the TDM_MS field */
		snd_soc_component_update_bits(component, RT5682S_TDM_TCON_CTRL_1,
			RT5682S_TDM_MS_MASK | RT5682S_TDM_S_BP_MASK |
			RT5682S_TDM_DF_MASK | RT5682S_TDM_M_BP_MASK |
			RT5682S_TDM_M_LP_MASK | RT5682S_TDM_S_LP_MASK,
			tdm_ctrl | rt5682s->master[dai->id]);
		break;
	case RT5682S_AIF2:
		if (rt5682s->master[dai->id] == 0)
			reg_val |= RT5682S_I2S2_MS_S;
		snd_soc_component_update_bits(component, RT5682S_I2S2_SDP,
			RT5682S_I2S2_MS_MASK | RT5682S_I2S_BP_MASK |
			RT5682S_I2S_DF_MASK, reg_val);
		break;
	default:
		dev_err(component->dev, "Invalid dai->id: %d\n", dai->id);
		return -EINVAL;
	}

	return 0;
}

static int
rt5682s_set_component_sysclk(struct snd_soc_component *component,
	int clk_id, int source, unsigned int freq, int dir)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);
	unsigned int src = 0;

	/* Nothing to do if the requested source/rate is already active */
	if (freq == rt5682s->sysclk && clk_id == rt5682s->sysclk_src)
		return 0;

	switch (clk_id) {
	case RT5682S_SCLK_S_MCLK:
		src = RT5682S_CLK_SRC_MCLK;
		break;
	case RT5682S_SCLK_S_PLL1:
		src = RT5682S_CLK_SRC_PLL1;
		break;
	case RT5682S_SCLK_S_PLL2:
		src = RT5682S_CLK_SRC_PLL2;
		break;
	case RT5682S_SCLK_S_RCCLK:
		src = RT5682S_CLK_SRC_RCCLK;
		break;
	default:
		dev_err(component->dev, "Invalid clock id (%d)\n", clk_id);
		return -EINVAL;
	}

	/* Route the same source to sysclk and both I2S master clock muxes */
	snd_soc_component_update_bits(component, RT5682S_GLB_CLK,
		RT5682S_SCLK_SRC_MASK, src << RT5682S_SCLK_SRC_SFT);
	snd_soc_component_update_bits(component, RT5682S_ADDA_CLK_1,
		RT5682S_I2S_M_CLK_SRC_MASK, src << RT5682S_I2S_M_CLK_SRC_SFT);
	snd_soc_component_update_bits(component, RT5682S_I2S2_M_CLK_CTRL_1,
		RT5682S_I2S2_M_CLK_SRC_MASK, src << RT5682S_I2S2_M_CLK_SRC_SFT);

	rt5682s->sysclk = freq;
	rt5682s->sysclk_src = clk_id;

	dev_dbg(component->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);

	return 0;
}

/*
 * PLLA dividers per (freq_in, freq_out) pair.
 * Fields: freq_in, freq_out, k, n, m, then bypass/option flags.
 */
static const struct pll_calc_map plla_table[] = {
	{2048000, 24576000, 0, 46, 2, true, false, false, false},
	{256000, 24576000, 0, 382, 2, true, false, false, false},
	{512000, 24576000, 0, 190, 2, true, false, false, false},
	{4096000, 24576000, 0, 22, 2, true, false, false, false},
	{1024000, 24576000, 0, 94, 2, true, false, false, false},
	{11289600, 22579200, 1, 22, 2, false, false, false, false},
	{1411200, 22579200, 0, 62, 2, true, false, false, false},
	{2822400, 22579200, 0, 30, 2, true, false, false, false},
	{12288000, 24576000, 1, 22, 2, false, false, false, false},
	{1536000, 24576000, 0, 62, 2, true, false, false, false},
	{3072000, 24576000, 0, 30, 2, true, false, false, false},
	{24576000, 49152000, 4, 22, 0, false, false, false, false},
	{3072000, 49152000, 0, 30, 0, true, false, false, false},
	{6144000, 49152000, 0, 30, 0, false, false, false, false},
	{49152000, 98304000, 10, 22, 0, false, true, false, false},
	{6144000, 98304000, 0, 30, 0, false, true, false, false},
	{12288000, 98304000, 1, 22, 0, false, true, false, false},
	/* 3.84 MHz outputs feed PLLB in the chained (USE_PLLAB) case */
	{48000000, 3840000, 10, 22, 23, false, false, false, false},
	{24000000, 3840000, 4, 22, 23, false, false, false, false},
	{19200000, 3840000, 3, 23, 23, false, false, false, false},
	{38400000, 3840000, 8, 23, 23, false, false, false, false},
};

/* PLLB dividers per (freq_in, freq_out) pair; same field layout as PLLA */
static const struct pll_calc_map pllb_table[] = {
	{48000000, 24576000, 8, 6, 3, false, false, false, false},
	{48000000, 22579200, 23, 12, 3, false, false, false, true},
	{24000000, 24576000, 3, 6, 3, false, false, false, false},
	{24000000, 22579200, 23, 26, 3, false, false, false, true},
	{19200000, 24576000, 2, 6, 3, false, false, false, false},
	{19200000, 22579200, 3, 5, 3, false, false, false, true},
	{38400000, 24576000, 6, 6, 3, false, false, false, false},
	{38400000, 22579200, 8, 5, 3, false, false, false, true},
	{3840000, 49152000, 0, 6, 0, true, false, false, false},
};

/*
 * find_pll_inter_combination - pick PLLA, PLLB or a chained PLLA->PLLB
 * configuration that converts f_in to f_out.
 *
 * Fills *a and/or *b with the matching table entries and returns
 * USE_PLLA, USE_PLLB or USE_PLLAB; -EINVAL if no combination exists.
 */
static int find_pll_inter_combination(unsigned int f_in, unsigned int f_out,
	struct pll_calc_map *a, struct pll_calc_map *b)
{
	int i, j;

	/* Look at PLLA table */
	for (i = 0; i < ARRAY_SIZE(plla_table); i++) {
		if (plla_table[i].freq_in == f_in &&
		    plla_table[i].freq_out == f_out) {
			memcpy(a, plla_table + i, sizeof(*a));
			return USE_PLLA;
		}
	}

	/* Look at PLLB table */
	for (i = 0; i < ARRAY_SIZE(pllb_table); i++) {
		if (pllb_table[i].freq_in == f_in &&
		    pllb_table[i].freq_out == f_out) {
			memcpy(b, pllb_table + i, sizeof(*b));
			return USE_PLLB;
		}
	}

	/* Find a combination of PLLA & PLLB chained through 3.84 MHz */
	for (i = ARRAY_SIZE(plla_table) - 1; i >= 0; i--) {
		if (plla_table[i].freq_in == f_in &&
		    plla_table[i].freq_out == 3840000) {
			for (j = ARRAY_SIZE(pllb_table) - 1; j >= 0; j--) {
				if (pllb_table[j].freq_in == 3840000 &&
				    pllb_table[j].freq_out == f_out) {
					memcpy(a, plla_table + i, sizeof(*a));
					memcpy(b, pllb_table + j, sizeof(*b));
					return USE_PLLAB;
				}
			}
		}
	}

	return -EINVAL;
}

static int
rt5682s_set_component_pll(struct snd_soc_component *component,
	int pll_id, int source, unsigned int freq_in, unsigned int freq_out)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);
	struct pll_calc_map a_map, b_map;

	/* Already configured with the same source and rates: nothing to do */
	if (source == rt5682s->pll_src[pll_id] &&
	    freq_in == rt5682s->pll_in[pll_id] &&
	    freq_out == rt5682s->pll_out[pll_id])
		return 0;

	/* A zero rate disables the PLL and falls back to MCLK as sysclk */
	if (!freq_in || !freq_out) {
		dev_dbg(component->dev, "PLL disabled\n");
		rt5682s->pll_in[pll_id] = 0;
		rt5682s->pll_out[pll_id] = 0;
		snd_soc_component_update_bits(component, RT5682S_GLB_CLK,
			RT5682S_SCLK_SRC_MASK,
			RT5682S_CLK_SRC_MCLK << RT5682S_SCLK_SRC_SFT);
		return 0;
	}

	switch (source) {
	case RT5682S_PLL_S_MCLK:
		snd_soc_component_update_bits(component, RT5682S_GLB_CLK,
			RT5682S_PLL_SRC_MASK, RT5682S_PLL_SRC_MCLK);
		break;
	case RT5682S_PLL_S_BCLK1:
		snd_soc_component_update_bits(component, RT5682S_GLB_CLK,
			RT5682S_PLL_SRC_MASK, RT5682S_PLL_SRC_BCLK1);
		break;
	default:
		dev_err(component->dev, "Unknown PLL Source %d\n", source);
		return -EINVAL;
	}

	rt5682s->pll_comb = find_pll_inter_combination(freq_in, freq_out,
		&a_map, &b_map);

	/* PLL1 may only use PLLA; PLL2 may use PLLB or the chained A->B path */
	if ((pll_id == RT5682S_PLL1 && rt5682s->pll_comb == USE_PLLA) ||
	    (pll_id == RT5682S_PLL2 && (rt5682s->pll_comb == USE_PLLB ||
	    rt5682s->pll_comb == USE_PLLAB))) {
		dev_dbg(component->dev,
			"Supported freq conversion for PLL%d:(%d->%d): %d\n",
			pll_id + 1, freq_in, freq_out, rt5682s->pll_comb);
	} else {
		dev_err(component->dev,
			"Unsupported freq conversion for PLL%d:(%d->%d): %d\n",
			pll_id + 1, freq_in, freq_out, rt5682s->pll_comb);
		return -EINVAL;
	}

	if (rt5682s->pll_comb == USE_PLLA || rt5682s->pll_comb == USE_PLLAB) {
		dev_dbg(component->dev,
			"PLLA: fin=%d fout=%d m_bp=%d k_bp=%d m=%d n=%d k=%d\n",
			a_map.freq_in, a_map.freq_out, a_map.m_bp, a_map.k_bp,
			(a_map.m_bp ? 0 : a_map.m), a_map.n,
			(a_map.k_bp ? 0 : a_map.k));
		snd_soc_component_update_bits(component, RT5682S_PLL_CTRL_1,
			RT5682S_PLLA_N_MASK, a_map.n);
		snd_soc_component_update_bits(component, RT5682S_PLL_CTRL_2,
			RT5682S_PLLA_M_MASK | RT5682S_PLLA_K_MASK,
			a_map.m << RT5682S_PLLA_M_SFT | a_map.k);
		snd_soc_component_update_bits(component, RT5682S_PLL_CTRL_6,
			RT5682S_PLLA_M_BP_MASK | RT5682S_PLLA_K_BP_MASK,
			a_map.m_bp << RT5682S_PLLA_M_BP_SFT |
			a_map.k_bp << RT5682S_PLLA_K_BP_SFT);
	}

	if (rt5682s->pll_comb == USE_PLLB || rt5682s->pll_comb == USE_PLLAB) {
		dev_dbg(component->dev,
			"PLLB: fin=%d fout=%d m_bp=%d k_bp=%d m=%d n=%d k=%d byp_ps=%d sel_ps=%d\n",
			b_map.freq_in, b_map.freq_out, b_map.m_bp, b_map.k_bp,
			(b_map.m_bp ? 0 : b_map.m), b_map.n,
			(b_map.k_bp ? 0 : b_map.k), b_map.byp_ps, b_map.sel_ps);
		snd_soc_component_update_bits(component, RT5682S_PLL_CTRL_3,
			RT5682S_PLLB_N_MASK, b_map.n);
		snd_soc_component_update_bits(component, RT5682S_PLL_CTRL_4,
			RT5682S_PLLB_M_MASK | RT5682S_PLLB_K_MASK,
			b_map.m << RT5682S_PLLB_M_SFT | b_map.k);
		snd_soc_component_update_bits(component, RT5682S_PLL_CTRL_6,
			RT5682S_PLLB_SEL_PS_MASK | RT5682S_PLLB_BYP_PS_MASK |
			RT5682S_PLLB_M_BP_MASK | RT5682S_PLLB_K_BP_MASK,
			b_map.sel_ps << RT5682S_PLLB_SEL_PS_SFT |
			b_map.byp_ps << RT5682S_PLLB_BYP_PS_SFT |
			b_map.m_bp << RT5682S_PLLB_M_BP_SFT |
			b_map.k_bp << RT5682S_PLLB_K_BP_SFT);
	}

	/* Standalone PLLB takes its input from the dedicated DFIN path */
	if (rt5682s->pll_comb == USE_PLLB)
		snd_soc_component_update_bits(component, RT5682S_PLL_CTRL_7,
			RT5682S_PLLB_SRC_MASK, RT5682S_PLLB_SRC_DFIN);

	rt5682s->pll_in[pll_id] = freq_in;
	rt5682s->pll_out[pll_id] = freq_out;
	rt5682s->pll_src[pll_id] = source;

	return 0;
}

/*
 * rt5682s_set_bclk1_ratio - program the BCLK1:LRCK ratio (32/64/128/256)
 * on the TDM/I2S1 interface.
 */
static int rt5682s_set_bclk1_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	struct snd_soc_component *component = dai->component;
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);

	rt5682s->bclk[dai->id] = ratio;

	switch (ratio) {
	case 256:
		snd_soc_component_update_bits(component, RT5682S_TDM_TCON_CTRL_1,
			RT5682S_TDM_BCLK_MS1_MASK, RT5682S_TDM_BCLK_MS1_256);
		break;
	case 128:
snd_soc_component_update_bits(component, RT5682S_TDM_TCON_CTRL_1,
			RT5682S_TDM_BCLK_MS1_MASK, RT5682S_TDM_BCLK_MS1_128);
		break;
	case 64:
		snd_soc_component_update_bits(component, RT5682S_TDM_TCON_CTRL_1,
			RT5682S_TDM_BCLK_MS1_MASK, RT5682S_TDM_BCLK_MS1_64);
		break;
	case 32:
		snd_soc_component_update_bits(component, RT5682S_TDM_TCON_CTRL_1,
			RT5682S_TDM_BCLK_MS1_MASK, RT5682S_TDM_BCLK_MS1_32);
		break;
	default:
		dev_err(dai->dev, "Invalid bclk1 ratio %d\n", ratio);
		return -EINVAL;
	}

	return 0;
}

/*
 * rt5682s_set_bclk2_ratio - program the BCLK2:LRCK ratio; I2S2 only
 * supports 32 or 64.
 */
static int rt5682s_set_bclk2_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	struct snd_soc_component *component = dai->component;
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);

	rt5682s->bclk[dai->id] = ratio;

	switch (ratio) {
	case 64:
		snd_soc_component_update_bits(component, RT5682S_ADDA_CLK_2,
			RT5682S_I2S2_BCLK_MS2_MASK, RT5682S_I2S2_BCLK_MS2_64);
		break;
	case 32:
		snd_soc_component_update_bits(component, RT5682S_ADDA_CLK_2,
			RT5682S_I2S2_BCLK_MS2_MASK, RT5682S_I2S2_BCLK_MS2_32);
		break;
	default:
		dev_err(dai->dev, "Invalid bclk2 ratio %d\n", ratio);
		return -EINVAL;
	}

	return 0;
}

/*
 * rt5682s_set_bias_level - DAPM bias transitions: gate the internal LDO
 * and digital clock gating as the component moves between power states.
 */
static int rt5682s_set_bias_level(struct snd_soc_component *component,
	enum snd_soc_bias_level level)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);

	switch (level) {
	case SND_SOC_BIAS_PREPARE:
		regmap_update_bits(rt5682s->regmap, RT5682S_PWR_DIG_1,
			RT5682S_PWR_LDO, RT5682S_PWR_LDO);
		break;
	case SND_SOC_BIAS_STANDBY:
		/* Coming up from OFF: re-enable digital gate control */
		if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
			regmap_update_bits(rt5682s->regmap, RT5682S_PWR_DIG_1,
				RT5682S_DIG_GATE_CTRL, RT5682S_DIG_GATE_CTRL);
		break;
	case SND_SOC_BIAS_OFF:
		regmap_update_bits(rt5682s->regmap, RT5682S_PWR_DIG_1,
			RT5682S_PWR_LDO, 0);
		/* Keep the gate on while the CCF wclk consumer holds it */
		if (!rt5682s->wclk_enabled)
			regmap_update_bits(rt5682s->regmap, RT5682S_PWR_DIG_1,
				RT5682S_DIG_GATE_CTRL, 0);
		break;
	case SND_SOC_BIAS_ON:
		break;
	}

	return 0;
}

#ifdef CONFIG_COMMON_CLK
#define CLK_PLL2_FIN 48000000
#define CLK_48 48000
#define CLK_44 44100

static bool
rt5682s_clk_check(struct rt5682s_priv *rt5682s)
{
	/* The CCF clocks only work when the codec drives AIF1 (master) */
	if (!rt5682s->master[RT5682S_AIF1]) {
		dev_dbg(rt5682s->component->dev, "dai clk fmt not set correctly\n");
		return false;
	}
	return true;
}

/*
 * rt5682s_wclk_prepare - CCF prepare hook for the WCLK output: powers
 * analog references, the I2S1 port and PLLB.
 */
static int rt5682s_wclk_prepare(struct clk_hw *hw)
{
	struct rt5682s_priv *rt5682s =
		container_of(hw, struct rt5682s_priv,
			     dai_clks_hw[RT5682S_DAI_WCLK_IDX]);
	struct snd_soc_component *component = rt5682s->component;
	int ref, reg;

	if (!rt5682s_clk_check(rt5682s))
		return -EINVAL;

	mutex_lock(&rt5682s->wclk_mutex);

	/* Raise VREF2/micbias first, then switch to fast-charge mode */
	snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1,
		RT5682S_PWR_VREF2 | RT5682S_PWR_FV2 | RT5682S_PWR_MB,
		RT5682S_PWR_VREF2 | RT5682S_PWR_MB);
	usleep_range(15000, 20000);
	snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1,
		RT5682S_PWR_FV2, RT5682S_PWR_FV2);

	/* Set and power on I2S1 */
	snd_soc_component_update_bits(component, RT5682S_PWR_DIG_1,
		RT5682S_DIG_GATE_CTRL, RT5682S_DIG_GATE_CTRL);
	rt5682s_set_i2s(rt5682s, RT5682S_AIF1, 1);

	/* Only need to power on PLLB due to the rate set restriction */
	reg = RT5682S_PLL_TRACK_2;
	ref = 256 * rt5682s->lrck[RT5682S_AIF1];

	rt5682s_set_filter_clk(rt5682s, reg, ref);
	rt5682s_set_pllb_power(rt5682s, 1);

	rt5682s->wclk_enabled = 1;

	mutex_unlock(&rt5682s->wclk_mutex);

	return 0;
}

/*
 * rt5682s_wclk_unprepare - CCF unprepare hook: undoes wclk_prepare();
 * analog refs stay up while a jack is inserted (jack detect needs them).
 */
static void rt5682s_wclk_unprepare(struct clk_hw *hw)
{
	struct rt5682s_priv *rt5682s =
		container_of(hw, struct rt5682s_priv,
			     dai_clks_hw[RT5682S_DAI_WCLK_IDX]);
	struct snd_soc_component *component = rt5682s->component;

	if (!rt5682s_clk_check(rt5682s))
		return;

	mutex_lock(&rt5682s->wclk_mutex);

	if (!rt5682s->jack_type)
		snd_soc_component_update_bits(component, RT5682S_PWR_ANLG_1,
			RT5682S_PWR_VREF2 | RT5682S_PWR_FV2 | RT5682S_PWR_MB, 0);

	/* Power down I2S1 */
	rt5682s_set_i2s(rt5682s, RT5682S_AIF1, 0);
	snd_soc_component_update_bits(component, RT5682S_PWR_DIG_1,
		RT5682S_DIG_GATE_CTRL, 0);

	/* Power down PLLB */
	rt5682s_set_pllb_power(rt5682s, 0);

	rt5682s->wclk_enabled = 0;

	mutex_unlock(&rt5682s->wclk_mutex);
}

static unsigned long
rt5682s_wclk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct rt5682s_priv *rt5682s =
		container_of(hw, struct rt5682s_priv,
			     dai_clks_hw[RT5682S_DAI_WCLK_IDX]);
	struct snd_soc_component *component = rt5682s->component;
	const char * const clk_name = clk_hw_get_name(hw);

	if (!rt5682s_clk_check(rt5682s))
		return 0;
	/*
	 * Only accept to set wclk rate to 44.1k or 48kHz.
	 */
	if (rt5682s->lrck[RT5682S_AIF1] != CLK_48 &&
	    rt5682s->lrck[RT5682S_AIF1] != CLK_44) {
		dev_warn(component->dev, "%s: clk %s only support %d or %d Hz output\n",
			__func__, clk_name, CLK_44, CLK_48);
		return 0;
	}

	return rt5682s->lrck[RT5682S_AIF1];
}

static long rt5682s_wclk_round_rate(struct clk_hw *hw, unsigned long rate,
	unsigned long *parent_rate)
{
	struct rt5682s_priv *rt5682s =
		container_of(hw, struct rt5682s_priv,
			     dai_clks_hw[RT5682S_DAI_WCLK_IDX]);
	struct snd_soc_component *component = rt5682s->component;
	const char * const clk_name = clk_hw_get_name(hw);

	if (!rt5682s_clk_check(rt5682s))
		return -EINVAL;
	/*
	 * Only accept to set wclk rate to 44.1k or 48kHz.
	 * It will force to 48kHz if not both.
	 */
	if (rate != CLK_48 && rate != CLK_44) {
		dev_warn(component->dev, "%s: clk %s only support %d or %d Hz output\n",
			__func__, clk_name, CLK_44, CLK_48);
		rate = CLK_48;
	}

	return rate;
}

/*
 * rt5682s_wclk_set_rate - program PLL2 and sysclk so the WCLK output
 * runs at @rate (44.1 kHz or 48 kHz), assuming a 48 MHz parent.
 */
static int rt5682s_wclk_set_rate(struct clk_hw *hw, unsigned long rate,
	unsigned long parent_rate)
{
	struct rt5682s_priv *rt5682s =
		container_of(hw, struct rt5682s_priv,
			     dai_clks_hw[RT5682S_DAI_WCLK_IDX]);
	struct snd_soc_component *component = rt5682s->component;
	struct clk *parent_clk;
	const char * const clk_name = clk_hw_get_name(hw);
	unsigned int clk_pll2_fout;

	if (!rt5682s_clk_check(rt5682s))
		return -EINVAL;
	/*
	 * Whether the wclk's parent clk (mclk) exists or not, please ensure
	 * it is fixed or set to 48MHz before setting wclk rate. It's a
	 * temporary limitation. Only accept 48MHz clk as the clk provider.
	 *
	 * It will set the codec anyway by assuming mclk is 48MHz.
	 */
	parent_clk = clk_get_parent(hw->clk);
	if (!parent_clk)
		dev_warn(component->dev,
			"Parent mclk of wclk not acquired in driver. Please ensure mclk was provided as %d Hz.\n",
			CLK_PLL2_FIN);

	if (parent_rate != CLK_PLL2_FIN)
		dev_warn(component->dev, "clk %s only support %d Hz input\n",
			clk_name, CLK_PLL2_FIN);

	/*
	 * To achieve the rate conversion from 48MHz to 44.1k or 48kHz,
	 * PLL2 is needed.
	 */
	clk_pll2_fout = rate * 512;
	rt5682s_set_component_pll(component, RT5682S_PLL2, RT5682S_PLL_S_MCLK,
		CLK_PLL2_FIN, clk_pll2_fout);

	rt5682s_set_component_sysclk(component, RT5682S_SCLK_S_PLL2, 0,
		clk_pll2_fout, SND_SOC_CLOCK_IN);

	rt5682s->lrck[RT5682S_AIF1] = rate;

	return 0;
}

/*
 * rt5682s_bclk_recalc_rate - derive the BCLK rate from the programmed
 * BCLK:WCLK multiplier field.
 */
static unsigned long rt5682s_bclk_recalc_rate(struct clk_hw *hw,
	unsigned long parent_rate)
{
	struct rt5682s_priv *rt5682s =
		container_of(hw, struct rt5682s_priv,
			     dai_clks_hw[RT5682S_DAI_BCLK_IDX]);
	struct snd_soc_component *component = rt5682s->component;
	unsigned int bclks_per_wclk;

	bclks_per_wclk = snd_soc_component_read(component, RT5682S_TDM_TCON_CTRL_1);

	switch (bclks_per_wclk & RT5682S_TDM_BCLK_MS1_MASK) {
	case RT5682S_TDM_BCLK_MS1_256:
		return parent_rate * 256;
	case RT5682S_TDM_BCLK_MS1_128:
		return parent_rate * 128;
	case RT5682S_TDM_BCLK_MS1_64:
		return parent_rate * 64;
	case RT5682S_TDM_BCLK_MS1_32:
		return parent_rate * 32;
	default:
		return 0;
	}
}

/* Round a requested rate down to the nearest supported multiplier */
static unsigned long rt5682s_bclk_get_factor(unsigned long rate,
	unsigned long parent_rate)
{
	unsigned long factor;

	factor = rate / parent_rate;
	if (factor < 64)
		return 32;
	else if (factor < 128)
		return 64;
	else if (factor < 256)
		return 128;
	else
		return 256;
}

static long rt5682s_bclk_round_rate(struct clk_hw *hw, unsigned long rate,
	unsigned long *parent_rate)
{
	struct rt5682s_priv *rt5682s =
		container_of(hw, struct rt5682s_priv,
			     dai_clks_hw[RT5682S_DAI_BCLK_IDX]);
	unsigned long factor;

	if (!*parent_rate || !rt5682s_clk_check(rt5682s))
		return -EINVAL;

	/*
	 * BCLK rates are set as a multiplier of WCLK in HW.
	 * We don't allow changing the parent WCLK.
 We just do
	 * some rounding down based on the parent WCLK rate
	 * and find the appropriate multiplier of BCLK to
	 * get the rounded down BCLK value.
	 */
	factor = rt5682s_bclk_get_factor(rate, *parent_rate);

	return *parent_rate * factor;
}

/*
 * rt5682s_bclk_set_rate - CCF set_rate hook: translate the rate into a
 * BCLK:WCLK multiplier and program it via the AIF1 DAI op.
 */
static int rt5682s_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
	unsigned long parent_rate)
{
	struct rt5682s_priv *rt5682s =
		container_of(hw, struct rt5682s_priv,
			     dai_clks_hw[RT5682S_DAI_BCLK_IDX]);
	struct snd_soc_component *component = rt5682s->component;
	struct snd_soc_dai *dai;
	unsigned long factor;

	if (!rt5682s_clk_check(rt5682s))
		return -EINVAL;

	factor = rt5682s_bclk_get_factor(rate, parent_rate);

	for_each_component_dais(component, dai)
		if (dai->id == RT5682S_AIF1)
			return rt5682s_set_bclk1_ratio(dai, factor);

	dev_err(component->dev, "dai %d not found in component\n", RT5682S_AIF1);

	return -ENODEV;
}

/* clk_ops for the two exported DAI clocks (WCLK and its child BCLK) */
static const struct clk_ops rt5682s_dai_clk_ops[RT5682S_DAI_NUM_CLKS] = {
	[RT5682S_DAI_WCLK_IDX] = {
		.prepare = rt5682s_wclk_prepare,
		.unprepare = rt5682s_wclk_unprepare,
		.recalc_rate = rt5682s_wclk_recalc_rate,
		.round_rate = rt5682s_wclk_round_rate,
		.set_rate = rt5682s_wclk_set_rate,
	},
	[RT5682S_DAI_BCLK_IDX] = {
		.recalc_rate = rt5682s_bclk_recalc_rate,
		.round_rate = rt5682s_bclk_round_rate,
		.set_rate = rt5682s_bclk_set_rate,
	},
};

/*
 * rt5682s_register_dai_clks - register WCLK/BCLK with the common clock
 * framework: MCLK -> WCLK -> BCLK parent chain, exposed either as an OF
 * clock provider or via clkdev lookups.
 */
static int rt5682s_register_dai_clks(struct snd_soc_component *component)
{
	struct device *dev = component->dev;
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);
	struct rt5682s_platform_data *pdata = &rt5682s->pdata;
	struct clk_hw *dai_clk_hw;
	int i, ret;

	for (i = 0; i < RT5682S_DAI_NUM_CLKS; ++i) {
		struct clk_init_data init = { };
		struct clk_parent_data parent_data;
		const struct clk_hw *parent;

		dai_clk_hw = &rt5682s->dai_clks_hw[i];

		switch (i) {
		case RT5682S_DAI_WCLK_IDX:
			/* Make MCLK the parent of WCLK */
			if (rt5682s->mclk) {
				parent_data = (struct clk_parent_data){
					.fw_name = "mclk",
				};
				init.parent_data = &parent_data;
				init.num_parents = 1;
			}
			break;
		case RT5682S_DAI_BCLK_IDX:
			/* Make WCLK the parent of BCLK */
			parent = &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX];
			init.parent_hws = &parent;
			init.num_parents = 1;
			break;
		default:
			dev_err(dev, "Invalid clock index\n");
			return -EINVAL;
		}

		init.name = pdata->dai_clk_names[i];
		init.ops = &rt5682s_dai_clk_ops[i];
		init.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_GATE;
		dai_clk_hw->init = &init;

		ret = devm_clk_hw_register(dev, dai_clk_hw);
		if (ret) {
			dev_warn(dev, "Failed to register %s: %d\n", init.name, ret);
			return ret;
		}

		if (dev->of_node) {
			ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
				dai_clk_hw);
			if (ret)
				return ret;
		} else {
			ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw,
				init.name, dev_name(dev));
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Acquire the optional MCLK and set up the CCF clock outputs */
static int rt5682s_dai_probe_clks(struct snd_soc_component *component)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);
	int ret;

	/* Check if MCLK provided */
	rt5682s->mclk = devm_clk_get_optional(component->dev, "mclk");
	if (IS_ERR(rt5682s->mclk))
		return PTR_ERR(rt5682s->mclk);

	/* Register CCF DAI clock control */
	ret = rt5682s_register_dai_clks(component);
	if (ret)
		return ret;

	/* Initial setup for CCF */
	rt5682s->lrck[RT5682S_AIF1] = CLK_48;

	return 0;
}
#else
static inline int rt5682s_dai_probe_clks(struct snd_soc_component *component)
{
	return 0;
}
#endif /* CONFIG_COMMON_CLK */

static int rt5682s_probe(struct snd_soc_component *component)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);

	rt5682s->component = component;

	return rt5682s_dai_probe_clks(component);
}

static void rt5682s_remove(struct snd_soc_component *component)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);

	rt5682s_reset(rt5682s);
}

#ifdef CONFIG_PM
/* Suspend: quiesce jack work, record jack state, freeze the regcache */
static int rt5682s_suspend(struct snd_soc_component *component)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);

	if (rt5682s->irq)
		disable_irq(rt5682s->irq);
cancel_delayed_work_sync(&rt5682s->jack_detect_work);
	cancel_delayed_work_sync(&rt5682s->jd_check_work);
	if (rt5682s->hs_jack)
		rt5682s->jack_type = rt5682s_headset_detect(component, 0);

	regcache_cache_only(rt5682s->regmap, true);
	regcache_mark_dirty(rt5682s->regmap);

	return 0;
}

/* Resume: replay the regcache and re-arm jack detection */
static int rt5682s_resume(struct snd_soc_component *component)
{
	struct rt5682s_priv *rt5682s = snd_soc_component_get_drvdata(component);

	regcache_cache_only(rt5682s->regmap, false);
	regcache_sync(rt5682s->regmap);

	if (rt5682s->hs_jack) {
		mod_delayed_work(system_power_efficient_wq,
			&rt5682s->jack_detect_work, msecs_to_jiffies(0));
	}

	if (rt5682s->irq)
		enable_irq(rt5682s->irq);

	return 0;
}
#else
#define rt5682s_suspend NULL
#define rt5682s_resume NULL
#endif

static const struct snd_soc_dai_ops rt5682s_aif1_dai_ops = {
	.hw_params = rt5682s_hw_params,
	.set_fmt = rt5682s_set_dai_fmt,
	.set_tdm_slot = rt5682s_set_tdm_slot,
	.set_bclk_ratio = rt5682s_set_bclk1_ratio,
};

static const struct snd_soc_dai_ops rt5682s_aif2_dai_ops = {
	.hw_params = rt5682s_hw_params,
	.set_fmt = rt5682s_set_dai_fmt,
	.set_bclk_ratio = rt5682s_set_bclk2_ratio,
};

static const struct snd_soc_component_driver rt5682s_soc_component_dev = {
	.probe = rt5682s_probe,
	.remove = rt5682s_remove,
	.suspend = rt5682s_suspend,
	.resume = rt5682s_resume,
	.set_bias_level = rt5682s_set_bias_level,
	.controls = rt5682s_snd_controls,
	.num_controls = ARRAY_SIZE(rt5682s_snd_controls),
	.dapm_widgets = rt5682s_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(rt5682s_dapm_widgets),
	.dapm_routes = rt5682s_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(rt5682s_dapm_routes),
	.set_sysclk = rt5682s_set_component_sysclk,
	.set_pll = rt5682s_set_component_pll,
	.set_jack = rt5682s_set_jack_detect,
	.use_pmdown_time = 1,
	.endianness = 1,
};

/*
 * rt5682s_parse_dt - read optional DT/ACPI properties into pdata;
 * missing properties leave the existing defaults untouched.
 */
static int rt5682s_parse_dt(struct rt5682s_priv *rt5682s, struct device *dev)
{
	device_property_read_u32(dev, "realtek,dmic1-data-pin",
		&rt5682s->pdata.dmic1_data_pin);
	device_property_read_u32(dev, "realtek,dmic1-clk-pin",
&rt5682s->pdata.dmic1_clk_pin);
	device_property_read_u32(dev, "realtek,jd-src",
		&rt5682s->pdata.jd_src);
	device_property_read_u32(dev, "realtek,dmic-clk-rate-hz",
		&rt5682s->pdata.dmic_clk_rate);
	device_property_read_u32(dev, "realtek,dmic-delay-ms",
		&rt5682s->pdata.dmic_delay);
	device_property_read_u32(dev, "realtek,amic-delay-ms",
		&rt5682s->pdata.amic_delay);
	device_property_read_u32(dev, "realtek,ldo-sel",
		&rt5682s->pdata.ldo_dacref);

	if (device_property_read_string_array(dev, "clock-output-names",
		rt5682s->pdata.dai_clk_names, RT5682S_DAI_NUM_CLKS) < 0)
		dev_warn(dev, "Using default DAI clk names: %s, %s\n",
			rt5682s->pdata.dai_clk_names[RT5682S_DAI_WCLK_IDX],
			rt5682s->pdata.dai_clk_names[RT5682S_DAI_BCLK_IDX]);

	rt5682s->pdata.dmic_clk_driving_high =
		device_property_read_bool(dev, "realtek,dmic-clk-driving-high");

	return 0;
}

/*
 * rt5682s_calibrate - run the one-shot headphone amp calibration
 * sequence at probe time, then restore normal register settings.
 * Magic values come from the vendor calibration procedure.
 */
static void rt5682s_calibrate(struct rt5682s_priv *rt5682s)
{
	unsigned int count, value;

	mutex_lock(&rt5682s->calibrate_mutex);

	regmap_write(rt5682s->regmap, RT5682S_PWR_ANLG_1, 0xaa80);
	usleep_range(15000, 20000);
	regmap_write(rt5682s->regmap, RT5682S_PWR_ANLG_1, 0xfa80);
	regmap_write(rt5682s->regmap, RT5682S_PWR_DIG_1, 0x01c0);
	regmap_write(rt5682s->regmap, RT5682S_MICBIAS_2, 0x0380);
	regmap_write(rt5682s->regmap, RT5682S_GLB_CLK, 0x8000);
	regmap_write(rt5682s->regmap, RT5682S_ADDA_CLK_1, 0x1001);
	regmap_write(rt5682s->regmap, RT5682S_CHOP_DAC_2, 0x3030);
	regmap_write(rt5682s->regmap, RT5682S_CHOP_ADC, 0xb000);
	regmap_write(rt5682s->regmap, RT5682S_STO1_ADC_MIXER, 0x686c);
	regmap_write(rt5682s->regmap, RT5682S_CAL_REC, 0x5151);
	regmap_write(rt5682s->regmap, RT5682S_HP_CALIB_CTRL_2, 0x0321);
	regmap_write(rt5682s->regmap, RT5682S_HP_LOGIC_CTRL_2, 0x0004);
	regmap_write(rt5682s->regmap, RT5682S_HP_CALIB_CTRL_1, 0x7c00);
	regmap_write(rt5682s->regmap, RT5682S_HP_CALIB_CTRL_1, 0xfc00);

	/* Poll the busy bit for up to ~600 ms */
	for (count = 0; count < 60; count++) {
		regmap_read(rt5682s->regmap, RT5682S_HP_CALIB_ST_1, &value);
		if (!(value & 0x8000))
			break;

		usleep_range(10000, 10005);
	}

	if (count >= 60)
		dev_err(rt5682s->component->dev, "HP Calibration Failure\n");

	/* restore settings */
	regmap_write(rt5682s->regmap, RT5682S_MICBIAS_2, 0x0180);
	regmap_write(rt5682s->regmap, RT5682S_CAL_REC, 0x5858);
	regmap_write(rt5682s->regmap, RT5682S_STO1_ADC_MIXER, 0xc0c4);
	regmap_write(rt5682s->regmap, RT5682S_HP_CALIB_CTRL_2, 0x0320);
	regmap_write(rt5682s->regmap, RT5682S_PWR_DIG_1, 0x00c0);
	regmap_write(rt5682s->regmap, RT5682S_PWR_ANLG_1, 0x0800);
	regmap_write(rt5682s->regmap, RT5682S_GLB_CLK, 0x0000);

	mutex_unlock(&rt5682s->calibrate_mutex);
}

static const struct regmap_config rt5682s_regmap = {
	.reg_bits = 16,
	.val_bits = 16,
	.max_register = RT5682S_MAX_REG,
	.volatile_reg = rt5682s_volatile_register,
	.readable_reg = rt5682s_readable_register,
	.cache_type = REGCACHE_MAPLE,
	.reg_defaults = rt5682s_reg,
	.num_reg_defaults = ARRAY_SIZE(rt5682s_reg),
	.use_single_read = true,
	.use_single_write = true,
};

static struct snd_soc_dai_driver rt5682s_dai[] = {
	{
		.name = "rt5682s-aif1",
		.id = RT5682S_AIF1,
		.playback = {
			.stream_name = "AIF1 Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = RT5682S_STEREO_RATES,
			.formats = RT5682S_FORMATS,
		},
		.capture = {
			.stream_name = "AIF1 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = RT5682S_STEREO_RATES,
			.formats = RT5682S_FORMATS,
		},
		.ops = &rt5682s_aif1_dai_ops,
	},
	{
		.name = "rt5682s-aif2",
		.id = RT5682S_AIF2,
		.capture = {
			.stream_name = "AIF2 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = RT5682S_STEREO_RATES,
			.formats = RT5682S_FORMATS,
		},
		.ops = &rt5682s_aif2_dai_ops,
	},
};

/*
 * Devres teardown: disable supplies in reverse of the enable order,
 * with a short delay before cutting MICVDD.
 */
static void rt5682s_i2c_disable_regulators(void *data)
{
	struct rt5682s_priv *rt5682s = data;
	struct device *dev = regmap_get_device(rt5682s->regmap);
	int ret;

	ret = regulator_disable(rt5682s->supplies[RT5682S_SUPPLY_AVDD].consumer);
	if (ret)
		dev_err(dev, "Failed to disable supply AVDD: %d\n", ret);

	ret = regulator_disable(rt5682s->supplies[RT5682S_SUPPLY_DBVDD].consumer);
	if (ret)
		dev_err(dev, "Failed to disable supply DBVDD: %d\n", ret);

	ret = regulator_disable(rt5682s->supplies[RT5682S_SUPPLY_LDO1_IN].consumer);
	if (ret)
		dev_err(dev, "Failed to disable supply LDO1-IN: %d\n", ret);

	usleep_range(1000, 1500);

	ret = regulator_disable(rt5682s->supplies[RT5682S_SUPPLY_MICVDD].consumer);
	if (ret)
		dev_err(dev, "Failed to disable supply MICVDD: %d\n", ret);
}

/*
 * rt5682s_i2c_probe - bring up supplies, verify the device ID,
 * calibrate, apply pin/LDO configuration, request the jack-detect IRQ
 * and register the ASoC component.
 */
static int rt5682s_i2c_probe(struct i2c_client *i2c)
{
	struct rt5682s_platform_data *pdata = dev_get_platdata(&i2c->dev);
	struct rt5682s_priv *rt5682s;
	int i, ret;
	unsigned int val;

	rt5682s = devm_kzalloc(&i2c->dev, sizeof(struct rt5682s_priv), GFP_KERNEL);
	if (!rt5682s)
		return -ENOMEM;

	i2c_set_clientdata(i2c, rt5682s);

	rt5682s->pdata = i2s_default_platform_data;

	if (pdata)
		rt5682s->pdata = *pdata;
	else
		rt5682s_parse_dt(rt5682s, &i2c->dev);

	rt5682s->regmap = devm_regmap_init_i2c(i2c, &rt5682s_regmap);
	if (IS_ERR(rt5682s->regmap)) {
		ret = PTR_ERR(rt5682s->regmap);
		dev_err(&i2c->dev, "Failed to allocate register map: %d\n", ret);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(rt5682s->supplies); i++)
		rt5682s->supplies[i].supply = rt5682s_supply_names[i];

	ret = devm_regulator_bulk_get(&i2c->dev,
		ARRAY_SIZE(rt5682s->supplies), rt5682s->supplies);
	if (ret) {
		dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
		return ret;
	}

	ret = devm_add_action_or_reset(&i2c->dev,
		rt5682s_i2c_disable_regulators, rt5682s);
	if (ret)
		return ret;

	/* MICVDD first, then the rest after a short settle delay */
	ret = regulator_enable(rt5682s->supplies[RT5682S_SUPPLY_MICVDD].consumer);
	if (ret) {
		dev_err(&i2c->dev, "Failed to enable supply MICVDD: %d\n", ret);
		return ret;
	}
	usleep_range(1000, 1500);

	ret = regulator_enable(rt5682s->supplies[RT5682S_SUPPLY_AVDD].consumer);
	if (ret) {
		dev_err(&i2c->dev, "Failed to enable supply AVDD: %d\n", ret);
		return ret;
	}

	ret = regulator_enable(rt5682s->supplies[RT5682S_SUPPLY_DBVDD].consumer);
	if (ret) {
		dev_err(&i2c->dev, "Failed to enable supply DBVDD: %d\n", ret);
		return ret;
	}

	ret = regulator_enable(rt5682s->supplies[RT5682S_SUPPLY_LDO1_IN].consumer);
	if (ret) {
		dev_err(&i2c->dev, "Failed to enable supply LDO1-IN: %d\n", ret);
		return ret;
	}

	rt5682s->ldo1_en = devm_gpiod_get_optional(&i2c->dev,
		"realtek,ldo1-en", GPIOD_OUT_HIGH);
	if (IS_ERR(rt5682s->ldo1_en)) {
		dev_err(&i2c->dev, "Fail gpio request ldo1_en\n");
		return PTR_ERR(rt5682s->ldo1_en);
	}

	/* Sleep for 50 ms minimum */
	usleep_range(50000, 55000);

	regmap_read(rt5682s->regmap, RT5682S_DEVICE_ID, &val);
	if (val != DEVICE_ID) {
		dev_err(&i2c->dev, "Device with ID register %x is not rt5682s\n", val);
		return -ENODEV;
	}

	rt5682s_reset(rt5682s);
	rt5682s_apply_patch_list(rt5682s, &i2c->dev);

	regmap_update_bits(rt5682s->regmap, RT5682S_PWR_DIG_2,
		RT5682S_DLDO_I_LIMIT_MASK, RT5682S_DLDO_I_LIMIT_DIS);
	usleep_range(20000, 25000);

	mutex_init(&rt5682s->calibrate_mutex);
	mutex_init(&rt5682s->sar_mutex);
	mutex_init(&rt5682s->wclk_mutex);
	rt5682s_calibrate(rt5682s);

	regmap_update_bits(rt5682s->regmap, RT5682S_MICBIAS_2,
		RT5682S_PWR_CLK25M_MASK | RT5682S_PWR_CLK1M_MASK,
		RT5682S_PWR_CLK25M_PD | RT5682S_PWR_CLK1M_PU);
	regmap_update_bits(rt5682s->regmap, RT5682S_PWR_ANLG_1,
		RT5682S_PWR_BG, RT5682S_PWR_BG);
	regmap_update_bits(rt5682s->regmap, RT5682S_HP_LOGIC_CTRL_2,
		RT5682S_HP_SIG_SRC_MASK, RT5682S_HP_SIG_SRC_1BIT_CTL);
	regmap_update_bits(rt5682s->regmap, RT5682S_HP_CHARGE_PUMP_2,
		RT5682S_PM_HP_MASK, RT5682S_PM_HP_HV);
	regmap_update_bits(rt5682s->regmap, RT5682S_HP_AMP_DET_CTL_1,
		RT5682S_CP_SW_SIZE_MASK, RT5682S_CP_SW_SIZE_M);

	/* DMIC data pin */
	switch (rt5682s->pdata.dmic1_data_pin) {
	case RT5682S_DMIC1_DATA_NULL:
		break;
	case RT5682S_DMIC1_DATA_GPIO2: /* share with LRCK2 */
		regmap_update_bits(rt5682s->regmap, RT5682S_DMIC_CTRL_1,
			RT5682S_DMIC_1_DP_MASK, RT5682S_DMIC_1_DP_GPIO2);
		regmap_update_bits(rt5682s->regmap, RT5682S_GPIO_CTRL_1,
			RT5682S_GP2_PIN_MASK, RT5682S_GP2_PIN_DMIC_SDA);
		break;
	case RT5682S_DMIC1_DATA_GPIO5: /* share with DACDAT1 */
		regmap_update_bits(rt5682s->regmap, RT5682S_DMIC_CTRL_1,
			RT5682S_DMIC_1_DP_MASK, RT5682S_DMIC_1_DP_GPIO5);
		regmap_update_bits(rt5682s->regmap, RT5682S_GPIO_CTRL_1,
			RT5682S_GP5_PIN_MASK, RT5682S_GP5_PIN_DMIC_SDA);
		break;
	default:
		dev_warn(&i2c->dev, "invalid DMIC_DAT pin\n");
		break;
	}

	/* DMIC clk pin */
	switch (rt5682s->pdata.dmic1_clk_pin) {
	case RT5682S_DMIC1_CLK_NULL:
		break;
	case RT5682S_DMIC1_CLK_GPIO1: /* share with IRQ */
		regmap_update_bits(rt5682s->regmap, RT5682S_GPIO_CTRL_1,
			RT5682S_GP1_PIN_MASK, RT5682S_GP1_PIN_DMIC_CLK);
		break;
	case RT5682S_DMIC1_CLK_GPIO3: /* share with BCLK2 */
		regmap_update_bits(rt5682s->regmap, RT5682S_GPIO_CTRL_1,
			RT5682S_GP3_PIN_MASK, RT5682S_GP3_PIN_DMIC_CLK);
		if (rt5682s->pdata.dmic_clk_driving_high)
			regmap_update_bits(rt5682s->regmap,
				RT5682S_PAD_DRIVING_CTRL,
				RT5682S_PAD_DRV_GP3_MASK,
				RT5682S_PAD_DRV_GP3_HIGH);
		break;
	default:
		dev_warn(&i2c->dev, "invalid DMIC_CLK pin\n");
		break;
	}

	/* LDO output voltage control */
	switch (rt5682s->pdata.ldo_dacref) {
	case RT5682S_LDO_1_607V:
		break;
	case RT5682S_LDO_1_5V:
		regmap_update_bits(rt5682s->regmap, RT5682S_BIAS_CUR_CTRL_7,
			RT5682S_LDO_DACREF_MASK, RT5682S_LDO_DACREF_1_5V);
		break;
	case RT5682S_LDO_1_406V:
		regmap_update_bits(rt5682s->regmap, RT5682S_BIAS_CUR_CTRL_7,
			RT5682S_LDO_DACREF_MASK, RT5682S_LDO_DACREF_1_406V);
		break;
	case RT5682S_LDO_1_731V:
		regmap_update_bits(rt5682s->regmap, RT5682S_BIAS_CUR_CTRL_7,
			RT5682S_LDO_DACREF_MASK, RT5682S_LDO_DACREF_1_731V);
		break;
	default:
		dev_warn(&i2c->dev, "invalid LDO output setting.\n");
		break;
	}

	INIT_DELAYED_WORK(&rt5682s->jack_detect_work, rt5682s_jack_detect_handler);
	INIT_DELAYED_WORK(&rt5682s->jd_check_work, rt5682s_jd_check_handler);

	if (i2c->irq) {
		ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
			rt5682s_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			IRQF_ONESHOT, "rt5682s", rt5682s);
		if (!ret)
			rt5682s->irq = i2c->irq;
		else
			dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);
	}

	return devm_snd_soc_register_component(&i2c->dev,
		&rt5682s_soc_component_dev, rt5682s_dai, ARRAY_SIZE(rt5682s_dai));
}

/* Shutdown: stop IRQ/work and put the codec back into reset */
static void rt5682s_i2c_shutdown(struct i2c_client *client)
{
	struct rt5682s_priv *rt5682s =
i2c_get_clientdata(client); disable_irq(client->irq); cancel_delayed_work_sync(&rt5682s->jack_detect_work); cancel_delayed_work_sync(&rt5682s->jd_check_work); rt5682s_reset(rt5682s); } static void rt5682s_i2c_remove(struct i2c_client *client) { rt5682s_i2c_shutdown(client); } static const struct of_device_id rt5682s_of_match[] = { {.compatible = "realtek,rt5682s"}, {}, }; MODULE_DEVICE_TABLE(of, rt5682s_of_match); static const struct acpi_device_id rt5682s_acpi_match[] = { {"RTL5682", 0,}, {}, }; MODULE_DEVICE_TABLE(acpi, rt5682s_acpi_match); static const struct i2c_device_id rt5682s_i2c_id[] = { {"rt5682s"}, {} }; MODULE_DEVICE_TABLE(i2c, rt5682s_i2c_id); static struct i2c_driver rt5682s_i2c_driver = { .driver = { .name = "rt5682s", .of_match_table = rt5682s_of_match, .acpi_match_table = rt5682s_acpi_match, .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, .probe = rt5682s_i2c_probe, .remove = rt5682s_i2c_remove, .shutdown = rt5682s_i2c_shutdown, .id_table = rt5682s_i2c_id, }; module_i2c_driver(rt5682s_i2c_driver); MODULE_DESCRIPTION("ASoC RT5682I-VS driver"); MODULE_AUTHOR("Derek Fang <[email protected]>"); MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0-only OR X11 */
/*
 * Copyright 2019 Pengutronix, Marco Felsch <[email protected]>
 */

#ifndef _DT_BINDINGS_DISPLAY_SDTV_STDS_H
#define _DT_BINDINGS_DISPLAY_SDTV_STDS_H

/*
 * Attention: Keep the SDTV_STD_* bit definitions in sync with
 * include/uapi/linux/videodev2.h V4L2_STD_* bit definitions.
 */
/* One bit for each standard */
#define SDTV_STD_PAL_B		0x00000001
#define SDTV_STD_PAL_B1		0x00000002
#define SDTV_STD_PAL_G		0x00000004
#define SDTV_STD_PAL_H		0x00000008
#define SDTV_STD_PAL_I		0x00000010
#define SDTV_STD_PAL_D		0x00000020
#define SDTV_STD_PAL_D1		0x00000040
#define SDTV_STD_PAL_K		0x00000080

/* Convenience mask: all 50Hz PAL variants above */
#define SDTV_STD_PAL		(SDTV_STD_PAL_B		| \
				 SDTV_STD_PAL_B1	| \
				 SDTV_STD_PAL_G		| \
				 SDTV_STD_PAL_H		| \
				 SDTV_STD_PAL_I		| \
				 SDTV_STD_PAL_D		| \
				 SDTV_STD_PAL_D1	| \
				 SDTV_STD_PAL_K)

#define SDTV_STD_PAL_M		0x00000100
#define SDTV_STD_PAL_N		0x00000200
#define SDTV_STD_PAL_Nc		0x00000400
#define SDTV_STD_PAL_60		0x00000800

#define SDTV_STD_NTSC_M		0x00001000	/* BTSC */
#define SDTV_STD_NTSC_M_JP	0x00002000	/* EIA-J */
#define SDTV_STD_NTSC_443	0x00004000
#define SDTV_STD_NTSC_M_KR	0x00008000	/* FM A2 */

/* Convenience mask: all NTSC-M variants (443 intentionally excluded) */
#define SDTV_STD_NTSC		(SDTV_STD_NTSC_M	| \
				 SDTV_STD_NTSC_M_JP	| \
				 SDTV_STD_NTSC_M_KR)

#define SDTV_STD_SECAM_B	0x00010000
#define SDTV_STD_SECAM_D	0x00020000
#define SDTV_STD_SECAM_G	0x00040000
#define SDTV_STD_SECAM_H	0x00080000
#define SDTV_STD_SECAM_K	0x00100000
#define SDTV_STD_SECAM_K1	0x00200000
#define SDTV_STD_SECAM_L	0x00400000
#define SDTV_STD_SECAM_LC	0x00800000

/* Convenience mask: all SECAM variants above */
#define SDTV_STD_SECAM		(SDTV_STD_SECAM_B	| \
				 SDTV_STD_SECAM_D	| \
				 SDTV_STD_SECAM_G	| \
				 SDTV_STD_SECAM_H	| \
				 SDTV_STD_SECAM_K	| \
				 SDTV_STD_SECAM_K1	| \
				 SDTV_STD_SECAM_L	| \
				 SDTV_STD_SECAM_LC)

/* Standards for Countries with 60Hz Line frequency */
#define SDTV_STD_525_60		(SDTV_STD_PAL_M		| \
				 SDTV_STD_PAL_60	| \
				 SDTV_STD_NTSC		| \
				 SDTV_STD_NTSC_443)

/* Standards for Countries with 50Hz Line frequency */
#define SDTV_STD_625_50		(SDTV_STD_PAL		| \
				 SDTV_STD_PAL_N		| \
				 SDTV_STD_PAL_Nc	| \
				 SDTV_STD_SECAM)

#endif /* _DT_BINDINGS_DISPLAY_SDTV_STDS_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011 Jonathan Cameron
 *
 * A reference industrial I/O driver to illustrate the functionality available.
 *
 * There are numerous real drivers to illustrate the finer points.
 * The purpose of this driver is to provide a driver with far more comments
 * and explanatory notes than any 'real' driver would have.
 * Anyone starting out writing an IIO driver should first make sure they
 * understand all of this driver except those bits specifically marked
 * as being present to allow us to 'fake' the presence of hardware.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/sw_device.h>
#include "iio_simple_dummy.h"

/* configfs type used to instantiate this software device */
static const struct config_item_type iio_dummy_type = {
	.ct_owner = THIS_MODULE,
};

/**
 * struct iio_dummy_accel_calibscale - realworld to register mapping
 * @val: first value in read_raw - here integer part.
 * @val2: second value in read_raw etc - here micro part.
 * @regval: register value - magic device specific numbers.
 */
struct iio_dummy_accel_calibscale {
	int val;
	int val2;
	int regval; /* what would be written to hardware */
};

/* Calibration scales the fake hardware supports; see write_raw matching */
static const struct iio_dummy_accel_calibscale dummy_scales[] = {
	{ 0, 100, 0x8 }, /* 0.000100 */
	{ 0, 133, 0x7 }, /* 0.000133 */
	{ 733, 13, 0x9 }, /* 733.000013 */
};

#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS

/*
 * simple event - triggered when value rises above
 * a threshold
 */
static const struct iio_event_spec iio_dummy_event = {
	.type = IIO_EV_TYPE_THRESH,
	.dir = IIO_EV_DIR_RISING,
	.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
};

/*
 * simple step detect event - triggered when a step is detected
 */
static const struct iio_event_spec step_detect_event = {
	.type = IIO_EV_TYPE_CHANGE,
	.dir = IIO_EV_DIR_NONE,
	.mask_separate = BIT(IIO_EV_INFO_ENABLE),
};

/*
 * simple transition event - triggered when the reported running confidence
 * value rises above a threshold value
 */
static const struct iio_event_spec iio_running_event = {
	.type = IIO_EV_TYPE_THRESH,
	.dir = IIO_EV_DIR_RISING,
	.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
};

/*
 * simple transition event - triggered when the reported walking confidence
 * value falls under a threshold value
 */
static const struct iio_event_spec iio_walking_event = {
	.type = IIO_EV_TYPE_THRESH,
	.dir = IIO_EV_DIR_FALLING,
	.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
};
#endif

/*
 * iio_dummy_channels - Description of available channels
 *
 * This array of structures tells the IIO core about what the device
 * actually provides for a given channel.
 */
static const struct iio_chan_spec iio_dummy_channels[] = {
	/* indexed ADC channel in_voltage0_raw etc */
	{
		.type = IIO_VOLTAGE,
		/* Channel has a numeric index of 0 */
		.indexed = 1,
		.channel = 0,
		/* What other information is available? */
		.info_mask_separate =
		/*
		 * in_voltage0_raw
		 * Raw (unscaled no bias removal etc) measurement
		 * from the device.
		 */
		BIT(IIO_CHAN_INFO_RAW) |
		/*
		 * in_voltage0_offset
		 * Offset for userspace to apply prior to scale
		 * when converting to standard units (microvolts)
		 */
		BIT(IIO_CHAN_INFO_OFFSET) |
		/*
		 * in_voltage0_scale
		 * Multiplier for userspace to apply post offset
		 * when converting to standard units (microvolts)
		 */
		BIT(IIO_CHAN_INFO_SCALE),
		/*
		 * sampling_frequency
		 * The frequency in Hz at which the channels are sampled
		 */
		.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
		/* The ordering of elements in the buffer via an enum */
		.scan_index = DUMMY_INDEX_VOLTAGE_0,
		.scan_type = { /* Description of storage in buffer */
			.sign = 'u', /* unsigned */
			.realbits = 13, /* 13 bits */
			.storagebits = 16, /* 16 bits used for storage */
			.shift = 0, /* zero shift */
		},
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
		.event_spec = &iio_dummy_event,
		.num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
	},
	/* Differential ADC channel in_voltage1-voltage2_raw etc */
	{
		.type = IIO_VOLTAGE,
		.differential = 1,
		/*
		 * Indexing for differential channels uses channel
		 * for the positive part, channel2 for the negative.
		 */
		.indexed = 1,
		.channel = 1,
		.channel2 = 2,
		/*
		 * in_voltage1-voltage2_raw
		 * Raw (unscaled no bias removal etc) measurement
		 * from the device.
		 */
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		/*
		 * in_voltage-voltage_scale
		 * Shared version of scale - shared by differential
		 * input channels of type IIO_VOLTAGE.
		 */
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
		/*
		 * sampling_frequency
		 * The frequency in Hz at which the channels are sampled
		 */
		.scan_index = DUMMY_INDEX_DIFFVOLTAGE_1M2,
		.scan_type = { /* Description of storage in buffer */
			.sign = 's', /* signed */
			.realbits = 12, /* 12 bits */
			.storagebits = 16, /* 16 bits used for storage */
			.shift = 0, /* zero shift */
		},
	},
	/* Differential ADC channel in_voltage3-voltage4_raw etc */
	{
		.type = IIO_VOLTAGE,
		.differential = 1,
		.indexed = 1,
		.channel = 3,
		.channel2 = 4,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
		.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
		.scan_index = DUMMY_INDEX_DIFFVOLTAGE_3M4,
		.scan_type = {
			.sign = 's',
			.realbits = 11,
			.storagebits = 16,
			.shift = 0,
		},
	},
	/*
	 * 'modified' (i.e. axis specified) acceleration channel
	 * in_accel_z_raw
	 */
	{
		.type = IIO_ACCEL,
		.modified = 1,
		/* Channel 2 is used for modifiers */
		.channel2 = IIO_MOD_X,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
		/*
		 * Internal bias and gain correction values. Applied
		 * by the hardware or driver prior to userspace
		 * seeing the readings. Typically part of hardware
		 * calibration.
		 */
		BIT(IIO_CHAN_INFO_CALIBSCALE) |
		BIT(IIO_CHAN_INFO_CALIBBIAS),
		.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
		.scan_index = DUMMY_INDEX_ACCELX,
		.scan_type = { /* Description of storage in buffer */
			.sign = 's', /* signed */
			.realbits = 16, /* 16 bits */
			.storagebits = 16, /* 16 bits used for storage */
			.shift = 0, /* zero shift */
		},
	},
	/*
	 * Convenience macro for timestamps. 4 is the index in
	 * the buffer.
	 */
	IIO_CHAN_SOFT_TIMESTAMP(4),
	/* DAC channel out_voltage0_raw */
	{
		.type = IIO_VOLTAGE,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.scan_index = -1, /* No buffer support */
		.output = 1,
		.indexed = 1,
		.channel = 0,
	},
	{
		.type = IIO_STEPS,
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_ENABLE) |
			BIT(IIO_CHAN_INFO_CALIBHEIGHT),
		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
		.scan_index = -1, /* No buffer support */
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
		.event_spec = &step_detect_event,
		.num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
	},
	{
		.type = IIO_ACTIVITY,
		.modified = 1,
		.channel2 = IIO_MOD_RUNNING,
		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
		.scan_index = -1, /* No buffer support */
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
		.event_spec = &iio_running_event,
		.num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
	},
	{
		.type = IIO_ACTIVITY,
		.modified = 1,
		.channel2 = IIO_MOD_WALKING,
		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
		.scan_index = -1, /* No buffer support */
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
		.event_spec = &iio_walking_event,
		.num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
	},
};

/**
 * iio_dummy_read_raw() - data read function.
 * @indio_dev:	the struct iio_dev associated with this device instance
 * @chan:	the channel whose data is to be read
 * @val:	first element of returned value (typically INT)
 * @val2:	second element of returned value (typically MICRO)
 * @mask:	what we actually want to read as per the info_mask_*
 *		in iio_chan_spec.
 */
static int iio_dummy_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int *val,
			      int *val2,
			      long mask)
{
	struct iio_dummy_state *st = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_RAW: /* magic value - channel value read */
		/* Claim direct mode; -EBUSY if a buffered capture is running */
		iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
			/* guard() releases st->lock automatically on scope exit */
			guard(mutex)(&st->lock);
			switch (chan->type) {
			case IIO_VOLTAGE:
				if (chan->output) {
					/* Set integer part to cached value */
					*val = st->dac_val;
					return IIO_VAL_INT;
				} else if (chan->differential) {
					if (chan->channel == 1)
						*val = st->differential_adc_val[0];
					else
						*val = st->differential_adc_val[1];
					return IIO_VAL_INT;
				} else {
					*val = st->single_ended_adc_val;
					return IIO_VAL_INT;
				}
			case IIO_ACCEL:
				*val = st->accel_val;
				return IIO_VAL_INT;
			default:
				return -EINVAL;
			}
		}
		/* All paths inside the claim scope return */
		unreachable();
	case IIO_CHAN_INFO_PROCESSED:
		iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
			guard(mutex)(&st->lock);
			switch (chan->type) {
			case IIO_STEPS:
				*val = st->steps;
				return IIO_VAL_INT;
			case IIO_ACTIVITY:
				switch (chan->channel2) {
				case IIO_MOD_RUNNING:
					*val = st->activity_running;
					return IIO_VAL_INT;
				case IIO_MOD_WALKING:
					*val = st->activity_walking;
					return IIO_VAL_INT;
				default:
					return -EINVAL;
				}
			default:
				return -EINVAL;
			}
		}
		unreachable();
	case IIO_CHAN_INFO_OFFSET:
		/* only single ended adc -> 7 */
		*val = 7;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			switch (chan->differential) {
			case 0:
				/* only single ended adc -> 0.001333 */
				*val = 0;
				*val2 = 1333;
				return IIO_VAL_INT_PLUS_MICRO;
			case 1:
				/* all differential adc -> 0.000001344 */
				*val = 0;
				*val2 = 1344;
				return IIO_VAL_INT_PLUS_NANO;
			default:
				return -EINVAL;
			}
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_CALIBBIAS: {
		guard(mutex)(&st->lock);
		/* only the acceleration axis - read from cache */
		*val = st->accel_calibbias;
		return IIO_VAL_INT;
	}
	case IIO_CHAN_INFO_CALIBSCALE: {
		guard(mutex)(&st->lock);
		*val = st->accel_calibscale->val;
		*val2 = st->accel_calibscale->val2;
		return IIO_VAL_INT_PLUS_MICRO;
	}
	case IIO_CHAN_INFO_SAMP_FREQ:
		/* Fixed fake sample rate: 3.000000033 Hz */
		*val = 3;
		*val2 = 33;
		return IIO_VAL_INT_PLUS_NANO;
	case IIO_CHAN_INFO_ENABLE: {
		guard(mutex)(&st->lock);
		switch (chan->type) {
		case IIO_STEPS:
			*val = st->steps_enabled;
			return IIO_VAL_INT;
		default:
			return -EINVAL;
		}
	}
	case IIO_CHAN_INFO_CALIBHEIGHT: {
		guard(mutex)(&st->lock);
		switch (chan->type) {
		case IIO_STEPS:
			*val = st->height;
			return IIO_VAL_INT;
		default:
			return -EINVAL;
		}
	}
	default:
		return -EINVAL;
	}
}

/**
 * iio_dummy_write_raw() - data write function.
 * @indio_dev:	the struct iio_dev associated with this device instance
 * @chan:	the channel whose data is to be written
 * @val:	first element of value to set (typically INT)
 * @val2:	second element of value to set (typically MICRO)
 * @mask:	what we actually want to write as per the info_mask_*
 *		in iio_chan_spec.
 *
 * Note that all raw writes are assumed IIO_VAL_INT and info mask elements
 * are assumed to be IIO_INT_PLUS_MICRO unless the callback write_raw_get_fmt
 * in struct iio_info is provided by the driver.
*/ static int iio_dummy_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { int i; struct iio_dummy_state *st = iio_priv(indio_dev); switch (mask) { case IIO_CHAN_INFO_RAW: switch (chan->type) { case IIO_VOLTAGE: if (chan->output == 0) return -EINVAL; scoped_guard(mutex, &st->lock) { /* Locking not required as writing single value */ st->dac_val = val; } return 0; default: return -EINVAL; } case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { case IIO_STEPS: scoped_guard(mutex, &st->lock) { st->steps = val; } return 0; case IIO_ACTIVITY: if (val < 0) val = 0; if (val > 100) val = 100; switch (chan->channel2) { case IIO_MOD_RUNNING: st->activity_running = val; return 0; case IIO_MOD_WALKING: st->activity_walking = val; return 0; default: return -EINVAL; } break; default: return -EINVAL; } case IIO_CHAN_INFO_CALIBSCALE: { guard(mutex)(&st->lock); /* Compare against table - hard matching here */ for (i = 0; i < ARRAY_SIZE(dummy_scales); i++) if (val == dummy_scales[i].val && val2 == dummy_scales[i].val2) break; if (i == ARRAY_SIZE(dummy_scales)) return -EINVAL; st->accel_calibscale = &dummy_scales[i]; return 0; } case IIO_CHAN_INFO_CALIBBIAS: scoped_guard(mutex, &st->lock) { st->accel_calibbias = val; } return 0; case IIO_CHAN_INFO_ENABLE: switch (chan->type) { case IIO_STEPS: scoped_guard(mutex, &st->lock) { st->steps_enabled = val; } return 0; default: return -EINVAL; } case IIO_CHAN_INFO_CALIBHEIGHT: switch (chan->type) { case IIO_STEPS: st->height = val; return 0; default: return -EINVAL; } default: return -EINVAL; } } /* * Device type specific information. 
 */
static const struct iio_info iio_dummy_info = {
	.read_raw = &iio_dummy_read_raw,
	.write_raw = &iio_dummy_write_raw,
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
	.read_event_config = &iio_simple_dummy_read_event_config,
	.write_event_config = &iio_simple_dummy_write_event_config,
	.read_event_value = &iio_simple_dummy_read_event_value,
	.write_event_value = &iio_simple_dummy_write_event_value,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
};

/**
 * iio_dummy_init_device() - device instance specific init
 * @indio_dev: the iio device structure
 *
 * Most drivers have one of these to set up default values,
 * reset the device to known state etc.
 *
 * Always returns 0; the int return mirrors what a real driver doing
 * hardware I/O here would need.
 */
static int iio_dummy_init_device(struct iio_dev *indio_dev)
{
	struct iio_dummy_state *st = iio_priv(indio_dev);

	/* Arbitrary "hardware" defaults for the cached register values */
	st->dac_val = 0;
	st->single_ended_adc_val = 73;
	st->differential_adc_val[0] = 33;
	st->differential_adc_val[1] = -34;
	st->accel_val = 34;
	st->accel_calibbias = -7;
	st->accel_calibscale = &dummy_scales[0];
	st->steps = 47;
	st->activity_running = 98;
	st->activity_walking = 4;

	return 0;
}

/**
 * iio_dummy_probe() - device instance probe
 * @name: name of this instance.
 *
 * Arguments are bus type specific.
 * I2C: iio_dummy_probe(struct i2c_client *client,
 *                      const struct i2c_device_id *id)
 * SPI: iio_dummy_probe(struct spi_device *spi)
 *
 * Return: the new software device on success, ERR_PTR() on failure.
 */
static struct iio_sw_device *iio_dummy_probe(const char *name)
{
	int ret;
	struct iio_dev *indio_dev;
	struct iio_dummy_state *st;
	struct iio_sw_device *swd;
	struct device *parent = NULL;

	/*
	 * With hardware: Set the parent device.
	 * parent = &spi->dev;
	 * parent = &client->dev;
	 */

	swd = kzalloc(sizeof(*swd), GFP_KERNEL);
	if (!swd)
		return ERR_PTR(-ENOMEM);

	/*
	 * Allocate an IIO device.
	 *
	 * This structure contains all generic state
	 * information about the device instance.
	 * It also has a region (accessed by iio_priv()
	 * for chip specific state information.
	 */
	indio_dev = iio_device_alloc(parent, sizeof(*st));
	if (!indio_dev) {
		ret = -ENOMEM;
		goto error_free_swd;
	}

	st = iio_priv(indio_dev);
	mutex_init(&st->lock);

	/* Cannot fail for this driver, so return value is ignored */
	iio_dummy_init_device(indio_dev);

	/*
	 * Make the iio_dev struct available to remove function.
	 * Bus equivalents
	 * i2c_set_clientdata(client, indio_dev);
	 * spi_set_drvdata(spi, indio_dev);
	 */
	swd->device = indio_dev;

	/*
	 * Set the device name.
	 *
	 * This is typically a part number and obtained from the module
	 * id table.
	 * e.g. for i2c and spi:
	 *    indio_dev->name = id->name;
	 *    indio_dev->name = spi_get_device_id(spi)->name;
	 */
	indio_dev->name = kstrdup(name, GFP_KERNEL);
	if (!indio_dev->name) {
		ret = -ENOMEM;
		goto error_free_device;
	}

	/* Provide description of available channels */
	indio_dev->channels = iio_dummy_channels;
	indio_dev->num_channels = ARRAY_SIZE(iio_dummy_channels);

	/*
	 * Provide device type specific interface functions and
	 * constant data.
	 */
	indio_dev->info = &iio_dummy_info;

	/* Specify that device provides sysfs type interfaces */
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = iio_simple_dummy_events_register(indio_dev);
	if (ret < 0)
		goto error_free_name;

	ret = iio_simple_dummy_configure_buffer(indio_dev);
	if (ret < 0)
		goto error_unregister_events;

	ret = iio_device_register(indio_dev);
	if (ret < 0)
		goto error_unconfigure_buffer;

	iio_swd_group_init_type_name(swd, name, &iio_dummy_type);

	return swd;
	/* goto-cleanup ladder: unwind in strict reverse order of acquisition */
error_unconfigure_buffer:
	iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
	iio_simple_dummy_events_unregister(indio_dev);
error_free_name:
	kfree(indio_dev->name);
error_free_device:
	iio_device_free(indio_dev);
error_free_swd:
	kfree(swd);
	return ERR_PTR(ret);
}

/**
 * iio_dummy_remove() - device instance removal function
 * @swd: pointer to software IIO device abstraction
 *
 * Parameters follow those of iio_dummy_probe for buses.
 *
 * Return: always 0.
 */
static int iio_dummy_remove(struct iio_sw_device *swd)
{
	/*
	 * Get a pointer to the device instance iio_dev structure
	 * from the bus subsystem. E.g.
	 * struct iio_dev *indio_dev = i2c_get_clientdata(client);
	 * struct iio_dev *indio_dev = spi_get_drvdata(spi);
	 */
	struct iio_dev *indio_dev = swd->device;

	/* Unregister the device */
	iio_device_unregister(indio_dev);

	/* Device specific code to power down etc */

	/* Buffered capture related cleanup */
	iio_simple_dummy_unconfigure_buffer(indio_dev);

	iio_simple_dummy_events_unregister(indio_dev);

	/* Free all structures */
	kfree(indio_dev->name);
	iio_device_free(indio_dev);

	return 0;
}

/*
 * module_iio_sw_device_driver() - device driver registration
 *
 * Varies depending on bus type of the device. As there is no device
 * here, call probe directly. For information on device registration
 * i2c:
 * Documentation/i2c/writing-clients.rst
 * spi:
 * Documentation/spi/spi-summary.rst
 */
static const struct iio_sw_device_ops iio_dummy_device_ops = {
	.probe = iio_dummy_probe,
	.remove = iio_dummy_remove,
};

static struct iio_sw_device_type iio_dummy_device = {
	.name = "dummy",
	.owner = THIS_MODULE,
	.ops = &iio_dummy_device_ops,
};

module_iio_sw_device_driver(iio_dummy_device);

MODULE_AUTHOR("Jonathan Cameron <[email protected]>");
MODULE_DESCRIPTION("IIO dummy driver");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0+
/*
 * Amlogic Meson-AXG Clock Controller Driver
 *
 * Copyright (c) 2016 Baylibre SAS.
 * Author: Michael Turquette <[email protected]>
 *
 * Copyright (c) 2018 Amlogic, inc.
 * Author: Qiufang Dai <[email protected]>
 */

#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>

#include "meson-aoclk.h"
#include "clk-regmap.h"
#include "clk-dualdiv.h"

#include <dt-bindings/clock/axg-aoclkc.h>
#include <dt-bindings/reset/axg-aoclkc.h>

/*
 * AO Configuration Clock registers offsets
 * Register offsets from the data sheet must be multiplied by 4.
 */
#define AO_RTI_PWR_CNTL_REG1	0x0C
#define AO_RTI_PWR_CNTL_REG0	0x10
#define AO_RTI_GEN_CNTL_REG0	0x40
#define AO_OSCIN_CNTL		0x58
#define AO_CRT_CLK_CNTL1	0x68
#define AO_SAR_CLK		0x90
#define AO_RTC_ALT_CLK_CNTL0	0x94
#define AO_RTC_ALT_CLK_CNTL1	0x98

/*
 * Helper to declare a simple AO peripheral gate: one bit in
 * AO_RTI_GEN_CNTL_REG0, parented on "mpeg-clk".
 */
#define AXG_AO_GATE(_name, _bit)					\
static struct clk_regmap axg_aoclk_##_name = {				\
	.data = &(struct clk_regmap_gate_data) {			\
		.offset = (AO_RTI_GEN_CNTL_REG0),			\
		.bit_idx = (_bit),					\
	},								\
	.hw.init = &(struct clk_init_data) {				\
		.name = "axg_ao_" #_name,				\
		.ops = &clk_regmap_gate_ops,				\
		.parent_data = &(const struct clk_parent_data) {	\
			.fw_name = "mpeg-clk",				\
		},							\
		.num_parents = 1,					\
		.flags = CLK_IGNORE_UNUSED,				\
	},								\
}

AXG_AO_GATE(remote, 0);
AXG_AO_GATE(i2c_master, 1);
AXG_AO_GATE(i2c_slave, 2);
AXG_AO_GATE(uart1, 3);
AXG_AO_GATE(uart2, 5);
AXG_AO_GATE(ir_blaster, 6);
AXG_AO_GATE(saradc, 7);

static struct clk_regmap axg_aoclk_cts_oscin = {
	.data = &(struct clk_regmap_gate_data){
		.offset = AO_RTI_PWR_CNTL_REG0,
		.bit_idx = 14,
	},
	.hw.init = &(struct clk_init_data){
		.name = "cts_oscin",
		.ops = &clk_regmap_gate_ro_ops,
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "xtal",
		},
		.num_parents = 1,
	},
};

static struct clk_regmap axg_aoclk_32k_pre = {
	.data = &(struct clk_regmap_gate_data){
		.offset = AO_RTC_ALT_CLK_CNTL0,
		.bit_idx = 31,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_32k_pre",
		.ops = &clk_regmap_gate_ops,
		.parent_hws = (const struct clk_hw *[]) {
			&axg_aoclk_cts_oscin.hw
		},
		.num_parents = 1,
	},
};

static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
	{
		.dual		= 1,
		.n1		= 733,
		.m1		= 8,
		.n2		= 732,
		.m2		= 11,
	}, {}
};

static struct clk_regmap axg_aoclk_32k_div = {
	.data = &(struct meson_clk_dualdiv_data){
		.n1 = {
			.reg_off = AO_RTC_ALT_CLK_CNTL0,
			.shift   = 0,
			.width   = 12,
		},
		.n2 = {
			.reg_off = AO_RTC_ALT_CLK_CNTL0,
			.shift   = 12,
			.width   = 12,
		},
		.m1 = {
			.reg_off = AO_RTC_ALT_CLK_CNTL1,
			.shift   = 0,
			.width   = 12,
		},
		.m2 = {
			.reg_off = AO_RTC_ALT_CLK_CNTL1,
			.shift   = 12,
			.width   = 12,
		},
		.dual = {
			.reg_off = AO_RTC_ALT_CLK_CNTL0,
			.shift   = 28,
			.width   = 1,
		},
		.table = axg_32k_div_table,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_32k_div",
		.ops = &meson_clk_dualdiv_ops,
		.parent_hws = (const struct clk_hw *[]) {
			&axg_aoclk_32k_pre.hw
		},
		.num_parents = 1,
	},
};

static struct clk_regmap axg_aoclk_32k_sel = {
	.data = &(struct clk_regmap_mux_data) {
		.offset = AO_RTC_ALT_CLK_CNTL1,
		.mask = 0x1,
		.shift = 24,
		.flags = CLK_MUX_ROUND_CLOSEST,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_32k_sel",
		.ops = &clk_regmap_mux_ops,
		.parent_hws = (const struct clk_hw *[]) {
			&axg_aoclk_32k_div.hw,
			&axg_aoclk_32k_pre.hw,
		},
		.num_parents = 2,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_regmap axg_aoclk_32k = {
	.data = &(struct clk_regmap_gate_data){
		.offset = AO_RTC_ALT_CLK_CNTL0,
		.bit_idx = 30,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_32k",
		.ops = &clk_regmap_gate_ops,
		.parent_hws = (const struct clk_hw *[]) {
			&axg_aoclk_32k_sel.hw
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
	.data = &(struct clk_regmap_mux_data) {
		.offset = AO_RTI_PWR_CNTL_REG0,
		.mask = 0x1,
		.shift = 10,
		.flags = CLK_MUX_ROUND_CLOSEST,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_cts_rtc_oscin",
		.ops = &clk_regmap_mux_ops,
		.parent_data = (const struct clk_parent_data []) {
			{ .hw = &axg_aoclk_32k.hw },
			{ .fw_name = "ext_32k-0", },
		},
		.num_parents = 2,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_regmap axg_aoclk_clk81 = {
	.data = &(struct clk_regmap_mux_data) {
		.offset = AO_RTI_PWR_CNTL_REG0,
		.mask = 0x1,
		.shift = 8,
		.flags = CLK_MUX_ROUND_CLOSEST,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_clk81",
		.ops = &clk_regmap_mux_ro_ops,
		.parent_data = (const struct clk_parent_data []) {
			{ .fw_name = "mpeg-clk", },
			{ .hw = &axg_aoclk_cts_rtc_oscin.hw },
		},
		.num_parents = 2,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_regmap axg_aoclk_saradc_mux = {
	.data = &(struct clk_regmap_mux_data) {
		.offset = AO_SAR_CLK,
		.mask = 0x3,
		.shift = 9,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_saradc_mux",
		.ops = &clk_regmap_mux_ops,
		.parent_data = (const struct clk_parent_data []) {
			{ .fw_name = "xtal", },
			{ .hw = &axg_aoclk_clk81.hw },
		},
		.num_parents = 2,
	},
};

static struct clk_regmap axg_aoclk_saradc_div = {
	.data = &(struct clk_regmap_div_data) {
		.offset = AO_SAR_CLK,
		.shift = 0,
		.width = 8,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_saradc_div",
		.ops = &clk_regmap_divider_ops,
		.parent_hws = (const struct clk_hw *[]) {
			&axg_aoclk_saradc_mux.hw
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static struct clk_regmap axg_aoclk_saradc_gate = {
	.data = &(struct clk_regmap_gate_data) {
		.offset = AO_SAR_CLK,
		.bit_idx = 8,
	},
	.hw.init = &(struct clk_init_data){
		.name = "axg_ao_saradc_gate",
		.ops = &clk_regmap_gate_ops,
		.parent_hws = (const struct clk_hw *[]) {
			&axg_aoclk_saradc_div.hw
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
	},
};

/* Reset line -> bit index in AO_RTI_GEN_CNTL_REG0 (see axg_aoclkc_data) */
static const unsigned int axg_aoclk_reset[] = {
	[RESET_AO_REMOTE]	= 16,
	[RESET_AO_I2C_MASTER]	= 18,
	[RESET_AO_I2C_SLAVE]	= 19,
	[RESET_AO_UART1]	= 17,
	[RESET_AO_UART2]	= 22,
	[RESET_AO_IR_BLASTER]	= 23,
};

/* Every clk_regmap clock above; regmap is attached to these at probe */
static struct clk_regmap *axg_aoclk_regmap[] = {
	&axg_aoclk_remote,
	&axg_aoclk_i2c_master,
	&axg_aoclk_i2c_slave,
	&axg_aoclk_uart1,
	&axg_aoclk_uart2,
	&axg_aoclk_ir_blaster,
	&axg_aoclk_saradc,
	&axg_aoclk_cts_oscin,
	&axg_aoclk_32k_pre,
	&axg_aoclk_32k_div,
	&axg_aoclk_32k_sel,
	&axg_aoclk_32k,
	&axg_aoclk_cts_rtc_oscin,
	&axg_aoclk_clk81,
	&axg_aoclk_saradc_mux,
	&axg_aoclk_saradc_div,
	&axg_aoclk_saradc_gate,
};

/* DT binding CLKID -> clk_hw lookup table */
static struct clk_hw *axg_aoclk_hw_clks[] = {
	[CLKID_AO_REMOTE]	= &axg_aoclk_remote.hw,
	[CLKID_AO_I2C_MASTER]	= &axg_aoclk_i2c_master.hw,
	[CLKID_AO_I2C_SLAVE]	= &axg_aoclk_i2c_slave.hw,
	[CLKID_AO_UART1]	= &axg_aoclk_uart1.hw,
	[CLKID_AO_UART2]	= &axg_aoclk_uart2.hw,
	[CLKID_AO_IR_BLASTER]	= &axg_aoclk_ir_blaster.hw,
	[CLKID_AO_SAR_ADC]	= &axg_aoclk_saradc.hw,
	[CLKID_AO_CLK81]	= &axg_aoclk_clk81.hw,
	[CLKID_AO_SAR_ADC_SEL]	= &axg_aoclk_saradc_mux.hw,
	[CLKID_AO_SAR_ADC_DIV]	= &axg_aoclk_saradc_div.hw,
	[CLKID_AO_SAR_ADC_CLK]	= &axg_aoclk_saradc_gate.hw,
	[CLKID_AO_CTS_OSCIN]	= &axg_aoclk_cts_oscin.hw,
	[CLKID_AO_32K_PRE]	= &axg_aoclk_32k_pre.hw,
	[CLKID_AO_32K_DIV]	= &axg_aoclk_32k_div.hw,
	[CLKID_AO_32K_SEL]	= &axg_aoclk_32k_sel.hw,
	[CLKID_AO_32K]		= &axg_aoclk_32k.hw,
	[CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
};

static const struct meson_aoclk_data axg_aoclkc_data = {
	.reset_reg	= AO_RTI_GEN_CNTL_REG0,
	.num_reset	= ARRAY_SIZE(axg_aoclk_reset),
	.reset		= axg_aoclk_reset,
	.num_clks	= ARRAY_SIZE(axg_aoclk_regmap),
	.clks		= axg_aoclk_regmap,
	.hw_clks	= {
		.hws	= axg_aoclk_hw_clks,
		.num	= ARRAY_SIZE(axg_aoclk_hw_clks),
	},
};

static const struct of_device_id axg_aoclkc_match_table[] = {
	{
		.compatible	= "amlogic,meson-axg-aoclkc",
		.data		= &axg_aoclkc_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, axg_aoclkc_match_table);

static struct platform_driver axg_aoclkc_driver = {
	.probe		= meson_aoclkc_probe,
	.driver		= {
		.name	= "axg-aoclkc",
		.of_match_table = axg_aoclkc_match_table,
	},
};

module_platform_driver(axg_aoclkc_driver);

MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CLK_MESON");
/* SPDX-License-Identifier: GPL-2.0+ */

#ifndef _VKMS_DRV_H_
#define _VKMS_DRV_H_

#include <linux/hrtimer.h>

#include <drm/drm.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_writeback.h>

/* Mode enumeration limits and defaults exposed by the virtual connector */
#define XRES_MIN    10
#define YRES_MIN    10

#define XRES_DEF  1024
#define YRES_DEF   768

#define XRES_MAX  8192
#define YRES_MAX  8192

#define NUM_OVERLAY_PLANES 8

#define VKMS_LUT_SIZE 256

/**
 * struct vkms_frame_info - Structure to store the state of a frame
 *
 * @fb: backing drm framebuffer
 * @src: source rectangle of this frame in the source framebuffer, stored in 16.16 fixed-point form
 * @dst: destination rectangle in the crtc buffer, stored in whole pixel units
 * @rotated: presumably @src after the rotation has been applied - TODO confirm against composer
 * @map: see @drm_shadow_plane_state.data
 * @rotation: rotation applied to the source.
 * @offset: byte offset of the first pixel in the mapped buffer - confirm against format helpers
 * @pitch: presumably bytes per scanline of the mapped buffer - confirm against format helpers
 * @cpp: presumably bytes per pixel of the framebuffer format - confirm against format helpers
 *
 * @src and @dst should have the same size modulo the rotation.
 */
struct vkms_frame_info {
	struct drm_framebuffer *fb;
	struct drm_rect src, dst;
	struct drm_rect rotated;
	struct iosys_map map[DRM_FORMAT_MAX_PLANES];
	unsigned int rotation;
	unsigned int offset;
	unsigned int pitch;
	unsigned int cpp;
};

/* One ARGB pixel with 16 bits per component (composer working format) */
struct pixel_argb_u16 {
	u16 a, r, g, b;
};

/* A single horizontal line of pixels used as composition scratch space */
struct line_buffer {
	size_t n_pixels;
	struct pixel_argb_u16 *pixels;
};

struct vkms_writeback_job {
	struct iosys_map data[DRM_FORMAT_MAX_PLANES];
	struct vkms_frame_info wb_frame_info;
	void (*pixel_write)(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel);
};

/**
 * struct vkms_plane_state - Driver specific plane state
 * @base: base plane state
 * @frame_info: data required for composing computation
 * @pixel_read: function to read a pixel in this plane. The creator of a struct vkms_plane_state
 *		must ensure that this pointer is valid
 */
struct vkms_plane_state {
	struct drm_shadow_plane_state base;
	struct vkms_frame_info *frame_info;
	void (*pixel_read)(u8 *src_buffer, struct pixel_argb_u16 *out_pixel);
};

struct vkms_plane {
	struct drm_plane base;
};

/* Gamma LUT plus the precomputed ratio used to map values to LUT indices */
struct vkms_color_lut {
	struct drm_color_lut *base;
	size_t lut_length;
	s64 channel_value2index_ratio;
};

/**
 * struct vkms_crtc_state - Driver specific CRTC state
 *
 * @base: base CRTC state
 * @composer_work: work struct to compose and add CRC entries
 *
 * @num_active_planes: Number of active planes
 * @active_planes: List containing all the active planes (counted by
 *		   @num_active_planes). They should be stored in z-order.
 * @active_writeback: Current active writeback job
 * @gamma_lut: Look up table for gamma used in this CRTC
 * @crc_pending: Protected by @vkms_output.composer_lock, true when the frame CRC is not computed
 *		 yet. Used by vblank to detect if the composer is too slow.
 * @wb_pending: Protected by @vkms_output.composer_lock, true when a writeback frame is requested.
 * @frame_start: Protected by @vkms_output.composer_lock, saves the frame number before the start
 *		 of the composition process.
 * @frame_end: Protected by @vkms_output.composer_lock, saves the last requested frame number.
 *	       This is used to generate enough CRC entries when the composition worker is too slow.
 */
struct vkms_crtc_state {
	struct drm_crtc_state base;
	struct work_struct composer_work;

	int num_active_planes;
	struct vkms_plane_state **active_planes;
	struct vkms_writeback_job *active_writeback;
	struct vkms_color_lut gamma_lut;

	bool crc_pending;
	bool wb_pending;
	u64 frame_start;
	u64 frame_end;
};

/**
 * struct vkms_output - Internal representation of all output components in VKMS
 *
 * @crtc: Base CRTC in DRM
 * @encoder: DRM encoder used for this output
 * @connector: DRM connector used for this output
 * @wb_connector: DRM writeback connector used for this output
 * @vblank_hrtimer: Timer used to trigger the vblank
 * @period_ns: vblank period, in nanoseconds, used to configure @vblank_hrtimer and to compute
 *	       vblank timestamps
 * @composer_workq: Ordered workqueue for @composer_state.composer_work.
 * @lock: Lock used to protect concurrent access to the composer
 * @composer_enabled: Protected by @lock, true when the VKMS composer is active (crc needed or
 *		      writeback)
 * @composer_state: Protected by @lock, current state of this VKMS output
 * @composer_lock: Lock used internally to protect @composer_state members
 */
struct vkms_output {
	struct drm_crtc crtc;
	struct drm_encoder encoder;
	struct drm_connector connector;
	struct drm_writeback_connector wb_connector;
	struct hrtimer vblank_hrtimer;
	ktime_t period_ns;
	struct workqueue_struct *composer_workq;
	spinlock_t lock;

	bool composer_enabled;
	struct vkms_crtc_state *composer_state;

	spinlock_t composer_lock;
};

/**
 * struct vkms_config - General configuration for VKMS driver
 *
 * @writeback: If true, a writeback buffer can be attached to the CRTC
 * @cursor: If true, a cursor plane is created in the VKMS device
 * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device
 * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
 */
struct vkms_config {
	bool writeback;
	bool cursor;
	bool overlay;
	struct vkms_device *dev;
};

/**
 * struct vkms_device - Description of a VKMS device
 *
 * @drm: Base device in DRM
 * @platform: Associated platform device
 * @output: Configuration and sub-components of the VKMS device
 * @config: Configuration used in this VKMS device
 */
struct vkms_device {
	struct drm_device drm;
	struct platform_device *platform;
	struct vkms_output output;
	const struct vkms_config *config;
};

/*
 * The following helpers are used to convert a member of a struct into its parent.
 */

#define drm_crtc_to_vkms_output(target) \
	container_of(target, struct vkms_output, crtc)

#define drm_device_to_vkms_device(target) \
	container_of(target, struct vkms_device, drm)

#define to_vkms_crtc_state(target)\
	container_of(target, struct vkms_crtc_state, base)

#define to_vkms_plane_state(target)\
	container_of(target, struct vkms_plane_state, base.base)

/**
 * vkms_crtc_init() - Initialize a CRTC for VKMS
 * @dev: DRM device associated with the VKMS buffer
 * @crtc: uninitialized CRTC device
 * @primary: primary plane to attach to the CRTC
 * @cursor: plane to attach to the CRTC
 */
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
		   struct drm_plane *primary, struct drm_plane *cursor);

/**
 * vkms_output_init() - Initialize all sub-components needed for a VKMS device.
 *
 * @vkmsdev: VKMS device to initialize
 * @index: CRTC which can be attached to the planes. The caller must ensure that
 *	   @index is positive and less or equals to 31.
 */
int vkms_output_init(struct vkms_device *vkmsdev, int index);

/**
 * vkms_plane_init() - Initialize a plane
 *
 * @vkmsdev: VKMS device containing the plane
 * @type: type of plane to initialize
 * @index: CRTC which can be attached to the plane. The caller must ensure that
 *	   @index is positive and less or equals to 31.
 */
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
				   enum drm_plane_type type, int index);

/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
					size_t *count);
int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
			   size_t *values_cnt);

/* Composer Support */
void vkms_composer_worker(struct work_struct *work);
void vkms_set_composer(struct vkms_output *out, bool enabled);
void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);

/* Writeback */
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);

#endif /* _VKMS_DRV_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPS30_H
#define _SPS30_H

#include <linux/types.h>

struct sps30_state;

/**
 * struct sps30_ops - bus-specific callbacks for the SPS30 sensor
 *
 * Implemented by the transport drivers (e.g. I2C, serdev) and invoked by the
 * common SPS30 core through @sps30_state.ops. All callbacks return 0 on
 * success or a negative error code.
 *
 * @start_meas: start a measurement
 * @stop_meas: stop a measurement
 * @read_meas: read @num big-endian measurement words into @meas
 * @reset: reset the device
 * @clean_fan: trigger a fan cleaning cycle
 * @read_cleaning_period: read the automatic fan cleaning period (big-endian)
 * @write_cleaning_period: write the automatic fan cleaning period (big-endian)
 * @show_info: dump device information (e.g. to the kernel log)
 */
struct sps30_ops {
	int (*start_meas)(struct sps30_state *state);
	int (*stop_meas)(struct sps30_state *state);
	int (*read_meas)(struct sps30_state *state, __be32 *meas, size_t num);
	int (*reset)(struct sps30_state *state);
	int (*clean_fan)(struct sps30_state *state);
	int (*read_cleaning_period)(struct sps30_state *state, __be32 *period);
	int (*write_cleaning_period)(struct sps30_state *state, __be32 period);
	int (*show_info)(struct sps30_state *state);
};

/* Per-device state shared between the SPS30 core and its transport drivers. */
struct sps30_state {
	/* serialize access to the device */
	struct mutex lock;
	struct device *dev;
	/* driver-internal state value; semantics defined by the core driver */
	int state;
	/*
	 * priv pointer is solely for serdev driver private data. We keep it
	 * here because driver_data inside dev has been already used for iio and
	 * struct serdev_device doesn't have one.
	 */
	void *priv;
	/* bus-specific operations provided at probe time */
	const struct sps30_ops *ops;
};

/*
 * Common probe entry point called by the transport drivers. @name is the
 * device name, @priv is stored in sps30_state.priv (see comment above) and
 * @ops supplies the bus-specific callbacks. Returns 0 on success or a
 * negative error code.
 */
int sps30_probe(struct device *dev, const char *name, void *priv, const struct sps30_ops *ops);

#endif