// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
* at91-sama5d3_eds.dts - Device Tree file for the SAMA5D3 Ethernet
* Development System board.
*
* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries
*
* Author: Jerry Ray <[email protected]>
*/
/dts-v1/;
#include "sama5d36.dtsi"
/ {
model = "SAMA5D3 Ethernet Development System";
compatible = "microchip,sama5d3-eds", "atmel,sama5d36",
"atmel,sama5d3", "atmel,sama5";
chosen {
stdout-path = "serial0:115200n8";
};
gpio-keys {
compatible = "gpio-keys";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_key_gpio>;
button-3 {
label = "PB_USER";
gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
linux,code = <0x104>;
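/* 0x104 is BTN_4 in dt-bindings/input/linux-event-codes.h. */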
wakeup-source;
};
};
memory@20000000 {
reg = <0x20000000 0x10000000>;
};
vcc_3v3_reg: regulator-1 {
compatible = "regulator-fixed";
regulator-name = "VCC_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vcc_2v5_reg: regulator-2 {
compatible = "regulator-fixed";
regulator-name = "VCC_2V5";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
vin-supply = <&vcc_3v3_reg>;
};
vcc_1v8_reg: regulator-3 {
compatible = "regulator-fixed";
regulator-name = "VCC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
vin-supply = <&vcc_3v3_reg>;
};
vcc_1v2_reg: regulator-4 {
compatible = "regulator-fixed";
regulator-name = "VCC_1V2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
};
vcc_mmc0_reg: regulator-5 {
compatible = "regulator-fixed";
regulator-name = "mmc0-card-supply";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_vcc_mmc0_reg_gpio>;
gpio = <&pioE 2 GPIO_ACTIVE_LOW>;
};
};
&can0 {
status = "okay";
};
&dbgu {
status = "okay";
};
&ebi {
pinctrl-0 = <&pinctrl_ebi_nand_addr>;
pinctrl-names = "default";
status = "okay";
nand_controller: nand-controller {
status = "okay";
nand@3 {
reg = <0x3 0x0 0x2>;
atmel,rb = <0>;
nand-bus-width = <8>;
nand-ecc-mode = "hw";
nand-ecc-strength = <4>;
nand-ecc-step-size = <512>;
nand-on-flash-bbt;
label = "atmel_nand";
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
#size-cells = <1>;
at91bootstrap@0 {
label = "at91bootstrap";
reg = <0x0 0x40000>;
};
bootloader@40000 {
label = "bootloader";
reg = <0x40000 0xc0000>;
};
bootloaderenvred@100000 {
label = "bootloader env redundant";
reg = <0x100000 0x40000>;
};
bootloaderenv@140000 {
label = "bootloader env";
reg = <0x140000 0x40000>;
};
dtb@180000 {
label = "device tree";
reg = <0x180000 0x80000>;
};
kernel@200000 {
label = "kernel";
reg = <0x200000 0x600000>;
};
rootfs@800000 {
label = "rootfs";
reg = <0x800000 0x0f800000>;
};
};
};
};
};
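/*
 * Layout check (added for illustration, derived from the reg values
 * above): the partitions are contiguous. at91bootstrap ends at 0x40000,
 * bootloader at 0x100000, the two env areas at 0x140000 and 0x180000,
 * dtb at 0x200000, kernel at 0x800000, and rootfs at
 * 0x800000 + 0xf800000 = 0x10000000, i.e. a 256 MiB flash.
 */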
&i2c0 {
pinctrl-0 = <&pinctrl_i2c0_pu>;
status = "okay";
};
&i2c1 {
status = "okay";
};
&i2c2 {
pinctrl-0 = <&pinctrl_i2c2_pu>;
status = "okay";
};
&main_xtal {
clock-frequency = <12000000>;
};
&mmc0 {
pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3
&pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>;
vmmc-supply = <&vcc_mmc0_reg>;
vqmmc-supply = <&vcc_3v3_reg>;
status = "okay";
slot@0 {
reg = <0>;
bus-width = <8>;
cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>;
};
};
&pinctrl {
board {
pinctrl_i2c0_pu: i2c0-pu {
atmel,pins =
<AT91_PIOA 30 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOA 31 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
pinctrl_i2c2_pu: i2c2-pu {
atmel,pins =
<AT91_PIOA 18 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>,
<AT91_PIOA 19 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
};
pinctrl_key_gpio: key-gpio-0 {
atmel,pins =
<AT91_PIOE 29 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
pinctrl_mmc0_cd: mmc0-cd {
atmel,pins =
<AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
/* Reserved for reset signal to the RGMII connector. */
pinctrl_rgmii_rstn: rgmii-rstn {
atmel,pins =
<AT91_PIOD 18 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
/* Reserved for an interrupt line from the RMII and RGMII connectors. */
pinctrl_spi_irqn: spi-irqn {
atmel,pins =
<AT91_PIOB 28 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;
};
pinctrl_spi0_cs: spi0-cs-default {
atmel,pins =
<AT91_PIOD 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
pinctrl_spi1_cs: spi1-cs-default {
atmel,pins = <AT91_PIOC 25 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOC 28 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
pinctrl_usba_vbus: usba-vbus {
atmel,pins =
<AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;
};
pinctrl_usb_default: usb-default {
atmel,pins =
<AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
/* Reserved for VBUS fault interrupt. */
pinctrl_vbusfault_irqn: vbusfault-irqn {
atmel,pins =
<AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;
};
pinctrl_vcc_mmc0_reg_gpio: vcc-mmc0-reg-gpio-default {
atmel,pins = <AT91_PIOE 2 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
};
};
};
&slow_xtal {
clock-frequency = <32768>;
};
&spi0 {
pinctrl-names = "default", "cs";
pinctrl-1 = <&pinctrl_spi0_cs>;
cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
status = "okay";
};
&spi1 {
pinctrl-names = "default", "cs";
pinctrl-1 = <&pinctrl_spi1_cs>;
cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioC 28 0>;
status = "okay";
};
&tcb0 {
timer0: timer@0 {
compatible = "atmel,tcb-timer";
reg = <0>;
};
timer1: timer@1 {
compatible = "atmel,tcb-timer";
reg = <1>;
};
};
&usb0 { /* USB Device port with VBUS detection. */
atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usba_vbus>;
status = "okay";
};
&usb1 { /* 3-port Host. First port is unused. */
atmel,vbus-gpio = <0
&pioE 3 GPIO_ACTIVE_HIGH
&pioE 4 GPIO_ACTIVE_HIGH
>;
num-ports = <3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb_default>;
status = "okay";
};
&usb2 {
status = "okay";
};
// SPDX-License-Identifier: GPL-2.0
/ {
/*
* This file provides the now deprecated ACT LED to the
* Raspberry Pi boards. Please don't include this file
* for new boards!
*/
leds: leds {
compatible = "gpio-leds";
led_act: led-act {
label = "ACT";
default-state = "keep";
linux,default-trigger = "heartbeat";
};
};
};
// SPDX-License-Identifier: GPL-2.0-only
/*
* Authors:
* (C) 2020 Alexander Aring <[email protected]>
*/
#include <net/ipv6.h>
#include <net/rpl.h>
#define IPV6_PFXTAIL_LEN(x) (sizeof(struct in6_addr) - (x))
#define IPV6_RPL_BEST_ADDR_COMPRESSION 15
static void ipv6_rpl_addr_decompress(struct in6_addr *dst,
const struct in6_addr *daddr,
const void *post, unsigned char pfx)
{
memcpy(dst, daddr, pfx);
memcpy(&dst->s6_addr[pfx], post, IPV6_PFXTAIL_LEN(pfx));
}
static void ipv6_rpl_addr_compress(void *dst, const struct in6_addr *addr,
unsigned char pfx)
{
memcpy(dst, &addr->s6_addr[pfx], IPV6_PFXTAIL_LEN(pfx));
}
static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i)
{
return (void *)&hdr->rpl_segdata[i * IPV6_PFXTAIL_LEN(hdr->cmpri)];
}
void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
const struct ipv6_rpl_sr_hdr *inhdr,
const struct in6_addr *daddr, unsigned char n)
{
int i;
outhdr->nexthdr = inhdr->nexthdr;
outhdr->hdrlen = (((n + 1) * sizeof(struct in6_addr)) >> 3);
outhdr->pad = 0;
outhdr->type = inhdr->type;
outhdr->segments_left = inhdr->segments_left;
outhdr->cmpri = 0;
outhdr->cmpre = 0;
for (i = 0; i < n; i++)
ipv6_rpl_addr_decompress(&outhdr->rpl_segaddr[i], daddr,
ipv6_rpl_segdata_pos(inhdr, i),
inhdr->cmpri);
ipv6_rpl_addr_decompress(&outhdr->rpl_segaddr[n], daddr,
ipv6_rpl_segdata_pos(inhdr, n),
inhdr->cmpre);
}
static unsigned char ipv6_rpl_srh_calc_cmpri(const struct ipv6_rpl_sr_hdr *inhdr,
const struct in6_addr *daddr,
unsigned char n)
{
unsigned char plen;
int i;
for (plen = 0; plen < sizeof(*daddr); plen++) {
for (i = 0; i < n; i++) {
if (daddr->s6_addr[plen] !=
inhdr->rpl_segaddr[i].s6_addr[plen])
return plen;
}
}
return IPV6_RPL_BEST_ADDR_COMPRESSION;
}
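/*
 * Illustration (not part of the original source): if every segment
 * shares its first 14 bytes with daddr, the loops above return 14 and
 * only IPV6_PFXTAIL_LEN(14) == 2 trailing bytes per segment need to be
 * stored. A full 16-byte match returns IPV6_RPL_BEST_ADDR_COMPRESSION
 * (15) rather than 16, since CmprI/CmprE are 4-bit fields in the RPL SRH.
 */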
static unsigned char ipv6_rpl_srh_calc_cmpre(const struct in6_addr *daddr,
const struct in6_addr *last_segment)
{
unsigned int plen;
for (plen = 0; plen < sizeof(*daddr); plen++) {
if (daddr->s6_addr[plen] != last_segment->s6_addr[plen])
return plen;
}
return IPV6_RPL_BEST_ADDR_COMPRESSION;
}
void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,
const struct ipv6_rpl_sr_hdr *inhdr,
const struct in6_addr *daddr, unsigned char n)
{
unsigned char cmpri, cmpre;
size_t seglen;
int i;
cmpri = ipv6_rpl_srh_calc_cmpri(inhdr, daddr, n);
cmpre = ipv6_rpl_srh_calc_cmpre(daddr, &inhdr->rpl_segaddr[n]);
outhdr->nexthdr = inhdr->nexthdr;
seglen = (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre);
outhdr->hdrlen = seglen >> 3;
if (seglen & 0x7) {
outhdr->hdrlen++;
outhdr->pad = 8 - (seglen & 0x7);
} else {
outhdr->pad = 0;
}
outhdr->type = inhdr->type;
outhdr->segments_left = inhdr->segments_left;
outhdr->cmpri = cmpri;
outhdr->cmpre = cmpre;
for (i = 0; i < n; i++)
ipv6_rpl_addr_compress(ipv6_rpl_segdata_pos(outhdr, i),
&inhdr->rpl_segaddr[i], cmpri);
ipv6_rpl_addr_compress(ipv6_rpl_segdata_pos(outhdr, n),
&inhdr->rpl_segaddr[n], cmpre);
}
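/*
 * Worked example (added for illustration): with n = 2 and
 * cmpri = cmpre = 14, seglen = 2 * 2 + 2 = 6 bytes. hdrlen starts at
 * 6 >> 3 = 0, the remainder 6 & 0x7 = 6 bumps it to 1 and sets
 * pad = 8 - 6 = 2, rounding the segment data up to the whole 8-octet
 * units that the Hdr Ext Len field counts.
 */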
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright (c) 2020-2021 Microchip Technology Inc */
/dts-v1/;
#include "dt-bindings/clock/microchip,mpfs-clock.h"
/ {
#address-cells = <2>;
#size-cells = <2>;
model = "Microchip PolarFire SoC";
compatible = "microchip,mpfs";
cpus {
#address-cells = <1>;
#size-cells = <0>;
timebase-frequency = <1000000>;
cpu0: cpu@0 {
compatible = "sifive,e51", "sifive,rocket0", "riscv";
device_type = "cpu";
i-cache-block-size = <64>;
i-cache-sets = <128>;
i-cache-size = <16384>;
reg = <0>;
riscv,isa = "rv64imac";
riscv,isa-base = "rv64i";
riscv,isa-extensions = "i", "m", "a", "c", "zicntr", "zicsr", "zifencei",
"zihpm";
clocks = <&clkcfg CLK_CPU>;
status = "disabled";
cpu0_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
cpu1: cpu@1 {
compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
d-cache-block-size = <64>;
d-cache-sets = <64>;
d-cache-size = <32768>;
d-tlb-sets = <1>;
d-tlb-size = <32>;
device_type = "cpu";
i-cache-block-size = <64>;
i-cache-sets = <64>;
i-cache-size = <32768>;
i-tlb-sets = <1>;
i-tlb-size = <32>;
mmu-type = "riscv,sv39";
reg = <1>;
riscv,isa = "rv64imafdc";
riscv,isa-base = "rv64i";
riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr",
"zifencei", "zihpm";
clocks = <&clkcfg CLK_CPU>;
tlb-split;
next-level-cache = <&cctrllr>;
status = "okay";
cpu1_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
cpu2: cpu@2 {
compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
d-cache-block-size = <64>;
d-cache-sets = <64>;
d-cache-size = <32768>;
d-tlb-sets = <1>;
d-tlb-size = <32>;
device_type = "cpu";
i-cache-block-size = <64>;
i-cache-sets = <64>;
i-cache-size = <32768>;
i-tlb-sets = <1>;
i-tlb-size = <32>;
mmu-type = "riscv,sv39";
reg = <2>;
riscv,isa = "rv64imafdc";
riscv,isa-base = "rv64i";
riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr",
"zifencei", "zihpm";
clocks = <&clkcfg CLK_CPU>;
tlb-split;
next-level-cache = <&cctrllr>;
status = "okay";
cpu2_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
cpu3: cpu@3 {
compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
d-cache-block-size = <64>;
d-cache-sets = <64>;
d-cache-size = <32768>;
d-tlb-sets = <1>;
d-tlb-size = <32>;
device_type = "cpu";
i-cache-block-size = <64>;
i-cache-sets = <64>;
i-cache-size = <32768>;
i-tlb-sets = <1>;
i-tlb-size = <32>;
mmu-type = "riscv,sv39";
reg = <3>;
riscv,isa = "rv64imafdc";
riscv,isa-base = "rv64i";
riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr",
"zifencei", "zihpm";
clocks = <&clkcfg CLK_CPU>;
tlb-split;
next-level-cache = <&cctrllr>;
status = "okay";
cpu3_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
cpu4: cpu@4 {
compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
d-cache-block-size = <64>;
d-cache-sets = <64>;
d-cache-size = <32768>;
d-tlb-sets = <1>;
d-tlb-size = <32>;
device_type = "cpu";
i-cache-block-size = <64>;
i-cache-sets = <64>;
i-cache-size = <32768>;
i-tlb-sets = <1>;
i-tlb-size = <32>;
mmu-type = "riscv,sv39";
reg = <4>;
riscv,isa = "rv64imafdc";
riscv,isa-base = "rv64i";
riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr",
"zifencei", "zihpm";
clocks = <&clkcfg CLK_CPU>;
tlb-split;
next-level-cache = <&cctrllr>;
status = "okay";
cpu4_intc: interrupt-controller {
#interrupt-cells = <1>;
compatible = "riscv,cpu-intc";
interrupt-controller;
};
};
cpu-map {
cluster0 {
core0 {
cpu = <&cpu0>;
};
core1 {
cpu = <&cpu1>;
};
core2 {
cpu = <&cpu2>;
};
core3 {
cpu = <&cpu3>;
};
core4 {
cpu = <&cpu4>;
};
};
};
};
refclk: mssrefclk {
compatible = "fixed-clock";
#clock-cells = <0>;
};
syscontroller: syscontroller {
compatible = "microchip,mpfs-sys-controller";
mboxes = <&mbox 0>;
};
scbclk: mssclkclk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <80000000>;
};
soc {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
ranges;
cctrllr: cache-controller@2010000 {
compatible = "microchip,mpfs-ccache", "sifive,fu540-c000-ccache", "cache";
reg = <0x0 0x2010000 0x0 0x1000>;
cache-block-size = <64>;
cache-level = <2>;
cache-sets = <1024>;
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic>;
interrupts = <1>, <3>, <4>, <2>;
};
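/*
 * Sanity check on the geometry above (illustrative, assuming
 * size = sets * ways * block-size): 2097152 / (1024 * 64) = 32 ways
 * for this 2 MiB unified L2.
 */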
clint: clint@2000000 {
compatible = "sifive,fu540-c000-clint", "sifive,clint0";
reg = <0x0 0x2000000 0x0 0xC000>;
interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>,
<&cpu1_intc 3>, <&cpu1_intc 7>,
<&cpu2_intc 3>, <&cpu2_intc 7>,
<&cpu3_intc 3>, <&cpu3_intc 7>,
<&cpu4_intc 3>, <&cpu4_intc 7>;
};
plic: interrupt-controller@c000000 {
compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
reg = <0x0 0xc000000 0x0 0x4000000>;
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
interrupts-extended = <&cpu0_intc 11>,
<&cpu1_intc 11>, <&cpu1_intc 9>,
<&cpu2_intc 11>, <&cpu2_intc 9>,
<&cpu3_intc 11>, <&cpu3_intc 9>,
<&cpu4_intc 11>, <&cpu4_intc 9>;
riscv,ndev = <186>;
};
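/*
 * Note (added for clarity): the cause numbers in interrupts-extended
 * follow the RISC-V privileged spec: 3 and 7 on the CLINT are the
 * machine software and machine timer interrupts, while 11 and 9 on
 * the PLIC are the machine and supervisor external interrupts. cpu0,
 * the E51 monitor core, has no S-mode, hence only <&cpu0_intc 11>.
 */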
pdma: dma-controller@3000000 {
compatible = "microchip,mpfs-pdma", "sifive,pdma0";
reg = <0x0 0x3000000 0x0 0x8000>;
interrupt-parent = <&plic>;
interrupts = <5 6>, <7 8>, <9 10>, <11 12>;
dma-channels = <4>;
#dma-cells = <1>;
};
clkcfg: clkcfg@20002000 {
compatible = "microchip,mpfs-clkcfg";
reg = <0x0 0x20002000 0x0 0x1000>, <0x0 0x3E001000 0x0 0x1000>;
clocks = <&refclk>;
#clock-cells = <1>;
#reset-cells = <1>;
};
ccc_se: clock-controller@38010000 {
compatible = "microchip,mpfs-ccc";
reg = <0x0 0x38010000 0x0 0x1000>, <0x0 0x38020000 0x0 0x1000>,
<0x0 0x39010000 0x0 0x1000>, <0x0 0x39020000 0x0 0x1000>;
#clock-cells = <1>;
status = "disabled";
};
ccc_ne: clock-controller@38040000 {
compatible = "microchip,mpfs-ccc";
reg = <0x0 0x38040000 0x0 0x1000>, <0x0 0x38080000 0x0 0x1000>,
<0x0 0x39040000 0x0 0x1000>, <0x0 0x39080000 0x0 0x1000>;
#clock-cells = <1>;
status = "disabled";
};
ccc_nw: clock-controller@38100000 {
compatible = "microchip,mpfs-ccc";
reg = <0x0 0x38100000 0x0 0x1000>, <0x0 0x38200000 0x0 0x1000>,
<0x0 0x39100000 0x0 0x1000>, <0x0 0x39200000 0x0 0x1000>;
#clock-cells = <1>;
status = "disabled";
};
ccc_sw: clock-controller@38400000 {
compatible = "microchip,mpfs-ccc";
reg = <0x0 0x38400000 0x0 0x1000>, <0x0 0x38800000 0x0 0x1000>,
<0x0 0x39400000 0x0 0x1000>, <0x0 0x39800000 0x0 0x1000>;
#clock-cells = <1>;
status = "disabled";
};
mmuart0: serial@20000000 {
compatible = "ns16550a";
reg = <0x0 0x20000000 0x0 0x400>;
reg-io-width = <4>;
reg-shift = <2>;
interrupt-parent = <&plic>;
interrupts = <90>;
current-speed = <115200>;
clocks = <&clkcfg CLK_MMUART0>;
status = "disabled"; /* Reserved for the HSS */
};
mmuart1: serial@20100000 {
compatible = "ns16550a";
reg = <0x0 0x20100000 0x0 0x400>;
reg-io-width = <4>;
reg-shift = <2>;
interrupt-parent = <&plic>;
interrupts = <91>;
current-speed = <115200>;
clocks = <&clkcfg CLK_MMUART1>;
status = "disabled";
};
mmuart2: serial@20102000 {
compatible = "ns16550a";
reg = <0x0 0x20102000 0x0 0x400>;
reg-io-width = <4>;
reg-shift = <2>;
interrupt-parent = <&plic>;
interrupts = <92>;
current-speed = <115200>;
clocks = <&clkcfg CLK_MMUART2>;
status = "disabled";
};
mmuart3: serial@20104000 {
compatible = "ns16550a";
reg = <0x0 0x20104000 0x0 0x400>;
reg-io-width = <4>;
reg-shift = <2>;
interrupt-parent = <&plic>;
interrupts = <93>;
current-speed = <115200>;
clocks = <&clkcfg CLK_MMUART3>;
status = "disabled";
};
mmuart4: serial@20106000 {
compatible = "ns16550a";
reg = <0x0 0x20106000 0x0 0x400>;
reg-io-width = <4>;
reg-shift = <2>;
interrupt-parent = <&plic>;
interrupts = <94>;
clocks = <&clkcfg CLK_MMUART4>;
current-speed = <115200>;
status = "disabled";
};
/* Common node entry for emmc/sd */
mmc: mmc@20008000 {
compatible = "microchip,mpfs-sd4hc", "cdns,sd4hc";
reg = <0x0 0x20008000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupts = <88>;
clocks = <&clkcfg CLK_MMC>;
max-frequency = <200000000>;
status = "disabled";
};
spi0: spi@20108000 {
compatible = "microchip,mpfs-spi";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x20108000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupts = <54>;
clocks = <&clkcfg CLK_SPI0>;
status = "disabled";
};
spi1: spi@20109000 {
compatible = "microchip,mpfs-spi";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x20109000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupts = <55>;
clocks = <&clkcfg CLK_SPI1>;
status = "disabled";
};
qspi: spi@21000000 {
compatible = "microchip,mpfs-qspi", "microchip,coreqspi-rtl-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x21000000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupts = <85>;
clocks = <&clkcfg CLK_QSPI>;
status = "disabled";
};
i2c0: i2c@2010a000 {
compatible = "microchip,mpfs-i2c", "microchip,corei2c-rtl-v7";
reg = <0x0 0x2010a000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <&plic>;
interrupts = <58>;
clocks = <&clkcfg CLK_I2C0>;
clock-frequency = <100000>;
status = "disabled";
};
i2c1: i2c@2010b000 {
compatible = "microchip,mpfs-i2c", "microchip,corei2c-rtl-v7";
reg = <0x0 0x2010b000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <&plic>;
interrupts = <61>;
clocks = <&clkcfg CLK_I2C1>;
clock-frequency = <100000>;
status = "disabled";
};
can0: can@2010c000 {
compatible = "microchip,mpfs-can";
reg = <0x0 0x2010c000 0x0 0x1000>;
clocks = <&clkcfg CLK_CAN0>, <&clkcfg CLK_MSSPLL3>;
interrupt-parent = <&plic>;
interrupts = <56>;
status = "disabled";
};
can1: can@2010d000 {
compatible = "microchip,mpfs-can";
reg = <0x0 0x2010d000 0x0 0x1000>;
clocks = <&clkcfg CLK_CAN1>, <&clkcfg CLK_MSSPLL3>;
interrupt-parent = <&plic>;
interrupts = <57>;
status = "disabled";
};
mac0: ethernet@20110000 {
compatible = "microchip,mpfs-macb", "cdns,macb";
reg = <0x0 0x20110000 0x0 0x2000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <&plic>;
interrupts = <64>, <65>, <66>, <67>, <68>, <69>;
local-mac-address = [00 00 00 00 00 00];
clocks = <&clkcfg CLK_MAC0>, <&clkcfg CLK_AHB>;
clock-names = "pclk", "hclk";
resets = <&clkcfg CLK_MAC0>;
status = "disabled";
};
mac1: ethernet@20112000 {
compatible = "microchip,mpfs-macb", "cdns,macb";
reg = <0x0 0x20112000 0x0 0x2000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <&plic>;
interrupts = <70>, <71>, <72>, <73>, <74>, <75>;
local-mac-address = [00 00 00 00 00 00];
clocks = <&clkcfg CLK_MAC1>, <&clkcfg CLK_AHB>;
clock-names = "pclk", "hclk";
resets = <&clkcfg CLK_MAC1>;
status = "disabled";
};
gpio0: gpio@20120000 {
compatible = "microchip,mpfs-gpio";
reg = <0x0 0x20120000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupt-controller;
#interrupt-cells = <1>;
clocks = <&clkcfg CLK_GPIO0>;
gpio-controller;
#gpio-cells = <2>;
status = "disabled";
};
gpio1: gpio@20121000 {
compatible = "microchip,mpfs-gpio";
reg = <0x0 0x20121000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupt-controller;
#interrupt-cells = <1>;
clocks = <&clkcfg CLK_GPIO1>;
gpio-controller;
#gpio-cells = <2>;
status = "disabled";
};
gpio2: gpio@20122000 {
compatible = "microchip,mpfs-gpio";
reg = <0x0 0x20122000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupt-controller;
#interrupt-cells = <1>;
clocks = <&clkcfg CLK_GPIO2>;
gpio-controller;
#gpio-cells = <2>;
status = "disabled";
};
rtc: rtc@20124000 {
compatible = "microchip,mpfs-rtc";
reg = <0x0 0x20124000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupts = <80>, <81>;
clocks = <&clkcfg CLK_RTC>, <&clkcfg CLK_RTCREF>;
clock-names = "rtc", "rtcref";
status = "disabled";
};
usb: usb@20201000 {
compatible = "microchip,mpfs-musb";
reg = <0x0 0x20201000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupts = <86>, <87>;
clocks = <&clkcfg CLK_USB>;
interrupt-names = "dma","mc";
status = "disabled";
};
mbox: mailbox@37020000 {
compatible = "microchip,mpfs-mailbox";
reg = <0x0 0x37020000 0x0 0x58>, <0x0 0x2000318C 0x0 0x40>,
<0x0 0x37020800 0x0 0x100>;
interrupt-parent = <&plic>;
interrupts = <96>;
#mbox-cells = <1>;
status = "disabled";
};
syscontroller_qspi: spi@37020100 {
compatible = "microchip,mpfs-qspi", "microchip,coreqspi-rtl-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x37020100 0x0 0x100>;
interrupt-parent = <&plic>;
interrupts = <110>;
clocks = <&scbclk>;
status = "disabled";
};
};
};
// SPDX-License-Identifier: GPL-2.0
#include <linux/swap_cgroup.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/swapops.h> /* depends on mm.h include */
static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
struct page **map;
unsigned long length;
spinlock_t lock;
};
static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
struct swap_cgroup {
unsigned short id;
};
#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup))
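/*
 * Example (added for illustration): with 4 KiB pages and a 2-byte
 * struct swap_cgroup, SC_PER_PAGE is 2048, so an 8 GiB swap device
 * (2097152 slots) needs a map of 1024 pages, i.e. 4 MiB of metadata.
 */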
/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against the SwapCache. At swap_free() it is accessed
 * directly from the swap code.
 *
 * This means:
 * - there is no race in "exchange" when accessed via the SwapCache,
 *   because the SwapCache (and its swp_entry) is under lock;
 * - when called via swap_free(), there is no other user of the entry,
 *   hence no race.
 * So no lock is needed around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */
/*
 * Allocate the buffer pages for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
struct page *page;
struct swap_cgroup_ctrl *ctrl;
unsigned long idx, max;
ctrl = &swap_cgroup_ctrl[type];
for (idx = 0; idx < ctrl->length; idx++) {
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
goto not_enough_page;
ctrl->map[idx] = page;
if (!(idx % SWAP_CLUSTER_MAX))
cond_resched();
}
return 0;
not_enough_page:
max = idx;
for (idx = 0; idx < max; idx++)
__free_page(ctrl->map[idx]);
return -ENOMEM;
}
static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl,
pgoff_t offset)
{
struct page *mappage;
struct swap_cgroup *sc;
mappage = ctrl->map[offset / SC_PER_PAGE];
sc = page_address(mappage);
return sc + offset % SC_PER_PAGE;
}
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
struct swap_cgroup_ctrl **ctrlp)
{
pgoff_t offset = swp_offset(ent);
struct swap_cgroup_ctrl *ctrl;
ctrl = &swap_cgroup_ctrl[swp_type(ent)];
if (ctrlp)
*ctrlp = ctrl;
return __lookup_swap_cgroup(ctrl, offset);
}
/**
* swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
* @ent: swap entry to be cmpxchged
* @old: old id
* @new: new id
*
* Returns the old id on success, 0 on failure.
* (No mem_cgroup uses 0 as its id.)
*/
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
unsigned short old, unsigned short new)
{
struct swap_cgroup_ctrl *ctrl;
struct swap_cgroup *sc;
unsigned long flags;
unsigned short retval;
sc = lookup_swap_cgroup(ent, &ctrl);
spin_lock_irqsave(&ctrl->lock, flags);
retval = sc->id;
if (retval == old)
sc->id = new;
else
retval = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
return retval;
}
/**
* swap_cgroup_record - record mem_cgroup for a set of swap entries
* @ent: the first swap entry to be recorded into
* @id: mem_cgroup to be recorded
* @nr_ents: number of swap entries to be recorded
*
* Returns the old value on success, 0 on failure.
* (Note that the old value may itself be 0.)
*/
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
unsigned int nr_ents)
{
struct swap_cgroup_ctrl *ctrl;
struct swap_cgroup *sc;
unsigned short old;
unsigned long flags;
pgoff_t offset = swp_offset(ent);
pgoff_t end = offset + nr_ents;
sc = lookup_swap_cgroup(ent, &ctrl);
spin_lock_irqsave(&ctrl->lock, flags);
old = sc->id;
for (;;) {
VM_BUG_ON(sc->id != old);
sc->id = id;
offset++;
if (offset == end)
break;
if (offset % SC_PER_PAGE)
sc++;
else
sc = __lookup_swap_cgroup(ctrl, offset);
}
spin_unlock_irqrestore(&ctrl->lock, flags);
return old;
}
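/*
 * Walk-through of the loop above (added for illustration): with
 * SC_PER_PAGE == 2048, recording nr_ents == 4 starting at offset 2046
 * stores into 2046 and 2047 by bumping sc, then reaches offset 2048,
 * where offset % SC_PER_PAGE == 0 forces a fresh __lookup_swap_cgroup()
 * onto the next map page before storing into 2048 and 2049.
 */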
/**
* lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
* @ent: swap entry to be looked up.
*
* Returns the mem_cgroup ID on success, 0 on failure (0 is an invalid ID).
*/
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
if (mem_cgroup_disabled())
return 0;
return lookup_swap_cgroup(ent, NULL)->id;
}
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
void *array;
unsigned long length;
struct swap_cgroup_ctrl *ctrl;
if (mem_cgroup_disabled())
return 0;
length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
array = vcalloc(length, sizeof(void *));
if (!array)
goto nomem;
ctrl = &swap_cgroup_ctrl[type];
mutex_lock(&swap_cgroup_mutex);
ctrl->length = length;
ctrl->map = array;
spin_lock_init(&ctrl->lock);
if (swap_cgroup_prepare(type)) {
/* memory shortage */
ctrl->map = NULL;
ctrl->length = 0;
mutex_unlock(&swap_cgroup_mutex);
vfree(array);
goto nomem;
}
mutex_unlock(&swap_cgroup_mutex);
return 0;
nomem:
pr_info("couldn't allocate enough memory for swap_cgroup\n");
pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
return -ENOMEM;
}
void swap_cgroup_swapoff(int type)
{
struct page **map;
unsigned long i, length;
struct swap_cgroup_ctrl *ctrl;
if (mem_cgroup_disabled())
return;
mutex_lock(&swap_cgroup_mutex);
ctrl = &swap_cgroup_ctrl[type];
map = ctrl->map;
length = ctrl->length;
ctrl->map = NULL;
ctrl->length = 0;
mutex_unlock(&swap_cgroup_mutex);
if (map) {
for (i = 0; i < length; i++) {
struct page *page = map[i];
if (page)
__free_page(page);
if (!(i % SWAP_CLUSTER_MAX))
cond_resched();
}
vfree(map);
}
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI support for CMOS RTC Address Space access
*
* Copyright (C) 2013, Intel Corporation
* Authors: Lan Tianyu <[email protected]>
*/
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mc146818rtc.h>
#include "../internal.h"
static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
{ "PNP0B00" },
{ "PNP0B01" },
{ "PNP0B02" },
{}
};
static acpi_status
acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
u32 bits, u64 *value64,
void *handler_context, void *region_context)
{
int i;
u8 *value = (u8 *)value64;
if (address > 0xff || !value64)
return AE_BAD_PARAMETER;
if (function != ACPI_WRITE && function != ACPI_READ)
return AE_BAD_PARAMETER;
spin_lock_irq(&rtc_lock);
for (i = 0; i < DIV_ROUND_UP(bits, 8); ++i, ++address, ++value)
if (function == ACPI_READ)
*value = CMOS_READ(address);
else
CMOS_WRITE(*value, address);
spin_unlock_irq(&rtc_lock);
return AE_OK;
}
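/*
 * Example (not in the original source): a 16-bit field read at CMOS
 * offset 0x10 arrives here as function == ACPI_READ, address == 0x10,
 * bits == 16; the loop then issues CMOS_READ(0x10) and CMOS_READ(0x11)
 * and fills the two low bytes of *value64.
 */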
int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
{
acpi_status status;
status = acpi_install_address_space_handler(handle,
ACPI_ADR_SPACE_CMOS,
&acpi_cmos_rtc_space_handler,
NULL, NULL);
if (ACPI_FAILURE(status)) {
pr_err("Error installing CMOS-RTC region handler\n");
return -ENODEV;
}
return 1;
}
EXPORT_SYMBOL_GPL(acpi_install_cmos_rtc_space_handler);
void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
{
if (ACPI_FAILURE(acpi_remove_address_space_handler(handle,
ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler)))
pr_err("Error removing CMOS-RTC region handler\n");
}
EXPORT_SYMBOL_GPL(acpi_remove_cmos_rtc_space_handler);
static int acpi_cmos_rtc_attach_handler(struct acpi_device *adev, const struct acpi_device_id *id)
{
return acpi_install_cmos_rtc_space_handler(adev->handle);
}
static void acpi_cmos_rtc_detach_handler(struct acpi_device *adev)
{
acpi_remove_cmos_rtc_space_handler(adev->handle);
}
static struct acpi_scan_handler cmos_rtc_handler = {
.ids = acpi_cmos_rtc_ids,
.attach = acpi_cmos_rtc_attach_handler,
.detach = acpi_cmos_rtc_detach_handler,
};
void __init acpi_cmos_rtc_init(void)
{
acpi_scan_add_handler(&cmos_rtc_handler);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 MediaTek Inc.
* James Liao <[email protected]>
* Copyright (c) 2023 Collabora, Ltd.
* AngeloGioacchino Del Regno <[email protected]>
*/
#include <dt-bindings/clock/mt8135-clk.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "clk-mtk.h"
#include "clk-pll.h"
#define MT8135_PLL_FMAX (2000 * MHZ)
#define CON0_MT8135_RST_BAR BIT(27)
#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \
.id = _id, \
.name = _name, \
.reg = _reg, \
.pwr_reg = _pwr_reg, \
.en_mask = _en_mask, \
.flags = _flags, \
.rst_bar_mask = CON0_MT8135_RST_BAR, \
.fmax = MT8135_PLL_FMAX, \
.pcwbits = _pcwbits, \
.pd_reg = _pd_reg, \
.pd_shift = _pd_shift, \
.tuner_reg = _tuner_reg, \
.pcw_reg = _pcw_reg, \
.pcw_shift = _pcw_shift, \
}
static const struct mtk_pll_data plls[] = {
PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000000, 0, 21, 0x204, 24, 0x0, 0x204, 0),
PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000000, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0),
PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000000, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0),
PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000000, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9),
PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000000, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0),
PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000000, 0, 21, 0x278, 6, 0x0, 0x27c, 0),
PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000000, 0, 31, 0x294, 6, 0x0, 0x298, 0),
PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0),
PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000000, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0),
PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x308, 0),
};
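/*
 * Reading one entry above (added for illustration): for "mainpll",
 * CON0 is at 0x21c and the power register at 0x234; en_mask 0xf0000000
 * holds the enable bits, HAVE_RST_BAR applies CON0_MT8135_RST_BAR
 * (bit 27), the 21-bit PCW is taken from 0x220 at shift 0, and the
 * post-divider sits in 0x21c at shift 6.
 */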
static int clk_mt8135_apmixed_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
int ret;
clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
if (!clk_data)
return -ENOMEM;
ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
if (ret)
goto free_clk_data;
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (ret)
goto unregister_plls;
return 0;
unregister_plls:
mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
free_clk_data:
mtk_free_clk_data(clk_data);
return ret;
}
static void clk_mt8135_apmixed_remove(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
of_clk_del_provider(node);
mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
mtk_free_clk_data(clk_data);
}
static const struct of_device_id of_match_clk_mt8135_apmixed[] = {
{ .compatible = "mediatek,mt8135-apmixedsys" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_match_clk_mt8135_apmixed);
static struct platform_driver clk_mt8135_apmixed_drv = {
.probe = clk_mt8135_apmixed_probe,
.remove = clk_mt8135_apmixed_remove,
.driver = {
.name = "clk-mt8135-apmixed",
.of_match_table = of_match_clk_mt8135_apmixed,
},
};
module_platform_driver(clk_mt8135_apmixed_drv);
MODULE_DESCRIPTION("MediaTek MT8135 apmixedsys clocks driver");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Global definitions for the ANSI FDDI interface.
*
* Version: @(#)if_fddi.h 1.0.2 Sep 29 2004
*
* Author: Lawrence V. Stefani, <[email protected]>
*
* if_fddi.h is based on previous if_ether.h and if_tr.h work by
* Fred N. van Kempen, <[email protected]>
* Donald Becker, <[email protected]>
* Alan Cox, <[email protected]>
* Steve Whitehouse, <[email protected]>
* Peter De Schrijver, <[email protected]>
*/
#ifndef _LINUX_IF_FDDI_H
#define _LINUX_IF_FDDI_H
#include <linux/netdevice.h>
#include <uapi/linux/if_fddi.h>
/* Define FDDI statistics structure */
struct fddi_statistics {
/* Generic statistics. */
struct net_device_stats gen;
/* Detailed FDDI statistics. Adopted from RFC 1512 */
__u8 smt_station_id[8];
__u32 smt_op_version_id;
__u32 smt_hi_version_id;
__u32 smt_lo_version_id;
__u8 smt_user_data[32];
__u32 smt_mib_version_id;
__u32 smt_mac_cts;
__u32 smt_non_master_cts;
__u32 smt_master_cts;
__u32 smt_available_paths;
__u32 smt_config_capabilities;
__u32 smt_config_policy;
__u32 smt_connection_policy;
__u32 smt_t_notify;
__u32 smt_stat_rpt_policy;
__u32 smt_trace_max_expiration;
__u32 smt_bypass_present;
__u32 smt_ecm_state;
__u32 smt_cf_state;
__u32 smt_remote_disconnect_flag;
__u32 smt_station_status;
__u32 smt_peer_wrap_flag;
__u32 smt_time_stamp;
__u32 smt_transition_time_stamp;
__u32 mac_frame_status_functions;
__u32 mac_t_max_capability;
__u32 mac_tvx_capability;
__u32 mac_available_paths;
__u32 mac_current_path;
__u8 mac_upstream_nbr[FDDI_K_ALEN];
__u8 mac_downstream_nbr[FDDI_K_ALEN];
__u8 mac_old_upstream_nbr[FDDI_K_ALEN];
__u8 mac_old_downstream_nbr[FDDI_K_ALEN];
__u32 mac_dup_address_test;
__u32 mac_requested_paths;
__u32 mac_downstream_port_type;
__u8 mac_smt_address[FDDI_K_ALEN];
__u32 mac_t_req;
__u32 mac_t_neg;
__u32 mac_t_max;
__u32 mac_tvx_value;
__u32 mac_frame_cts;
__u32 mac_copied_cts;
__u32 mac_transmit_cts;
__u32 mac_error_cts;
__u32 mac_lost_cts;
__u32 mac_frame_error_threshold;
__u32 mac_frame_error_ratio;
__u32 mac_rmt_state;
__u32 mac_da_flag;
__u32 mac_una_da_flag;
__u32 mac_frame_error_flag;
__u32 mac_ma_unitdata_available;
__u32 mac_hardware_present;
__u32 mac_ma_unitdata_enable;
__u32 path_tvx_lower_bound;
__u32 path_t_max_lower_bound;
__u32 path_max_t_req;
__u32 path_configuration[8];
__u32 port_my_type[2];
__u32 port_neighbor_type[2];
__u32 port_connection_policies[2];
__u32 port_mac_indicated[2];
__u32 port_current_path[2];
__u8 port_requested_paths[3*2];
__u32 port_mac_placement[2];
__u32 port_available_paths[2];
__u32 port_pmd_class[2];
__u32 port_connection_capabilities[2];
__u32 port_bs_flag[2];
__u32 port_lct_fail_cts[2];
__u32 port_ler_estimate[2];
__u32 port_lem_reject_cts[2];
__u32 port_lem_cts[2];
__u32 port_ler_cutoff[2];
__u32 port_ler_alarm[2];
__u32 port_connect_state[2];
__u32 port_pcm_state[2];
__u32 port_pc_withhold[2];
__u32 port_ler_flag[2];
__u32 port_hardware_present[2];
};
#endif /* _LINUX_IF_FDDI_H */
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (c) 2019 Christian Hewitt <[email protected]>
*/
/ {
model = "Khadas VIM3";
vddcpu_a: regulator-vddcpu-a {
/*
* MP8756GD Regulator.
*/
compatible = "pwm-regulator";
regulator-name = "VDDCPU_A";
regulator-min-microvolt = <690000>;
regulator-max-microvolt = <1050000>;
pwm-supply = <&dc_in>;
pwms = <&pwm_ab 0 1250 0>;
pwm-dutycycle-range = <100 0>;
regulator-boot-on;
regulator-always-on;
};
vddcpu_b: regulator-vddcpu-b {
/*
* Silergy SY8030DEC Regulator.
*/
compatible = "pwm-regulator";
regulator-name = "VDDCPU_B";
regulator-min-microvolt = <690000>;
regulator-max-microvolt = <1050000>;
pwm-supply = <&vsys_3v3>;
pwms = <&pwm_AO_cd 1 1250 0>;
pwm-dutycycle-range = <100 0>;
regulator-boot-on;
regulator-always-on;
};
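/*
 * Note on both PWM regulators above (added for clarity):
 * pwm-dutycycle-range = <100 0> inverts the duty-to-voltage mapping,
 * so 100% duty selects regulator-min-microvolt (690 mV) and 0% duty
 * selects regulator-max-microvolt (1.05 V). The 1250 ns period gives
 * an 800 kHz PWM input to each regulator's feedback network.
 */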
};
&cpu0 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
clock-latency = <50000>;
};
&cpu1 {
cpu-supply = <&vddcpu_b>;
operating-points-v2 = <&cpu_opp_table_0>;
clocks = <&clkc CLKID_CPU_CLK>;
clock-latency = <50000>;
};
&cpu100 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
clock-latency = <50000>;
};
&cpu101 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
clock-latency = <50000>;
};
&cpu102 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
clock-latency = <50000>;
};
&cpu103 {
cpu-supply = <&vddcpu_a>;
operating-points-v2 = <&cpub_opp_table_1>;
clocks = <&clkc CLKID_CPUB_CLK>;
clock-latency = <50000>;
};
&pwm_ab {
pinctrl-0 = <&pwm_a_e_pins>;
pinctrl-names = "default";
clocks = <&xtal>;
clock-names = "clkin0";
status = "okay";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
clocks = <&xtal>;
clock-names = "clkin1";
status = "okay";
};
/*
* PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
*
* Copyright (c) 2008-2009 USI Co., Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
*/
#include <linux/firmware.h>
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_ctl.h"
#include "pm8001_chips.h"
/* scsi host attributes */
/**
* pm8001_ctl_mpi_interface_rev_show - MPI interface revision number
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
} else {
return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
}
}
static
DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
/**
* controller_fatal_error_show - check whether the controller is in a fatal error state
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t controller_fatal_error_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return sysfs_emit(buf, "%d\n",
pm8001_ha->controller_fatal_error);
}
static DEVICE_ATTR_RO(controller_fatal_error);
/**
* pm8001_ctl_fw_version_show - firmware version
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
} else {
return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
}
}
static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
/**
* pm8001_ctl_ila_version_show - ILA version
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id != chip_8001) {
return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version));
}
return 0;
}
static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL);
/**
* pm8001_ctl_inactive_fw_version_show - Inactive firmware version number
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id != chip_8001) {
return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
}
return 0;
}
static
DEVICE_ATTR(inc_fw_ver, 0444, pm8001_ctl_inactive_fw_version_show, NULL);
/**
* pm8001_ctl_max_out_io_show - maximum outstanding I/O supported
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
} else {
return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
}
}
static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
/**
* pm8001_ctl_max_devices_show - maximum number of devices supported
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
return sysfs_emit(buf, "%04d\n",
(u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16));
} else {
return sysfs_emit(buf, "%04d\n",
(u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16));
}
}
static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
/**
* pm8001_ctl_max_sg_list_show - maximum scatter-gather list supported
* (0 means no hardware limitation)
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
return sysfs_emit(buf, "%04d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF);
} else {
return sysfs_emit(buf, "%04d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF);
}
}
static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
#define SAS_1_0 0x1
#define SAS_1_1 0x2
#define SAS_2_0 0x4
static ssize_t
show_sas_spec_support_status(unsigned int mode, char *buf)
{
ssize_t len = 0;
if (mode & SAS_1_1)
len = sprintf(buf, "%s", "SAS1.1");
if (mode & SAS_2_0)
len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0");
len += sprintf(buf + len, "\n");
return len;
}
/**
* pm8001_ctl_sas_spec_support_show - sas spec supported
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
unsigned int mode;
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
/* fe000000 means supports SAS2.1 */
if (pm8001_ha->chip_id == chip_8001)
mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
0xfe000000)>>25;
else
/* fe000000 means supports SAS2.1 */
mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
0xfe000000)>>25;
return show_sas_spec_support_status(mode, buf);
}
static DEVICE_ATTR(sas_spec_support, S_IRUGO,
pm8001_ctl_sas_spec_support_show, NULL);
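/*
 * Decoding example (added for illustration): ctrl_cap_flag ==
 * 0x08000000 yields mode = (0x08000000 & 0xfe000000) >> 25 = 0x4,
 * i.e. only SAS_2_0 is set and the attribute reads "SAS2.0".
 */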
/**
* pm8001_ctl_host_sas_address_show - sas address
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* This is the controller sas address
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return sysfs_emit(buf, "0x%016llx\n",
be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr));
}
static DEVICE_ATTR(host_sas_address, S_IRUGO,
pm8001_ctl_host_sas_address_show, NULL);
/**
* pm8001_ctl_logging_level_show - logging level
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read/write' shost attribute.
*/
static ssize_t pm8001_ctl_logging_level_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return sysfs_emit(buf, "%08xh\n", pm8001_ha->logging_level);
}
static ssize_t pm8001_ctl_logging_level_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
int val = 0;
if (sscanf(buf, "%x", &val) != 1)
return -EINVAL;
pm8001_ha->logging_level = val;
return strlen(buf);
}
static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store);
/**
* pm8001_ctl_aap_log_show - aap1 event log
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
u8 *ptr = (u8 *)pm8001_ha->memoryMap.region[AAP1].virt_ptr;
int i;
char *str = buf;
int max = 2;
for (i = 0; i < max; i++) {
str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x "
"0x%08x 0x%08x\n",
pm8001_ctl_aap1_memmap(ptr, i, 0),
pm8001_ctl_aap1_memmap(ptr, i, 4),
pm8001_ctl_aap1_memmap(ptr, i, 8),
pm8001_ctl_aap1_memmap(ptr, i, 12),
pm8001_ctl_aap1_memmap(ptr, i, 16),
pm8001_ctl_aap1_memmap(ptr, i, 20),
pm8001_ctl_aap1_memmap(ptr, i, 24),
pm8001_ctl_aap1_memmap(ptr, i, 28));
}
return str - buf;
}
static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
/**
* pm8001_ctl_ib_queue_log_show - Inbound queue log
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
int offset;
char *str = buf;
int start = 0;
u32 ib_offset = pm8001_ha->ib_offset;
u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128;
#define IB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
memoryMap.region[ib_offset].virt_ptr + \
pm8001_ha->evtlog_ib_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
start = start + 4;
}
pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
if (((pm8001_ha->evtlog_ib_offset) % queue_size) == 0)
pm8001_ha->evtlog_ib_offset = 0;
return str - buf;
}
static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
/**
* pm8001_ctl_ob_queue_log_show - Outbound queue log
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
int offset;
char *str = buf;
int start = 0;
u32 ob_offset = pm8001_ha->ob_offset;
u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128;
#define OB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
memoryMap.region[ob_offset].virt_ptr + \
pm8001_ha->evtlog_ob_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
start = start + 4;
}
pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
if (((pm8001_ha->evtlog_ob_offset) % queue_size) == 0)
pm8001_ha->evtlog_ob_offset = 0;
return str - buf;
}
static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
/**
* pm8001_ctl_bios_version_show - BIOS version display
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
char *str = buf;
int bios_index;
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
pm8001_ha->nvmd_completion = &completion;
payload.minor_function = 7;
payload.offset = 0;
payload.rd_length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
if (!payload.func_specific)
return -ENOMEM;
if (PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload)) {
kfree(payload.func_specific);
return -ENOMEM;
}
wait_for_completion(&completion);
for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
bios_index++)
str += sprintf(str, "%c",
*(payload.func_specific+bios_index));
kfree(payload.func_specific);
return str - buf;
}
static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
/**
* event_log_size_show - event log size
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs read shost attribute.
*/
static ssize_t event_log_size_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
}
static DEVICE_ATTR_RO(event_log_size);
/**
* pm8001_ctl_iop_log_show - IOP event log
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
char *str = buf;
u32 read_size =
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024;
static u32 start, end, count;
u32 max_read_times = 32;
u32 max_count = (read_size * 1024) / (max_read_times * 4);
u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr;
if ((count % max_count) == 0) {
start = 0;
end = max_read_times;
count = 0;
} else {
start = end;
end = end + max_read_times;
}
for (; start < end; start++)
str += sprintf(str, "%08x ", *(temp+start));
count++;
return str - buf;
}
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
/**
* pm8001_ctl_fatal_log_show - fatal error logging
* @cdev: pointer to embedded class device
* @attr: device attribute
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
ssize_t count;
count = pm80xx_get_fatal_dump(cdev, attr, buf);
return count;
}
static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
/**
* non_fatal_log_show - non fatal error logging
* @cdev: pointer to embedded class device
* @attr: device attribute
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t non_fatal_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
u32 count;
count = pm80xx_get_non_fatal_dump(cdev, attr, buf);
return count;
}
static DEVICE_ATTR_RO(non_fatal_log);
static ssize_t non_fatal_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return sysfs_emit(buf, "%08x\n",
pm8001_ha->non_fatal_count);
}
static ssize_t non_fatal_count_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
int val = 0;
if (kstrtoint(buf, 16, &val) != 0)
return -EINVAL;
pm8001_ha->non_fatal_count = val;
return strlen(buf);
}
static DEVICE_ATTR_RW(non_fatal_count);
/**
* pm8001_ctl_gsm_log_show - gsm dump collection
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
ssize_t count;
count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
return count;
}
static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
#define FLASH_CMD_NONE 0x00
#define FLASH_CMD_UPDATE 0x01
#define FLASH_CMD_SET_NVMD 0x02
struct flash_command {
u8 command[8];
int code;
};
static const struct flash_command flash_command_table[] = {
{"set_nvmd", FLASH_CMD_SET_NVMD},
{"update", FLASH_CMD_UPDATE},
{"", FLASH_CMD_NONE} /* Last entry should be NULL. */
};
struct error_fw {
char *reason;
int err_code;
};
static const struct error_fw flash_error_table[] = {
{"Failed to open fw image file", FAIL_OPEN_BIOS_FILE},
{"image header mismatch", FLASH_UPDATE_HDR_ERR},
{"image offset mismatch", FLASH_UPDATE_OFFSET_ERR},
{"image CRC Error", FLASH_UPDATE_CRC_ERR},
{"image length Error.", FLASH_UPDATE_LENGTH_ERR},
{"Failed to program flash chip", FLASH_UPDATE_HW_ERR},
{"Flash chip not supported.", FLASH_UPDATE_DNLD_NOT_SUPPORTED},
{"Flash update disabled.", FLASH_UPDATE_DISABLED},
{"Flash in progress", FLASH_IN_PROGRESS},
{"Image file size Error", FAIL_FILE_SIZE},
{"Input parameter error", FAIL_PARAMETERS},
{"Out of memory", FAIL_OUT_MEMORY},
{"OK", 0} /* Last entry err_code = 0. */
};
static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
{
struct pm8001_ioctl_payload *payload;
DECLARE_COMPLETION_ONSTACK(completion);
u8 *ioctlbuffer;
u32 ret;
u32 length = 1024 * 5 + sizeof(*payload) - 1;
if (pm8001_ha->fw_image->size > 4096) {
pm8001_ha->fw_status = FAIL_FILE_SIZE;
return -EFAULT;
}
ioctlbuffer = kzalloc(length, GFP_KERNEL);
if (!ioctlbuffer) {
pm8001_ha->fw_status = FAIL_OUT_MEMORY;
return -ENOMEM;
}
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
pm8001_ha->fw_image->size);
payload->wr_length = pm8001_ha->fw_image->size;
payload->id = 0;
payload->minor_function = 0x1;
pm8001_ha->nvmd_completion = &completion;
ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
if (ret) {
pm8001_ha->fw_status = FAIL_OUT_MEMORY;
goto out;
}
wait_for_completion(&completion);
out:
kfree(ioctlbuffer);
return ret;
}
static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
{
struct pm8001_ioctl_payload *payload;
DECLARE_COMPLETION_ONSTACK(completion);
u8 *ioctlbuffer;
struct fw_control_info *fwControl;
__be32 partitionSizeTmp;
u32 partitionSize;
u32 loopNumber, loopcount;
struct pm8001_fw_image_header *image_hdr;
u32 sizeRead = 0;
u32 ret = 0;
u32 length = 1024 * 16 + sizeof(*payload) - 1;
u32 fc_len;
u8 *read_buf;
if (pm8001_ha->fw_image->size < 28) {
pm8001_ha->fw_status = FAIL_FILE_SIZE;
return -EFAULT;
}
ioctlbuffer = kzalloc(length, GFP_KERNEL);
if (!ioctlbuffer) {
pm8001_ha->fw_status = FAIL_OUT_MEMORY;
return -ENOMEM;
}
image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
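	/* Walk the image partition by partition: each partition is sent in
	 * IOCTL_BUF_SIZE chunks, and every chunk is acknowledged through the
	 * completion (and its retcode checked) before the next one goes out.
	 */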
while (sizeRead < pm8001_ha->fw_image->size) {
partitionSizeTmp =
*(__be32 *)((u8 *)&image_hdr->image_length + sizeRead);
partitionSize = be32_to_cpu(partitionSizeTmp);
loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN,
IOCTL_BUF_SIZE);
for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
payload->wr_length = 1024*16;
payload->id = 0;
fwControl =
(struct fw_control_info *)&payload->func_specific;
fwControl->len = IOCTL_BUF_SIZE; /* IN */
fwControl->size = partitionSize + HEADER_LEN;/* IN */
fwControl->retcode = 0;/* OUT */
fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */
			/*
			 * For the last chunk, when the image size is not a
			 * multiple of the IOCTL buffer size, copy only the
			 * remainder.
			 */
read_buf = (u8 *)pm8001_ha->fw_image->data + sizeRead;
fc_len = (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
if (loopcount - loopNumber == 1 && fc_len) {
fwControl->len = fc_len;
memcpy((u8 *)fwControl->buffer, read_buf, fc_len);
sizeRead += fc_len;
} else {
memcpy((u8 *)fwControl->buffer, read_buf, IOCTL_BUF_SIZE);
sizeRead += IOCTL_BUF_SIZE;
}
pm8001_ha->nvmd_completion = &completion;
ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
if (ret) {
pm8001_ha->fw_status = FAIL_OUT_MEMORY;
goto out;
}
wait_for_completion(&completion);
if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
pm8001_ha->fw_status = fwControl->retcode;
ret = -EFAULT;
goto out;
}
}
}
out:
kfree(ioctlbuffer);
return ret;
}
static ssize_t pm8001_store_update_fw(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
char *cmd_ptr, *filename_ptr;
int res, i;
int flash_command = FLASH_CMD_NONE;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
	/* this check prevents two flash processes from running at once,
	 * so it must be done before anything else */
if (pm8001_ha->fw_status == FLASH_IN_PROGRESS)
return -EINPROGRESS;
pm8001_ha->fw_status = FLASH_IN_PROGRESS;
cmd_ptr = kcalloc(count, 2, GFP_KERNEL);
if (!cmd_ptr) {
pm8001_ha->fw_status = FAIL_OUT_MEMORY;
return -ENOMEM;
}
filename_ptr = cmd_ptr + count;
res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
if (res != 2) {
pm8001_ha->fw_status = FAIL_PARAMETERS;
ret = -EINVAL;
goto out;
}
for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
if (!memcmp(flash_command_table[i].command,
cmd_ptr, strlen(cmd_ptr))) {
flash_command = flash_command_table[i].code;
break;
}
}
if (flash_command == FLASH_CMD_NONE) {
pm8001_ha->fw_status = FAIL_PARAMETERS;
ret = -EINVAL;
goto out;
}
ret = request_firmware(&pm8001_ha->fw_image,
filename_ptr,
pm8001_ha->dev);
if (ret) {
pm8001_dbg(pm8001_ha, FAIL,
"Failed to load firmware image file %s, error %d\n",
filename_ptr, ret);
pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE;
goto out;
}
if (FLASH_CMD_UPDATE == flash_command)
ret = pm8001_update_flash(pm8001_ha);
else
ret = pm8001_set_nvmd(pm8001_ha);
release_firmware(pm8001_ha->fw_image);
out:
kfree(cmd_ptr);
if (ret)
return ret;
pm8001_ha->fw_status = FLASH_OK;
return count;
}
static ssize_t pm8001_show_update_fw(struct device *cdev,
struct device_attribute *attr, char *buf)
{
int i;
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
for (i = 0; flash_error_table[i].err_code != 0; i++) {
if (flash_error_table[i].err_code == pm8001_ha->fw_status)
break;
}
if (pm8001_ha->fw_status != FLASH_IN_PROGRESS)
pm8001_ha->fw_status = FLASH_OK;
return sysfs_emit(buf, "status=%x %s\n",
flash_error_table[i].err_code,
flash_error_table[i].reason);
}
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
static const char *const mpiStateText[] = {
"MPI is not initialized",
"MPI is successfully initialized",
"MPI termination is in progress",
"MPI initialization failed with error in [31:16]"
};
/**
* ctl_mpi_state_show - controller MPI state check
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_mpi_state_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
unsigned int mpidw0;
mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0);
return sysfs_emit(buf, "%s\n", mpiStateText[mpidw0 & 0x0003]);
}
static DEVICE_ATTR_RO(ctl_mpi_state);
/**
* ctl_hmi_error_show - controller MPI initialization fails
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_hmi_error_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
unsigned int mpidw0;
mpidw0 = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 0);
return sysfs_emit(buf, "0x%08x\n", (mpidw0 >> 16));
}
static DEVICE_ATTR_RO(ctl_hmi_error);
/**
* ctl_raae_count_show - controller raae count check
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_raae_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
unsigned int raaecnt;
raaecnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 12);
return sysfs_emit(buf, "0x%08x\n", raaecnt);
}
static DEVICE_ATTR_RO(ctl_raae_count);
/**
* ctl_iop0_count_show - controller iop0 count check
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_iop0_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
unsigned int iop0cnt;
iop0cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 16);
return sysfs_emit(buf, "0x%08x\n", iop0cnt);
}
static DEVICE_ATTR_RO(ctl_iop0_count);
/**
* ctl_iop1_count_show - controller iop1 count check
* @cdev: pointer to embedded class device
* @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
static ssize_t ctl_iop1_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
unsigned int iop1cnt;
iop1cnt = pm8001_mr32(pm8001_ha->general_stat_tbl_addr, 20);
return sysfs_emit(buf, "0x%08x\n", iop1cnt);
}
static DEVICE_ATTR_RO(ctl_iop1_count);
static struct attribute *pm8001_host_attrs[] = {
&dev_attr_interface_rev.attr,
&dev_attr_controller_fatal_error.attr,
&dev_attr_fw_version.attr,
&dev_attr_update_fw.attr,
&dev_attr_aap_log.attr,
&dev_attr_iop_log.attr,
&dev_attr_fatal_log.attr,
&dev_attr_non_fatal_log.attr,
&dev_attr_non_fatal_count.attr,
&dev_attr_gsm_log.attr,
&dev_attr_max_out_io.attr,
&dev_attr_max_devices.attr,
&dev_attr_max_sg_list.attr,
&dev_attr_sas_spec_support.attr,
&dev_attr_logging_level.attr,
&dev_attr_event_log_size.attr,
&dev_attr_host_sas_address.attr,
&dev_attr_bios_version.attr,
&dev_attr_ib_log.attr,
&dev_attr_ob_log.attr,
&dev_attr_ila_version.attr,
&dev_attr_inc_fw_ver.attr,
&dev_attr_ctl_mpi_state.attr,
&dev_attr_ctl_hmi_error.attr,
&dev_attr_ctl_raae_count.attr,
&dev_attr_ctl_iop0_count.attr,
&dev_attr_ctl_iop1_count.attr,
NULL,
};
static const struct attribute_group pm8001_host_attr_group = {
.attrs = pm8001_host_attrs
};
const struct attribute_group *pm8001_host_groups[] = {
&pm8001_host_attr_group,
NULL
};
const struct attribute_group *pm8001_sdev_groups[] = {
&sas_ata_sdev_attr_group,
NULL
};
#ifndef _SLHC_H
#define _SLHC_H
/*
* Definitions for tcp compression routines.
*
* $Header: slcompress.h,v 1.10 89/12/31 08:53:02 van Exp $
*
* Copyright (c) 1989 Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms are permitted
* provided that the above copyright notice and this paragraph are
* duplicated in all such forms and that any documentation,
* advertising materials, and other materials related to such
* distribution and use acknowledge that the software was developed
* by the University of California, Berkeley. The name of the
* University may not be used to endorse or promote products derived
* from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Van Jacobson ([email protected]), Dec 31, 1989:
* - Initial distribution.
*
*
* modified for KA9Q Internet Software Package by
* Katie Stevens ([email protected])
* University of California, Davis
* Computing Services
* - 01-31-90 initial adaptation
*
* - Feb 1991 [email protected]
* variable number of conversation slots
* allow zero or one slots
* separate routines
* status display
*/
/*
* Compressed packet format:
*
* The first octet contains the packet type (top 3 bits), TCP
* 'push' bit, and flags that indicate which of the 4 TCP sequence
* numbers have changed (bottom 5 bits). The next octet is a
* conversation number that associates a saved IP/TCP header with
* the compressed packet. The next two octets are the TCP checksum
* from the original datagram. The next 0 to 15 octets are
* sequence number changes, one change per bit set in the header
* (there may be no changes and there are two special cases where
* the receiver implicitly knows what changed -- see below).
*
* There are 5 numbers which can change (they are always inserted
* in the following order): TCP urgent pointer, window,
* acknowledgment, sequence number and IP ID. (The urgent pointer
* is different from the others in that its value is sent, not the
* change in value.) Since typical use of SLIP links is biased
* toward small packets (see comments on MTU/MSS below), changes
* use a variable length coding with one octet for numbers in the
* range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the
* range 256 - 65535 or 0. (If the change in sequence number or
* ack is more than 65535, an uncompressed packet is sent.)
*/
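/*
 * Illustrative sketch of the variable-length coding described above
 * (the helper name is hypothetical; the real encoder in slhc.c is a
 * macro): 1-255 take one octet, 0 and 256-65535 take three.
 */
static inline unsigned char *slhc_encode_delta(unsigned char *cp,
					       unsigned short n)
{
	if (n >= 256 || n == 0) {
		*cp++ = 0;		/* long-form marker */
		*cp++ = n >> 8;		/* MSB */
		*cp++ = n & 0xff;	/* LSB */
	} else {
		*cp++ = n;		/* short form: one octet */
	}
	return cp;
}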
/*
* Packet types (must not conflict with IP protocol version)
*
* The top nibble of the first octet is the packet type. There are
* three possible types: IP (not proto TCP or tcp with one of the
* control flags set); uncompressed TCP (a normal IP/TCP packet but
* with the 8-bit protocol field replaced by an 8-bit connection id --
* this type of packet syncs the sender & receiver); and compressed
* TCP (described above).
*
* LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and
* is logically part of the 4-bit "changes" field that follows. Top
* three bits are actual packet type. For backward compatibility
* and in the interest of conserving bits, numbers are chosen so the
* IP protocol version number (4) which normally appears in this nibble
* means "IP packet".
*/
#include <linux/ip.h>
#include <linux/tcp.h>
/* SLIP compression masks for len/vers byte */
#define SL_TYPE_IP 0x40
#define SL_TYPE_UNCOMPRESSED_TCP 0x70
#define SL_TYPE_COMPRESSED_TCP 0x80
#define SL_TYPE_ERROR 0x00
/* Bits in first octet of compressed packet */
#define NEW_C 0x40 /* flag bits for what changed in a packet */
#define NEW_I 0x20
#define NEW_S 0x08
#define NEW_A 0x04
#define NEW_W 0x02
#define NEW_U 0x01
/* reserved, special-case values of above */
#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */
#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */
#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U)
#define TCP_PUSH_BIT 0x10
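/*
 * Illustrative sketch (hypothetical helper): classifying a received
 * frame by its first octet with the masks above, per the "Packet
 * types" comment.
 */
static inline int slhc_frame_type(unsigned char c)
{
	if (c & SL_TYPE_COMPRESSED_TCP)		/* top bit set */
		return SL_TYPE_COMPRESSED_TCP;
	if ((c & 0x70) == SL_TYPE_UNCOMPRESSED_TCP)
		return SL_TYPE_UNCOMPRESSED_TCP;
	if ((c & 0x70) == SL_TYPE_IP)		/* IP version nibble is 4 */
		return SL_TYPE_IP;
	return SL_TYPE_ERROR;
}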
/*
* data type and sizes conversion assumptions:
*
* VJ code KA9Q style generic
* u_char byte_t unsigned char 8 bits
* u_short int16 unsigned short 16 bits
* u_int int16 unsigned short 16 bits
* u_long unsigned long unsigned long 32 bits
* int int32 long 32 bits
*/
typedef __u8 byte_t;
typedef __u32 int32;
/*
* "state" data for each active tcp conversation on the wire. This is
* basically a copy of the entire IP/TCP header from the last packet
* we saw from the conversation together with a small identifier
* the transmit & receive ends of the line use to locate saved header.
*/
struct cstate {
byte_t cs_this; /* connection id number (xmit) */
bool initialized; /* true if initialized */
struct cstate *next; /* next in ring (xmit) */
struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
struct tcphdr cs_tcp;
unsigned char cs_ipopt[64];
unsigned char cs_tcpopt[64];
int cs_hsize;
};
#define NULLSLSTATE (struct cstate *)0
/*
* all the state data for one serial line (we need one of these per line).
*/
struct slcompress {
struct cstate *tstate; /* transmit connection states (array)*/
struct cstate *rstate; /* receive connection states (array)*/
	byte_t tslot_limit;	/* highest transmit slot id */
	byte_t rslot_limit;	/* highest receive slot id */
byte_t xmit_oldest; /* oldest xmit in ring */
byte_t xmit_current; /* most recent xmit id */
byte_t recv_current; /* most recent rcvd id */
byte_t flags;
#define SLF_TOSS 0x01 /* tossing rcvd frames until id received */
int32 sls_o_nontcp; /* outbound non-TCP packets */
int32 sls_o_tcp; /* outbound TCP packets */
int32 sls_o_uncompressed; /* outbound uncompressed packets */
int32 sls_o_compressed; /* outbound compressed packets */
int32 sls_o_searches; /* searches for connection state */
int32 sls_o_misses; /* times couldn't find conn. state */
int32 sls_i_uncompressed; /* inbound uncompressed packets */
int32 sls_i_compressed; /* inbound compressed packets */
int32 sls_i_error; /* inbound error packets */
int32 sls_i_tossed; /* inbound packets tossed because of error */
int32 sls_i_runt;
int32 sls_i_badcheck;
};
#define NULLSLCOMPR (struct slcompress *)0
/* In slhc.c: */
struct slcompress *slhc_init(int rslots, int tslots);
void slhc_free(struct slcompress *comp);
int slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
unsigned char *ocp, unsigned char **cpp, int compress_cid);
int slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize);
int slhc_remember(struct slcompress *comp, unsigned char *icp, int isize);
int slhc_toss(struct slcompress *comp);
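/*
 * Usage sketch (illustrative only; error handling elided, buffer names
 * hypothetical):
 *
 *	struct slcompress *comp = slhc_init(16, 16);
 *
 *	// xmit: try to compress a TCP/IP frame, 1 = may compress the cid
 *	len = slhc_compress(comp, ibuf, len, obuf, &optr, 1);
 *
 *	// recv: expand a compressed frame, or resync from an
 *	// uncompressed one
 *	len = slhc_uncompress(comp, ibuf, len);  // SL_TYPE_COMPRESSED_TCP
 *	len = slhc_remember(comp, ibuf, len);    // SL_TYPE_UNCOMPRESSED_TCP
 *
 *	slhc_free(comp);
 */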
#endif /* _SLHC_H */
// SPDX-License-Identifier: GPL-2.0
/*
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <crypto/internal/kpp.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#include "atmel-i2c.h"
static struct atmel_ecc_driver_data driver_data;
/**
* struct atmel_ecdh_ctx - transformation context
* @client : pointer to i2c client device
 * @fallback : used for unsupported curves or when the user wants to use
 *             their own private key.
* @public_key : generated when calling set_secret(). It's the responsibility
* of the user to not call set_secret() while
* generate_public_key() or compute_shared_secret() are in flight.
* @curve_id : elliptic curve id
 * @do_fallback: true when the device doesn't support the curve or when the
 *               user wants to use their own private key.
*/
struct atmel_ecdh_ctx {
struct i2c_client *client;
struct crypto_kpp *fallback;
const u8 *public_key;
unsigned int curve_id;
bool do_fallback;
};
static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
int status)
{
struct kpp_request *req = areq;
struct atmel_i2c_cmd *cmd = &work_data->cmd;
size_t copied, n_sz;
if (status)
goto free_work_data;
/* might want less than we've got */
n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len);
/* copy the shared secret */
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
&cmd->data[RSP_DATA_IDX], n_sz);
if (copied != n_sz)
status = -EINVAL;
/* fall through */
free_work_data:
kfree_sensitive(work_data);
kpp_request_complete(req, status);
}
/*
 * A random private key is generated and stored in the device. The device
 * returns the corresponding public key.
*/
static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
struct atmel_i2c_cmd *cmd;
void *public_key;
struct ecdh params;
int ret = -ENOMEM;
/* free the old public key, if any */
kfree(ctx->public_key);
/* make sure you don't free the old public key twice */
ctx->public_key = NULL;
	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
dev_err(&ctx->client->dev, "crypto_ecdh_decode_key failed\n");
return -EINVAL;
}
if (params.key_size) {
/* fallback to ecdh software implementation */
ctx->do_fallback = true;
return crypto_kpp_set_secret(ctx->fallback, buf, len);
}
cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
/*
* The device only supports NIST P256 ECC keys. The public key size will
* always be the same. Use a macro for the key size to avoid unnecessary
* computations.
*/
public_key = kmalloc(ATMEL_ECC_PUBKEY_SIZE, GFP_KERNEL);
if (!public_key)
goto free_cmd;
ctx->do_fallback = false;
atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
ret = atmel_i2c_send_receive(ctx->client, cmd);
if (ret)
goto free_public_key;
/* save the public key */
memcpy(public_key, &cmd->data[RSP_DATA_IDX], ATMEL_ECC_PUBKEY_SIZE);
ctx->public_key = public_key;
kfree(cmd);
return 0;
free_public_key:
kfree(public_key);
free_cmd:
kfree(cmd);
return ret;
}
static int atmel_ecdh_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
size_t copied, nbytes;
int ret = 0;
if (ctx->do_fallback) {
kpp_request_set_tfm(req, ctx->fallback);
return crypto_kpp_generate_public_key(req);
}
if (!ctx->public_key)
return -EINVAL;
/* might want less than we've got */
nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
/* public key was saved at private key generation */
copied = sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, nbytes),
ctx->public_key, nbytes);
if (copied != nbytes)
ret = -EINVAL;
return ret;
}
static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
struct atmel_i2c_work_data *work_data;
gfp_t gfp;
int ret;
if (ctx->do_fallback) {
kpp_request_set_tfm(req, ctx->fallback);
return crypto_kpp_compute_shared_secret(req);
}
	/* the public key must be exactly one curve point (x and y coordinates) */
if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
return -EINVAL;
gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
work_data = kmalloc(sizeof(*work_data), gfp);
if (!work_data)
return -ENOMEM;
work_data->ctx = ctx;
work_data->client = ctx->client;
ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src);
if (ret)
goto free_work_data;
atmel_i2c_enqueue(work_data, atmel_ecdh_done, req);
return -EINPROGRESS;
free_work_data:
kfree(work_data);
return ret;
}
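/*
 * Pick the i2c client with the fewest crypto transformations currently
 * bound to it, so tfm users are spread evenly across all probed
 * devices; the winner's tfm count is bumped under the list lock.
 */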
static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
{
struct atmel_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
struct i2c_client *client = ERR_PTR(-ENODEV);
int min_tfm_cnt = INT_MAX;
int tfm_cnt;
spin_lock(&driver_data.i2c_list_lock);
if (list_empty(&driver_data.i2c_client_list)) {
spin_unlock(&driver_data.i2c_list_lock);
return ERR_PTR(-ENODEV);
}
list_for_each_entry(i2c_priv, &driver_data.i2c_client_list,
i2c_client_list_node) {
tfm_cnt = atomic_read(&i2c_priv->tfm_count);
if (tfm_cnt < min_tfm_cnt) {
min_tfm_cnt = tfm_cnt;
min_i2c_priv = i2c_priv;
}
if (!min_tfm_cnt)
break;
}
if (min_i2c_priv) {
atomic_inc(&min_i2c_priv->tfm_count);
client = min_i2c_priv->client;
}
spin_unlock(&driver_data.i2c_list_lock);
return client;
}
static void atmel_ecc_i2c_client_free(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
atomic_dec(&i2c_priv->tfm_count);
}
static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
{
const char *alg = kpp_alg_name(tfm);
struct crypto_kpp *fallback;
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->client = atmel_ecc_i2c_client_alloc();
if (IS_ERR(ctx->client)) {
pr_err("tfm - i2c_client binding failed\n");
return PTR_ERR(ctx->client);
}
fallback = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
ctx->fallback = fallback;
return 0;
}
static void atmel_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
kfree(ctx->public_key);
crypto_free_kpp(ctx->fallback);
atmel_ecc_i2c_client_free(ctx->client);
}
static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
{
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
if (ctx->fallback)
return crypto_kpp_maxsize(ctx->fallback);
/*
* The device only supports NIST P256 ECC keys. The public key size will
* always be the same. Use a macro for the key size to avoid unnecessary
* computations.
*/
return ATMEL_ECC_PUBKEY_SIZE;
}
static struct kpp_alg atmel_ecdh_nist_p256 = {
.set_secret = atmel_ecdh_set_secret,
.generate_public_key = atmel_ecdh_generate_public_key,
.compute_shared_secret = atmel_ecdh_compute_shared_secret,
.init = atmel_ecdh_init_tfm,
.exit = atmel_ecdh_exit_tfm,
.max_size = atmel_ecdh_max_size,
.base = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_name = "ecdh-nist-p256",
.cra_driver_name = "atmel-ecdh",
.cra_priority = ATMEL_ECC_PRIORITY,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct atmel_ecdh_ctx),
},
};
static int atmel_ecc_probe(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv;
int ret;
ret = atmel_i2c_probe(client);
if (ret)
return ret;
i2c_priv = i2c_get_clientdata(client);
spin_lock(&driver_data.i2c_list_lock);
list_add_tail(&i2c_priv->i2c_client_list_node,
&driver_data.i2c_client_list);
spin_unlock(&driver_data.i2c_list_lock);
ret = crypto_register_kpp(&atmel_ecdh_nist_p256);
if (ret) {
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
dev_err(&client->dev, "%s alg registration failed\n",
atmel_ecdh_nist_p256.base.cra_driver_name);
} else {
dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
}
return ret;
}
static void atmel_ecc_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
/* Return EBUSY if i2c client already allocated. */
if (atomic_read(&i2c_priv->tfm_count)) {
/*
* After we return here, the memory backing the device is freed.
* That happens no matter what the return value of this function
* is because in the Linux device model there is no error
* handling for unbinding a driver.
* If there is still some action pending, it probably involves
* accessing the freed memory.
*/
dev_emerg(&client->dev, "Device is busy, expect memory corruption.\n");
return;
}
crypto_unregister_kpp(&atmel_ecdh_nist_p256);
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
}
#ifdef CONFIG_OF
static const struct of_device_id atmel_ecc_dt_ids[] = {
{
.compatible = "atmel,atecc508a",
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
#endif
static const struct i2c_device_id atmel_ecc_id[] = {
{ "atecc508a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);
static struct i2c_driver atmel_ecc_driver = {
.driver = {
.name = "atmel-ecc",
.of_match_table = of_match_ptr(atmel_ecc_dt_ids),
},
.probe = atmel_ecc_probe,
.remove = atmel_ecc_remove,
.id_table = atmel_ecc_id,
};
static int __init atmel_ecc_init(void)
{
spin_lock_init(&driver_data.i2c_list_lock);
INIT_LIST_HEAD(&driver_data.i2c_client_list);
return i2c_add_driver(&atmel_ecc_driver);
}
static void __exit atmel_ecc_exit(void)
{
atmel_i2c_flush_queue();
i2c_del_driver(&atmel_ecc_driver);
}
module_init(atmel_ecc_init);
module_exit(atmel_ecc_exit);
MODULE_AUTHOR("Tudor Ambarus");
MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 */
/*
* OMAP Multi-Channel Buffered Serial Port
*
* Contact: Jarkko Nikula <[email protected]>
* Peter Ujfalusi <[email protected]>
*/
#ifndef __OMAP_MCBSP_PRIV_H__
#define __OMAP_MCBSP_PRIV_H__
#include <linux/platform_data/asoc-ti-mcbsp.h>
#ifdef CONFIG_ARCH_OMAP1
#define mcbsp_omap1() 1
#else
#define mcbsp_omap1() 0
#endif
/* McBSP register numbers. Register address offset = num * reg_step */
enum {
/* Common registers */
OMAP_MCBSP_REG_SPCR2 = 4,
OMAP_MCBSP_REG_SPCR1,
OMAP_MCBSP_REG_RCR2,
OMAP_MCBSP_REG_RCR1,
OMAP_MCBSP_REG_XCR2,
OMAP_MCBSP_REG_XCR1,
OMAP_MCBSP_REG_SRGR2,
OMAP_MCBSP_REG_SRGR1,
OMAP_MCBSP_REG_MCR2,
OMAP_MCBSP_REG_MCR1,
OMAP_MCBSP_REG_RCERA,
OMAP_MCBSP_REG_RCERB,
OMAP_MCBSP_REG_XCERA,
OMAP_MCBSP_REG_XCERB,
OMAP_MCBSP_REG_PCR0,
OMAP_MCBSP_REG_RCERC,
OMAP_MCBSP_REG_RCERD,
OMAP_MCBSP_REG_XCERC,
OMAP_MCBSP_REG_XCERD,
OMAP_MCBSP_REG_RCERE,
OMAP_MCBSP_REG_RCERF,
OMAP_MCBSP_REG_XCERE,
OMAP_MCBSP_REG_XCERF,
OMAP_MCBSP_REG_RCERG,
OMAP_MCBSP_REG_RCERH,
OMAP_MCBSP_REG_XCERG,
OMAP_MCBSP_REG_XCERH,
/* OMAP1-OMAP2420 registers */
OMAP_MCBSP_REG_DRR2 = 0,
OMAP_MCBSP_REG_DRR1,
OMAP_MCBSP_REG_DXR2,
OMAP_MCBSP_REG_DXR1,
/* OMAP2430 and onwards */
OMAP_MCBSP_REG_DRR = 0,
OMAP_MCBSP_REG_DXR = 2,
OMAP_MCBSP_REG_SYSCON = 35,
OMAP_MCBSP_REG_THRSH2,
OMAP_MCBSP_REG_THRSH1,
OMAP_MCBSP_REG_IRQST = 40,
OMAP_MCBSP_REG_IRQEN,
OMAP_MCBSP_REG_WAKEUPEN,
OMAP_MCBSP_REG_XCCR,
OMAP_MCBSP_REG_RCCR,
OMAP_MCBSP_REG_XBUFFSTAT,
OMAP_MCBSP_REG_RBUFFSTAT,
OMAP_MCBSP_REG_SSELCR,
};
/************************** McBSP SPCR1 bit definitions ***********************/
#define RRST BIT(0)
#define RRDY BIT(1)
#define RFULL BIT(2)
#define RSYNC_ERR BIT(3)
#define RINTM(value) (((value) & 0x3) << 4) /* bits 4:5 */
#define ABIS BIT(6)
#define DXENA BIT(7)
#define CLKSTP(value) (((value) & 0x3) << 11) /* bits 11:12 */
#define RJUST(value) (((value) & 0x3) << 13) /* bits 13:14 */
#define ALB BIT(15)
#define DLB BIT(15)
/************************** McBSP SPCR2 bit definitions ***********************/
#define XRST BIT(0)
#define XRDY BIT(1)
#define XEMPTY BIT(2)
#define XSYNC_ERR BIT(3)
#define XINTM(value) (((value) & 0x3) << 4) /* bits 4:5 */
#define GRST BIT(6)
#define FRST BIT(7)
#define SOFT BIT(8)
#define FREE BIT(9)
/************************** McBSP PCR bit definitions *************************/
#define CLKRP BIT(0)
#define CLKXP BIT(1)
#define FSRP BIT(2)
#define FSXP BIT(3)
#define DR_STAT BIT(4)
#define DX_STAT BIT(5)
#define CLKS_STAT BIT(6)
#define SCLKME BIT(7)
#define CLKRM BIT(8)
#define CLKXM BIT(9)
#define FSRM BIT(10)
#define FSXM BIT(11)
#define RIOEN BIT(12)
#define XIOEN BIT(13)
#define IDLE_EN BIT(14)
/************************** McBSP RCR1 bit definitions ************************/
#define RWDLEN1(value) (((value) & 0x7) << 5) /* Bits 5:7 */
#define RFRLEN1(value) (((value) & 0x7f) << 8) /* Bits 8:14 */
/************************** McBSP XCR1 bit definitions ************************/
#define XWDLEN1(value) (((value) & 0x7) << 5) /* Bits 5:7 */
#define XFRLEN1(value) (((value) & 0x7f) << 8) /* Bits 8:14 */
/*************************** McBSP RCR2 bit definitions ***********************/
#define RDATDLY(value) ((value) & 0x3) /* Bits 0:1 */
#define RFIG BIT(2)
#define RCOMPAND(value) (((value) & 0x3) << 3) /* Bits 3:4 */
#define RWDLEN2(value) (((value) & 0x7) << 5) /* Bits 5:7 */
#define RFRLEN2(value) (((value) & 0x7f) << 8) /* Bits 8:14 */
#define RPHASE BIT(15)
/*************************** McBSP XCR2 bit definitions ***********************/
#define XDATDLY(value) ((value) & 0x3) /* Bits 0:1 */
#define XFIG BIT(2)
#define XCOMPAND(value) (((value) & 0x3) << 3) /* Bits 3:4 */
#define XWDLEN2(value) (((value) & 0x7) << 5) /* Bits 5:7 */
#define XFRLEN2(value) (((value) & 0x7f) << 8) /* Bits 8:14 */
#define XPHASE BIT(15)
/************************* McBSP SRGR1 bit definitions ************************/
#define CLKGDV(value) ((value) & 0x7f) /* Bits 0:7 */
#define FWID(value) (((value) & 0xff) << 8) /* Bits 8:15 */
/************************* McBSP SRGR2 bit definitions ************************/
#define FPER(value) ((value) & 0x0fff) /* Bits 0:11 */
#define FSGM BIT(12)
#define CLKSM BIT(13)
#define CLKSP BIT(14)
#define GSYNC BIT(15)
/************************* McBSP MCR1 bit definitions *************************/
#define RMCM BIT(0)
#define RCBLK(value) (((value) & 0x7) << 2) /* Bits 2:4 */
#define RPABLK(value) (((value) & 0x3) << 5) /* Bits 5:6 */
#define RPBBLK(value) (((value) & 0x3) << 7) /* Bits 7:8 */
/************************* McBSP MCR2 bit definitions *************************/
#define XMCM(value) ((value) & 0x3) /* Bits 0:1 */
#define XCBLK(value) (((value) & 0x7) << 2) /* Bits 2:4 */
#define XPABLK(value) (((value) & 0x3) << 5) /* Bits 5:6 */
#define XPBBLK(value) (((value) & 0x3) << 7) /* Bits 7:8 */
/*********************** McBSP XCCR bit definitions *************************/
#define XDISABLE BIT(0)
#define XDMAEN BIT(3)
#define DILB BIT(5)
#define XFULL_CYCLE BIT(11)
#define DXENDLY(value) (((value) & 0x3) << 12) /* Bits 12:13 */
#define PPCONNECT BIT(14)
#define EXTCLKGATE BIT(15)
/********************** McBSP RCCR bit definitions *************************/
#define RDISABLE BIT(0)
#define RDMAEN BIT(3)
#define RFULL_CYCLE BIT(11)
/********************** McBSP SYSCONFIG bit definitions ********************/
#define SOFTRST BIT(1)
#define ENAWAKEUP BIT(2)
#define SIDLEMODE(value) (((value) & 0x3) << 3)
#define CLOCKACTIVITY(value) (((value) & 0x3) << 8)
/********************** McBSP DMA operating modes **************************/
#define MCBSP_DMA_MODE_ELEMENT 0
#define MCBSP_DMA_MODE_THRESHOLD 1
/********************** McBSP WAKEUPEN/IRQST/IRQEN bit definitions *********/
#define RSYNCERREN BIT(0)
#define RFSREN BIT(1)
#define REOFEN BIT(2)
#define RRDYEN BIT(3)
#define RUNDFLEN BIT(4)
#define ROVFLEN BIT(5)
#define XSYNCERREN BIT(7)
#define XFSXEN BIT(8)
#define XEOFEN BIT(9)
#define XRDYEN BIT(10)
#define XUNDFLEN BIT(11)
#define XOVFLEN BIT(12)
#define XEMPTYEOFEN BIT(14)
/* Clock signal muxing options */
#define CLKR_SRC_CLKR 0 /* CLKR signal is from the CLKR pin */
#define CLKR_SRC_CLKX 1 /* CLKR signal is from the CLKX pin */
#define FSR_SRC_FSR 2 /* FSR signal is from the FSR pin */
#define FSR_SRC_FSX 3 /* FSR signal is from the FSX pin */
/* McBSP functional clock sources */
#define MCBSP_CLKS_PRCM_SRC 0
#define MCBSP_CLKS_PAD_SRC 1
/* we don't do multichannel for now */
struct omap_mcbsp_reg_cfg {
u16 spcr2;
u16 spcr1;
u16 rcr2;
u16 rcr1;
u16 xcr2;
u16 xcr1;
u16 srgr2;
u16 srgr1;
u16 mcr2;
u16 mcr1;
u16 pcr0;
u16 rcerc;
u16 rcerd;
u16 xcerc;
u16 xcerd;
u16 rcere;
u16 rcerf;
u16 xcere;
u16 xcerf;
u16 rcerg;
u16 rcerh;
u16 xcerg;
u16 xcerh;
u16 xccr;
u16 rccr;
};
struct omap_mcbsp_st_data;
struct omap_mcbsp {
struct device *dev;
struct clk *fclk;
spinlock_t lock;
unsigned long phys_base;
unsigned long phys_dma_base;
void __iomem *io_base;
u8 id;
/*
	 * Flags indicating whether the bus is already activated and
	 * configured by another substream
*/
int active;
int configured;
u8 free;
int irq;
int rx_irq;
int tx_irq;
/* Protect the field .free, while checking if the mcbsp is in use */
struct omap_mcbsp_platform_data *pdata;
struct omap_mcbsp_st_data *st_data;
struct omap_mcbsp_reg_cfg cfg_regs;
struct snd_dmaengine_dai_dma_data dma_data[2];
unsigned int dma_req[2];
int dma_op_mode;
u16 max_tx_thres;
u16 max_rx_thres;
void *reg_cache;
int reg_cache_size;
unsigned int fmt;
unsigned int in_freq;
unsigned int latency[2];
int clk_div;
int wlen;
struct pm_qos_request pm_qos_req;
};
static inline void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
{
void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step;
if (mcbsp->pdata->reg_size == 2) {
((u16 *)mcbsp->reg_cache)[reg] = (u16)val;
writew_relaxed((u16)val, addr);
} else {
((u32 *)mcbsp->reg_cache)[reg] = val;
writel_relaxed(val, addr);
}
}
static inline int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg,
bool from_cache)
{
void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step;
if (mcbsp->pdata->reg_size == 2) {
return !from_cache ? readw_relaxed(addr) :
((u16 *)mcbsp->reg_cache)[reg];
} else {
return !from_cache ? readl_relaxed(addr) :
((u32 *)mcbsp->reg_cache)[reg];
}
}
#define MCBSP_READ(mcbsp, reg) \
omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 0)
#define MCBSP_WRITE(mcbsp, reg, val) \
omap_mcbsp_write(mcbsp, OMAP_MCBSP_REG_##reg, val)
#define MCBSP_READ_CACHE(mcbsp, reg) \
omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 1)
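/*
 * Example (illustrative): the macros expand the short register name, so
 * a read-modify-write of SPCR1 through the cache reads as
 *
 *	u32 w = MCBSP_READ_CACHE(mcbsp, SPCR1);
 *	MCBSP_WRITE(mcbsp, SPCR1, w | RRST);
 */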
/* Sidetone specific API */
int omap_mcbsp_st_init(struct platform_device *pdev);
int omap_mcbsp_st_start(struct omap_mcbsp *mcbsp);
int omap_mcbsp_st_stop(struct omap_mcbsp *mcbsp);
#endif /* __OMAP_MCBSP_PRIV_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/* huawei_cdc_ncm.c - handles Huawei devices using the CDC NCM protocol as
* transport layer.
* Copyright (C) 2013 Enrico Mioso <[email protected]>
*
* ABSTRACT:
* This driver handles devices resembling the CDC NCM standard, but
* encapsulating another protocol inside it. An example are some Huawei 3G
* devices, exposing an embedded AT channel where you can set up the NCM
* connection.
* This code has been heavily inspired by the cdc_mbim.c driver, which is
* Copyright (c) 2012 Smith Micro Software, Inc.
* Copyright (c) 2012 Bjørn Mork <[email protected]>
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
#include <linux/usb/cdc_ncm.h>
/* Driver data */
struct huawei_cdc_ncm_state {
struct cdc_ncm_ctx *ctx;
atomic_t pmcount;
struct usb_driver *subdriver;
struct usb_interface *control;
struct usb_interface *data;
};
static int huawei_cdc_ncm_manage_power(struct usbnet *usbnet_dev, int on)
{
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
int rv;
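	/* Flip needs_remote_wakeup only on the 0 <-> 1 refcount transitions. */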
if ((on && atomic_add_return(1, &drvstate->pmcount) == 1) ||
(!on && atomic_dec_and_test(&drvstate->pmcount))) {
rv = usb_autopm_get_interface(usbnet_dev->intf);
usbnet_dev->intf->needs_remote_wakeup = on;
if (!rv)
usb_autopm_put_interface(usbnet_dev->intf);
}
return 0;
}
static int huawei_cdc_ncm_wdm_manage_power(struct usb_interface *intf,
int status)
{
struct usbnet *usbnet_dev = usb_get_intfdata(intf);
/* can be called while disconnecting */
if (!usbnet_dev)
return 0;
return huawei_cdc_ncm_manage_power(usbnet_dev, status);
}
static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
struct usb_interface *intf)
{
struct cdc_ncm_ctx *ctx;
struct usb_driver *subdriver = ERR_PTR(-ENODEV);
int ret;
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
int drvflags = 0;
	/* The altsetting should always be 1 for NCM devices, so we hard-code
	 * it here. Some Huawei devices need the NDP part of the NCM package
	 * to be at the end of the frame.
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
/* For many Huawei devices the NTB32 mode is the default and the best mode
* they work with. Huawei E5785 and E5885 devices refuse to work in NTB16 mode at all.
*/
drvflags |= CDC_NCM_FLAG_PREFER_NTB32;
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
ctx = drvstate->ctx;
if (usbnet_dev->status)
/* The wMaxCommand buffer must be big enough to hold
* any message from the modem. Experience has shown
		 * that some replies are more than 256 bytes long.
*/
subdriver = usb_cdc_wdm_register(ctx->control,
&usbnet_dev->status->desc,
1024, /* wMaxCommand */
WWAN_PORT_AT,
huawei_cdc_ncm_wdm_manage_power);
if (IS_ERR(subdriver)) {
ret = PTR_ERR(subdriver);
cdc_ncm_unbind(usbnet_dev, intf);
goto err;
}
/* Prevent usbnet from using the status descriptor */
usbnet_dev->status = NULL;
drvstate->subdriver = subdriver;
err:
return ret;
}
static void huawei_cdc_ncm_unbind(struct usbnet *usbnet_dev,
struct usb_interface *intf)
{
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
struct cdc_ncm_ctx *ctx = drvstate->ctx;
if (drvstate->subdriver && drvstate->subdriver->disconnect)
drvstate->subdriver->disconnect(ctx->control);
drvstate->subdriver = NULL;
cdc_ncm_unbind(usbnet_dev, intf);
}
static int huawei_cdc_ncm_suspend(struct usb_interface *intf,
pm_message_t message)
{
int ret = 0;
struct usbnet *usbnet_dev = usb_get_intfdata(intf);
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
struct cdc_ncm_ctx *ctx = drvstate->ctx;
if (ctx == NULL) {
ret = -ENODEV;
goto error;
}
ret = usbnet_suspend(intf, message);
if (ret < 0)
goto error;
if (intf == ctx->control &&
drvstate->subdriver &&
drvstate->subdriver->suspend)
ret = drvstate->subdriver->suspend(intf, message);
if (ret < 0)
usbnet_resume(intf);
error:
return ret;
}
static int huawei_cdc_ncm_resume(struct usb_interface *intf)
{
int ret = 0;
struct usbnet *usbnet_dev = usb_get_intfdata(intf);
struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
bool callsub;
struct cdc_ncm_ctx *ctx = drvstate->ctx;
/* should we call subdriver's resume function? */
callsub =
(intf == ctx->control &&
drvstate->subdriver &&
drvstate->subdriver->resume);
if (callsub)
ret = drvstate->subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
if (ret < 0 && callsub)
drvstate->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
}
static const struct driver_info huawei_cdc_ncm_info = {
.description = "Huawei CDC NCM device",
.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
.bind = huawei_cdc_ncm_bind,
.unbind = huawei_cdc_ncm_unbind,
.manage_power = huawei_cdc_ncm_manage_power,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
};
static const struct usb_device_id huawei_cdc_ncm_devs[] = {
/* Huawei NCM devices disguised as vendor specific */
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
.driver_info = (unsigned long)&huawei_cdc_ncm_info,
},
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&huawei_cdc_ncm_info,
},
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
.driver_info = (unsigned long)&huawei_cdc_ncm_info,
},
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
.driver_info = (unsigned long)&huawei_cdc_ncm_info,
},
/* Terminating entry */
{
},
};
MODULE_DEVICE_TABLE(usb, huawei_cdc_ncm_devs);
static struct usb_driver huawei_cdc_ncm_driver = {
.name = "huawei_cdc_ncm",
.id_table = huawei_cdc_ncm_devs,
.probe = usbnet_probe,
.disconnect = usbnet_disconnect,
.suspend = huawei_cdc_ncm_suspend,
.resume = huawei_cdc_ncm_resume,
.reset_resume = huawei_cdc_ncm_resume,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
module_usb_driver(huawei_cdc_ncm_driver);
MODULE_AUTHOR("Enrico Mioso <[email protected]>");
MODULE_DESCRIPTION("USB CDC NCM host driver with encapsulated protocol support");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2024 David Vernet <[email protected]>
*/
#include <bpf/bpf.h>
#include <scx/common.h>
#include <sys/wait.h>
#include <unistd.h>
#include "maximal.bpf.skel.h"
#include "scx_test.h"
static enum scx_test_status setup(void **ctx)
{
struct maximal *skel;
skel = maximal__open_and_load();
SCX_FAIL_IF(!skel, "Failed to open and load skel");
*ctx = skel;
return SCX_TEST_PASS;
}
static enum scx_test_status run(void *ctx)
{
struct maximal *skel = ctx;
struct bpf_link *link;
link = bpf_map__attach_struct_ops(skel->maps.maximal_ops);
SCX_FAIL_IF(!link, "Failed to attach scheduler");
bpf_link__destroy(link);
return SCX_TEST_PASS;
}
static void cleanup(void *ctx)
{
struct maximal *skel = ctx;
maximal__destroy(skel);
}
struct scx_test maximal = {
.name = "maximal",
.description = "Verify we can load a scheduler with every callback defined",
.setup = setup,
.run = run,
.cleanup = cleanup,
};
REGISTER_SCX_TEST(&maximal)
// SPDX-License-Identifier: GPL-2.0+
/*
* phy-keystone - USB PHY, talking to dwc3 controller in Keystone.
*
* Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
*
* Author: WingMan Kwok <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/io.h>
#include <linux/of.h>
#include "phy-generic.h"
/* USB PHY control register offsets */
#define USB_PHY_CTL_UTMI 0x0000
#define USB_PHY_CTL_PIPE 0x0004
#define USB_PHY_CTL_PARAM_1 0x0008
#define USB_PHY_CTL_PARAM_2 0x000c
#define USB_PHY_CTL_CLOCK 0x0010
#define USB_PHY_CTL_PLL 0x0014
#define PHY_REF_SSP_EN BIT(29)
struct keystone_usbphy {
struct usb_phy_generic usb_phy_gen;
void __iomem *phy_ctrl;
};
static inline u32 keystone_usbphy_readl(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
static inline void keystone_usbphy_writel(void __iomem *base,
u32 offset, u32 value)
{
writel(value, base + offset);
}
static int keystone_usbphy_init(struct usb_phy *phy)
{
struct keystone_usbphy *k_phy = dev_get_drvdata(phy->dev);
u32 val;
val = keystone_usbphy_readl(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK);
keystone_usbphy_writel(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK,
val | PHY_REF_SSP_EN);
return 0;
}
static void keystone_usbphy_shutdown(struct usb_phy *phy)
{
struct keystone_usbphy *k_phy = dev_get_drvdata(phy->dev);
u32 val;
val = keystone_usbphy_readl(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK);
keystone_usbphy_writel(k_phy->phy_ctrl, USB_PHY_CTL_CLOCK,
val & ~PHY_REF_SSP_EN);
}
static int keystone_usbphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keystone_usbphy *k_phy;
int ret;
k_phy = devm_kzalloc(dev, sizeof(*k_phy), GFP_KERNEL);
if (!k_phy)
return -ENOMEM;
k_phy->phy_ctrl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
ret = usb_phy_gen_create_phy(dev, &k_phy->usb_phy_gen);
if (ret)
return ret;
k_phy->usb_phy_gen.phy.init = keystone_usbphy_init;
k_phy->usb_phy_gen.phy.shutdown = keystone_usbphy_shutdown;
platform_set_drvdata(pdev, k_phy);
return usb_add_phy_dev(&k_phy->usb_phy_gen.phy);
}
static void keystone_usbphy_remove(struct platform_device *pdev)
{
struct keystone_usbphy *k_phy = platform_get_drvdata(pdev);
usb_remove_phy(&k_phy->usb_phy_gen.phy);
}
static const struct of_device_id keystone_usbphy_ids[] = {
{ .compatible = "ti,keystone-usbphy" },
{ }
};
MODULE_DEVICE_TABLE(of, keystone_usbphy_ids);
static struct platform_driver keystone_usbphy_driver = {
.probe = keystone_usbphy_probe,
.remove = keystone_usbphy_remove,
.driver = {
.name = "keystone-usbphy",
.of_match_table = keystone_usbphy_ids,
},
};
module_platform_driver(keystone_usbphy_driver);
MODULE_ALIAS("platform:keystone-usbphy");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("Keystone USB phy driver");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Helper functions for BLAKE2b implementations.
* Keep this in sync with the corresponding BLAKE2s header.
*/
#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
#define _CRYPTO_INTERNAL_BLAKE2B_H
#include <crypto/blake2b.h>
#include <crypto/internal/hash.h>
#include <linux/string.h>
void blake2b_compress_generic(struct blake2b_state *state,
const u8 *block, size_t nblocks, u32 inc);
static inline void blake2b_set_lastblock(struct blake2b_state *state)
{
state->f[0] = -1;
}
typedef void (*blake2b_compress_t)(struct blake2b_state *state,
const u8 *block, size_t nblocks, u32 inc);
static inline void __blake2b_update(struct blake2b_state *state,
const u8 *in, size_t inlen,
blake2b_compress_t compress)
{
const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
if (unlikely(!inlen))
return;
if (inlen > fill) {
memcpy(state->buf + state->buflen, in, fill);
(*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
state->buflen = 0;
in += fill;
inlen -= fill;
}
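	/*
	 * Note the strict '>': between 1 and BLAKE2B_BLOCK_SIZE bytes are
	 * always left in the buffer, so the final call has a last block
	 * to flag.
	 */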
if (inlen > BLAKE2B_BLOCK_SIZE) {
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
/* Hash one less (full) block than strictly possible */
(*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
}
memcpy(state->buf + state->buflen, in, inlen);
state->buflen += inlen;
}
static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
blake2b_compress_t compress)
{
int i;
blake2b_set_lastblock(state);
memset(state->buf + state->buflen, 0,
BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
(*compress)(state, state->buf, 1, state->buflen);
for (i = 0; i < ARRAY_SIZE(state->h); i++)
__cpu_to_le64s(&state->h[i]);
memcpy(out, state->h, state->outlen);
}
/* Helper functions for shash implementations of BLAKE2b */
struct blake2b_tfm_ctx {
u8 key[BLAKE2B_KEY_SIZE];
unsigned int keylen;
};
static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
return -EINVAL;
memcpy(tctx->key, key, keylen);
tctx->keylen = keylen;
return 0;
}
static inline int crypto_blake2b_init(struct shash_desc *desc)
{
const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct blake2b_state *state = shash_desc_ctx(desc);
unsigned int outlen = crypto_shash_digestsize(desc->tfm);
__blake2b_init(state, outlen, tctx->key, tctx->keylen);
return 0;
}
static inline int crypto_blake2b_update(struct shash_desc *desc,
const u8 *in, unsigned int inlen,
blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
__blake2b_update(state, in, inlen, compress);
return 0;
}
static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
__blake2b_final(state, out, compress);
return 0;
}
#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC64_ESTATE_H
#define _SPARC64_ESTATE_H
/* UltraSPARC-III E-cache Error Enable */
#define ESTATE_ERROR_FMT 0x0000000000040000 /* Force MTAG ECC */
#define ESTATE_ERROR_FMESS 0x000000000003c000 /* Forced MTAG ECC val */
#define ESTATE_ERROR_FMD 0x0000000000002000 /* Force DATA ECC */
#define ESTATE_ERROR_FDECC 0x0000000000001ff0 /* Forced DATA ECC val */
#define ESTATE_ERROR_UCEEN 0x0000000000000008 /* See below */
#define ESTATE_ERROR_NCEEN 0x0000000000000002 /* See below */
#define ESTATE_ERROR_CEEN 0x0000000000000001 /* See below */
/* UCEEN enables the fast_ECC_error trap for: 1) software correctable E-cache
* errors 2) uncorrectable E-cache errors. Such events only occur on reads
* of the E-cache by the local processor for: 1) data loads 2) instruction
* fetches 3) atomic operations. Such events _cannot_ occur for: 1) merge
* 2) writeback 2) copyout. The AFSR bits associated with these traps are
* UCC and UCU.
*/
/* NCEEN enables instruction_access_error, data_access_error, and ECC_error traps
* for uncorrectable ECC errors and system errors.
*
* Uncorrectable system bus data error or MTAG ECC error, system bus TimeOUT,
* or system bus BusERR:
* 1) As the result of an instruction fetch, will generate instruction_access_error
* 2) As the result of a load etc. will generate data_access_error.
* 3) As the result of store merge completion, writeback, or copyout will
* generate a disrupting ECC_error trap.
 * 4) As the result of such errors on an instruction vector fetch, any of
 *    the 3 trap types can be generated.
*
* The AFSR bits associated with these traps are EMU, EDU, WDU, CPU, IVU, UE,
* BERR, and TO.
*/
/* CEEN enables the ECC_error trap for hardware corrected ECC errors. System bus
* reads resulting in a hardware corrected data or MTAG ECC error will generate an
* ECC_error disrupting trap with this bit enabled.
*
* This same trap will also be generated when a hardware corrected ECC error results
* during store merge, writeback, and copyout operations.
*/
/* In general, if the trap enable bits above are disabled the AFSR bits will still
* log the events even though the trap will not be generated by the processor.
*/
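/* For illustration, a kernel running with all three trap enables set
 * would program
 *
 *	val = ESTATE_ERROR_CEEN | ESTATE_ERROR_NCEEN | ESTATE_ERROR_UCEEN;
 *
 * into the E-cache Error Enable register (via its ASI).
 */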
#endif /* _SPARC64_ESTATE_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* VFIO platform driver specialized for AMD xgbe reset
* reset code is inherited from AMD xgbe native driver
*
* Copyright (c) 2015 Linaro Ltd.
* www.linaro.org
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <uapi/linux/mdio.h>
#include <linux/delay.h>
#include "../vfio_platform_private.h"
#define DMA_MR 0x3000
#define MAC_VR 0x0110
#define DMA_ISR 0x3008
#define MAC_ISR 0x00b0
#define PCS_MMD_SELECT 0xff
#define MDIO_AN_INT 0x8002
#define MDIO_AN_INTMASK 0x8001
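/*
 * The XPCS maps the clause-45 MDIO space through a small window: the
 * upper bits of the (mmd << 16 | reg) address are written to the
 * PCS_MMD_SELECT register to pick the window, then the low byte of the
 * address indexes a 32-bit register inside it.
 */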
static unsigned int xmdio_read(void __iomem *ioaddr, unsigned int mmd,
unsigned int reg)
{
unsigned int mmd_address, value;
mmd_address = (mmd << 16) | ((reg) & 0xffff);
iowrite32(mmd_address >> 8, ioaddr + (PCS_MMD_SELECT << 2));
value = ioread32(ioaddr + ((mmd_address & 0xff) << 2));
return value;
}
static void xmdio_write(void __iomem *ioaddr, unsigned int mmd,
unsigned int reg, unsigned int value)
{
unsigned int mmd_address;
mmd_address = (mmd << 16) | ((reg) & 0xffff);
iowrite32(mmd_address >> 8, ioaddr + (PCS_MMD_SELECT << 2));
iowrite32(value, ioaddr + ((mmd_address & 0xff) << 2));
}
static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
{
struct vfio_platform_region *xgmac_regs = &vdev->regions[0];
struct vfio_platform_region *xpcs_regs = &vdev->regions[1];
u32 dma_mr_value, pcs_value, value;
unsigned int count;
if (!xgmac_regs->ioaddr) {
xgmac_regs->ioaddr =
ioremap(xgmac_regs->addr, xgmac_regs->size);
if (!xgmac_regs->ioaddr)
return -ENOMEM;
}
if (!xpcs_regs->ioaddr) {
xpcs_regs->ioaddr =
ioremap(xpcs_regs->addr, xpcs_regs->size);
if (!xpcs_regs->ioaddr)
return -ENOMEM;
}
	/* reset the PHY through MDIO */
pcs_value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_PCS, MDIO_CTRL1);
pcs_value |= MDIO_CTRL1_RESET;
xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_PCS, MDIO_CTRL1, pcs_value);
count = 50;
do {
msleep(20);
pcs_value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_PCS,
MDIO_CTRL1);
} while ((pcs_value & MDIO_CTRL1_RESET) && --count);
if (pcs_value & MDIO_CTRL1_RESET)
dev_warn(vdev->device, "%s: XGBE PHY reset timeout\n",
__func__);
/* disable auto-negotiation */
value = xmdio_read(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_CTRL1);
value &= ~MDIO_AN_CTRL1_ENABLE;
xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_CTRL1, value);
/* disable AN IRQ */
xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
/* clear AN IRQ */
xmdio_write(xpcs_regs->ioaddr, MDIO_MMD_AN, MDIO_AN_INT, 0);
/* MAC software reset */
dma_mr_value = ioread32(xgmac_regs->ioaddr + DMA_MR);
dma_mr_value |= 0x1;
iowrite32(dma_mr_value, xgmac_regs->ioaddr + DMA_MR);
usleep_range(10, 15);
count = 2000;
while (--count && (ioread32(xgmac_regs->ioaddr + DMA_MR) & 1))
usleep_range(500, 600);
if (!count)
dev_warn(vdev->device, "%s: MAC SW reset failed\n", __func__);
return 0;
}
module_vfio_reset_handler("amd,xgbe-seattle-v1a", vfio_platform_amdxgbe_reset);
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Auger <[email protected]>");
MODULE_DESCRIPTION("Reset support for AMD xgbe vfio platform device");
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019 MediaTek Inc.
* Author: Yong Liang <[email protected]>
*/
#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT2712
#define _DT_BINDINGS_RESET_CONTROLLER_MT2712
#define MT2712_TOPRGU_INFRA_SW_RST 0
#define MT2712_TOPRGU_MM_SW_RST 1
#define MT2712_TOPRGU_MFG_SW_RST 2
#define MT2712_TOPRGU_VENC_SW_RST 3
#define MT2712_TOPRGU_VDEC_SW_RST 4
#define MT2712_TOPRGU_IMG_SW_RST 5
#define MT2712_TOPRGU_INFRA_AO_SW_RST 8
#define MT2712_TOPRGU_USB_SW_RST 9
#define MT2712_TOPRGU_APMIXED_SW_RST 10
#define MT2712_TOPRGU_SW_RST_NUM 11
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2712 */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021-2023 Digiteq Automotive
* author: Martin Tuma <[email protected]>
*
* This module handles all the sysfs info/configuration that is related to the
* v4l2 output devices.
*/
#include <linux/device.h>
#include <linux/nospec.h>
#include "mgb4_core.h"
#include "mgb4_i2c.h"
#include "mgb4_vout.h"
#include "mgb4_vin.h"
#include "mgb4_cmt.h"
#include "mgb4_sysfs.h"
static int loopin_cnt(struct mgb4_vin_dev *vindev)
{
struct mgb4_vout_dev *voutdev;
u32 config;
int i, cnt = 0;
for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
voutdev = vindev->mgbdev->vout[i];
if (!voutdev)
continue;
config = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.config);
if ((config & 0xc) >> 2 == vindev->config->id)
cnt++;
}
return cnt;
}
static bool is_busy(struct video_device *dev)
{
bool ret;
mutex_lock(dev->lock);
ret = vb2_is_busy(dev->queue);
mutex_unlock(dev->lock);
return ret;
}
/* Common for both FPDL3 and GMSL */
static ssize_t output_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
return sprintf(buf, "%d\n", voutdev->config->id);
}
static ssize_t video_source_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.config);
return sprintf(buf, "%u\n", (config & 0xc) >> 2);
}
/*
 * A video source change may affect the buffer queue of ANY video input/output
 * on the card, so if any of the inputs/outputs is in use, we do not allow
 * the change.
 *
 * As we do not want to lock all the video devices at the same time, a
 * two-stage locking strategy is used. In addition to the per-video-device
 * locking there is a global (PCI device) variable "io_reconfig" that is
 * atomically checked/set while the reconfiguration is running. All the video
 * devices check the variable in their queue_setup() functions and refuse to
 * start the queue once the reconfiguration has started (see the sketch after
 * video_source_store() below).
 */
static ssize_t video_source_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
struct mgb4_dev *mgbdev = voutdev->mgbdev;
struct mgb4_vin_dev *loopin_new = NULL, *loopin_old = NULL;
unsigned long val;
ssize_t ret;
u32 config;
int i;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 3)
return -EINVAL;
if (test_and_set_bit(0, &mgbdev->io_reconfig))
return -EBUSY;
ret = -EBUSY;
for (i = 0; i < MGB4_VIN_DEVICES; i++)
if (mgbdev->vin[i] && is_busy(&mgbdev->vin[i]->vdev))
goto end;
for (i = 0; i < MGB4_VOUT_DEVICES; i++)
if (mgbdev->vout[i] && is_busy(&mgbdev->vout[i]->vdev))
goto end;
config = mgb4_read_reg(&mgbdev->video, voutdev->config->regs.config);
if (((config & 0xc) >> 2) < MGB4_VIN_DEVICES)
loopin_old = mgbdev->vin[(config & 0xc) >> 2];
if (val < MGB4_VIN_DEVICES) {
val = array_index_nospec(val, MGB4_VIN_DEVICES);
loopin_new = mgbdev->vin[val];
}
if (loopin_old && loopin_cnt(loopin_old) == 1)
mgb4_mask_reg(&mgbdev->video, loopin_old->config->regs.config,
0x2, 0x0);
if (loopin_new)
mgb4_mask_reg(&mgbdev->video, loopin_new->config->regs.config,
0x2, 0x2);
if (val == voutdev->config->id + MGB4_VIN_DEVICES)
mgb4_write_reg(&mgbdev->video, voutdev->config->regs.config,
config & ~(1 << 1));
else
mgb4_write_reg(&mgbdev->video, voutdev->config->regs.config,
config | (1U << 1));
mgb4_mask_reg(&mgbdev->video, voutdev->config->regs.config, 0xc,
val << 2);
ret = count;
end:
clear_bit(0, &mgbdev->io_reconfig);
return ret;
}
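/*
 * Illustrative sketch of the second locking stage described above; not
 * taken verbatim from mgb4_vin.c/mgb4_vout.c, and the callback name and
 * surrounding setup are assumptions.
 */
#if 0 /* illustrative only */
static int queue_setup_sketch(struct vb2_queue *q, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[],
struct device *alloc_devs[])
{
struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(q);
/* refuse to start streaming while an I/O reconfiguration is running */
if (test_bit(0, &voutdev->mgbdev->io_reconfig))
return -EBUSY;
/* ...usual buffer count/plane size setup... */
return 0;
}
#endif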
static ssize_t display_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.resolution);
return sprintf(buf, "%u\n", config >> 16);
}
static ssize_t display_width_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 0xFFFF)
return -EINVAL;
mutex_lock(voutdev->vdev.lock);
if (vb2_is_busy(voutdev->vdev.queue)) {
mutex_unlock(voutdev->vdev.lock);
return -EBUSY;
}
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.resolution,
0xFFFF0000, val << 16);
mutex_unlock(voutdev->vdev.lock);
return count;
}
static ssize_t display_height_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.resolution);
return sprintf(buf, "%u\n", config & 0xFFFF);
}
static ssize_t display_height_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 0xFFFF)
return -EINVAL;
mutex_lock(voutdev->vdev.lock);
if (vb2_is_busy(voutdev->vdev.queue)) {
mutex_unlock(voutdev->vdev.lock);
return -EBUSY;
}
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.resolution,
0xFFFF, val);
mutex_unlock(voutdev->vdev.lock);
return count;
}
static ssize_t frame_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 period = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.frame_limit);
return sprintf(buf, "%u\n", period ? MGB4_HW_FREQ / period : 0);
}
/*
* Frame rate change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t frame_rate_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int limit, ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
limit = val ? MGB4_HW_FREQ / val : 0;
mgb4_write_reg(&voutdev->mgbdev->video,
voutdev->config->regs.frame_limit, limit);
return count;
}
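/*
 * Note (illustrative, not from the sources): the rate is stored as a frame
 * period in hardware clock ticks, limit = MGB4_HW_FREQ / fps, and read back
 * as fps = MGB4_HW_FREQ / period. Assuming, say, a 125 MHz hardware clock
 * (the real MGB4_HW_FREQ value is defined elsewhere), 60 fps stores
 * 125000000 / 60 = 2083333 ticks; integer division means the read-back rate
 * can differ slightly from the requested one, and a limit of 0 reads back
 * as rate 0 (limiter presumably disabled).
 */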
static ssize_t hsync_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
}
/*
* HSYNC width change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t hsync_width_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 0xFF)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
0x00FF0000, val << 16);
return count;
}
static ssize_t vsync_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
}
/*
 * VSYNC width change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t vsync_width_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 0xFF)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
0x00FF0000, val << 16);
return count;
}
static ssize_t hback_porch_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
}
/*
* hback porch change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t hback_porch_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 0xFF)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
0x0000FF00, val << 8);
return count;
}
static ssize_t vback_porch_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
}
/*
* vback porch change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t vback_porch_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 0xFF)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
0x0000FF00, val << 8);
return count;
}
static ssize_t hfront_porch_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.hsync);
return sprintf(buf, "%u\n", (sig & 0x000000FF));
}
/*
* hfront porch change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t hfront_porch_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 0xFF)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
0x000000FF, val);
return count;
}
static ssize_t vfront_porch_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.vsync);
return sprintf(buf, "%u\n", (sig & 0x000000FF));
}
/*
* vfront porch change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t vfront_porch_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 0xFF)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
0x000000FF, val);
return count;
}
/* FPDL3 only */
static ssize_t hsync_polarity_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.hsync);
return sprintf(buf, "%u\n", (config & (1U << 31)) >> 31);
}
/*
* HSYNC polarity change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t hsync_polarity_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 1)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
(1U << 31), val << 31);
return count;
}
static ssize_t vsync_polarity_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.vsync);
return sprintf(buf, "%u\n", (config & (1U << 31)) >> 31);
}
/*
* VSYNC polarity change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t vsync_polarity_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 1)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
(1U << 31), val << 31);
return count;
}
static ssize_t de_polarity_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
voutdev->config->regs.vsync);
return sprintf(buf, "%u\n", (config & (1U << 30)) >> 30);
}
/*
* DE polarity change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t de_polarity_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 1)
return -EINVAL;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
(1U << 30), val << 30);
return count;
}
static ssize_t fpdl3_output_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
s32 ret;
mutex_lock(&voutdev->mgbdev->i2c_lock);
ret = mgb4_i2c_read_byte(&voutdev->ser, 0x5B);
mutex_unlock(&voutdev->mgbdev->i2c_lock);
if (ret < 0)
return -EIO;
switch ((u8)ret & 0x03) {
case 0:
return sprintf(buf, "0\n");
case 1:
return sprintf(buf, "1\n");
case 3:
return sprintf(buf, "2\n");
default:
return -EINVAL;
}
}
/*
* FPD-Link width change is expected to be called on live streams. Video device
* locking/queue check is not needed.
*/
static ssize_t fpdl3_output_width_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u8 i2c_data;
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
switch (val) {
case 0: /* auto */
i2c_data = 0x00;
break;
case 1: /* single */
i2c_data = 0x01;
break;
case 2: /* dual */
i2c_data = 0x03;
break;
default:
return -EINVAL;
}
mutex_lock(&voutdev->mgbdev->i2c_lock);
ret = mgb4_i2c_mask_byte(&voutdev->ser, 0x5B, 0x03, i2c_data);
mutex_unlock(&voutdev->mgbdev->i2c_lock);
if (ret < 0)
return -EIO;
return count;
}
static ssize_t pclk_frequency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
return sprintf(buf, "%u\n", voutdev->freq);
}
static ssize_t pclk_frequency_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
int ret;
unsigned int dp;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
mutex_lock(voutdev->vdev.lock);
if (vb2_is_busy(voutdev->vdev.queue)) {
mutex_unlock(voutdev->vdev.lock);
return -EBUSY;
}
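/*
 * Note (assumption from the code below, not a datasheet): above 50000 the
 * synthesizer is programmed at half the requested frequency (val >> dp)
 * and a doubler stage appears to be selected via config bit 4 and bit 6
 * of serializer register 0x4F.
 */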
dp = (val > 50000) ? 1 : 0;
voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, val >> dp) << dp;
mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.config,
0x10, dp << 4);
mutex_lock(&voutdev->mgbdev->i2c_lock);
ret = mgb4_i2c_mask_byte(&voutdev->ser, 0x4F, 1 << 6, ((~dp) & 1) << 6);
mutex_unlock(&voutdev->mgbdev->i2c_lock);
mutex_unlock(voutdev->vdev.lock);
return (ret < 0) ? -EIO : count;
}
static DEVICE_ATTR_RO(output_id);
static DEVICE_ATTR_RW(video_source);
static DEVICE_ATTR_RW(display_width);
static DEVICE_ATTR_RW(display_height);
static DEVICE_ATTR_RW(frame_rate);
static DEVICE_ATTR_RW(hsync_polarity);
static DEVICE_ATTR_RW(vsync_polarity);
static DEVICE_ATTR_RW(de_polarity);
static DEVICE_ATTR_RW(pclk_frequency);
static DEVICE_ATTR_RW(hsync_width);
static DEVICE_ATTR_RW(vsync_width);
static DEVICE_ATTR_RW(hback_porch);
static DEVICE_ATTR_RW(hfront_porch);
static DEVICE_ATTR_RW(vback_porch);
static DEVICE_ATTR_RW(vfront_porch);
static DEVICE_ATTR_RW(fpdl3_output_width);
struct attribute *mgb4_fpdl3_out_attrs[] = {
&dev_attr_output_id.attr,
&dev_attr_video_source.attr,
&dev_attr_display_width.attr,
&dev_attr_display_height.attr,
&dev_attr_frame_rate.attr,
&dev_attr_hsync_polarity.attr,
&dev_attr_vsync_polarity.attr,
&dev_attr_de_polarity.attr,
&dev_attr_pclk_frequency.attr,
&dev_attr_hsync_width.attr,
&dev_attr_vsync_width.attr,
&dev_attr_hback_porch.attr,
&dev_attr_hfront_porch.attr,
&dev_attr_vback_porch.attr,
&dev_attr_vfront_porch.attr,
&dev_attr_fpdl3_output_width.attr,
NULL
};
struct attribute *mgb4_gmsl_out_attrs[] = {
&dev_attr_output_id.attr,
&dev_attr_video_source.attr,
&dev_attr_display_width.attr,
&dev_attr_display_height.attr,
&dev_attr_frame_rate.attr,
NULL
};
|
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
#include "include/gpio_types.h"
#include "hw_gpio.h"
#include "reg_helper.h"
#include "gpio_regs.h"
#undef FN
#define FN(reg_name, field_name) \
gpio->regs->field_name ## _shift, gpio->regs->field_name ## _mask
#define CTX \
gpio->base.ctx
#define REG(reg)\
(gpio->regs->reg)
static void store_registers(
struct hw_gpio *gpio)
{
REG_GET(MASK_reg, MASK, &gpio->store.mask);
REG_GET(A_reg, A, &gpio->store.a);
REG_GET(EN_reg, EN, &gpio->store.en);
/* TODO store GPIO_MUX_CONTROL if we ever use it */
}
static void restore_registers(
struct hw_gpio *gpio)
{
REG_UPDATE(MASK_reg, MASK, gpio->store.mask);
REG_UPDATE(A_reg, A, gpio->store.a);
REG_UPDATE(EN_reg, EN, gpio->store.en);
/* TODO restore GPIO_MUX_CONTROL if we ever use it */
}
bool dal_hw_gpio_open(
struct hw_gpio_pin *ptr,
enum gpio_mode mode)
{
struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
store_registers(pin);
ptr->opened = (dal_hw_gpio_config_mode(pin, mode) == GPIO_RESULT_OK);
return ptr->opened;
}
enum gpio_result dal_hw_gpio_get_value(
const struct hw_gpio_pin *ptr,
uint32_t *value)
{
const struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
enum gpio_result result = GPIO_RESULT_OK;
switch (ptr->mode) {
case GPIO_MODE_INPUT:
case GPIO_MODE_OUTPUT:
case GPIO_MODE_HARDWARE:
case GPIO_MODE_FAST_OUTPUT:
REG_GET(Y_reg, Y, value);
break;
default:
result = GPIO_RESULT_NON_SPECIFIC_ERROR;
}
return result;
}
enum gpio_result dal_hw_gpio_set_value(
const struct hw_gpio_pin *ptr,
uint32_t value)
{
struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
/* This is the public interface where the input comes from the
 * client, not shifted yet (because the client does not know
 * the shifts). */
switch (ptr->mode) {
case GPIO_MODE_OUTPUT:
REG_UPDATE(A_reg, A, value);
return GPIO_RESULT_OK;
case GPIO_MODE_FAST_OUTPUT:
/* We use (EN) for faster switching (used in DDC GPIO).
 * (A) is grounded, so the output is driven through (EN):
 * one state pulls the line down (output == 0), the other
 * tri-states it (set_value() writes ~value into EN). */
REG_UPDATE(EN_reg, EN, ~value);
return GPIO_RESULT_OK;
default:
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
}
enum gpio_result dal_hw_gpio_change_mode(
struct hw_gpio_pin *ptr,
enum gpio_mode mode)
{
struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
return dal_hw_gpio_config_mode(pin, mode);
}
void dal_hw_gpio_close(
struct hw_gpio_pin *ptr)
{
struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
restore_registers(pin);
ptr->mode = GPIO_MODE_UNKNOWN;
ptr->opened = false;
}
enum gpio_result dal_hw_gpio_config_mode(
struct hw_gpio *gpio,
enum gpio_mode mode)
{
gpio->base.mode = mode;
switch (mode) {
case GPIO_MODE_INPUT:
/* turn off output enable, act as input pin;
* program the pin as GPIO, mask out signal driven by HW */
REG_UPDATE(EN_reg, EN, 0);
REG_UPDATE(MASK_reg, MASK, 1);
return GPIO_RESULT_OK;
case GPIO_MODE_OUTPUT:
/* turn on output enable, act as output pin;
* program the pin as GPIO, mask out signal driven by HW */
REG_UPDATE(A_reg, A, 0);
REG_UPDATE(MASK_reg, MASK, 1);
return GPIO_RESULT_OK;
case GPIO_MODE_FAST_OUTPUT:
/* grounding the A register and then toggling the EN register
 * bit gives a faster effect on the rise time */
REG_UPDATE(A_reg, A, 0);
REG_UPDATE(MASK_reg, MASK, 1);
return GPIO_RESULT_OK;
case GPIO_MODE_HARDWARE:
/* program the pin as tri-state, pin is driven by HW */
REG_UPDATE(MASK_reg, MASK, 0);
return GPIO_RESULT_OK;
case GPIO_MODE_INTERRUPT:
/* Interrupt mode supported only by HPD (IrqGpio) pins. */
REG_UPDATE(MASK_reg, MASK, 0);
return GPIO_RESULT_OK;
default:
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
}
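/*
 * Illustrative sketch, not part of the DAL sources: how the "fast output"
 * mode described above is driven from the pin API. With (A) grounded,
 * set_value() only toggles (EN), so value 0 pulls the line low and value 1
 * releases it to tri-state; the caller below is made up.
 */
#if 0 /* illustrative only */
static void ddc_pulse_low_sketch(struct hw_gpio_pin *pin)
{
dal_hw_gpio_open(pin, GPIO_MODE_FAST_OUTPUT);
dal_hw_gpio_set_value(pin, 0); /* drive the line low via EN */
dal_hw_gpio_set_value(pin, 1); /* release the line (tri-state) */
dal_hw_gpio_close(pin);
}
#endif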
void dal_hw_gpio_construct(
struct hw_gpio *pin,
enum gpio_id id,
uint32_t en,
struct dc_context *ctx)
{
pin->base.ctx = ctx;
pin->base.id = id;
pin->base.en = en;
pin->base.mode = GPIO_MODE_UNKNOWN;
pin->base.opened = false;
pin->store.mask = 0;
pin->store.a = 0;
pin->store.en = 0;
pin->store.mux = 0;
pin->mux_supported = false;
}
void dal_hw_gpio_destruct(
struct hw_gpio *pin)
{
ASSERT(!pin->base.opened);
}
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _POWERNV_H
#define _POWERNV_H
/*
 * There are various hacks scattered throughout the generic powerpc arch code
 * that need to call into powernv platform stuff. The prototypes for those
 * functions are in asm/powernv.h.
*/
#include <asm/powernv.h>
#ifdef CONFIG_SMP
extern void pnv_smp_init(void);
#else
static inline void pnv_smp_init(void) { }
#endif
extern void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg) __noreturn;
struct pci_dev;
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
extern void pnv_pci_shutdown(void);
#else
static inline void pnv_pci_init(void) { }
static inline void pnv_pci_shutdown(void) { }
#endif
extern u32 pnv_get_supported_cpuidle_states(void);
extern void pnv_lpc_init(void);
extern void opal_handle_events(void);
extern bool opal_have_pending_events(void);
extern void opal_event_shutdown(void);
bool cpu_core_split_required(void);
struct memcons;
ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count);
u32 __init memcons_get_size(struct memcons *mc);
struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name);
void pnv_rng_init(void);
#endif /* _POWERNV_H */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/platforms/83xx/mpc830x_rdb.c
*
* Description: MPC830x RDB board specific routines.
* This file is based on mpc831x_rdb.c
*
* Copyright (C) Freescale Semiconductor, Inc. 2009. All rights reserved.
* Copyright (C) 2010. Ilya Yanok, Emcraft Systems, [email protected]
*/
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/ipic.h>
#include <asm/udbg.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include "mpc83xx.h"
/*
* Setup the architecture
*/
static void __init mpc830x_rdb_setup_arch(void)
{
mpc83xx_setup_arch();
mpc831x_usb_cfg();
}
static const char *board[] __initdata = {
"MPC8308RDB",
"fsl,mpc8308rdb",
"denx,mpc8308_p1m",
NULL
};
machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices);
define_machine(mpc830x_rdb) {
.name = "MPC830x RDB",
.compatibles = board,
.setup_arch = mpc830x_rdb_setup_arch,
.discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
.progress = udbg_progress,
};
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Collabora Ltd */
#ifndef __PANFROST_PERFCNT_H__
#define __PANFROST_PERFCNT_H__
#include "panfrost_device.h"
void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev);
void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev);
int panfrost_perfcnt_init(struct panfrost_device *pfdev);
void panfrost_perfcnt_fini(struct panfrost_device *pfdev);
void panfrost_perfcnt_close(struct drm_file *file_priv);
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif
|
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains platform specific structure definitions
* and init function used by Lunar Lake PCH.
*
* Copyright (c) 2022, Intel Corporation.
* All Rights Reserved.
*
*/
#include <linux/cpu.h>
#include <linux/pci.h>
#include "core.h"
const struct pmc_bit_map lnl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
{"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
{"XHCI", CNP_PMC_LTR_XHCI},
{"SOUTHPORT_F", ADL_PMC_LTR_SPF},
{"ME", CNP_PMC_LTR_ME},
/* EVA is Enterprise Value Add, doesn't really exist on PCH */
{"SATA1", CNP_PMC_LTR_EVA},
{"SOUTHPORT_C", CNP_PMC_LTR_SPC},
{"HD_AUDIO", CNP_PMC_LTR_AZ},
{"CNV", CNP_PMC_LTR_CNV},
{"LPSS", CNP_PMC_LTR_LPSS},
{"SOUTHPORT_D", CNP_PMC_LTR_SPD},
{"SOUTHPORT_E", CNP_PMC_LTR_SPE},
{"SATA2", CNP_PMC_LTR_CAM},
{"ESPI", CNP_PMC_LTR_ESPI},
{"SCC", CNP_PMC_LTR_SCC},
{"ISH", CNP_PMC_LTR_ISH},
{"UFSX2", CNP_PMC_LTR_UFSX2},
{"EMMC", CNP_PMC_LTR_EMMC},
/*
* Check intel_pmc_core_ids[] users of cnp_reg_map for
* a list of core SoCs using this.
*/
{"WIGIG", ICL_PMC_LTR_WIGIG},
{"THC0", TGL_PMC_LTR_THC0},
{"THC1", TGL_PMC_LTR_THC1},
{"SOUTHPORT_G", CNP_PMC_LTR_RESERVED},
{"ESE", MTL_PMC_LTR_ESE},
{"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
{"DMI3", ARL_PMC_LTR_DMI3},
{"OSSE", LNL_PMC_LTR_OSSE},
/* Below two cannot be used for LTR_IGNORE */
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
{"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
{}
};
const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0), 0},
{"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0},
{"ESPISPI_PGD0_PG_STS", BIT(2), 0},
{"XHCI_PGD0_PG_STS", BIT(3), 1},
{"SPA_PGD0_PG_STS", BIT(4), 1},
{"SPB_PGD0_PG_STS", BIT(5), 1},
{"SPR16B0_PGD0_PG_STS", BIT(6), 0},
{"GBE_PGD0_PG_STS", BIT(7), 1},
{"SBR8B7_PGD0_PG_STS", BIT(8), 0},
{"SBR8B6_PGD0_PG_STS", BIT(9), 0},
{"SBR16B1_PGD0_PG_STS", BIT(10), 0},
{"SBR8B8_PGD0_PG_STS", BIT(11), 0},
{"ESE_PGD3_PG_STS", BIT(12), 1},
{"D2D_DISP_PGD0_PG_STS", BIT(13), 1},
{"LPSS_PGD0_PG_STS", BIT(14), 1},
{"LPC_PGD0_PG_STS", BIT(15), 0},
{"SMB_PGD0_PG_STS", BIT(16), 0},
{"ISH_PGD0_PG_STS", BIT(17), 0},
{"SBR8B2_PGD0_PG_STS", BIT(18), 0},
{"NPK_PGD0_PG_STS", BIT(19), 0},
{"D2D_NOC_PGD0_PG_STS", BIT(20), 0},
{"SAFSS_PGD0_PG_STS", BIT(21), 0},
{"FUSE_PGD0_PG_STS", BIT(22), 0},
{"D2D_DISP_PGD1_PG_STS", BIT(23), 1},
{"MPFPW1_PGD0_PG_STS", BIT(24), 0},
{"XDCI_PGD0_PG_STS", BIT(25), 1},
{"EXI_PGD0_PG_STS", BIT(26), 0},
{"CSE_PGD0_PG_STS", BIT(27), 1},
{"KVMCC_PGD0_PG_STS", BIT(28), 1},
{"PMT_PGD0_PG_STS", BIT(29), 1},
{"CLINK_PGD0_PG_STS", BIT(30), 1},
{"PTIO_PGD0_PG_STS", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0), 1},
{"SUSRAM_PGD0_PG_STS", BIT(1), 1},
{"SMT1_PGD0_PG_STS", BIT(2), 1},
{"U3FPW1_PGD0_PG_STS", BIT(3), 0},
{"SMS2_PGD0_PG_STS", BIT(4), 1},
{"SMS1_PGD0_PG_STS", BIT(5), 1},
{"CSMERTC_PGD0_PG_STS", BIT(6), 0},
{"CSMEPSF_PGD0_PG_STS", BIT(7), 0},
{"FIA_PG_PGD0_PG_STS", BIT(8), 0},
{"SBR16B4_PGD0_PG_STS", BIT(9), 0},
{"P2SB8B_PGD0_PG_STS", BIT(10), 1},
{"DBG_SBR_PGD0_PG_STS", BIT(11), 0},
{"SBR8B9_PGD0_PG_STS", BIT(12), 0},
{"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1},
{"SBR8B10_PGD0_PG_STS", BIT(14), 0},
{"SBR16B3_PGD0_PG_STS", BIT(15), 0},
{"G5FPW1_PGD0_PG_STS", BIT(16), 0},
{"SBRG_PGD0_PG_STS", BIT(17), 0},
{"PSF4_PGD0_PG_STS", BIT(18), 0},
{"CNVI_PGD0_PG_STS", BIT(19), 0},
{"USFX2_PGD0_PG_STS", BIT(20), 1},
{"ENDBG_PGD0_PG_STS", BIT(21), 0},
{"FIACPCB_P5X4_PGD0_PG_STS", BIT(22), 0},
{"SBR8B3_PGD0_PG_STS", BIT(23), 0},
{"SBR8B0_PGD0_PG_STS", BIT(24), 0},
{"NPK_PGD1_PG_STS", BIT(25), 0},
{"OSSE_HOTHAM_PGD0_PG_STS", BIT(26), 1},
{"D2D_NOC_PGD2_PG_STS", BIT(27), 1},
{"SBR8B1_PGD0_PG_STS", BIT(28), 0},
{"PSF6_PGD0_PG_STS", BIT(29), 0},
{"PSF7_PGD0_PG_STS", BIT(30), 0},
{"FIA_U_PGD0_PG_STS", BIT(31), 0},
{}
};
const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
{"PSF8_PGD0_PG_STS", BIT(0), 0},
{"SBR16B2_PGD0_PG_STS", BIT(1), 0},
{"D2D_IPU_PGD0_PG_STS", BIT(2), 1},
{"FIACPCB_U_PGD0_PG_STS", BIT(3), 0},
{"TAM_PGD0_PG_STS", BIT(4), 1},
{"D2D_NOC_PGD1_PG_STS", BIT(5), 1},
{"TBTLSX_PGD0_PG_STS", BIT(6), 1},
{"THC0_PGD0_PG_STS", BIT(7), 1},
{"THC1_PGD0_PG_STS", BIT(8), 1},
{"PMC_PGD0_PG_STS", BIT(9), 0},
{"SBR8B5_PGD0_PG_STS", BIT(10), 0},
{"UFSPW1_PGD0_PG_STS", BIT(11), 0},
{"DBC_PGD0_PG_STS", BIT(12), 0},
{"TCSS_PGD0_PG_STS", BIT(13), 0},
{"FIA_P5X4_PGD0_PG_STS", BIT(14), 0},
{"DISP_PGA_PGD0_PG_STS", BIT(15), 0},
{"DISP_PSF_PGD0_PG_STS", BIT(16), 0},
{"PSF0_PGD0_PG_STS", BIT(17), 0},
{"P2SB16B_PGD0_PG_STS", BIT(18), 1},
{"ACE_PGD0_PG_STS", BIT(19), 0},
{"ACE_PGD1_PG_STS", BIT(20), 0},
{"ACE_PGD2_PG_STS", BIT(21), 0},
{"ACE_PGD3_PG_STS", BIT(22), 0},
{"ACE_PGD4_PG_STS", BIT(23), 0},
{"ACE_PGD5_PG_STS", BIT(24), 0},
{"ACE_PGD6_PG_STS", BIT(25), 0},
{"ACE_PGD7_PG_STS", BIT(26), 0},
{"ACE_PGD8_PG_STS", BIT(27), 0},
{"ACE_PGD9_PG_STS", BIT(28), 0},
{"ACE_PGD10_PG_STS", BIT(29), 0},
{"FIACPCB_PG_PGD0_PG_STS", BIT(30), 0},
{"OSSE_PGD0_PG_STS", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_d3_status_0_map[] = {
{"LPSS_D3_STS", BIT(3), 1},
{"XDCI_D3_STS", BIT(4), 1},
{"XHCI_D3_STS", BIT(5), 1},
{"SPA_D3_STS", BIT(12), 0},
{"SPB_D3_STS", BIT(13), 0},
{"OSSE_D3_STS", BIT(15), 0},
{"ESPISPI_D3_STS", BIT(18), 0},
{"PSTH_D3_STS", BIT(21), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_1_map[] = {
{"OSSE_SMT1_D3_STS", BIT(7), 0},
{"GBE_D3_STS", BIT(19), 0},
{"ITSS_D3_STS", BIT(23), 0},
{"CNVI_D3_STS", BIT(27), 0},
{"UFSX2_D3_STS", BIT(28), 1},
{"OSSE_HOTHAM_D3_STS", BIT(31), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_2_map[] = {
{"ESE_D3_STS", BIT(0), 0},
{"CSMERTC_D3_STS", BIT(1), 0},
{"SUSRAM_D3_STS", BIT(2), 0},
{"CSE_D3_STS", BIT(4), 0},
{"KVMCC_D3_STS", BIT(5), 0},
{"USBR0_D3_STS", BIT(6), 0},
{"ISH_D3_STS", BIT(7), 0},
{"SMT1_D3_STS", BIT(8), 0},
{"SMT2_D3_STS", BIT(9), 0},
{"SMT3_D3_STS", BIT(10), 0},
{"OSSE_SMT2_D3_STS", BIT(13), 0},
{"CLINK_D3_STS", BIT(14), 0},
{"PTIO_D3_STS", BIT(16), 0},
{"PMT_D3_STS", BIT(17), 0},
{"SMS1_D3_STS", BIT(18), 0},
{"SMS2_D3_STS", BIT(19), 0},
{}
};
const struct pmc_bit_map lnl_d3_status_3_map[] = {
{"THC0_D3_STS", BIT(14), 1},
{"THC1_D3_STS", BIT(15), 1},
{"OSSE_SMT3_D3_STS", BIT(21), 0},
{"ACE_D3_STS", BIT(23), 0},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_0_map[] = {
{"LPSS_VNN_REQ_STS", BIT(3), 1},
{"OSSE_VNN_REQ_STS", BIT(15), 1},
{"ESPISPI_VNN_REQ_STS", BIT(18), 1},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
{"NPK_VNN_REQ_STS", BIT(4), 1},
{"OSSE_SMT1_VNN_REQ_STS", BIT(7), 1},
{"DFXAGG_VNN_REQ_STS", BIT(8), 0},
{"EXI_VNN_REQ_STS", BIT(9), 1},
{"P2D_VNN_REQ_STS", BIT(18), 1},
{"GBE_VNN_REQ_STS", BIT(19), 1},
{"SMB_VNN_REQ_STS", BIT(25), 1},
{"LPC_VNN_REQ_STS", BIT(26), 0},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
{"eSE_VNN_REQ_STS", BIT(0), 1},
{"CSMERTC_VNN_REQ_STS", BIT(1), 1},
{"CSE_VNN_REQ_STS", BIT(4), 1},
{"ISH_VNN_REQ_STS", BIT(7), 1},
{"SMT1_VNN_REQ_STS", BIT(8), 1},
{"CLINK_VNN_REQ_STS", BIT(14), 1},
{"SMS1_VNN_REQ_STS", BIT(18), 1},
{"SMS2_VNN_REQ_STS", BIT(19), 1},
{"GPIOCOM4_VNN_REQ_STS", BIT(20), 1},
{"GPIOCOM3_VNN_REQ_STS", BIT(21), 1},
{"GPIOCOM2_VNN_REQ_STS", BIT(22), 0},
{"GPIOCOM1_VNN_REQ_STS", BIT(23), 1},
{"GPIOCOM0_VNN_REQ_STS", BIT(24), 1},
{}
};
const struct pmc_bit_map lnl_vnn_req_status_3_map[] = {
{"DISP_SHIM_VNN_REQ_STS", BIT(2), 0},
{"DTS0_VNN_REQ_STS", BIT(7), 0},
{"GPIOCOM5_VNN_REQ_STS", BIT(11), 2},
{}
};
const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0), 0},
{"TS_OFF_REQ_STS", BIT(1), 0},
{"PNDE_MET_REQ_STS", BIT(2), 1},
{"PCIE_DEEP_PM_REQ_STS", BIT(3), 0},
{"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4), 0},
{"NPK_VNNAON_REQ_STS", BIT(5), 0},
{"VNN_SOC_REQ_STS", BIT(6), 1},
{"ISH_VNNAON_REQ_STS", BIT(7), 0},
{"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1},
{"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1},
{"D2D_NOC_IPU_QACTIVE_REQ_STS", BIT(10), 1},
{"PLT_GREATER_REQ_STS", BIT(11), 1},
{"PCIE_CLKREQ_REQ_STS", BIT(12), 0},
{"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0},
{"PM_SYNC_STATES_REQ_STS", BIT(14), 0},
{"EA_REQ_STS", BIT(15), 0},
{"MPHY_CORE_OFF_REQ_STS", BIT(16), 0},
{"BRK_EV_EN_REQ_STS", BIT(17), 0},
{"AUTO_DEMO_EN_REQ_STS", BIT(18), 0},
{"ITSS_CLK_SRC_REQ_STS", BIT(19), 1},
{"LPC_CLK_SRC_REQ_STS", BIT(20), 0},
{"ARC_IDLE_REQ_STS", BIT(21), 0},
{"MPHY_SUS_REQ_STS", BIT(22), 0},
{"FIA_DEEP_PM_REQ_STS", BIT(23), 0},
{"UXD_CONNECTED_REQ_STS", BIT(24), 1},
{"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0},
{"D2D_NOC_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1},
{"PRE_WAKE0_REQ_STS", BIT(27), 1},
{"PRE_WAKE1_REQ_STS", BIT(28), 1},
{"PRE_WAKE2_EN_REQ_STS", BIT(29), 1},
{"WOV_REQ_STS", BIT(30), 0},
{"D2D_NOC_DISP_EDP_QACTIVE_REQ_STS_31", BIT(31), 1},
{}
};
const struct pmc_bit_map lnl_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0), 0},
{"AON3_OFF_STS", BIT(1), 1},
{"AON4_OFF_STS", BIT(2), 1},
{"AON5_OFF_STS", BIT(3), 1},
{"AON1_OFF_STS", BIT(4), 0},
{"MPFPW1_0_PLL_OFF_STS", BIT(6), 1},
{"USB3_PLL_OFF_STS", BIT(8), 1},
{"AON3_SPL_OFF_STS", BIT(9), 1},
{"G5FPW1_PLL_OFF_STS", BIT(15), 1},
{"XTAL_AGGR_OFF_STS", BIT(17), 1},
{"USB2_PLL_OFF_STS", BIT(18), 0},
{"SAF_PLL_OFF_STS", BIT(19), 1},
{"SE_TCSS_PLL_OFF_STS", BIT(20), 1},
{"DDI_PLL_OFF_STS", BIT(21), 1},
{"FILTER_PLL_OFF_STS", BIT(22), 1},
{"ACE_PLL_OFF_STS", BIT(24), 0},
{"FABRIC_PLL_OFF_STS", BIT(25), 1},
{"SOC_PLL_OFF_STS", BIT(26), 1},
{"REF_OFF_STS", BIT(28), 1},
{"IMG_OFF_STS", BIT(29), 1},
{"RTC_PLL_OFF_STS", BIT(31), 0},
{}
};
const struct pmc_bit_map lnl_signal_status_map[] = {
{"LSX_Wake0_STS", BIT(0), 0},
{"LSX_Wake1_STS", BIT(1), 0},
{"LSX_Wake2_STS", BIT(2), 0},
{"LSX_Wake3_STS", BIT(3), 0},
{"LSX_Wake4_STS", BIT(4), 0},
{"LSX_Wake5_STS", BIT(5), 0},
{"LSX_Wake6_STS", BIT(6), 0},
{"LSX_Wake7_STS", BIT(7), 0},
{"LPSS_Wake0_STS", BIT(8), 1},
{"LPSS_Wake1_STS", BIT(9), 1},
{"Int_Timer_SS_Wake0_STS", BIT(10), 1},
{"Int_Timer_SS_Wake1_STS", BIT(11), 1},
{"Int_Timer_SS_Wake2_STS", BIT(12), 1},
{"Int_Timer_SS_Wake3_STS", BIT(13), 1},
{"Int_Timer_SS_Wake4_STS", BIT(14), 1},
{"Int_Timer_SS_Wake5_STS", BIT(15), 1},
{}
};
const struct pmc_bit_map lnl_rsc_status_map[] = {
{"Memory", 0, 1},
{"PSF0", 0, 1},
{"PSF4", 0, 1},
{"PSF6", 0, 1},
{"PSF7", 0, 1},
{"PSF8", 0, 1},
{"SAF_CFI_LINK", 0, 1},
{"SBR", 0, 1},
{}
};
const struct pmc_bit_map *lnl_lpm_maps[] = {
lnl_clocksource_status_map,
lnl_power_gating_status_0_map,
lnl_power_gating_status_1_map,
lnl_power_gating_status_2_map,
lnl_d3_status_0_map,
lnl_d3_status_1_map,
lnl_d3_status_2_map,
lnl_d3_status_3_map,
lnl_vnn_req_status_0_map,
lnl_vnn_req_status_1_map,
lnl_vnn_req_status_2_map,
lnl_vnn_req_status_3_map,
lnl_vnn_misc_status_map,
lnl_signal_status_map,
NULL
};
const struct pmc_bit_map *lnl_blk_maps[] = {
lnl_power_gating_status_0_map,
lnl_power_gating_status_1_map,
lnl_power_gating_status_2_map,
lnl_rsc_status_map,
lnl_vnn_req_status_0_map,
lnl_vnn_req_status_1_map,
lnl_vnn_req_status_2_map,
lnl_vnn_req_status_3_map,
lnl_d3_status_0_map,
lnl_d3_status_1_map,
lnl_d3_status_2_map,
lnl_d3_status_3_map,
lnl_clocksource_status_map,
lnl_vnn_misc_status_map,
lnl_signal_status_map,
NULL
};
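/*
 * Note (assumption from how the PPFEAR registers are consumed, not a
 * datasheet): the bit positions below intentionally restart at BIT(0)
 * every eight entries, because the PPFEAR status is read as a sequence
 * of 8-bit registers (see .ppfear_buckets / LNL_PPFEAR_NUM_ENTRIES), so
 * each group of eight names maps onto one byte of the register space.
 */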
const struct pmc_bit_map lnl_pfear_map[] = {
{"PMC_0", BIT(0)},
{"FUSE_OSSE", BIT(1)},
{"ESPISPI", BIT(2)},
{"XHCI", BIT(3)},
{"SPA", BIT(4)},
{"SPB", BIT(5)},
{"SBR16B0", BIT(6)},
{"GBE", BIT(7)},
{"SBR8B7", BIT(0)},
{"SBR8B6", BIT(1)},
{"SBR16B1", BIT(1)},
{"SBR8B8", BIT(2)},
{"ESE", BIT(3)},
{"SBR8B10", BIT(4)},
{"D2D_DISP_0", BIT(5)},
{"LPSS", BIT(6)},
{"LPC", BIT(7)},
{"SMB", BIT(0)},
{"ISH", BIT(1)},
{"SBR8B2", BIT(2)},
{"NPK_0", BIT(3)},
{"D2D_NOC_0", BIT(4)},
{"SAFSS", BIT(5)},
{"FUSE", BIT(6)},
{"D2D_DISP_1", BIT(7)},
{"MPFPW1", BIT(0)},
{"XDCI", BIT(1)},
{"EXI", BIT(2)},
{"CSE", BIT(3)},
{"KVMCC", BIT(4)},
{"PMT", BIT(5)},
{"CLINK", BIT(6)},
{"PTIO", BIT(7)},
{"USBR", BIT(0)},
{"SUSRAM", BIT(1)},
{"SMT1", BIT(2)},
{"U3FPW1", BIT(3)},
{"SMS2", BIT(4)},
{"SMS1", BIT(5)},
{"CSMERTC", BIT(6)},
{"CSMEPSF", BIT(7)},
{"FIA_PG", BIT(0)},
{"SBR16B4", BIT(1)},
{"P2SB8B", BIT(2)},
{"DBG_SBR", BIT(3)},
{"SBR8B9", BIT(4)},
{"OSSE_SMT1", BIT(5)},
{"SBR8B10", BIT(6)},
{"SBR16B3", BIT(7)},
{"G5FPW1", BIT(0)},
{"SBRG", BIT(1)},
{"PSF4", BIT(2)},
{"CNVI", BIT(3)},
{"UFSX2", BIT(4)},
{"ENDBG", BIT(5)},
{"FIACPCB_P5X4", BIT(6)},
{"SBR8B3", BIT(7)},
{"SBR8B0", BIT(0)},
{"NPK_1", BIT(1)},
{"OSSE_HOTHAM", BIT(2)},
{"D2D_NOC_2", BIT(3)},
{"SBR8B1", BIT(4)},
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
{"FIA_U", BIT(7)},
{"PSF8", BIT(0)},
{"SBR16B2", BIT(1)},
{"D2D_IPU", BIT(2)},
{"FIACPCB_U", BIT(3)},
{"TAM", BIT(4)},
{"D2D_NOC_1", BIT(5)},
{"TBTLSX", BIT(6)},
{"THC0", BIT(7)},
{"THC1", BIT(0)},
{"PMC_1", BIT(1)},
{"SBR8B5", BIT(2)},
{"UFSPW1", BIT(3)},
{"DBC", BIT(4)},
{"TCSS", BIT(5)},
{"FIA_P5X4", BIT(6)},
{"DISP_PGA", BIT(7)},
{"DBG_PSF", BIT(0)},
{"PSF0", BIT(1)},
{"P2SB16B", BIT(2)},
{"ACE0", BIT(3)},
{"ACE1", BIT(4)},
{"ACE2", BIT(5)},
{"ACE3", BIT(6)},
{"ACE4", BIT(7)},
{"ACE5", BIT(0)},
{"ACE6", BIT(1)},
{"ACE7", BIT(2)},
{"ACE8", BIT(3)},
{"ACE9", BIT(4)},
{"ACE10", BIT(5)},
{"FIACPCB", BIT(6)},
{"OSSE", BIT(7)},
{}
};
const struct pmc_bit_map *ext_lnl_pfear_map[] = {
lnl_pfear_map,
NULL
};
const struct pmc_reg_map lnl_socm_reg_map = {
.pfear_sts = ext_lnl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
.ltr_show_sts = lnl_ltr_show_map,
.msr_sts = msr_map,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
.regmap_length = LNL_PMC_MMIO_REG_LEN,
.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
.ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
.ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED,
.lpm_num_maps = ADL_LPM_NUM_MAPS,
.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
.etr3_offset = ETR3_OFFSET,
.lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
.lpm_priority_offset = MTL_LPM_PRI_OFFSET,
.lpm_en_offset = MTL_LPM_EN_OFFSET,
.lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
.lpm_sts = lnl_lpm_maps,
.lpm_status_offset = MTL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
.s0ix_blocker_maps = lnl_blk_maps,
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
};
#define LNL_NPU_PCI_DEV 0x643e
#define LNL_IPU_PCI_DEV 0x645d
/*
 * Put selected devices that have no driver into D3 so that they do
 * not block Package C entry.
*/
static void lnl_d3_fixup(void)
{
pmc_core_set_device_d3(LNL_IPU_PCI_DEV);
pmc_core_set_device_d3(LNL_NPU_PCI_DEV);
}
static int lnl_resume(struct pmc_dev *pmcdev)
{
lnl_d3_fixup();
return cnl_resume(pmcdev);
}
int lnl_core_init(struct pmc_dev *pmcdev)
{
int ret;
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
lnl_d3_fixup();
pmcdev->suspend = cnl_suspend;
pmcdev->resume = lnl_resume;
pmc->map = &lnl_socm_reg_map;
ret = get_primary_reg_base(pmc);
if (ret)
return ret;
pmc_core_get_low_power_modes(pmcdev);
return 0;
}
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 IBM Corporation
*/
#ifndef _ASM_POWERPC_KVM_GUEST_H_
#define _ASM_POWERPC_KVM_GUEST_H_
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
#include <linux/jump_label.h>
DECLARE_STATIC_KEY_FALSE(kvm_guest);
static inline bool is_kvm_guest(void)
{
return static_branch_unlikely(&kvm_guest);
}
int __init check_kvm_guest(void);
#else
static inline bool is_kvm_guest(void) { return false; }
static inline int check_kvm_guest(void) { return 0; }
#endif
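/*
 * Minimal usage sketch (illustrative; the caller and message are made up):
 * the static key keeps the common bare-metal path branch-free.
 */
#if 0 /* illustrative only */
static void report_virt_sketch(void)
{
if (is_kvm_guest()) /* patched static branch, ~free when false */
pr_info("running as a KVM guest\n");
}
#endif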
#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
|
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Global definitions for the HIPPI interface.
*
* Version: @(#)if_hippi.h 1.0.0 05/26/97
*
* Author: Fred N. van Kempen, <[email protected]>
* Donald Becker, <[email protected]>
* Alan Cox, <[email protected]>
* Steve Whitehouse, <[email protected]>
* Jes Sorensen, <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_HIPPI_H
#define _LINUX_IF_HIPPI_H
#include <linux/types.h>
#include <asm/byteorder.h>
/*
* HIPPI magic constants.
*/
#define HIPPI_ALEN 6 /* Bytes in one HIPPI hw-addr */
#define HIPPI_HLEN sizeof(struct hippi_hdr)
#define HIPPI_ZLEN 0 /* Min. bytes in frame without FCS */
#define HIPPI_DATA_LEN 65280 /* Max. bytes in payload */
#define HIPPI_FRAME_LEN (HIPPI_DATA_LEN + HIPPI_HLEN)
/* Max. bytes in frame without FCS */
/*
* Define LLC and SNAP constants.
*/
#define HIPPI_EXTENDED_SAP 0xAA
#define HIPPI_UI_CMD 0x03
/*
 * Do we need to list some sort of IDs here?
*/
/*
* HIPPI statistics collection data.
*/
struct hipnet_statistics {
int rx_packets; /* total packets received */
int tx_packets; /* total packets transmitted */
int rx_errors; /* bad packets received */
int tx_errors; /* packet transmit problems */
int rx_dropped; /* no space in linux buffers */
int tx_dropped; /* no space available in linux */
/* detailed rx_errors: */
int rx_length_errors;
int rx_over_errors; /* receiver ring buff overflow */
int rx_crc_errors; /* recved pkt with crc error */
int rx_frame_errors; /* recv'd frame alignment error */
int rx_fifo_errors; /* recv'r fifo overrun */
int rx_missed_errors; /* receiver missed packet */
/* detailed tx_errors */
int tx_aborted_errors;
int tx_carrier_errors;
int tx_fifo_errors;
int tx_heartbeat_errors;
int tx_window_errors;
};
struct hippi_fp_hdr {
#if 0
__u8 ulp; /* must contain 4 */
#if defined (__BIG_ENDIAN_BITFIELD)
__u8 d1_data_present:1; /* must be 1 */
__u8 start_d2_burst_boundary:1; /* must be zero */
__u8 reserved:6; /* must be zero */
#if 0
__u16 reserved1:5;
__u16 d1_area_size:8; /* must be 3 */
__u16 d2_offset:3; /* must be zero */
#endif
#elif defined(__LITTLE_ENDIAN_BITFIELD)
__u8 reserved:6; /* must be zero */
__u8 start_d2_burst_boundary:1; /* must be zero */
__u8 d1_data_present:1; /* must be 1 */
#if 0
__u16 d2_offset:3; /* must be zero */
__u16 d1_area_size:8; /* must be 3 */
__u16 reserved1:5; /* must be zero */
#endif
#else
#error "Please fix <asm/byteorder.h>"
#endif
#else
__be32 fixed;
#endif
__be32 d2_size;
} __attribute__((packed));
struct hippi_le_hdr {
#if defined (__BIG_ENDIAN_BITFIELD)
__u8 fc:3;
__u8 double_wide:1;
__u8 message_type:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
__u8 message_type:4;
__u8 double_wide:1;
__u8 fc:3;
#endif
__u8 dest_switch_addr[3];
#if defined (__BIG_ENDIAN_BITFIELD)
__u8 dest_addr_type:4,
src_addr_type:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
__u8 src_addr_type:4,
dest_addr_type:4;
#endif
__u8 src_switch_addr[3];
__u16 reserved;
__u8 daddr[HIPPI_ALEN];
__u16 locally_administered;
__u8 saddr[HIPPI_ALEN];
} __attribute__((packed));
#define HIPPI_OUI_LEN 3
/*
* Looks like the dsap and ssap fields have been swapped by mistake in
* RFC 2067 "IP over HIPPI".
*/
struct hippi_snap_hdr {
__u8 dsap; /* always 0xAA */
__u8 ssap; /* always 0xAA */
__u8 ctrl; /* always 0x03 */
__u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/
__be16 ethertype; /* packet type ID field */
} __attribute__((packed));
struct hippi_hdr {
struct hippi_fp_hdr fp;
struct hippi_le_hdr le;
struct hippi_snap_hdr snap;
} __attribute__((packed));
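/*
 * Illustrative arithmetic (derived from the packed structs above): the
 * on-the-wire header is FP (8 bytes) + LE (24 bytes) + SNAP (8 bytes),
 * so HIPPI_HLEN = sizeof(struct hippi_hdr) = 40 and
 * HIPPI_FRAME_LEN = 65280 + 40 = 65320 bytes.
 */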
#endif /* _LINUX_IF_HIPPI_H */
|
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "sb-clean.h"
#include "trace.h"
void bch2_journal_pos_from_member_info_set(struct bch_fs *c)
{
lockdep_assert_held(&c->sb_lock);
for_each_member_device(c, ca) {
struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
m->last_journal_bucket = cpu_to_le32(ca->journal.cur_idx);
m->last_journal_bucket_offset = cpu_to_le32(ca->mi.bucket_size - ca->journal.sectors_free);
}
}
void bch2_journal_pos_from_member_info_resume(struct bch_fs *c)
{
mutex_lock(&c->sb_lock);
for_each_member_device(c, ca) {
struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
unsigned idx = le32_to_cpu(m.last_journal_bucket);
if (idx < ca->journal.nr)
ca->journal.cur_idx = idx;
unsigned offset = le32_to_cpu(m.last_journal_bucket_offset);
if (offset <= ca->mi.bucket_size)
ca->journal.sectors_free = ca->mi.bucket_size - offset;
}
mutex_unlock(&c->sb_lock);
}
void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct journal_replay *j)
{
darray_for_each(j->ptrs, i) {
if (i != j->ptrs.data)
prt_printf(out, " ");
prt_printf(out, "%u:%u:%u (sector %llu)",
i->dev, i->bucket, i->bucket_offset, i->sector);
}
}
static void bch2_journal_replay_to_text(struct printbuf *out, struct bch_fs *c,
struct journal_replay *j)
{
prt_printf(out, "seq %llu ", le64_to_cpu(j->j.seq));
bch2_journal_ptrs_to_text(out, c, j);
for_each_jset_entry_type(entry, &j->j, BCH_JSET_ENTRY_datetime) {
struct jset_entry_datetime *datetime =
container_of(entry, struct jset_entry_datetime, entry);
bch2_prt_datetime(out, le64_to_cpu(datetime->seconds));
break;
}
}
static struct nonce journal_nonce(const struct jset *jset)
{
return (struct nonce) {{
[0] = 0,
[1] = ((__le32 *) &jset->seq)[0],
[2] = ((__le32 *) &jset->seq)[1],
[3] = BCH_NONCE_JOURNAL,
}};
}
static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum)
{
if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) {
*csum = (struct bch_csum) {};
return false;
}
*csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
return !bch2_crc_cmp(j->csum, *csum);
}
static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
return (seq - c->journal_entries_base_seq) & (~0U >> 1);
}
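/*
 * Illustrative only: with journal_entries_base_seq == 1000, seq 1005 maps
 * to index 5. The "& (~0U >> 1)" masks the difference to below 2^31,
 * matching the +-2 billion window assumed when the base is chosen in
 * journal_entry_add() below.
 */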
static void __journal_replay_free(struct bch_fs *c,
struct journal_replay *i)
{
struct journal_replay **p =
genradix_ptr(&c->journal_entries,
journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));
BUG_ON(*p != i);
*p = NULL;
kvfree(i);
}
static void journal_replay_free(struct bch_fs *c, struct journal_replay *i, bool blacklisted)
{
if (blacklisted)
i->ignore_blacklisted = true;
else
i->ignore_not_dirty = true;
if (!c->opts.read_entire_journal)
__journal_replay_free(c, i);
}
struct journal_list {
struct closure cl;
u64 last_seq;
struct mutex lock;
int ret;
};
#define JOURNAL_ENTRY_ADD_OK 0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE 5
/*
* Given a journal entry we just read, add it to the list of journal entries to
* be replayed:
*/
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
struct journal_ptr entry_ptr,
struct journal_list *jlist, struct jset *j)
{
struct genradix_iter iter;
struct journal_replay **_i, *i, *dup;
size_t bytes = vstruct_bytes(j);
u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
struct printbuf buf = PRINTBUF;
int ret = JOURNAL_ENTRY_ADD_OK;
if (!c->journal.oldest_seq_found_ondisk ||
le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk)
c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq);
/* Is this entry older than the range we need? */
if (!c->opts.read_entire_journal &&
le64_to_cpu(j->seq) < jlist->last_seq)
return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
/*
* genradixes are indexed by a ulong, not a u64, so we can't index them
 * by sequence number directly: assume instead that they will all fall
 * within a range of +-2 billion of the first one we find.
*/
if (!c->journal_entries_base_seq)
c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
/* Drop entries we don't need anymore */
if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
genradix_for_each_from(&c->journal_entries, iter, _i,
journal_entry_radix_idx(c, jlist->last_seq)) {
i = *_i;
if (journal_replay_ignore(i))
continue;
if (le64_to_cpu(i->j.seq) >= last_seq)
break;
journal_replay_free(c, i, false);
}
}
jlist->last_seq = max(jlist->last_seq, last_seq);
_i = genradix_ptr_alloc(&c->journal_entries,
journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
GFP_KERNEL);
if (!_i)
return -BCH_ERR_ENOMEM_journal_entry_add;
/*
* Duplicate journal entries? If so we want the one that didn't have a
* checksum error:
*/
dup = *_i;
if (dup) {
bool identical = bytes == vstruct_bytes(&dup->j) &&
!memcmp(j, &dup->j, bytes);
bool not_identical = !identical &&
entry_ptr.csum_good &&
dup->csum_good;
bool same_device = false;
darray_for_each(dup->ptrs, ptr)
if (ptr->dev == ca->dev_idx)
same_device = true;
ret = darray_push(&dup->ptrs, entry_ptr);
if (ret)
goto out;
bch2_journal_replay_to_text(&buf, c, dup);
fsck_err_on(same_device,
c, journal_entry_dup_same_device,
"duplicate journal entry on same device\n %s",
buf.buf);
fsck_err_on(not_identical,
c, journal_entry_replicas_data_mismatch,
"found duplicate but non identical journal entries\n %s",
buf.buf);
if (entry_ptr.csum_good && !identical)
goto replace;
goto out;
}
replace:
i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
if (!i)
return -BCH_ERR_ENOMEM_journal_entry_add;
darray_init(&i->ptrs);
i->csum_good = entry_ptr.csum_good;
i->ignore_blacklisted = false;
i->ignore_not_dirty = false;
unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");
if (dup) {
/* The first ptr should represent the jset we kept: */
darray_for_each(dup->ptrs, ptr)
darray_push(&i->ptrs, *ptr);
__journal_replay_free(c, dup);
} else {
darray_push(&i->ptrs, entry_ptr);
}
*_i = i;
out:
fsck_err:
printbuf_exit(&buf);
return ret;
}
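/*
 * Note on the duplicate handling above (restating the code, not adding
 * semantics): when a sequence number is seen twice, the copy whose
 * checksum verified is kept, device pointers accumulate in ->ptrs, and
 * fsck errors are raised both for two copies on one device and for
 * non-identical copies that both passed their checksums.
 */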
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
struct jset_entry *entry;
for (entry = start; entry != end; entry = vstruct_next(entry))
memset(entry, 0, sizeof(*entry));
}
#define JOURNAL_ENTRY_REREAD 5
#define JOURNAL_ENTRY_NONE 6
#define JOURNAL_ENTRY_BAD 7
static void journal_entry_err_msg(struct printbuf *out,
u32 version,
struct jset *jset,
struct jset_entry *entry)
{
prt_str(out, "invalid journal entry, version=");
bch2_version_to_text(out, version);
if (entry) {
prt_str(out, " type=");
bch2_prt_jset_entry_type(out, entry->type);
}
if (!jset) {
prt_printf(out, " in superblock");
} else {
prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq));
if (entry)
prt_printf(out, " offset=%zi/%u",
(u64 *) entry - jset->_data,
le32_to_cpu(jset->u64s));
}
prt_str(out, ": ");
}
#define journal_entry_err(c, version, jset, entry, _err, msg, ...) \
({ \
struct printbuf _buf = PRINTBUF; \
\
journal_entry_err_msg(&_buf, version, jset, entry); \
prt_printf(&_buf, msg, ##__VA_ARGS__); \
\
switch (flags & BCH_VALIDATE_write) { \
case READ: \
mustfix_fsck_err(c, _err, "%s", _buf.buf); \
break; \
case WRITE: \
bch2_sb_error_count(c, BCH_FSCK_ERR_##_err); \
bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
if (bch2_fs_inconsistent(c)) { \
ret = -BCH_ERR_fsck_errors_not_fixed; \
goto fsck_err; \
} \
break; \
} \
\
printbuf_exit(&_buf); \
true; \
})
#define journal_entry_err_on(cond, ...) \
((cond) ? journal_entry_err(__VA_ARGS__) : false)
#define FSCK_DELETED_KEY 5
static int journal_validate_key(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned level, enum btree_id btree_id,
struct bkey_i *k,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
int write = flags & BCH_VALIDATE_write;
void *next = vstruct_next(entry);
int ret = 0;
if (journal_entry_err_on(!k->k.u64s,
c, version, jset, entry,
journal_entry_bkey_u64s_0,
"k->u64s 0")) {
entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
journal_entry_null_range(vstruct_next(entry), next);
return FSCK_DELETED_KEY;
}
if (journal_entry_err_on((void *) bkey_next(k) >
(void *) vstruct_next(entry),
c, version, jset, entry,
journal_entry_bkey_past_end,
"extends past end of journal entry")) {
entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
journal_entry_null_range(vstruct_next(entry), next);
return FSCK_DELETED_KEY;
}
if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
c, version, jset, entry,
journal_entry_bkey_bad_format,
"bad format %u", k->k.format)) {
le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
memmove(k, bkey_next(k), next - (void *) bkey_next(k));
journal_entry_null_range(vstruct_next(entry), next);
return FSCK_DELETED_KEY;
}
if (!write)
bch2_bkey_compat(level, btree_id, version, big_endian,
write, NULL, bkey_to_packed(k));
ret = bch2_bkey_validate(c, bkey_i_to_s_c(k),
__btree_node_type(level, btree_id), write);
if (ret == -BCH_ERR_fsck_delete_bkey) {
le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
memmove(k, bkey_next(k), next - (void *) bkey_next(k));
journal_entry_null_range(vstruct_next(entry), next);
return FSCK_DELETED_KEY;
}
if (ret)
goto fsck_err;
if (write)
bch2_bkey_compat(level, btree_id, version, big_endian,
write, NULL, bkey_to_packed(k));
fsck_err:
return ret;
}
static int journal_entry_btree_keys_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
struct bkey_i *k = entry->start;
while (k != vstruct_last(entry)) {
int ret = journal_validate_key(c, jset, entry,
entry->level,
entry->btree_id,
k, version, big_endian,
flags|BCH_VALIDATE_journal);
if (ret == FSCK_DELETED_KEY)
continue;
else if (ret)
return ret;
k = bkey_next(k);
}
return 0;
}
static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
bool first = true;
jset_entry_for_each_key(entry, k) {
if (!first) {
prt_newline(out);
bch2_prt_jset_entry_type(out, entry->type);
prt_str(out, ": ");
}
prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
first = false;
}
}
static int journal_entry_btree_root_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
struct bkey_i *k = entry->start;
int ret = 0;
if (journal_entry_err_on(!entry->u64s ||
le16_to_cpu(entry->u64s) != k->k.u64s,
c, version, jset, entry,
journal_entry_btree_root_bad_size,
"invalid btree root journal entry: wrong number of keys")) {
void *next = vstruct_next(entry);
/*
* we don't want to null out this jset_entry,
* just the contents, so that later we can tell
* we were _supposed_ to have a btree root
*/
entry->u64s = 0;
journal_entry_null_range(vstruct_next(entry), next);
return 0;
}
ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
version, big_endian, flags);
if (ret == FSCK_DELETED_KEY)
ret = 0;
fsck_err:
return ret;
}
static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
journal_entry_btree_keys_to_text(out, c, entry);
}
static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
/* obsolete, don't care: */
return 0;
}
static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
}
static int journal_entry_blacklist_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
int ret = 0;
if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
c, version, jset, entry,
journal_entry_blacklist_bad_size,
"invalid journal seq blacklist entry: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
}
fsck_err:
return ret;
}
static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
struct jset_entry_blacklist *bl =
container_of(entry, struct jset_entry_blacklist, entry);
prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
}
static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
struct jset_entry_blacklist_v2 *bl_entry;
int ret = 0;
if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
c, version, jset, entry,
journal_entry_blacklist_v2_bad_size,
"invalid journal seq blacklist entry: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
goto out;
}
bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);
if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
le64_to_cpu(bl_entry->end),
c, version, jset, entry,
journal_entry_blacklist_v2_start_past_end,
"invalid journal seq blacklist entry: start > end")) {
journal_entry_null_range(entry, vstruct_next(entry));
}
out:
fsck_err:
return ret;
}
static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
struct jset_entry_blacklist_v2 *bl =
container_of(entry, struct jset_entry_blacklist_v2, entry);
prt_printf(out, "start=%llu end=%llu",
le64_to_cpu(bl->start),
le64_to_cpu(bl->end));
}
static int journal_entry_usage_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
struct jset_entry_usage *u =
container_of(entry, struct jset_entry_usage, entry);
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
int ret = 0;
if (journal_entry_err_on(bytes < sizeof(*u),
c, version, jset, entry,
journal_entry_usage_bad_size,
"invalid journal entry usage: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
fsck_err:
return ret;
}
static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
struct jset_entry_usage *u =
container_of(entry, struct jset_entry_usage, entry);
prt_str(out, "type=");
bch2_prt_fs_usage_type(out, u->entry.btree_id);
prt_printf(out, " v=%llu", le64_to_cpu(u->v));
}
static int journal_entry_data_usage_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
struct jset_entry_data_usage *u =
container_of(entry, struct jset_entry_data_usage, entry);
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
struct printbuf err = PRINTBUF;
int ret = 0;
if (journal_entry_err_on(bytes < sizeof(*u) ||
bytes < sizeof(*u) + u->r.nr_devs,
c, version, jset, entry,
journal_entry_data_usage_bad_size,
"invalid journal entry usage: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
goto out;
}
if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c, &err),
c, version, jset, entry,
journal_entry_data_usage_bad_size,
"invalid journal entry usage: %s", err.buf)) {
journal_entry_null_range(entry, vstruct_next(entry));
goto out;
}
out:
fsck_err:
printbuf_exit(&err);
return ret;
}
static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
struct jset_entry_data_usage *u =
container_of(entry, struct jset_entry_data_usage, entry);
bch2_replicas_entry_to_text(out, &u->r);
prt_printf(out, "=%llu", le64_to_cpu(u->v));
}
static int journal_entry_clock_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
struct jset_entry_clock *clock =
container_of(entry, struct jset_entry_clock, entry);
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
int ret = 0;
if (journal_entry_err_on(bytes != sizeof(*clock),
c, version, jset, entry,
journal_entry_clock_bad_size,
"bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
if (journal_entry_err_on(clock->rw > 1,
c, version, jset, entry,
journal_entry_clock_bad_rw,
"bad rw")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
fsck_err:
return ret;
}
static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
struct jset_entry_clock *clock =
container_of(entry, struct jset_entry_clock, entry);
prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}
static int journal_entry_dev_usage_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
struct jset_entry_dev_usage *u =
container_of(entry, struct jset_entry_dev_usage, entry);
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
unsigned expected = sizeof(*u);
int ret = 0;
if (journal_entry_err_on(bytes < expected,
c, version, jset, entry,
journal_entry_dev_usage_bad_size,
"bad size (%u < %u)",
bytes, expected)) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
if (journal_entry_err_on(u->pad,
c, version, jset, entry,
journal_entry_dev_usage_bad_pad,
"bad pad")) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
fsck_err:
return ret;
}
static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
struct jset_entry_dev_usage *u =
container_of(entry, struct jset_entry_dev_usage, entry);
unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
if (vstruct_bytes(entry) < sizeof(*u))
return;
prt_printf(out, "dev=%u", le32_to_cpu(u->dev));
printbuf_indent_add(out, 2);
for (i = 0; i < nr_types; i++) {
prt_newline(out);
bch2_prt_data_type(out, i);
prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
le64_to_cpu(u->d[i].buckets),
le64_to_cpu(u->d[i].sectors),
le64_to_cpu(u->d[i].fragmented));
}
printbuf_indent_sub(out, 2);
}
static int journal_entry_log_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
return 0;
}
static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);
prt_printf(out, "%.*s", bytes, l->d);
}
static int journal_entry_overwrite_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
return journal_entry_btree_keys_validate(c, jset, entry,
version, big_endian, READ);
}
static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
journal_entry_btree_keys_to_text(out, c, entry);
}
static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
return journal_entry_btree_keys_validate(c, jset, entry,
version, big_endian, READ);
}
static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
journal_entry_btree_keys_to_text(out, c, entry);
}
static int journal_entry_datetime_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
unsigned bytes = vstruct_bytes(entry);
unsigned expected = 16;
int ret = 0;
if (journal_entry_err_on(bytes < expected,
c, version, jset, entry,
journal_entry_dev_usage_bad_size,
"bad size (%u < %u)",
bytes, expected)) {
journal_entry_null_range(entry, vstruct_next(entry));
return ret;
}
fsck_err:
return ret;
}
static void journal_entry_datetime_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
struct jset_entry_datetime *datetime =
container_of(entry, struct jset_entry_datetime, entry);
bch2_prt_datetime(out, le64_to_cpu(datetime->seconds));
}
struct jset_entry_ops {
int (*validate)(struct bch_fs *, struct jset *,
struct jset_entry *, unsigned, int,
enum bch_validate_flags);
void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};
static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr) \
[BCH_JSET_ENTRY_##f] = (struct jset_entry_ops) { \
.validate = journal_entry_##f##_validate, \
.to_text = journal_entry_##f##_to_text, \
},
BCH_JSET_ENTRY_TYPES()
#undef x
};
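/*
 * Illustrative expansion of the x-macro above: for the btree_keys entry
 * type it generates
 *
 *	[BCH_JSET_ENTRY_btree_keys] = (struct jset_entry_ops) {
 *		.validate	= journal_entry_btree_keys_validate,
 *		.to_text	= journal_entry_btree_keys_to_text,
 *	},
 *
 * so the dispatchers below reduce to a bounds-checked table lookup on
 * entry->type.
 */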
int bch2_journal_entry_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
enum bch_validate_flags flags)
{
return entry->type < BCH_JSET_ENTRY_NR
? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
version, big_endian, flags)
: 0;
}
void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
bch2_prt_jset_entry_type(out, entry->type);
if (entry->type < BCH_JSET_ENTRY_NR) {
prt_str(out, ": ");
bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
}
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
enum bch_validate_flags flags)
{
unsigned version = le32_to_cpu(jset->version);
int ret = 0;
vstruct_for_each(jset, entry) {
if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
c, version, jset, entry,
journal_entry_past_jset_end,
"journal entry extends past end of jset")) {
jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
break;
}
ret = bch2_journal_entry_validate(c, jset, entry,
version, JSET_BIG_ENDIAN(jset), flags);
if (ret)
break;
}
fsck_err:
return ret;
}
static int jset_validate(struct bch_fs *c,
struct bch_dev *ca,
struct jset *jset, u64 sector,
enum bch_validate_flags flags)
{
unsigned version;
int ret = 0;
if (le64_to_cpu(jset->magic) != jset_magic(c))
return JOURNAL_ENTRY_NONE;
version = le32_to_cpu(jset->version);
if (journal_entry_err_on(!bch2_version_compatible(version),
c, version, jset, NULL,
jset_unsupported_version,
"%s sector %llu seq %llu: incompatible journal entry version %u.%u",
ca ? ca->name : c->name,
sector, le64_to_cpu(jset->seq),
BCH_VERSION_MAJOR(version),
BCH_VERSION_MINOR(version))) {
/* don't try to continue: */
return -EINVAL;
}
if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
c, version, jset, NULL,
jset_unknown_csum,
"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
ca ? ca->name : c->name,
sector, le64_to_cpu(jset->seq),
JSET_CSUM_TYPE(jset)))
ret = JOURNAL_ENTRY_BAD;
/* last_seq is ignored when JSET_NO_FLUSH is true */
if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
c, version, jset, NULL,
jset_last_seq_newer_than_seq,
"invalid journal entry: last_seq > seq (%llu > %llu)",
le64_to_cpu(jset->last_seq),
le64_to_cpu(jset->seq))) {
jset->last_seq = jset->seq;
return JOURNAL_ENTRY_BAD;
}
ret = jset_validate_entries(c, jset, flags);
fsck_err:
return ret;
}
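/*
 * Version of the above for use while we're still reading journal buckets
 * and don't yet have a verified checksum: it only checks what's needed to
 * size and step over the entry (magic, version, length); full validation
 * happens in jset_validate() once every copy of the entry has been read.
 */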
static int jset_validate_early(struct bch_fs *c,
struct bch_dev *ca,
struct jset *jset, u64 sector,
unsigned bucket_sectors_left,
unsigned sectors_read)
{
size_t bytes = vstruct_bytes(jset);
unsigned version;
enum bch_validate_flags flags = BCH_VALIDATE_journal;
int ret = 0;
if (le64_to_cpu(jset->magic) != jset_magic(c))
return JOURNAL_ENTRY_NONE;
version = le32_to_cpu(jset->version);
if (journal_entry_err_on(!bch2_version_compatible(version),
c, version, jset, NULL,
jset_unsupported_version,
"%s sector %llu seq %llu: unknown journal entry version %u.%u",
ca ? ca->name : c->name,
sector, le64_to_cpu(jset->seq),
BCH_VERSION_MAJOR(version),
BCH_VERSION_MINOR(version))) {
/* don't try to continue: */
return -EINVAL;
}
if (bytes > (sectors_read << 9) &&
sectors_read < bucket_sectors_left)
return JOURNAL_ENTRY_REREAD;
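/*
 * If the entry claims to extend past the end of its bucket, clamp u64s to
 * what actually fits: e.g. with 4096 bytes left in the bucket and a
 * 4608-byte entry, u64s is reduced by (4608 - 4096) / 8 = 64.
 */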
if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
c, version, jset, NULL,
jset_past_bucket_end,
"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
ca ? ca->name : c->name,
sector, le64_to_cpu(jset->seq), bytes))
le32_add_cpu(&jset->u64s,
-((bytes - (bucket_sectors_left << 9)) / 8));
fsck_err:
return ret;
}
struct journal_read_buf {
void *data;
size_t size;
};
static int journal_read_buf_realloc(struct journal_read_buf *b,
size_t new_size)
{
void *n;
/* the bios are sized for this many pages, max: */
if (new_size > JOURNAL_ENTRY_SIZE_MAX)
return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
new_size = roundup_pow_of_two(new_size);
n = kvmalloc(new_size, GFP_KERNEL);
if (!n)
return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
kvfree(b->data);
b->data = n;
b->size = new_size;
return 0;
}
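/*
 * The read buffer starts at PAGE_SIZE (see bch2_journal_read_device()) and
 * is regrown to the next power of two whenever jset_validate_early()
 * returns JOURNAL_ENTRY_REREAD for an entry bigger than the buffer, capped
 * at JOURNAL_ENTRY_SIZE_MAX - the maximum the journal bios are sized for.
 */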
static int journal_read_bucket(struct bch_dev *ca,
struct journal_read_buf *buf,
struct journal_list *jlist,
unsigned bucket)
{
struct bch_fs *c = ca->fs;
struct journal_device *ja = &ca->journal;
struct jset *j = NULL;
unsigned sectors, sectors_read = 0;
u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
end = offset + ca->mi.bucket_size;
bool saw_bad = false, csum_good;
struct printbuf err = PRINTBUF;
int ret = 0;
pr_debug("reading %u", bucket);
while (offset < end) {
if (!sectors_read) {
struct bio *bio;
unsigned nr_bvecs;
reread:
sectors_read = min_t(unsigned,
end - offset, buf->size >> 9);
nr_bvecs = buf_pages(buf->data, sectors_read << 9);
bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
if (!bio)
return -BCH_ERR_ENOMEM_journal_read_bucket;
bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
bio->bi_iter.bi_sector = offset;
bch2_bio_map(bio, buf->data, sectors_read << 9);
ret = submit_bio_wait(bio);
kfree(bio);
if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
"journal read error: sector %llu",
offset) ||
bch2_meta_read_fault("journal")) {
/*
* We don't error out of the recovery process
* here, since the relevant journal entry may be
* found on a different device, and missing or
* no journal entries will be handled later
*/
goto out;
}
j = buf->data;
}
ret = jset_validate_early(c, ca, j, offset,
end - offset, sectors_read);
switch (ret) {
case 0:
sectors = vstruct_sectors(j, c->block_bits);
break;
case JOURNAL_ENTRY_REREAD:
if (vstruct_bytes(j) > buf->size) {
ret = journal_read_buf_realloc(buf,
vstruct_bytes(j));
if (ret)
goto err;
}
goto reread;
case JOURNAL_ENTRY_NONE:
if (!saw_bad)
goto out;
/*
* On checksum error we don't really trust the size
* field of the journal entry we read, so try reading
* again at next block boundary:
*/
sectors = block_sectors(c);
goto next_block;
default:
goto err;
}
if (le64_to_cpu(j->seq) > ja->highest_seq_found) {
ja->highest_seq_found = le64_to_cpu(j->seq);
ja->cur_idx = bucket;
ja->sectors_free = ca->mi.bucket_size -
bucket_remainder(ca, offset) - sectors;
}
/*
* This happens sometimes if we don't have discards on -
* when we've partially overwritten a bucket with new
* journal entries. We don't need the rest of the
* bucket:
*/
if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
goto out;
ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
enum bch_csum_type csum_type = JSET_CSUM_TYPE(j);
struct bch_csum csum;
csum_good = jset_csum_good(c, j, &csum);
if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
"%s",
(printbuf_reset(&err),
prt_str(&err, "journal "),
bch2_csum_err_msg(&err, csum_type, j->csum, csum),
err.buf)))
saw_bad = true;
ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
j->encrypted_start,
vstruct_end(j) - (void *) j->encrypted_start);
bch2_fs_fatal_err_on(ret, c, "decrypting journal entry: %s", bch2_err_str(ret));
mutex_lock(&jlist->lock);
ret = journal_entry_add(c, ca, (struct journal_ptr) {
.csum_good = csum_good,
.dev = ca->dev_idx,
.bucket = bucket,
.bucket_offset = offset -
bucket_to_sector(ca, ja->buckets[bucket]),
.sector = offset,
}, jlist, j);
mutex_unlock(&jlist->lock);
switch (ret) {
case JOURNAL_ENTRY_ADD_OK:
break;
case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
break;
default:
goto err;
}
next_block:
pr_debug("next");
offset += sectors;
sectors_read -= sectors;
j = ((void *) j) + (sectors << 9);
}
out:
ret = 0;
err:
printbuf_exit(&err);
return ret;
}
static CLOSURE_CALLBACK(bch2_journal_read_device)
{
closure_type(ja, struct journal_device, read);
struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
struct bch_fs *c = ca->fs;
struct journal_list *jlist =
container_of(cl->parent, struct journal_list, cl);
struct journal_read_buf buf = { NULL, 0 };
unsigned i;
int ret = 0;
if (!ja->nr)
goto out;
ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
if (ret)
goto err;
pr_debug("%u journal buckets", ja->nr);
for (i = 0; i < ja->nr; i++) {
ret = journal_read_bucket(ca, &buf, jlist, i);
if (ret)
goto err;
}
/*
* Set dirty_idx to indicate the entire journal is full and needs to be
* reclaimed - journal reclaim will immediately reclaim whatever isn't
* pinned when it first runs:
*/
ja->discard_idx = ja->dirty_idx_ondisk =
ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
kvfree(buf.data);
percpu_ref_put(&ca->io_ref);
closure_return(cl);
return;
err:
mutex_lock(&jlist->lock);
jlist->ret = ret;
mutex_unlock(&jlist->lock);
goto out;
}
int bch2_journal_read(struct bch_fs *c,
u64 *last_seq,
u64 *blacklist_seq,
u64 *start_seq)
{
struct journal_list jlist;
struct journal_replay *i, **_i, *prev = NULL;
struct genradix_iter radix_iter;
struct printbuf buf = PRINTBUF;
bool degraded = false, last_write_torn = false;
u64 seq;
int ret = 0;
closure_init_stack(&jlist.cl);
mutex_init(&jlist.lock);
jlist.last_seq = 0;
jlist.ret = 0;
for_each_member_device(c, ca) {
if (!c->opts.fsck &&
!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
continue;
if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
ca->mi.state == BCH_MEMBER_STATE_ro) &&
percpu_ref_tryget(&ca->io_ref))
closure_call(&ca->journal.read,
bch2_journal_read_device,
system_unbound_wq,
&jlist.cl);
else
degraded = true;
}
closure_sync(&jlist.cl);
if (jlist.ret)
return jlist.ret;
*last_seq = 0;
*start_seq = 0;
*blacklist_seq = 0;
/*
* Find most recent flush entry, and ignore newer non flush entries -
* those entries will be blacklisted:
*/
genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
enum bch_validate_flags flags = BCH_VALIDATE_journal;
i = *_i;
if (journal_replay_ignore(i))
continue;
if (!*start_seq)
*blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1;
if (JSET_NO_FLUSH(&i->j)) {
i->ignore_blacklisted = true;
continue;
}
if (!last_write_torn && !i->csum_good) {
last_write_torn = true;
i->ignore_blacklisted = true;
continue;
}
if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
c, le32_to_cpu(i->j.version), &i->j, NULL,
jset_last_seq_newer_than_seq,
"invalid journal entry: last_seq > seq (%llu > %llu)",
le64_to_cpu(i->j.last_seq),
le64_to_cpu(i->j.seq)))
i->j.last_seq = i->j.seq;
*last_seq = le64_to_cpu(i->j.last_seq);
*blacklist_seq = le64_to_cpu(i->j.seq) + 1;
break;
}
if (!*start_seq) {
bch_info(c, "journal read done, but no entries found");
return 0;
}
if (!*last_seq) {
fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
"journal read done, but no entries found after dropping non-flushes");
return 0;
}
bch_info(c, "journal read done, replaying entries %llu-%llu",
*last_seq, *blacklist_seq - 1);
if (*start_seq != *blacklist_seq)
bch_info(c, "dropped unflushed entries %llu-%llu",
*blacklist_seq, *start_seq - 1);
/* Drop blacklisted entries and entries older than last_seq: */
genradix_for_each(&c->journal_entries, radix_iter, _i) {
i = *_i;
if (journal_replay_ignore(i))
continue;
seq = le64_to_cpu(i->j.seq);
if (seq < *last_seq) {
journal_replay_free(c, i, false);
continue;
}
if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
jset_seq_blacklisted,
"found blacklisted journal entry %llu", seq);
i->ignore_blacklisted = true;
}
}
/* Check for missing entries: */
seq = *last_seq;
genradix_for_each(&c->journal_entries, radix_iter, _i) {
i = *_i;
if (journal_replay_ignore(i))
continue;
BUG_ON(seq > le64_to_cpu(i->j.seq));
while (seq < le64_to_cpu(i->j.seq)) {
u64 missing_start, missing_end;
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
while (seq < le64_to_cpu(i->j.seq) &&
bch2_journal_seq_is_blacklisted(c, seq, false))
seq++;
if (seq == le64_to_cpu(i->j.seq))
break;
missing_start = seq;
while (seq < le64_to_cpu(i->j.seq) &&
!bch2_journal_seq_is_blacklisted(c, seq, false))
seq++;
if (prev) {
bch2_journal_ptrs_to_text(&buf1, c, prev);
prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
} else
prt_printf(&buf1, "(none)");
bch2_journal_ptrs_to_text(&buf2, c, i);
missing_end = seq - 1;
fsck_err(c, journal_entries_missing,
"journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
" prev at %s\n"
" next at %s, continue?",
missing_start, missing_end,
*last_seq, *blacklist_seq - 1,
buf1.buf, buf2.buf);
printbuf_exit(&buf1);
printbuf_exit(&buf2);
}
prev = i;
seq++;
}
genradix_for_each(&c->journal_entries, radix_iter, _i) {
struct bch_replicas_padded replicas = {
.e.data_type = BCH_DATA_journal,
.e.nr_devs = 0,
.e.nr_required = 1,
};
i = *_i;
if (journal_replay_ignore(i))
continue;
darray_for_each(i->ptrs, ptr) {
struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
if (!ptr->csum_good)
bch_err_dev_offset(ca, ptr->sector,
"invalid journal checksum, seq %llu%s",
le64_to_cpu(i->j.seq),
i->csum_good ? " (had good copy on another device)" : "");
}
ret = jset_validate(c,
bch2_dev_have_ref(c, i->ptrs.data[0].dev),
&i->j,
i->ptrs.data[0].sector,
READ);
if (ret)
goto err;
darray_for_each(i->ptrs, ptr)
replicas_entry_add_dev(&replicas.e, ptr->dev);
bch2_replicas_entry_sort(&replicas.e);
printbuf_reset(&buf);
bch2_replicas_entry_to_text(&buf, &replicas.e);
if (!degraded &&
!bch2_replicas_marked(c, &replicas.e) &&
(le64_to_cpu(i->j.seq) == *last_seq ||
fsck_err(c, journal_entry_replicas_not_marked,
"superblock not marked as containing replicas for journal entry %llu\n %s",
le64_to_cpu(i->j.seq), buf.buf))) {
ret = bch2_mark_replicas(c, &replicas.e);
if (ret)
goto err;
}
}
err:
fsck_err:
printbuf_exit(&buf);
return ret;
}
/* journal write: */
static void __journal_write_alloc(struct journal *j,
struct journal_buf *w,
struct dev_alloc_list *devs_sorted,
unsigned sectors,
unsigned *replicas,
unsigned replicas_want)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_device *ja;
struct bch_dev *ca;
unsigned i;
if (*replicas >= replicas_want)
return;
for (i = 0; i < devs_sorted->nr; i++) {
ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
if (!ca)
continue;
ja = &ca->journal;
/*
* Check that we can use this device, and aren't already using
* it:
*/
if (!ca->mi.durability ||
ca->mi.state != BCH_MEMBER_STATE_rw ||
!ja->nr ||
bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) ||
sectors > ja->sectors_free)
continue;
bch2_dev_stripe_increment(ca, &j->wp.stripe);
bch2_bkey_append_ptr(&w->key,
(struct bch_extent_ptr) {
.offset = bucket_to_sector(ca,
ja->buckets[ja->cur_idx]) +
ca->mi.bucket_size -
ja->sectors_free,
.dev = ca->dev_idx,
});
ja->sectors_free -= sectors;
ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
*replicas += ca->mi.durability;
if (*replicas >= replicas_want)
break;
}
}
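/*
 * Replica accounting above is in units of durability: e.g. with
 * replicas_want == 2, two durability-1 devices are needed, but a single
 * durability-2 device satisfies the allocation on its own.
 */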
/**
* journal_write_alloc - decide where to write next journal entry
*
* @j: journal object
* @w: journal buf (entry to be written)
*
* Returns: 0 on success, or -EROFS on failure
*/
static int journal_write_alloc(struct journal *j, struct journal_buf *w)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_devs_mask devs;
struct journal_device *ja;
struct bch_dev *ca;
struct dev_alloc_list devs_sorted;
unsigned sectors = vstruct_sectors(w->data, c->block_bits);
unsigned target = c->opts.metadata_target ?:
c->opts.foreground_target;
unsigned i, replicas = 0, replicas_want =
READ_ONCE(c->opts.metadata_replicas);
unsigned replicas_need = min_t(unsigned, replicas_want,
READ_ONCE(c->opts.metadata_replicas_required));
rcu_read_lock();
retry:
devs = target_rw_devs(c, BCH_DATA_journal, target);
devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
__journal_write_alloc(j, w, &devs_sorted,
sectors, &replicas, replicas_want);
if (replicas >= replicas_want)
goto done;
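/*
 * Not enough devices had room left in their current journal bucket:
 * advance each device that has a discarded bucket available to a
 * fresh bucket, then retry the allocation:
 */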
for (i = 0; i < devs_sorted.nr; i++) {
ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
if (!ca)
continue;
ja = &ca->journal;
if (sectors > ja->sectors_free &&
sectors <= ca->mi.bucket_size &&
bch2_journal_dev_buckets_available(j, ja,
journal_space_discarded)) {
ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
ja->sectors_free = ca->mi.bucket_size;
/*
* ja->bucket_seq[ja->cur_idx] must always have
* something sensible:
*/
ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
}
}
__journal_write_alloc(j, w, &devs_sorted,
sectors, &replicas, replicas_want);
if (replicas < replicas_want && target) {
/* Retry from all devices: */
target = 0;
goto retry;
}
done:
rcu_read_unlock();
BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
return replicas >= replicas_need ? 0 : -EROFS;
}
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
/* we aren't holding j->lock: */
unsigned new_size = READ_ONCE(j->buf_size_want);
void *new_buf;
if (buf->buf_size >= new_size)
return;
size_t btree_write_buffer_size = new_size / 64;
if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
return;
new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
if (!new_buf)
return;
memcpy(new_buf, buf->data, buf->buf_size);
spin_lock(&j->lock);
swap(buf->data, new_buf);
swap(buf->buf_size, new_size);
spin_unlock(&j->lock);
kvfree(new_buf);
}
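/*
 * Journal buffers form a small ring indexed by the low bits of the
 * sequence number, so (seq & JOURNAL_BUF_MASK) maps any in-flight
 * sequence number to its buffer:
 */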
static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}
static CLOSURE_CALLBACK(journal_write_done)
{
closure_type(w, struct journal_buf, io);
struct journal *j = container_of(w, struct journal, buf[w->idx]);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_replicas_padded replicas;
union journal_res_state old, new;
u64 seq = le64_to_cpu(w->data->seq);
int err = 0;
bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
? j->flush_write_time
: j->noflush_write_time, j->write_start_time);
if (!w->devs_written.nr) {
bch_err(c, "unable to write journal to sufficient devices");
err = -EIO;
} else {
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
w->devs_written);
if (bch2_mark_replicas(c, &replicas.e))
err = -EIO;
}
if (err)
bch2_fatal_error(c);
closure_debug_destroy(cl);
spin_lock(&j->lock);
if (seq >= j->pin.front)
journal_seq_pin(j, seq)->devs = w->devs_written;
if (err && (!j->err_seq || seq < j->err_seq))
j->err_seq = seq;
w->write_done = true;
bool completed = false;
for (seq = journal_last_unwritten_seq(j);
seq <= journal_cur_seq(j);
seq++) {
w = j->buf + (seq & JOURNAL_BUF_MASK);
if (!w->write_done)
break;
if (!j->err_seq && !JSET_NO_FLUSH(w->data)) {
j->flushed_seq_ondisk = seq;
j->last_seq_ondisk = w->last_seq;
bch2_do_discards(c);
closure_wake_up(&c->freelist_wait);
bch2_reset_alloc_cursors(c);
}
j->seq_ondisk = seq;
/*
* Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
* more buckets:
*
* Must come before signaling write completion, for
* bch2_fs_journal_stop():
*/
if (j->watermark != BCH_WATERMARK_stripe)
journal_reclaim_kick(&c->journal);
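/*
 * Release this buffer for reuse by advancing
 * reservations.unwritten_idx; a cmpxchg loop, since the reservation
 * state word is updated concurrently:
 */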
old.v = atomic64_read(&j->reservations.counter);
do {
new.v = old.v;
BUG_ON(journal_state_count(new, new.unwritten_idx));
BUG_ON(new.unwritten_idx != (seq & JOURNAL_BUF_MASK));
new.unwritten_idx++;
} while (!atomic64_try_cmpxchg(&j->reservations.counter,
&old.v, new.v));
closure_wake_up(&w->wait);
completed = true;
}
if (completed) {
bch2_journal_reclaim_fast(j);
bch2_journal_space_available(j);
track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], false);
journal_wake(j);
}
if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
struct journal_buf *buf = journal_cur_buf(j);
long delta = buf->expires - jiffies;
/*
* We don't close a journal entry to write it while there are
* previous entries still in flight - the current journal entry
* might want to be written now:
*/
mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
}
/*
* We don't typically trigger journal writes from here - the next journal
* write will be triggered immediately after the previous one is
* allocated, in bch2_journal_write() - but the journal write error path
* is special:
*/
bch2_journal_do_writes(j);
spin_unlock(&j->lock);
}
static void journal_write_endio(struct bio *bio)
{
struct journal_bio *jbio = container_of(bio, struct journal_bio, bio);
struct bch_dev *ca = jbio->ca;
struct journal *j = &ca->fs->journal;
struct journal_buf *w = j->buf + jbio->buf_idx;
if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
"error writing journal entry %llu: %s",
le64_to_cpu(w->data->seq),
bch2_blk_status_to_str(bio->bi_status)) ||
bch2_meta_write_fault("journal")) {
unsigned long flags;
spin_lock_irqsave(&j->err_lock, flags);
bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
spin_unlock_irqrestore(&j->err_lock, flags);
}
closure_put(&w->io);
percpu_ref_put(&ca->io_ref);
}
static CLOSURE_CALLBACK(journal_write_submit)
{
closure_type(w, struct journal_buf, io);
struct journal *j = container_of(w, struct journal, buf[w->idx]);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
unsigned sectors = vstruct_sectors(w->data, c->block_bits);
extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
if (!ca) {
/* XXX: fix this */
bch_err(c, "missing device for journal write\n");
continue;
}
this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
sectors);
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio[w->idx]->bio;
bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = ptr->offset;
bio->bi_end_io = journal_write_endio;
bio->bi_private = ca;
BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
ca->prev_journal_sector = bio->bi_iter.bi_sector;
if (!JSET_NO_FLUSH(w->data))
bio->bi_opf |= REQ_FUA;
if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
bio->bi_opf |= REQ_PREFLUSH;
bch2_bio_map(bio, w->data, sectors << 9);
trace_and_count(c, journal_write, bio);
closure_bio_submit(bio, cl);
ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
}
continue_at(cl, journal_write_done, j->wq);
}
static CLOSURE_CALLBACK(journal_write_preflush)
{
closure_type(w, struct journal_buf, io);
struct journal *j = container_of(w, struct journal, buf[w->idx]);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
spin_lock(&j->lock);
if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
closure_wait(&j->async_wait, cl);
spin_unlock(&j->lock);
continue_at(cl, journal_write_preflush, j->wq);
return;
}
spin_unlock(&j->lock);
}
if (w->separate_flush) {
for_each_rw_member(c, ca) {
percpu_ref_get(&ca->io_ref);
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio[w->idx]->bio;
bio_reset(bio, ca->disk_sb.bdev,
REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
bio->bi_end_io = journal_write_endio;
bio->bi_private = ca;
closure_bio_submit(bio, cl);
}
continue_at(cl, journal_write_submit, j->wq);
} else {
/*
* no need to punt to another work item if we're not waiting on
* preflushes
*/
journal_write_submit(&cl->work);
}
}
static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct jset_entry *start, *end;
struct jset *jset = w->data;
struct journal_keys_to_wb wb = { NULL };
unsigned sectors, bytes, u64s;
unsigned long btree_roots_have = 0;
bool validate_before_checksum = false;
u64 seq = le64_to_cpu(jset->seq);
int ret;
/*
* Simple compaction, dropping empty jset_entries (from journal
* reservations that weren't fully used) and merging jset_entries that
* can be.
*
* If we wanted to be really fancy here, we could sort all the keys in
* the jset and drop keys that were overwritten - probably not worth it:
*/
vstruct_for_each(jset, i) {
unsigned u64s = le16_to_cpu(i->u64s);
/* Empty entry: */
if (!u64s)
continue;
/*
* New btree roots are set by journalling them; when the journal
* entry gets written we have to propagate them to
* c->btree_roots
*
* But, every journal entry we write has to contain all the
* btree roots (at least for now); so after we copy btree roots
* to c->btree_roots we have to get any missing btree roots and
* add them to this journal entry:
*/
switch (i->type) {
case BCH_JSET_ENTRY_btree_root:
bch2_journal_entry_to_btree_root(c, i);
__set_bit(i->btree_id, &btree_roots_have);
break;
case BCH_JSET_ENTRY_write_buffer_keys:
EBUG_ON(!w->need_flush_to_write_buffer);
if (!wb.wb)
bch2_journal_keys_to_write_buffer_start(c, &wb, seq);
jset_entry_for_each_key(i, k) {
ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
if (ret) {
bch2_fs_fatal_error(c, "flushing journal keys to btree write buffer: %s",
bch2_err_str(ret));
bch2_journal_keys_to_write_buffer_end(c, &wb);
return ret;
}
}
i->type = BCH_JSET_ENTRY_btree_keys;
break;
}
}
if (wb.wb) {
ret = bch2_journal_keys_to_write_buffer_end(c, &wb);
if (ret) {
bch2_fs_fatal_error(c, "error flushing journal keys to btree write buffer: %s",
bch2_err_str(ret));
return ret;
}
}
spin_lock(&c->journal.lock);
w->need_flush_to_write_buffer = false;
spin_unlock(&c->journal.lock);
start = end = vstruct_last(jset);
end = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);
struct jset_entry_datetime *d =
container_of(jset_entry_init(&end, sizeof(*d)), struct jset_entry_datetime, entry);
d->entry.type = BCH_JSET_ENTRY_datetime;
d->seconds = cpu_to_le64(ktime_get_real_seconds());
bch2_journal_super_entries_add_common(c, &end, seq);
u64s = (u64 *) end - (u64 *) start;
WARN_ON(u64s > j->entry_u64s_reserved);
le32_add_cpu(&jset->u64s, u64s);
sectors = vstruct_sectors(jset, c->block_bits);
bytes = vstruct_bytes(jset);
if (sectors > w->sectors) {
bch2_fs_fatal_error(c, ": journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
vstruct_bytes(jset), w->sectors << 9,
u64s, w->u64s_reserved, j->entry_u64s_reserved);
return -EINVAL;
}
jset->magic = cpu_to_le64(jset_magic(c));
jset->version = cpu_to_le32(c->sb.version);
SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
j->last_empty_seq = seq;
if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
validate_before_checksum = true;
if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
validate_before_checksum = true;
if (validate_before_checksum &&
(ret = jset_validate(c, NULL, jset, 0, WRITE)))
return ret;
ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
jset->encrypted_start,
vstruct_end(jset) - (void *) jset->encrypted_start);
if (bch2_fs_fatal_err_on(ret, c, "decrypting journal entry: %s", bch2_err_str(ret)))
return ret;
jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
journal_nonce(jset), jset);
if (!validate_before_checksum &&
(ret = jset_validate(c, NULL, jset, 0, WRITE)))
return ret;
memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
return 0;
}
static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
int error = bch2_journal_error(j);
/*
* If the journal is in an error state - we did an emergency shutdown -
* we prefer to continue doing journal writes. We just mark them as
* noflush so they'll never be used, but they'll still be visible to the
* list_journal tool - this helps in debugging.
*
* There's a caveat: the first journal write after marking the
* superblock dirty must always be a flush write, because on startup
* from a clean shutdown we didn't necessarily read the journal and the
* new journal write might overwrite whatever was in the journal
* previously - we can't leave the journal without any flush writes in
* it.
*
* So if we're in an error state, and we're still starting up, we don't
* write anything at all.
*/
if (error && test_bit(JOURNAL_need_flush_write, &j->flags))
return -EIO;
if (error ||
w->noflush ||
(!w->must_flush &&
time_before(jiffies, j->last_flush_write +
msecs_to_jiffies(c->opts.journal_flush_delay)) &&
test_bit(JOURNAL_may_skip_flush, &j->flags))) {
w->noflush = true;
SET_JSET_NO_FLUSH(w->data, true);
w->data->last_seq = 0;
w->last_seq = 0;
j->nr_noflush_writes++;
} else {
w->must_flush = true;
j->last_flush_write = jiffies;
j->nr_flush_writes++;
clear_bit(JOURNAL_need_flush_write, &j->flags);
}
return 0;
}
CLOSURE_CALLBACK(bch2_journal_write)
{
closure_type(w, struct journal_buf, io);
struct journal *j = container_of(w, struct journal, buf[w->idx]);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_replicas_padded replicas;
unsigned nr_rw_members = 0;
int ret;
for_each_rw_member(c, ca)
nr_rw_members++;
BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
BUG_ON(!w->write_started);
BUG_ON(w->write_allocated);
BUG_ON(w->write_done);
j->write_start_time = local_clock();
spin_lock(&j->lock);
if (nr_rw_members > 1)
w->separate_flush = true;
ret = bch2_journal_write_pick_flush(j, w);
spin_unlock(&j->lock);
if (ret)
goto err;
mutex_lock(&j->buf_lock);
journal_buf_realloc(j, w);
ret = bch2_journal_write_prep(j, w);
mutex_unlock(&j->buf_lock);
if (ret)
goto err;
j->entry_bytes_written += vstruct_bytes(w->data);
while (1) {
spin_lock(&j->lock);
ret = journal_write_alloc(j, w);
if (!ret || !j->can_discard)
break;
spin_unlock(&j->lock);
bch2_journal_do_discards(j);
}
if (ret) {
struct printbuf buf = PRINTBUF;
buf.atomic++;
prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu: %s"),
le64_to_cpu(w->data->seq),
bch2_err_str(ret));
__bch2_journal_debug_to_text(&buf, j);
spin_unlock(&j->lock);
bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_exit(&buf);
goto err;
}
/*
* write is allocated, no longer need to account for it in
* bch2_journal_space_available():
*/
w->sectors = 0;
w->write_allocated = true;
/*
* journal entry has been compacted and allocated, recalculate space
* available:
*/
bch2_journal_space_available(j);
bch2_journal_do_writes(j);
spin_unlock(&j->lock);
w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
if (c->opts.nochanges)
goto no_io;
/*
* Mark journal replicas before we submit the write to guarantee
* recovery will find the journal entries after a crash.
*/
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
w->devs_written);
ret = bch2_mark_replicas(c, &replicas.e);
if (ret)
goto err;
if (!JSET_NO_FLUSH(w->data))
continue_at(cl, journal_write_preflush, j->wq);
else
continue_at(cl, journal_write_submit, j->wq);
return;
no_io:
continue_at(cl, journal_write_done, j->wq);
return;
err:
bch2_fatal_error(c);
continue_at(cl, journal_write_done, j->wq);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* AM43xx Clock domains framework
*
* Copyright (C) 2013 Texas Instruments, Inc.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include "clockdomain.h"
#include "prcm44xx.h"
#include "prcm43xx.h"
static struct clockdomain l4_cefuse_43xx_clkdm = {
.name = "l4_cefuse_clkdm",
.pwrdm = { .name = "cefuse_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_CEFUSE_INST,
.clkdm_offs = AM43XX_CM_CEFUSE_CEFUSE_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain mpu_43xx_clkdm = {
.name = "mpu_clkdm",
.pwrdm = { .name = "mpu_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_MPU_INST,
.clkdm_offs = AM43XX_CM_MPU_MPU_CDOFFS,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain l4ls_43xx_clkdm = {
.name = "l4ls_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_L4LS_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain tamper_43xx_clkdm = {
.name = "tamper_clkdm",
.pwrdm = { .name = "tamper_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_TAMPER_INST,
.clkdm_offs = AM43XX_CM_TAMPER_TAMPER_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l4_rtc_43xx_clkdm = {
.name = "l4_rtc_clkdm",
.pwrdm = { .name = "rtc_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_RTC_INST,
.clkdm_offs = AM43XX_CM_RTC_RTC_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain pruss_ocp_43xx_clkdm = {
.name = "pruss_ocp_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_ICSS_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain ocpwp_l3_43xx_clkdm = {
.name = "ocpwp_l3_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_OCPWP_L3_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3s_tsc_43xx_clkdm = {
.name = "l3s_tsc_clkdm",
.pwrdm = { .name = "wkup_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_WKUP_INST,
.clkdm_offs = AM43XX_CM_WKUP_L3S_TSC_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain lcdc_43xx_clkdm = {
.name = "lcdc_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_LCDC_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain dss_43xx_clkdm = {
.name = "dss_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_DSS_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3_aon_43xx_clkdm = {
.name = "l3_aon_clkdm",
.pwrdm = { .name = "wkup_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_WKUP_INST,
.clkdm_offs = AM43XX_CM_WKUP_L3_AON_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain emif_43xx_clkdm = {
.name = "emif_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_EMIF_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l4_wkup_aon_43xx_clkdm = {
.name = "l4_wkup_aon_clkdm",
.pwrdm = { .name = "wkup_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_WKUP_INST,
.clkdm_offs = AM43XX_CM_WKUP_L4_WKUP_AON_CDOFFS,
};
static struct clockdomain l3_43xx_clkdm = {
.name = "l3_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_L3_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l4_wkup_43xx_clkdm = {
.name = "l4_wkup_clkdm",
.pwrdm = { .name = "wkup_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_WKUP_INST,
.clkdm_offs = AM43XX_CM_WKUP_WKUP_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain cpsw_125mhz_43xx_clkdm = {
.name = "cpsw_125mhz_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_CPSW_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain gfx_l3_43xx_clkdm = {
.name = "gfx_l3_clkdm",
.pwrdm = { .name = "gfx_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_GFX_INST,
.clkdm_offs = AM43XX_CM_GFX_GFX_L3_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3s_43xx_clkdm = {
.name = "l3s_clkdm",
.pwrdm = { .name = "per_pwrdm" },
.prcm_partition = AM43XX_CM_PARTITION,
.cm_inst = AM43XX_CM_PER_INST,
.clkdm_offs = AM43XX_CM_PER_L3S_CDOFFS,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain *clockdomains_am43xx[] __initdata = {
&l4_cefuse_43xx_clkdm,
&mpu_43xx_clkdm,
&l4ls_43xx_clkdm,
&tamper_43xx_clkdm,
&l4_rtc_43xx_clkdm,
&pruss_ocp_43xx_clkdm,
&ocpwp_l3_43xx_clkdm,
&l3s_tsc_43xx_clkdm,
&lcdc_43xx_clkdm,
&dss_43xx_clkdm,
&l3_aon_43xx_clkdm,
&emif_43xx_clkdm,
&l4_wkup_aon_43xx_clkdm,
&l3_43xx_clkdm,
&l4_wkup_43xx_clkdm,
&cpsw_125mhz_43xx_clkdm,
&gfx_l3_43xx_clkdm,
&l3s_43xx_clkdm,
NULL
};
void __init am43xx_clockdomains_init(void)
{
clkdm_register_platform_funcs(&am43xx_clkdm_operations);
clkdm_register_clkdms(clockdomains_am43xx);
clkdm_complete_init();
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
*
* Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
* Author: Srinivas Kandagatla <[email protected]>
* Contributors: Giuseppe Cavallaro <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include "stmmac_platform.h"
#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
#define DWMAC_25MHZ 25000000
#define DWMAC_2_5MHZ 2500000
#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
iface == PHY_INTERFACE_MODE_RGMII_ID || \
iface == PHY_INTERFACE_MODE_RGMII_RXID || \
iface == PHY_INTERFACE_MODE_RGMII_TXID)
#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
/* STiH4xx register definitions (STiH407/STiH410 families)
*
* Below table summarizes the clock requirement and clock sources for
* supported phy interface modes with link speeds.
* ________________________________________________
*| PHY_MODE | 1000 Mbit Link | 100 Mbit Link |
* ------------------------------------------------
*| MII | n/a | 25MHz |
*| | | txclk |
* ------------------------------------------------
*| GMII | 125MHz | 25MHz |
*| | clk-125/txclk | txclk |
* ------------------------------------------------
*| RGMII | 125MHz | 25MHz |
*| | clk-125/txclk | clkgen |
*| | clkgen | |
* ------------------------------------------------
*| RMII | n/a | 25MHz |
*| | | clkgen/phyclk-in |
* ------------------------------------------------
*
* Register Configuration
*-------------------------------
* src |BIT(8)| BIT(7)| BIT(6)|
*-------------------------------
* txclk | 0 | n/a | 1 |
*-------------------------------
* ck_125| 0 | n/a | 0 |
*-------------------------------
* phyclk| 1 | 0 | n/a |
*-------------------------------
* clkgen| 1 | 1 | n/a |
*-------------------------------
*/
#define STIH4XX_RETIME_SRC_MASK GENMASK(8, 6)
#define STIH4XX_ETH_SEL_TX_RETIME_CLK BIT(8)
#define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
#define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
#define ENMII_MASK GENMASK(5, 5)
#define ENMII BIT(5)
#define EN_MASK GENMASK(1, 1)
#define EN BIT(1)
/*
* 3 bits [4:2]
* 000-GMII/MII
* 001-RGMII
* 010-SGMII
* 100-RMII
*/
#define MII_PHY_SEL_MASK GENMASK(4, 2)
#define ETH_PHY_SEL_RMII BIT(4)
#define ETH_PHY_SEL_SGMII BIT(3)
#define ETH_PHY_SEL_RGMII BIT(2)
#define ETH_PHY_SEL_GMII 0x0
#define ETH_PHY_SEL_MII 0x0
struct sti_dwmac {
phy_interface_t interface; /* MII interface */
bool ext_phyclk; /* Clock from external PHY */
u32 tx_retime_src; /* TXCLK retiming */
struct clk *clk; /* PHY clock */
u32 ctrl_reg; /* GMAC glue-logic control register */
int clk_sel_reg; /* GMAC ext clk selection register */
struct regmap *regmap;
bool gmac_en;
u32 speed;
void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
};
struct sti_dwmac_of_data {
void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
};
static u32 phy_intf_sels[] = {
[PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
[PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
[PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
[PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
[PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
[PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
};
enum {
TX_RETIME_SRC_NA = 0,
TX_RETIME_SRC_TXCLK = 1,
TX_RETIME_SRC_CLK_125,
TX_RETIME_SRC_PHYCLK,
TX_RETIME_SRC_CLKGEN,
};
static u32 stih4xx_tx_retime_val[] = {
[TX_RETIME_SRC_TXCLK] = STIH4XX_ETH_SEL_TXCLK_NOT_CLK125,
[TX_RETIME_SRC_CLK_125] = 0x0,
[TX_RETIME_SRC_PHYCLK] = STIH4XX_ETH_SEL_TX_RETIME_CLK,
[TX_RETIME_SRC_CLKGEN] = STIH4XX_ETH_SEL_TX_RETIME_CLK
| STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};
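/*
 * These values follow the retiming register table above: e.g.
 * TX_RETIME_SRC_CLKGEN sets BIT(8) (retime clk) and BIT(7) (internal,
 * not external phyclk), while TX_RETIME_SRC_CLK_125 leaves all three
 * bits clear.
 */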
static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
{
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
u32 reg = dwmac->ctrl_reg;
u32 freq = 0;
if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
src = TX_RETIME_SRC_TXCLK;
} else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
if (dwmac->ext_phyclk) {
src = TX_RETIME_SRC_PHYCLK;
} else {
src = TX_RETIME_SRC_CLKGEN;
freq = DWMAC_50MHZ;
}
} else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
/* At gigabit speeds the clk source can be either external or from clkgen */
if (spd == SPEED_1000) {
freq = DWMAC_125MHZ;
} else {
/* Switch to clkgen for these speeds */
src = TX_RETIME_SRC_CLKGEN;
if (spd == SPEED_100)
freq = DWMAC_25MHZ;
else if (spd == SPEED_10)
freq = DWMAC_2_5MHZ;
}
}
if (src == TX_RETIME_SRC_CLKGEN && freq)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
stih4xx_tx_retime_val[src]);
}
static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
{
struct regmap *regmap = dwmac->regmap;
int iface = dwmac->interface;
u32 reg = dwmac->ctrl_reg;
u32 val;
if (dwmac->gmac_en)
regmap_update_bits(regmap, reg, EN_MASK, EN);
regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
regmap_update_bits(regmap, reg, ENMII_MASK, val);
dwmac->fix_retime_src(dwmac, dwmac->speed, 0);
return 0;
}
static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
struct platform_device *pdev)
{
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct regmap *regmap;
int err;
/* clk selection from extra syscfg register */
dwmac->clk_sel_reg = -ENXIO;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
if (res)
dwmac->clk_sel_reg = res->start;
regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
if (IS_ERR(regmap))
return PTR_ERR(regmap);
err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
if (err) {
dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
return err;
}
err = of_get_phy_mode(np, &dwmac->interface);
if (err && err != -ENODEV) {
dev_err(dev, "Can't get phy-mode\n");
return err;
}
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
dwmac->tx_retime_src = TX_RETIME_SRC_NA;
dwmac->speed = SPEED_100;
if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
const char *rs;
dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
err = of_property_read_string(np, "st,tx-retime-src", &rs);
if (err < 0) {
dev_warn(dev, "Use internal clock source\n");
} else {
if (!strcasecmp(rs, "clk_125"))
dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
else if (!strcasecmp(rs, "txclk"))
dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
}
dwmac->speed = SPEED_1000;
}
dwmac->clk = devm_clk_get(dev, "sti-ethclk");
if (IS_ERR(dwmac->clk)) {
dev_warn(dev, "No phy clock provided...\n");
dwmac->clk = NULL;
}
return 0;
}
static int sti_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
const struct sti_dwmac_of_data *data;
struct stmmac_resources stmmac_res;
struct sti_dwmac *dwmac;
int ret;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "No OF match data provided\n");
return -EINVAL;
}
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
return ret;
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
return -ENOMEM;
ret = sti_dwmac_parse_data(dwmac, pdev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
return ret;
}
dwmac->fix_retime_src = data->fix_retime_src;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = data->fix_retime_src;
ret = clk_prepare_enable(dwmac->clk);
if (ret)
return ret;
ret = sti_dwmac_set_mode(dwmac);
if (ret)
goto disable_clk;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
goto disable_clk;
return 0;
disable_clk:
clk_disable_unprepare(dwmac->clk);
return ret;
}
static void sti_dwmac_remove(struct platform_device *pdev)
{
struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
clk_disable_unprepare(dwmac->clk);
}
#ifdef CONFIG_PM_SLEEP
static int sti_dwmac_suspend(struct device *dev)
{
struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
int ret = stmmac_suspend(dev);
clk_disable_unprepare(dwmac->clk);
return ret;
}
static int sti_dwmac_resume(struct device *dev)
{
struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
clk_prepare_enable(dwmac->clk);
sti_dwmac_set_mode(dwmac);
return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
sti_dwmac_resume);
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
};
static const struct of_device_id sti_dwmac_match[] = {
{ .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
{ }
};
MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
.probe = sti_dwmac_probe,
.remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
.pm = &sti_dwmac_pm_ops,
.of_match_table = sti_dwmac_match,
},
};
module_platform_driver(sti_dwmac_driver);
MODULE_AUTHOR("Srinivas Kandagatla <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics DWMAC Specific Glue layer");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Copyright (C) 2024 Thomas Bonnefille <[email protected]>
*/
/dts-v1/;
#include "sg2002.dtsi"
/ {
model = "LicheeRV Nano B";
compatible = "sipeed,licheerv-nano-b", "sipeed,licheerv-nano", "sophgo,sg2002";
aliases {
gpio0 = &gpio0;
gpio1 = &gpio1;
gpio2 = &gpio2;
gpio3 = &gpio3;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
serial3 = &uart3;
serial4 = &uart4;
};
chosen {
stdout-path = "serial0:115200n8";
};
};
&osc {
clock-frequency = <25000000>;
};
&pinctrl {
uart0_cfg: uart0-cfg {
uart0-pins {
pinmux = <PINMUX(PIN_UART0_TX, 0)>,
<PINMUX(PIN_UART0_RX, 0)>;
bias-pull-up;
drive-strength-microamp = <10800>;
power-source = <3300>;
};
};
sdhci0_cfg: sdhci0-cfg {
sdhci0-clk-pins {
pinmux = <PINMUX(PIN_SD0_CLK, 0)>;
bias-pull-up;
drive-strength-microamp = <16100>;
power-source = <3300>;
};
sdhci0-cmd-pins {
pinmux = <PINMUX(PIN_SD0_CMD, 0)>;
bias-pull-up;
drive-strength-microamp = <10800>;
power-source = <3300>;
};
sdhci0-data-pins {
pinmux = <PINMUX(PIN_SD0_D0, 0)>,
<PINMUX(PIN_SD0_D1, 0)>,
<PINMUX(PIN_SD0_D2, 0)>,
<PINMUX(PIN_SD0_D3, 0)>;
bias-pull-up;
drive-strength-microamp = <10800>;
power-source = <3300>;
};
sdhci0-cd-pins {
pinmux = <PINMUX(PIN_SD0_CD, 0)>;
bias-pull-up;
drive-strength-microamp = <10800>;
power-source = <3300>;
};
};
};
&sdhci0 {
pinctrl-0 = <&sdhci0_cfg>;
pinctrl-names = "default";
status = "okay";
bus-width = <4>;
no-1-8-v;
no-mmc;
no-sdio;
disable-wp;
};
&uart0 {
pinctrl-0 = <&uart0_cfg>;
pinctrl-names = "default";
status = "okay";
};
// SPDX-License-Identifier: GPL-2.0
//
// mt9v011 - Micron 1/4-Inch VGA Digital Image Sensor
//
// Copyright (c) 2009 Mauro Carvalho Chehab <[email protected]>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/div64.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/i2c/mt9v011.h>
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL v2");
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
#define R00_MT9V011_CHIP_VERSION 0x00
#define R01_MT9V011_ROWSTART 0x01
#define R02_MT9V011_COLSTART 0x02
#define R03_MT9V011_HEIGHT 0x03
#define R04_MT9V011_WIDTH 0x04
#define R05_MT9V011_HBLANK 0x05
#define R06_MT9V011_VBLANK 0x06
#define R07_MT9V011_OUT_CTRL 0x07
#define R09_MT9V011_SHUTTER_WIDTH 0x09
#define R0A_MT9V011_CLK_SPEED 0x0a
#define R0B_MT9V011_RESTART 0x0b
#define R0C_MT9V011_SHUTTER_DELAY 0x0c
#define R0D_MT9V011_RESET 0x0d
#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
#define R20_MT9V011_READ_MODE 0x20
#define R2B_MT9V011_GREEN_1_GAIN 0x2b
#define R2C_MT9V011_BLUE_GAIN 0x2c
#define R2D_MT9V011_RED_GAIN 0x2d
#define R2E_MT9V011_GREEN_2_GAIN 0x2e
#define R35_MT9V011_GLOBAL_GAIN 0x35
#define RF1_MT9V011_CHIP_ENABLE 0xf1
#define MT9V011_VERSION 0x8232
#define MT9V011_REV_B_VERSION 0x8243
struct mt9v011 {
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrls;
unsigned width, height;
unsigned xtal;
unsigned hflip:1;
unsigned vflip:1;
u16 global_gain, exposure;
s16 red_bal, blue_bal;
};
static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
{
return container_of(sd, struct mt9v011, sd);
}
static int mt9v011_read(struct v4l2_subdev *sd, unsigned char addr)
{
struct i2c_client *c = v4l2_get_subdevdata(sd);
__be16 buffer;
int rc, val;
rc = i2c_master_send(c, &addr, 1);
if (rc != 1)
v4l2_dbg(0, debug, sd,
"i2c i/o error: rc == %d (should be 1)\n", rc);
msleep(10);
rc = i2c_master_recv(c, (char *)&buffer, 2);
if (rc != 2)
v4l2_dbg(0, debug, sd,
"i2c i/o error: rc == %d (should be 2)\n", rc);
val = be16_to_cpu(buffer);
v4l2_dbg(2, debug, sd, "mt9v011: read 0x%02x = 0x%04x\n", addr, val);
return val;
}
static void mt9v011_write(struct v4l2_subdev *sd, unsigned char addr,
u16 value)
{
struct i2c_client *c = v4l2_get_subdevdata(sd);
unsigned char buffer[3];
int rc;
buffer[0] = addr;
buffer[1] = value >> 8;
buffer[2] = value & 0xff;
v4l2_dbg(2, debug, sd,
"mt9v011: writing 0x%02x 0x%04x\n", buffer[0], value);
rc = i2c_master_send(c, buffer, 3);
if (rc != 3)
v4l2_dbg(0, debug, sd,
"i2c i/o error: rc == %d (should be 3)\n", rc);
}
struct i2c_reg_value {
unsigned char reg;
u16 value;
};
/*
 * Values used by the original driver.
 * Some values are marked as Reserved in the datasheet.
 */
static const struct i2c_reg_value mt9v011_init_default[] = {
{ R0D_MT9V011_RESET, 0x0001 },
{ R0D_MT9V011_RESET, 0x0000 },
{ R0C_MT9V011_SHUTTER_DELAY, 0x0000 },
{ R09_MT9V011_SHUTTER_WIDTH, 0x1fc },
{ R0A_MT9V011_CLK_SPEED, 0x0000 },
{ R1E_MT9V011_DIGITAL_ZOOM, 0x0000 },
{ R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */
};
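/*
 * Pack a linear gain value into the layout implied by the arithmetic
 * below: bits [6:0] hold the initial analog gain, bits [8:7] the analog
 * multiplier and bits [10:9] the digital gain.
 */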
static u16 calc_mt9v011_gain(s16 lineargain)
{
u16 digitalgain = 0;
u16 analogmult = 0;
u16 analoginit = 0;
if (lineargain < 0)
lineargain = 0;
/* recommended minimum */
lineargain += 0x0020;
if (lineargain > 2047)
lineargain = 2047;
if (lineargain > 1023) {
digitalgain = 3;
analogmult = 3;
analoginit = lineargain / 16;
} else if (lineargain > 511) {
digitalgain = 1;
analogmult = 3;
analoginit = lineargain / 8;
} else if (lineargain > 255) {
analogmult = 3;
analoginit = lineargain / 4;
} else if (lineargain > 127) {
analogmult = 1;
analoginit = lineargain / 2;
	} else {
		analoginit = lineargain;
	}
return analoginit + (analogmult << 7) + (digitalgain << 9);
}
static void set_balance(struct v4l2_subdev *sd)
{
struct mt9v011 *core = to_mt9v011(sd);
u16 green_gain, blue_gain, red_gain;
u16 exposure;
s16 bal;
exposure = core->exposure;
green_gain = calc_mt9v011_gain(core->global_gain);
bal = core->global_gain;
bal += (core->blue_bal * core->global_gain / (1 << 7));
blue_gain = calc_mt9v011_gain(bal);
bal = core->global_gain;
bal += (core->red_bal * core->global_gain / (1 << 7));
red_gain = calc_mt9v011_gain(bal);
mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green_gain);
mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green_gain);
mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain);
mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
mt9v011_write(sd, R09_MT9V011_SHUTTER_WIDTH, exposure);
}
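/*
 * Estimate the programmed frame rate from the current sensor settings:
 * each line takes (width + 113 + hblank) * (speed + 2) xtal cycles and
 * a frame spans (height + vblank + 1) lines.
 */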
static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator)
{
struct mt9v011 *core = to_mt9v011(sd);
unsigned height, width, hblank, vblank, speed;
unsigned row_time, t_time;
u64 frames_per_ms;
unsigned tmp;
height = mt9v011_read(sd, R03_MT9V011_HEIGHT);
width = mt9v011_read(sd, R04_MT9V011_WIDTH);
hblank = mt9v011_read(sd, R05_MT9V011_HBLANK);
vblank = mt9v011_read(sd, R06_MT9V011_VBLANK);
speed = mt9v011_read(sd, R0A_MT9V011_CLK_SPEED);
row_time = (width + 113 + hblank) * (speed + 2);
t_time = row_time * (height + vblank + 1);
	frames_per_ms = core->xtal * 1000L;
do_div(frames_per_ms, t_time);
tmp = frames_per_ms;
	v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clocks)\n",
		 tmp / 1000, tmp % 1000, t_time);
if (numerator && denominator) {
*numerator = 1000;
*denominator = (u32)frames_per_ms;
}
}
static u16 calc_speed(struct v4l2_subdev *sd, u32 numerator, u32 denominator)
{
struct mt9v011 *core = to_mt9v011(sd);
unsigned height, width, hblank, vblank;
unsigned row_time, line_time;
u64 t_time, speed;
	/* Avoid bogus calculations */
if (!numerator || !denominator)
return 0;
height = mt9v011_read(sd, R03_MT9V011_HEIGHT);
width = mt9v011_read(sd, R04_MT9V011_WIDTH);
hblank = mt9v011_read(sd, R05_MT9V011_HBLANK);
vblank = mt9v011_read(sd, R06_MT9V011_VBLANK);
row_time = width + 113 + hblank;
line_time = height + vblank + 1;
t_time = core->xtal * ((u64)numerator);
/* round to the closest value */
t_time += denominator / 2;
do_div(t_time, denominator);
speed = t_time;
do_div(speed, row_time * line_time);
	/* Avoid underflowing speed, which is unsigned */
if (speed < 2)
speed = 0;
else
speed -= 2;
	/* Clamp speed to its maximum value of 15 */
if (speed > 15)
return 15;
return (u16)speed;
}
static void set_res(struct v4l2_subdev *sd)
{
struct mt9v011 *core = to_mt9v011(sd);
unsigned vstart, hstart;
	/*
	 * The mt9v011 doesn't have scaling. So, in order to select the
	 * desired resolution, we crop at the middle of the sensor.
	 * hblank and vblank are adjusted to guarantee that the line
	 * timings for 30 fps are preserved, no matter what resolution
	 * is selected.
	 * NOTE: the datasheet says that width (and height) should be
	 * filled with width - 1. However, this doesn't work, since one
	 * pixel per line would be missing.
	 */
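	/*
	 * Illustrative example: a 320x240 crop gives hstart = 20 +
	 * (640 - 320) / 2 = 180 and hblank = 771 - 320 = 451, so each line
	 * still spans 320 + 113 + 451 = 884 pixel clocks and each frame
	 * 240 + (508 - 240) + 1 = 509 lines; with the default 27 MHz xtal
	 * and speed 0 that is 27000000 / (884 * 2 * 509) ~= 30 fps.
	 */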
hstart = 20 + (640 - core->width) / 2;
mt9v011_write(sd, R02_MT9V011_COLSTART, hstart);
mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width);
vstart = 8 + (480 - core->height) / 2;
mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart);
mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height);
mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height);
calc_fps(sd, NULL, NULL);
}
static void set_read_mode(struct v4l2_subdev *sd)
{
struct mt9v011 *core = to_mt9v011(sd);
unsigned mode = 0x1000;
if (core->hflip)
mode |= 0x4000;
if (core->vflip)
mode |= 0x8000;
mt9v011_write(sd, R20_MT9V011_READ_MODE, mode);
}
static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
{
int i;
for (i = 0; i < ARRAY_SIZE(mt9v011_init_default); i++)
mt9v011_write(sd, mt9v011_init_default[i].reg,
mt9v011_init_default[i].value);
set_balance(sd);
set_res(sd);
set_read_mode(sd);
return 0;
}
static int mt9v011_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->pad || code->index > 0)
return -EINVAL;
code->code = MEDIA_BUS_FMT_SGRBG8_1X8;
return 0;
}
static int mt9v011_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
struct mt9v011 *core = to_mt9v011(sd);
if (format->pad || fmt->code != MEDIA_BUS_FMT_SGRBG8_1X8)
return -EINVAL;
v4l_bound_align_image(&fmt->width, 48, 639, 1,
&fmt->height, 32, 480, 1, 0);
fmt->field = V4L2_FIELD_NONE;
fmt->colorspace = V4L2_COLORSPACE_SRGB;
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
core->width = fmt->width;
core->height = fmt->height;
set_res(sd);
} else {
*v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
}
return 0;
}
static int mt9v011_get_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *ival)
{
/*
* FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
* subdev active state API.
*/
if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
calc_fps(sd,
&ival->interval.numerator,
&ival->interval.denominator);
return 0;
}
static int mt9v011_set_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *ival)
{
struct v4l2_fract *tpf = &ival->interval;
u16 speed;
/*
* FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
* subdev active state API.
*/
if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
speed = calc_speed(sd, tpf->numerator, tpf->denominator);
mt9v011_write(sd, R0A_MT9V011_CLK_SPEED, speed);
v4l2_dbg(1, debug, sd, "Setting speed to %d\n", speed);
/* Recalculate and update fps info */
calc_fps(sd, &tpf->numerator, &tpf->denominator);
return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int mt9v011_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
reg->val = mt9v011_read(sd, reg->reg & 0xff);
reg->size = 2;
return 0;
}
static int mt9v011_s_register(struct v4l2_subdev *sd,
const struct v4l2_dbg_register *reg)
{
mt9v011_write(sd, reg->reg & 0xff, reg->val & 0xffff);
return 0;
}
#endif
static int mt9v011_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct mt9v011 *core =
container_of(ctrl->handler, struct mt9v011, ctrls);
struct v4l2_subdev *sd = &core->sd;
switch (ctrl->id) {
case V4L2_CID_GAIN:
core->global_gain = ctrl->val;
break;
case V4L2_CID_EXPOSURE:
core->exposure = ctrl->val;
break;
case V4L2_CID_RED_BALANCE:
core->red_bal = ctrl->val;
break;
case V4L2_CID_BLUE_BALANCE:
core->blue_bal = ctrl->val;
break;
case V4L2_CID_HFLIP:
core->hflip = ctrl->val;
set_read_mode(sd);
return 0;
case V4L2_CID_VFLIP:
core->vflip = ctrl->val;
set_read_mode(sd);
return 0;
default:
return -EINVAL;
}
set_balance(sd);
return 0;
}
static const struct v4l2_ctrl_ops mt9v011_ctrl_ops = {
.s_ctrl = mt9v011_s_ctrl,
};
static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
.reset = mt9v011_reset,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9v011_g_register,
.s_register = mt9v011_s_register,
#endif
};
static const struct v4l2_subdev_pad_ops mt9v011_pad_ops = {
.enum_mbus_code = mt9v011_enum_mbus_code,
.set_fmt = mt9v011_set_fmt,
.get_frame_interval = mt9v011_get_frame_interval,
.set_frame_interval = mt9v011_set_frame_interval,
};
static const struct v4l2_subdev_ops mt9v011_ops = {
.core = &mt9v011_core_ops,
.pad = &mt9v011_pad_ops,
};
/****************************************************************************
I2C Client & Driver
****************************************************************************/
static int mt9v011_probe(struct i2c_client *c)
{
u16 version;
struct mt9v011 *core;
struct v4l2_subdev *sd;
int ret;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(c->adapter,
I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
return -EIO;
core = devm_kzalloc(&c->dev, sizeof(struct mt9v011), GFP_KERNEL);
if (!core)
return -ENOMEM;
sd = &core->sd;
v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
core->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &core->pad);
if (ret < 0)
return ret;
/* Check if the sensor is really a MT9V011 */
version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
if ((version != MT9V011_VERSION) &&
(version != MT9V011_REV_B_VERSION)) {
v4l2_info(sd, "*** unknown micron chip detected (0x%04x).\n",
version);
return -EINVAL;
}
v4l2_ctrl_handler_init(&core->ctrls, 5);
v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
V4L2_CID_GAIN, 0, (1 << 12) - 1 - 0x20, 1, 0x20);
v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
V4L2_CID_EXPOSURE, 0, 2047, 1, 0x01fc);
v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
V4L2_CID_RED_BALANCE, -(1 << 9), (1 << 9) - 1, 1, 0);
v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
V4L2_CID_BLUE_BALANCE, -(1 << 9), (1 << 9) - 1, 1, 0);
v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&core->ctrls, &mt9v011_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
if (core->ctrls.error) {
int ret = core->ctrls.error;
v4l2_err(sd, "control initialization error %d\n", ret);
v4l2_ctrl_handler_free(&core->ctrls);
return ret;
}
core->sd.ctrl_handler = &core->ctrls;
core->global_gain = 0x0024;
core->exposure = 0x01fc;
core->width = 640;
core->height = 480;
core->xtal = 27000000; /* Hz */
if (c->dev.platform_data) {
struct mt9v011_platform_data *pdata = c->dev.platform_data;
core->xtal = pdata->xtal;
v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
core->xtal / 1000000, (core->xtal / 1000) % 1000);
}
v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n",
c->addr << 1, c->adapter->name, version);
return 0;
}
static void mt9v011_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct mt9v011 *core = to_mt9v011(sd);
v4l2_dbg(1, debug, sd,
"mt9v011.c: removing mt9v011 adapter on address 0x%x\n",
c->addr << 1);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&core->ctrls);
}
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id mt9v011_id[] = {
{ "mt9v011" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9v011_id);
static struct i2c_driver mt9v011_driver = {
.driver = {
.name = "mt9v011",
},
.probe = mt9v011_probe,
.remove = mt9v011_remove,
.id_table = mt9v011_id,
};
module_i2c_driver(mt9v011_driver);
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2023 Intel Corporation. */
#ifndef _I40E_IO_H_
#define _I40E_IO_H_
/* get readq/writeq support for 32 bit kernels, use the low-first version */
#include <linux/io-64-nonatomic-lo-hi.h>
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
#endif /* _I40E_IO_H_ */
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
int psize;
struct hstate *hstate = hstate_file(vma->vm_file);
psize = hstate_get_psize(hstate);
radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
int psize;
struct hstate *hstate = hstate_file(vma->vm_file);
psize = hstate_get_psize(hstate);
radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
int psize;
struct hstate *hstate = hstate_file(vma->vm_file);
psize = hstate_get_psize(hstate);
	/*
	 * Flush the PWC even if we get a PUD_SIZE hugetlb invalidate, to
	 * keep this simple.
	 */
if (end - start >= PUD_SIZE)
radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);
else
radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}
void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long psize = huge_page_size(hstate_vma(vma));
/*
* POWER9 NMMU must flush the TLB after clearing the PTE before
* installing a PTE with more relaxed access permissions, see
* radix__ptep_set_access_flags.
*/
if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
atomic_read(&mm->context.copros) > 0)
radix__flush_hugetlb_page(vma, addr);
set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Atheros AR933X SoC built-in UART driver
*
* Copyright (C) 2011 Gabor Juhos <[email protected]>
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/div64.h>
#include <asm/mach-ath79/ar933x_uart.h>
#include "serial_mctrl_gpio.h"
#define DRIVER_NAME "ar933x-uart"
#define AR933X_UART_MAX_SCALE 0xff
#define AR933X_UART_MAX_STEP 0xffff
#define AR933X_UART_MIN_BAUD 300
#define AR933X_UART_MAX_BAUD 3000000
#define AR933X_DUMMY_STATUS_RD 0x01
static struct uart_driver ar933x_uart_driver;
struct ar933x_uart_port {
struct uart_port port;
unsigned int ier; /* shadow Interrupt Enable Register */
unsigned int min_baud;
unsigned int max_baud;
struct clk *clk;
struct mctrl_gpios *gpios;
struct gpio_desc *rts_gpiod;
};
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
int offset)
{
return readl(up->port.membase + offset);
}
static inline void ar933x_uart_write(struct ar933x_uart_port *up,
int offset, unsigned int value)
{
writel(value, up->port.membase + offset);
}
static inline void ar933x_uart_rmw(struct ar933x_uart_port *up,
unsigned int offset,
unsigned int mask,
unsigned int val)
{
unsigned int t;
t = ar933x_uart_read(up, offset);
t &= ~mask;
t |= val;
ar933x_uart_write(up, offset, t);
}
static inline void ar933x_uart_rmw_set(struct ar933x_uart_port *up,
unsigned int offset,
unsigned int val)
{
ar933x_uart_rmw(up, offset, 0, val);
}
static inline void ar933x_uart_rmw_clear(struct ar933x_uart_port *up,
unsigned int offset,
unsigned int val)
{
ar933x_uart_rmw(up, offset, val, 0);
}
static inline void ar933x_uart_start_tx_interrupt(struct ar933x_uart_port *up)
{
up->ier |= AR933X_UART_INT_TX_EMPTY;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
static inline void ar933x_uart_stop_tx_interrupt(struct ar933x_uart_port *up)
{
up->ier &= ~AR933X_UART_INT_TX_EMPTY;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
static inline void ar933x_uart_start_rx_interrupt(struct ar933x_uart_port *up)
{
up->ier |= AR933X_UART_INT_RX_VALID;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
static inline void ar933x_uart_stop_rx_interrupt(struct ar933x_uart_port *up)
{
up->ier &= ~AR933X_UART_INT_RX_VALID;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
}
static inline void ar933x_uart_putc(struct ar933x_uart_port *up, int ch)
{
unsigned int rdata;
rdata = ch & AR933X_UART_DATA_TX_RX_MASK;
rdata |= AR933X_UART_DATA_TX_CSR;
ar933x_uart_write(up, AR933X_UART_DATA_REG, rdata);
}
static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
unsigned int rdata;
uart_port_lock_irqsave(&up->port, &flags);
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
uart_port_unlock_irqrestore(&up->port, flags);
return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
}
static unsigned int ar933x_uart_get_mctrl(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
int ret = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
mctrl_gpio_get(up->gpios, &ret);
return ret;
}
static void ar933x_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
mctrl_gpio_set(up->gpios, mctrl);
}
static void ar933x_uart_start_tx(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
ar933x_uart_start_tx_interrupt(up);
}
static void ar933x_uart_wait_tx_complete(struct ar933x_uart_port *up)
{
unsigned int status;
unsigned int timeout = 60000;
/* Wait up to 60ms for the character(s) to be sent. */
do {
status = ar933x_uart_read(up, AR933X_UART_CS_REG);
if (--timeout == 0)
break;
udelay(1);
} while (status & AR933X_UART_CS_TX_BUSY);
if (timeout == 0)
dev_err(up->port.dev, "waiting for TX timed out\n");
}
static void ar933x_uart_rx_flush(struct ar933x_uart_port *up)
{
unsigned int status;
/* clear RX_VALID interrupt */
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_RX_VALID);
/* remove characters from the RX FIFO */
do {
ar933x_uart_write(up, AR933X_UART_DATA_REG, AR933X_UART_DATA_RX_CSR);
status = ar933x_uart_read(up, AR933X_UART_DATA_REG);
} while (status & AR933X_UART_DATA_RX_CSR);
}
static void ar933x_uart_stop_tx(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
ar933x_uart_stop_tx_interrupt(up);
}
static void ar933x_uart_stop_rx(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
ar933x_uart_stop_rx_interrupt(up);
}
static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
else
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
uart_port_unlock_irqrestore(&up->port, flags);
}
/*
* baudrate = (clk / (scale + 1)) * (step * (1 / 2^17))
*/
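/*
 * Illustrative example: with clk = 25 MHz, scale = 0 and step = 604,
 * baudrate = 25000000 * 604 / 2^17 ~= 115204, within 0.01% of a
 * requested 115200.
 */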
static unsigned long ar933x_uart_get_baud(unsigned int clk,
unsigned int scale,
unsigned int step)
{
u64 t;
u32 div;
div = (2 << 16) * (scale + 1);
t = clk;
t *= step;
t += (div / 2);
do_div(t, div);
return t;
}
static void ar933x_uart_get_scale_step(unsigned int clk,
unsigned int baud,
unsigned int *scale,
unsigned int *step)
{
unsigned int tscale;
long min_diff;
*scale = 0;
*step = 0;
min_diff = baud;
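	/*
	 * For each candidate scale, derive the step that best matches the
	 * requested baud rate (step = baud * (scale + 1) * 2^17 / clk) and
	 * keep the (scale, step) pair with the smallest resulting error.
	 */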
for (tscale = 0; tscale < AR933X_UART_MAX_SCALE; tscale++) {
u64 tstep;
int diff;
tstep = baud * (tscale + 1);
tstep *= (2 << 16);
do_div(tstep, clk);
if (tstep > AR933X_UART_MAX_STEP)
break;
diff = abs(ar933x_uart_get_baud(clk, tscale, tstep) - baud);
if (diff < min_diff) {
min_diff = diff;
*scale = tscale;
*step = tstep;
}
}
}
static void ar933x_uart_set_termios(struct uart_port *port,
struct ktermios *new,
const struct ktermios *old)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
unsigned int cs;
unsigned long flags;
unsigned int baud, scale, step;
/* Only CS8 is supported */
new->c_cflag &= ~CSIZE;
new->c_cflag |= CS8;
/* Only one stop bit is supported */
new->c_cflag &= ~CSTOPB;
cs = 0;
if (new->c_cflag & PARENB) {
if (!(new->c_cflag & PARODD))
cs |= AR933X_UART_CS_PARITY_EVEN;
else
cs |= AR933X_UART_CS_PARITY_ODD;
} else {
cs |= AR933X_UART_CS_PARITY_NONE;
}
/* Mark/space parity is not supported */
new->c_cflag &= ~CMSPAR;
baud = uart_get_baud_rate(port, new, old, up->min_baud, up->max_baud);
ar933x_uart_get_scale_step(port->uartclk, baud, &scale, &step);
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
uart_port_lock_irqsave(&up->port, &flags);
/* disable the UART */
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S);
/* Update the per-port timeout. */
uart_update_timeout(port, new->c_cflag, baud);
up->port.ignore_status_mask = 0;
/* ignore all characters if CREAD is not set */
if ((new->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= AR933X_DUMMY_STATUS_RD;
ar933x_uart_write(up, AR933X_UART_CLOCK_REG,
scale << AR933X_UART_CLOCK_SCALE_S | step);
/* setup configuration register */
ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_PARITY_M, cs);
/* enable host interrupt */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
	/* enable RX and TX ready override */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
/* reenable the UART */
ar933x_uart_rmw(up, AR933X_UART_CS_REG,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);
uart_port_unlock_irqrestore(&up->port, flags);
if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
}
static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
{
struct tty_port *port = &up->port.state->port;
int max_count = 256;
do {
unsigned int rdata;
unsigned char ch;
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
if ((rdata & AR933X_UART_DATA_RX_CSR) == 0)
break;
/* remove the character from the FIFO */
ar933x_uart_write(up, AR933X_UART_DATA_REG,
AR933X_UART_DATA_RX_CSR);
up->port.icount.rx++;
ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
if (uart_prepare_sysrq_char(&up->port, ch))
continue;
if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
tty_insert_flip_char(port, ch, TTY_NORMAL);
} while (max_count-- > 0);
tty_flip_buffer_push(port);
}
static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
{
struct tty_port *tport = &up->port.state->port;
struct serial_rs485 *rs485conf = &up->port.rs485;
int count;
bool half_duplex_send = false;
if (uart_tx_stopped(&up->port))
return;
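	/*
	 * In RS485 half-duplex mode, stop receiving and drive RTS to its
	 * "on send" level before transmitting; the reverse sequence runs at
	 * the bottom of this function once the FIFO has drained.
	 */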
if ((rs485conf->flags & SER_RS485_ENABLED) &&
(up->port.x_char || !kfifo_is_empty(&tport->xmit_fifo))) {
ar933x_uart_stop_rx_interrupt(up);
gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_ON_SEND));
half_duplex_send = true;
}
count = up->port.fifosize;
do {
unsigned int rdata;
unsigned char c;
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
if ((rdata & AR933X_UART_DATA_TX_CSR) == 0)
break;
if (up->port.x_char) {
ar933x_uart_putc(up, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
continue;
}
if (!uart_fifo_get(&up->port, &c))
break;
ar933x_uart_putc(up, c);
} while (--count > 0);
if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
if (!kfifo_is_empty(&tport->xmit_fifo)) {
ar933x_uart_start_tx_interrupt(up);
} else if (half_duplex_send) {
ar933x_uart_wait_tx_complete(up);
ar933x_uart_rx_flush(up);
ar933x_uart_start_rx_interrupt(up);
gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
}
}
static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
{
struct ar933x_uart_port *up = dev_id;
unsigned int status;
status = ar933x_uart_read(up, AR933X_UART_CS_REG);
if ((status & AR933X_UART_CS_HOST_INT) == 0)
return IRQ_NONE;
uart_port_lock(&up->port);
status = ar933x_uart_read(up, AR933X_UART_INT_REG);
status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
if (status & AR933X_UART_INT_RX_VALID) {
ar933x_uart_write(up, AR933X_UART_INT_REG,
AR933X_UART_INT_RX_VALID);
ar933x_uart_rx_chars(up);
}
if (status & AR933X_UART_INT_TX_EMPTY) {
ar933x_uart_write(up, AR933X_UART_INT_REG,
AR933X_UART_INT_TX_EMPTY);
ar933x_uart_stop_tx_interrupt(up);
ar933x_uart_tx_chars(up);
}
uart_unlock_and_check_sysrq(&up->port);
return IRQ_HANDLED;
}
static int ar933x_uart_startup(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;
int ret;
ret = request_irq(up->port.irq, ar933x_uart_interrupt,
up->port.irqflags, dev_name(up->port.dev), up);
if (ret)
return ret;
uart_port_lock_irqsave(&up->port, &flags);
/* Enable HOST interrupts */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
	/* enable RX and TX ready override */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
/* Enable RX interrupts */
ar933x_uart_start_rx_interrupt(up);
uart_port_unlock_irqrestore(&up->port, flags);
return 0;
}
static void ar933x_uart_shutdown(struct uart_port *port)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
/* Disable all interrupts */
up->ier = 0;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
/* Disable break condition */
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
free_irq(up->port.irq, up);
}
static const char *ar933x_uart_type(struct uart_port *port)
{
return (port->type == PORT_AR933X) ? "AR933X UART" : NULL;
}
static void ar933x_uart_release_port(struct uart_port *port)
{
/* Nothing to release ... */
}
static int ar933x_uart_request_port(struct uart_port *port)
{
/* UARTs always present */
return 0;
}
static void ar933x_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_AR933X;
}
static int ar933x_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
if (ser->type != PORT_UNKNOWN &&
ser->type != PORT_AR933X)
return -EINVAL;
if (ser->irq < 0 || ser->irq >= NR_IRQS)
return -EINVAL;
if (ser->baud_base < up->min_baud ||
ser->baud_base > up->max_baud)
return -EINVAL;
return 0;
}
static const struct uart_ops ar933x_uart_ops = {
.tx_empty = ar933x_uart_tx_empty,
.set_mctrl = ar933x_uart_set_mctrl,
.get_mctrl = ar933x_uart_get_mctrl,
.stop_tx = ar933x_uart_stop_tx,
.start_tx = ar933x_uart_start_tx,
.stop_rx = ar933x_uart_stop_rx,
.break_ctl = ar933x_uart_break_ctl,
.startup = ar933x_uart_startup,
.shutdown = ar933x_uart_shutdown,
.set_termios = ar933x_uart_set_termios,
.type = ar933x_uart_type,
.release_port = ar933x_uart_release_port,
.request_port = ar933x_uart_request_port,
.config_port = ar933x_uart_config_port,
.verify_port = ar933x_uart_verify_port,
};
static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
if (port->rs485.flags & SER_RS485_ENABLED)
gpiod_set_value(up->rts_gpiod,
!!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
return 0;
}
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
static struct ar933x_uart_port *
ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
static void ar933x_uart_wait_xmitr(struct ar933x_uart_port *up)
{
unsigned int status;
unsigned int timeout = 60000;
/* Wait up to 60ms for the character(s) to be sent. */
do {
status = ar933x_uart_read(up, AR933X_UART_DATA_REG);
if (--timeout == 0)
break;
udelay(1);
} while ((status & AR933X_UART_DATA_TX_CSR) == 0);
}
static void ar933x_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
ar933x_uart_wait_xmitr(up);
ar933x_uart_putc(up, ch);
}
static void ar933x_uart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct ar933x_uart_port *up = ar933x_console_ports[co->index];
unsigned long flags;
unsigned int int_en;
int locked = 1;
if (oops_in_progress)
locked = uart_port_trylock_irqsave(&up->port, &flags);
else
uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
*/
int_en = ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, 0);
uart_console_write(&up->port, s, count, ar933x_uart_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
ar933x_uart_wait_xmitr(up);
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, int_en);
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
if (locked)
uart_port_unlock_irqrestore(&up->port, flags);
}
static int ar933x_uart_console_setup(struct console *co, char *options)
{
struct ar933x_uart_port *up;
int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index < 0 || co->index >= CONFIG_SERIAL_AR933X_NR_UARTS)
return -EINVAL;
up = ar933x_console_ports[co->index];
if (!up)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console ar933x_uart_console = {
.name = "ttyATH",
.write = ar933x_uart_console_write,
.device = uart_console_device,
.setup = ar933x_uart_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &ar933x_uart_driver,
};
#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
static struct uart_driver ar933x_uart_driver = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = "ttyATH",
.nr = CONFIG_SERIAL_AR933X_NR_UARTS,
	.cons = NULL, /* filled in at runtime */
};
static const struct serial_rs485 ar933x_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
};
static int ar933x_uart_probe(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
struct uart_port *port;
struct resource *mem_res;
struct device_node *np;
unsigned int baud;
int id;
int ret;
int irq;
np = pdev->dev.of_node;
if (IS_ENABLED(CONFIG_OF) && np) {
id = of_alias_get_id(np, "serial");
if (id < 0) {
dev_err(&pdev->dev, "unable to get alias id, err=%d\n",
id);
return id;
}
} else {
id = pdev->id;
if (id == -1)
id = 0;
}
if (id >= CONFIG_SERIAL_AR933X_NR_UARTS)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
up = devm_kzalloc(&pdev->dev, sizeof(struct ar933x_uart_port),
GFP_KERNEL);
if (!up)
return -ENOMEM;
up->clk = devm_clk_get(&pdev->dev, "uart");
if (IS_ERR(up->clk)) {
dev_err(&pdev->dev, "unable to get UART clock\n");
return PTR_ERR(up->clk);
}
port = &up->port;
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
ret = clk_prepare_enable(up->clk);
if (ret)
return ret;
port->uartclk = clk_get_rate(up->clk);
if (!port->uartclk) {
ret = -EINVAL;
goto err_disable_clk;
}
port->mapbase = mem_res->start;
port->line = id;
port->irq = irq;
port->dev = &pdev->dev;
port->type = PORT_AR933X;
port->iotype = UPIO_MEM32;
port->regshift = 2;
port->fifosize = AR933X_UART_FIFO_SIZE;
port->ops = &ar933x_uart_ops;
port->rs485_config = ar933x_config_rs485;
port->rs485_supported = ar933x_rs485_supported;
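	/*
	 * Bound the usable baud range by what the divisor can express: the
	 * slowest rate uses the maximum scale with step 1, the fastest uses
	 * scale 0 with the maximum step.
	 */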
baud = ar933x_uart_get_baud(port->uartclk, AR933X_UART_MAX_SCALE, 1);
up->min_baud = max_t(unsigned int, baud, AR933X_UART_MIN_BAUD);
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
ret = uart_get_rs485_mode(port);
if (ret)
goto err_disable_clk;
up->gpios = mctrl_gpio_init(port, 0);
if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS) {
ret = PTR_ERR(up->gpios);
goto err_disable_clk;
}
up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
if (!up->rts_gpiod) {
port->rs485_supported.flags &= ~SER_RS485_ENABLED;
if (port->rs485.flags & SER_RS485_ENABLED) {
dev_err(&pdev->dev, "lacking rts-gpio, disabling RS485\n");
port->rs485.flags &= ~SER_RS485_ENABLED;
}
}
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
ar933x_console_ports[up->port.line] = up;
#endif
ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
if (ret)
goto err_disable_clk;
platform_set_drvdata(pdev, up);
return 0;
err_disable_clk:
clk_disable_unprepare(up->clk);
return ret;
}
static void ar933x_uart_remove(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
up = platform_get_drvdata(pdev);
if (up) {
uart_remove_one_port(&ar933x_uart_driver, &up->port);
clk_disable_unprepare(up->clk);
}
}
#ifdef CONFIG_OF
static const struct of_device_id ar933x_uart_of_ids[] = {
{ .compatible = "qca,ar9330-uart" },
{},
};
MODULE_DEVICE_TABLE(of, ar933x_uart_of_ids);
#endif
static struct platform_driver ar933x_uart_platform_driver = {
.probe = ar933x_uart_probe,
.remove = ar933x_uart_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ar933x_uart_of_ids),
},
};
static int __init ar933x_uart_init(void)
{
int ret;
#ifdef CONFIG_SERIAL_AR933X_CONSOLE
ar933x_uart_driver.cons = &ar933x_uart_console;
#endif
ret = uart_register_driver(&ar933x_uart_driver);
if (ret)
goto err_out;
ret = platform_driver_register(&ar933x_uart_platform_driver);
if (ret)
goto err_unregister_uart_driver;
return 0;
err_unregister_uart_driver:
uart_unregister_driver(&ar933x_uart_driver);
err_out:
return ret;
}
static void __exit ar933x_uart_exit(void)
{
platform_driver_unregister(&ar933x_uart_platform_driver);
uart_unregister_driver(&ar933x_uart_driver);
}
module_init(ar933x_uart_init);
module_exit(ar933x_uart_exit);
MODULE_DESCRIPTION("Atheros AR933X UART driver");
MODULE_AUTHOR("Gabor Juhos <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
|
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "uc/intel_guc_submission.h"
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
struct rb_node *p = i915->uabi_engines.rb_node;
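	/*
	 * Engines are kept in an rbtree sorted by (uabi_class,
	 * uabi_instance), so compare the class first and the instance
	 * second while walking down the tree.
	 */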
while (p) {
struct intel_engine_cs *it =
rb_entry(p, typeof(*it), uabi_node);
if (class < it->uabi_class)
p = p->rb_left;
else if (class > it->uabi_class ||
instance > it->uabi_instance)
p = p->rb_right;
else if (instance < it->uabi_instance)
p = p->rb_left;
else
return it;
}
return NULL;
}
void intel_engine_add_user(struct intel_engine_cs *engine)
{
llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}
#define I915_NO_UABI_CLASS ((u16)(-1))
static const u16 uabi_classes[] = {
[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
[OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
static int engine_cmp(void *priv, const struct list_head *A,
const struct list_head *B)
{
const struct intel_engine_cs *a =
container_of(A, typeof(*a), uabi_list);
const struct intel_engine_cs *b =
container_of(B, typeof(*b), uabi_list);
if (uabi_classes[a->class] < uabi_classes[b->class])
return -1;
if (uabi_classes[a->class] > uabi_classes[b->class])
return 1;
if (a->instance < b->instance)
return -1;
if (a->instance > b->instance)
return 1;
return 0;
}
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
return llist_del_all(&i915->uabi_engines_llist);
}
static void sort_engines(struct drm_i915_private *i915,
struct list_head *engines)
{
struct llist_node *pos, *next;
llist_for_each_safe(pos, next, get_engines(i915)) {
struct intel_engine_cs *engine =
container_of(pos, typeof(*engine), uabi_llist);
list_add(&engine->uabi_list, engines);
}
list_sort(NULL, engines, engine_cmp);
}
static void set_scheduler_caps(struct drm_i915_private *i915)
{
static const struct {
u8 engine;
u8 sched;
} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
MAP(HAS_PREEMPTION, PREEMPTION),
MAP(HAS_SEMAPHORES, SEMAPHORES),
MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
};
struct intel_engine_cs *engine;
u32 enabled, disabled;
enabled = 0;
disabled = 0;
for_each_uabi_engine(engine, i915) { /* all engines must agree! */
int i;
if (engine->sched_engine->schedule)
enabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
else
disabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
if (intel_uc_uses_guc_submission(&engine->gt->uc))
enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
for (i = 0; i < ARRAY_SIZE(map); i++) {
if (engine->flags & BIT(map[i].engine))
enabled |= BIT(map[i].sched);
else
disabled |= BIT(map[i].sched);
}
}
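	/*
	 * Keep only the capabilities that every engine reported; anything
	 * a single engine disabled is masked out.
	 */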
i915->caps.scheduler = enabled & ~disabled;
if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
i915->caps.scheduler = 0;
}
const char *intel_engine_class_repr(u8 class)
{
static const char * const uabi_names[] = {
[RENDER_CLASS] = "rcs",
[COPY_ENGINE_CLASS] = "bcs",
[VIDEO_DECODE_CLASS] = "vcs",
[VIDEO_ENHANCEMENT_CLASS] = "vecs",
[OTHER_CLASS] = "other",
[COMPUTE_CLASS] = "ccs",
};
if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
return "xxx";
return uabi_names[class];
}
struct legacy_ring {
struct intel_gt *gt;
u8 class;
u8 instance;
};
static int legacy_ring_idx(const struct legacy_ring *ring)
{
static const struct {
u8 base, max;
} map[] = {
[RENDER_CLASS] = { RCS0, 1 },
[COPY_ENGINE_CLASS] = { BCS0, 1 },
[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
};
if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
return INVALID_ENGINE;
if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
return INVALID_ENGINE;
return map[ring->class].base + ring->instance;
}
static void add_legacy_ring(struct legacy_ring *ring,
struct intel_engine_cs *engine)
{
if (engine->gt != ring->gt || engine->class != ring->class) {
ring->gt = engine->gt;
ring->class = engine->class;
ring->instance = 0;
}
engine->legacy_idx = legacy_ring_idx(ring);
if (engine->legacy_idx != INVALID_ENGINE)
ring->instance++;
}
static void engine_rename(struct intel_engine_cs *engine, const char *name, u16 instance)
{
char old[sizeof(engine->name)];
memcpy(old, engine->name, sizeof(engine->name));
scnprintf(engine->name, sizeof(engine->name), "%s%u", name, instance);
drm_dbg(&engine->i915->drm, "renamed %s to %s\n", old, engine->name);
}
void intel_engines_driver_register(struct drm_i915_private *i915)
{
u16 name_instance, other_instance = 0;
struct legacy_ring ring = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
LIST_HEAD(engines);
sort_engines(i915, &engines);
prev = NULL;
p = &i915->uabi_engines.rb_node;
list_for_each_safe(it, next, &engines) {
struct intel_engine_cs *engine =
container_of(it, typeof(*engine), uabi_list);
if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
if (engine->uabi_class == I915_NO_UABI_CLASS) {
name_instance = other_instance++;
} else {
GEM_BUG_ON(engine->uabi_class >=
ARRAY_SIZE(i915->engine_uabi_class_count));
name_instance =
i915->engine_uabi_class_count[engine->uabi_class]++;
}
engine->uabi_instance = name_instance;
		/*
		 * Replace the internal name with the final user- and
		 * log-facing name.
		 */
engine_rename(engine,
intel_engine_class_repr(engine->class),
name_instance);
if (engine->uabi_class == I915_NO_UABI_CLASS)
continue;
rb_link_node(&engine->uabi_node, prev, p);
rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
GEM_BUG_ON(intel_engine_lookup_user(i915,
engine->uabi_class,
engine->uabi_instance) != engine);
/* Fix up the mapping to match default execbuf::user_map[] */
add_legacy_ring(&ring, engine);
prev = &engine->uabi_node;
p = &prev->rb_right;
}
if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
struct intel_engine_cs *engine;
unsigned int isolation;
int class, inst;
int errors = 0;
for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
engine = intel_engine_lookup_user(i915,
class, inst);
if (!engine) {
pr_err("UABI engine not found for { class:%d, instance:%d }\n",
class, inst);
errors++;
continue;
}
if (engine->uabi_class != class ||
engine->uabi_instance != inst) {
pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
engine->name,
engine->uabi_class,
engine->uabi_instance,
class, inst);
errors++;
continue;
}
}
}
/*
* Make sure that classes with multiple engine instances all
* share the same basic configuration.
*/
isolation = intel_engines_has_context_isolation(i915);
for_each_uabi_engine(engine, i915) {
unsigned int bit = BIT(engine->uabi_class);
unsigned int expected = engine->default_state ? bit : 0;
if ((isolation & bit) != expected) {
pr_err("mismatching default context state for class %d on engine %s\n",
engine->uabi_class, engine->name);
errors++;
}
}
if (drm_WARN(&i915->drm, errors,
"Invalid UABI engine mapping found"))
i915->uabi_engines = RB_ROOT;
}
set_scheduler_caps(i915);
}
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
unsigned int which;
which = 0;
for_each_uabi_engine(engine, i915)
if (engine->default_state)
which |= BIT(engine->uabi_class);
return which;
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* NVMe Over Fabrics Target Passthrough command implementation.
*
* Copyright (c) 2017-2018 Western Digital Corporation or its
* affiliates.
* Copyright (c) 2019-2020, Eideticom Inc.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include "../host/nvme.h"
#include "nvmet.h"
MODULE_IMPORT_NS("NVME_TARGET_PASSTHRU");
/*
* xarray to maintain one passthru subsystem per nvme controller.
*/
static DEFINE_XARRAY(passthru_subsystems);
void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
{
/*
* Multiple command set support can only be declared if the underlying
* controller actually supports it.
*/
if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
ctrl->cap &= ~(1ULL << 43);
}
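/*
 * With clear_ids set, namespace identifiers reported by the passthru
 * controller must not leak through the target: scan the descriptor list
 * and rewrite it so that only the Command Set Identifier descriptor, if
 * present, survives.
 */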
static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
u16 status = NVME_SC_SUCCESS;
int pos, len;
bool csi_seen = false;
void *data;
u8 csi;
if (!ctrl->subsys->clear_ids)
return status;
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!data)
return NVME_SC_INTERNAL;
status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
if (status)
goto out_free;
for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
struct nvme_ns_id_desc *cur = data + pos;
if (cur->nidl == 0)
break;
if (cur->nidt == NVME_NIDT_CSI) {
memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
csi_seen = true;
break;
}
len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
}
memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
if (csi_seen) {
struct nvme_ns_id_desc *cur = data;
cur->nidt = NVME_NIDT_CSI;
cur->nidl = NVME_NIDT_CSI_LEN;
memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
}
status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
out_free:
kfree(data);
return status;
}
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
u16 status = NVME_SC_SUCCESS;
struct nvme_id_ctrl *id;
unsigned int max_hw_sectors;
int page_shift;
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id)
return NVME_SC_INTERNAL;
status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
if (status)
goto out_free;
id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);
	/*
	 * The passthru NVMe driver may have a limit on the number of
	 * segments which depends on the host's memory fragmentation. To
	 * solve this, ensure mdts is limited to the number of pages equal
	 * to the number of segments.
	 */
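	/*
	 * Illustrative example: with 4 KiB pages and max_segments = 128,
	 * max_hw_sectors = 128 << 3 = 1024 sectors (512 KiB); with
	 * MPSMIN = 0 (page_shift = 12) the mdts computed below is
	 * ilog2(1024) + 9 - 12 = 7, i.e. 2^7 * 4 KiB = 512 KiB.
	 */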
max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,
pctrl->max_hw_sectors);
	/*
	 * nvmet_passthru_map_sg is limited to using a single bio, so limit
	 * the mdts based on BIO_MAX_VECS as well.
	 */
max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
max_hw_sectors);
page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
id->acl = 3;
	/*
	 * We export the aerl limit of the fabrics controller; update this
	 * when passthru-based aerl support is added.
	 */
id->aerl = NVMET_ASYNC_EVENTS - 1;
	/* emulate kas, as most PCIe ctrls don't support it */
id->kas = cpu_to_le16(NVMET_KAS);
/* don't support host memory buffer */
id->hmpre = 0;
id->hmmin = 0;
id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
/* don't support fuse commands */
id->fuses = 0;
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->flags & NVMF_KEYED_SGLS)
id->sgls |= cpu_to_le32(1 << 2);
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
	/*
	 * When a passthru controller is set up using the nvme-loop
	 * transport, it will export the passthru ctrl's subsysnqn (of the
	 * PCIe NVMe ctrl) and will fail in the
	 * nvme_init_subsystem()->nvme_active_ctrl() code path in
	 * nvme/host/core.c with a duplicate ctrl subsysnqn. In order to
	 * prevent that, we mask the passthru-ctrl subsysnqn with the
	 * target ctrl subsysnqn.
	 */
memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
/* use fabric id-ctrl values */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
req->port->inline_data_size) / 16);
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
id->msdbd = ctrl->ops->msdbd;
/* Support multipath connections with fabrics */
id->cmic |= 1 << 1;
/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);
status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));
out_free:
kfree(id);
return status;
}
static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
u16 status = NVME_SC_SUCCESS;
struct nvme_id_ns *id;
int i;
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id)
return NVME_SC_INTERNAL;
status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
if (status)
goto out_free;
for (i = 0; i < (id->nlbaf + 1); i++)
if (id->lbaf[i].ms)
memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));
id->flbas = id->flbas & ~(1 << 4);
	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
id->mc = 0;
if (req->sq->ctrl->subsys->clear_ids) {
memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
}
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
out_free:
kfree(id);
return status;
}
static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
struct request *rq = req->p.rq;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
struct nvme_ns *ns = rq->q->queuedata;
u32 effects;
int status;
effects = nvme_passthru_start(ctrl, ns, req->cmd->common.opcode);
status = nvme_execute_rq(rq, false);
if (status == NVME_SC_SUCCESS &&
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
status = nvmet_passthru_override_id_ctrl(req);
break;
case NVME_ID_CNS_NS:
status = nvmet_passthru_override_id_ns(req);
break;
case NVME_ID_CNS_NS_DESC_LIST:
status = nvmet_passthru_override_id_descs(req);
break;
}
	} else if (status < 0) {
		status = NVME_SC_INTERNAL;
	}
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
blk_mq_free_request(rq);
if (effects)
nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
}
static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
blk_status_t blk_status)
{
struct nvmet_req *req = rq->end_io_data;
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, nvme_req(rq)->status);
blk_mq_free_request(rq);
return RQ_END_IO_NONE;
}
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
struct scatterlist *sg;
struct bio *bio;
int i;
if (req->sg_cnt > BIO_MAX_VECS)
return -EINVAL;
if (nvmet_use_inline_bvec(req)) {
bio = &req->p.inline_bio;
bio_init(bio, NULL, req->inline_bvec,
ARRAY_SIZE(req->inline_bvec), req_op(rq));
} else {
bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
GFP_KERNEL);
bio->bi_end_io = bio_put;
}
for_each_sg(req->sg, sg, req->sg_cnt, i) {
if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
sg->offset) < sg->length) {
nvmet_req_bio_put(req, bio);
return -EINVAL;
}
}
blk_rq_bio_prep(rq, bio, req->sg_cnt);
return 0;
}
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct request_queue *q = ctrl->admin_q;
struct nvme_ns *ns = NULL;
struct request *rq = NULL;
unsigned int timeout;
u32 effects;
u16 status;
int ret;
if (likely(req->sq->qid != 0)) {
u32 nsid = le32_to_cpu(req->cmd->common.nsid);
ns = nvme_find_get_ns(ctrl, nsid);
if (unlikely(!ns)) {
pr_err("failed to get passthru ns nsid:%u\n", nsid);
status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
goto out;
}
q = ns->queue;
timeout = nvmet_req_subsys(req)->io_timeout;
} else {
timeout = nvmet_req_subsys(req)->admin_timeout;
}
rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
if (IS_ERR(rq)) {
status = NVME_SC_INTERNAL;
goto out_put_ns;
}
nvme_init_request(rq, req->cmd);
if (timeout)
rq->timeout = timeout;
if (req->sg_cnt) {
ret = nvmet_passthru_map_sg(req, rq);
if (unlikely(ret)) {
status = NVME_SC_INTERNAL;
goto out_put_req;
}
}
/*
* If a command needs post-execution fixups, or there are any
* non-trivial effects, make sure to execute the command synchronously
* in a workqueue so that nvme_passthru_end gets called.
*/
effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
if (req->p.use_workqueue ||
(effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
req->p.rq = rq;
queue_work(nvmet_wq, &req->p.work);
} else {
rq->end_io = nvmet_passthru_req_done;
rq->end_io_data = req;
blk_execute_rq_nowait(rq, false);
}
if (ns)
nvme_put_ns(ns);
return;
out_put_req:
blk_mq_free_request(rq);
out_put_ns:
if (ns)
nvme_put_ns(ns);
out:
nvmet_req_complete(req, status);
}
/*
 * We need to emulate the Set Host Behaviour feature to ensure that the
 * behaviour requested by the target's host matches the behaviour
 * requested by the device's host, and fail otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct nvme_feat_host_behavior *host;
u16 status = NVME_SC_INTERNAL;
int ret;
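	/*
	 * Allocate two copies back to back: host[0] receives the local
	 * controller's current behaviour, host[1] the behaviour requested
	 * by the remote host, so that the two can be compared below.
	 */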
host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
if (!host)
goto out_complete_req;
ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
host, sizeof(*host), NULL);
if (ret)
goto out_free_host;
status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
if (status)
goto out_free_host;
if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
pr_warn("target host has requested different behaviour from the local host\n");
status = NVME_SC_INTERNAL;
}
out_free_host:
kfree(host);
out_complete_req:
nvmet_req_complete(req, status);
}
static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
req->p.use_workqueue = false;
req->execute = nvmet_passthru_execute_cmd;
return NVME_SC_SUCCESS;
}
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
return NVME_SC_INVALID_FIELD;
switch (req->cmd->common.opcode) {
case nvme_cmd_resv_register:
case nvme_cmd_resv_report:
case nvme_cmd_resv_acquire:
case nvme_cmd_resv_release:
/*
* Reservations cannot be supported properly because the
* underlying device has no way of differentiating different
* hosts that connect via fabrics. This could potentially be
* emulated in the future if regular targets grow support for
* this feature.
*/
return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
return nvmet_setup_passthru_command(req);
}
/*
* Only features that are emulated or specifically allowed in the list are
* passed down to the controller. This function implements the allow list for
* both get and set features.
*/
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
switch (le32_to_cpu(req->cmd->features.fid)) {
case NVME_FEAT_ARBITRATION:
case NVME_FEAT_POWER_MGMT:
case NVME_FEAT_LBA_RANGE:
case NVME_FEAT_TEMP_THRESH:
case NVME_FEAT_ERR_RECOVERY:
case NVME_FEAT_VOLATILE_WC:
case NVME_FEAT_WRITE_ATOMIC:
case NVME_FEAT_AUTO_PST:
case NVME_FEAT_TIMESTAMP:
case NVME_FEAT_HCTM:
case NVME_FEAT_NOPSC:
case NVME_FEAT_RRL:
case NVME_FEAT_PLM_CONFIG:
case NVME_FEAT_PLM_WINDOW:
case NVME_FEAT_HOST_BEHAVIOR:
case NVME_FEAT_SANITIZE:
case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
return nvmet_setup_passthru_command(req);
case NVME_FEAT_ASYNC_EVENT:
/* There is no support for forwarding ASYNC events */
case NVME_FEAT_IRQ_COALESCE:
case NVME_FEAT_IRQ_CONFIG:
/* The IRQ settings will not apply to the target controller */
case NVME_FEAT_HOST_MEM_BUF:
/*
* Any HMB that's set will not be passed through and will
* not work as expected
*/
case NVME_FEAT_SW_PROGRESS:
/*
* The Pre-Boot Software Load Count doesn't make much
* sense for a target to export
*/
case NVME_FEAT_RESV_MASK:
case NVME_FEAT_RESV_PERSIST:
/* No reservations, see nvmet_parse_passthru_io_cmd() */
default:
return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
}
}
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
return NVME_SC_INVALID_FIELD;
/*
* Passthru all vendor specific commands
*/
if (req->cmd->common.opcode >= nvme_admin_vendor_start)
return nvmet_setup_passthru_command(req);
switch (req->cmd->common.opcode) {
case nvme_admin_async_event:
req->execute = nvmet_execute_async_event;
return NVME_SC_SUCCESS;
case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep alive cmd, so we
		 * route keep alive to the non-passthru mode. Change this
		 * code when PCIe ctrls with keep alive support become
		 * available.
		 */
req->execute = nvmet_execute_keep_alive;
return NVME_SC_SUCCESS;
case nvme_admin_set_features:
switch (le32_to_cpu(req->cmd->features.fid)) {
case NVME_FEAT_ASYNC_EVENT:
case NVME_FEAT_KATO:
case NVME_FEAT_NUM_QUEUES:
case NVME_FEAT_HOST_ID:
req->execute = nvmet_execute_set_features;
return NVME_SC_SUCCESS;
case NVME_FEAT_HOST_BEHAVIOR:
req->execute = nvmet_passthru_set_host_behaviour;
return NVME_SC_SUCCESS;
default:
return nvmet_passthru_get_set_features(req);
}
break;
case nvme_admin_get_features:
switch (le32_to_cpu(req->cmd->features.fid)) {
case NVME_FEAT_ASYNC_EVENT:
case NVME_FEAT_KATO:
case NVME_FEAT_NUM_QUEUES:
case NVME_FEAT_HOST_ID:
req->execute = nvmet_execute_get_features;
return NVME_SC_SUCCESS;
default:
return nvmet_passthru_get_set_features(req);
}
break;
case nvme_admin_identify:
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_CS_CTRL:
switch (req->cmd->identify.csi) {
case NVME_CSI_ZNS:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
}
return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
case NVME_ID_CNS_CTRL:
case NVME_ID_CNS_NS:
case NVME_ID_CNS_NS_DESC_LIST:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
case NVME_ID_CNS_CS_NS:
switch (req->cmd->identify.csi) {
case NVME_CSI_ZNS:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
}
return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
default:
return nvmet_setup_passthru_command(req);
}
case nvme_admin_get_log_page:
return nvmet_setup_passthru_command(req);
default:
/* Reject commands not in the allowlist above */
return nvmet_report_invalid_opcode(req);
}
}
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
struct nvme_ctrl *ctrl;
struct file *file;
int ret = -EINVAL;
void *old;
mutex_lock(&subsys->lock);
if (!subsys->passthru_ctrl_path)
goto out_unlock;
if (subsys->passthru_ctrl)
goto out_unlock;
if (subsys->nr_namespaces) {
pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
goto out_unlock;
}
file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto out_unlock;
}
ctrl = nvme_ctrl_from_file(file);
if (!ctrl) {
pr_err("failed to open nvme controller %s\n",
subsys->passthru_ctrl_path);
goto out_put_file;
}
old = xa_cmpxchg(&passthru_subsystems, ctrl->instance, NULL,
subsys, GFP_KERNEL);
if (xa_is_err(old)) {
ret = xa_err(old);
goto out_put_file;
}
if (old)
goto out_put_file;
subsys->passthru_ctrl = ctrl;
subsys->ver = ctrl->vs;
if (subsys->ver < NVME_VS(1, 2, 1)) {
pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
NVME_TERTIARY(subsys->ver));
subsys->ver = NVME_VS(1, 2, 1);
}
nvme_get_ctrl(ctrl);
__module_get(subsys->passthru_ctrl->ops->module);
ret = 0;
out_put_file:
filp_close(file, NULL);
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
}
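/*
* Illustrative configfs flow (the paths follow the usual nvmet layout
* and are not taken from this file) that ends up in
* nvmet_passthru_ctrl_enable():
*
*   cd /sys/kernel/config/nvmet/subsystems/testnqn
*   echo -n /dev/nvme0 > passthru/device_path
*   echo 1 > passthru/enable
*
* Enabling takes subsys->lock and performs the checks above: a
* device_path must be set, no passthru controller may already be
* bound, and the subsystem must not also have regular namespaces.
*/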
static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
if (subsys->passthru_ctrl) {
xa_erase(&passthru_subsystems, subsys->passthru_ctrl->instance);
module_put(subsys->passthru_ctrl->ops->module);
nvme_put_ctrl(subsys->passthru_ctrl);
}
subsys->passthru_ctrl = NULL;
subsys->ver = NVMET_DEFAULT_VS;
}
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
mutex_lock(&subsys->lock);
__nvmet_passthru_ctrl_disable(subsys);
mutex_unlock(&subsys->lock);
}
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
mutex_lock(&subsys->lock);
__nvmet_passthru_ctrl_disable(subsys);
mutex_unlock(&subsys->lock);
kfree(subsys->passthru_ctrl_path);
}
// SPDX-License-Identifier: GPL-2.0
/*
* PFSM (Pre-configurable Finite State Machine) driver for TI TPS65224/TPS6594/TPS6593/LP8764 PMICs
*
* Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/tps6594.h>
#include <linux/tps6594_pfsm.h>
#define TPS6594_STARTUP_DEST_MCU_ONLY_VAL 2
#define TPS6594_STARTUP_DEST_ACTIVE_VAL 3
#define TPS6594_STARTUP_DEST_SHIFT 5
#define TPS6594_STARTUP_DEST_MCU_ONLY (TPS6594_STARTUP_DEST_MCU_ONLY_VAL \
<< TPS6594_STARTUP_DEST_SHIFT)
#define TPS6594_STARTUP_DEST_ACTIVE (TPS6594_STARTUP_DEST_ACTIVE_VAL \
<< TPS6594_STARTUP_DEST_SHIFT)
/*
* To update the PMIC firmware, the user must be able to access
* page 0 (user registers) and page 1 (NVM control and configuration).
*/
#define TPS6594_PMIC_MAX_POS 0x200
#define TPS6594_FILE_TO_PFSM(f) container_of((f)->private_data, struct tps6594_pfsm, miscdev)
/**
* struct tps6594_pfsm - device private data structure
*
* @miscdev: misc device infos
* @regmap: regmap for accessing the device registers
* @chip_id: chip identifier of the device
*/
struct tps6594_pfsm {
struct miscdevice miscdev;
struct regmap *regmap;
unsigned long chip_id;
};
static ssize_t tps6594_pfsm_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
loff_t pos = *ppos;
unsigned int val;
int ret;
int i;
if (pos < 0)
return -EINVAL;
if (pos >= TPS6594_PMIC_MAX_POS)
return 0;
if (count > TPS6594_PMIC_MAX_POS - pos)
count = TPS6594_PMIC_MAX_POS - pos;
for (i = 0 ; i < count ; i++) {
ret = regmap_read(pfsm->regmap, pos + i, &val);
if (ret)
return ret;
if (put_user(val, buf + i))
return -EFAULT;
}
*ppos = pos + count;
return count;
}
static ssize_t tps6594_pfsm_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
loff_t pos = *ppos;
char val;
int ret;
int i;
if (pos < 0)
return -EINVAL;
if (pos >= TPS6594_PMIC_MAX_POS || !count)
return 0;
if (count > TPS6594_PMIC_MAX_POS - pos)
count = TPS6594_PMIC_MAX_POS - pos;
for (i = 0 ; i < count ; i++) {
if (get_user(val, buf + i))
return -EFAULT;
ret = regmap_write(pfsm->regmap, pos + i, val);
if (ret)
return ret;
}
*ppos = pos + count;
return count;
}
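/*
* Hypothetical usage sketch: the read()/write() handlers above map
* file offsets 0x0-0x1FF straight onto PMIC register addresses, one
* byte per register. Dumping the first page from a shell could look
* like this (the device name and I2C address 0x48 are examples only,
* following the "pfsm-%ld-0x%02x" pattern set in probe):
*
*   dd if=/dev/pfsm-0-0x48 bs=1 count=256 | hexdump -C
*/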
static int tps6594_pfsm_configure_ret_trig(struct regmap *regmap, u8 gpio_ret, u8 ddr_ret)
{
int ret;
if (gpio_ret)
ret = regmap_set_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(5) | TPS6594_BIT_TRIGGER_I2C(6));
else
ret = regmap_clear_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(5) | TPS6594_BIT_TRIGGER_I2C(6));
if (ret)
return ret;
if (ddr_ret)
ret = regmap_set_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(7));
else
ret = regmap_clear_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(7));
return ret;
}
static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
struct pmic_state_opt state_opt;
void __user *argp = (void __user *)arg;
unsigned int regmap_reg, mask;
int ret = -ENOIOCTLCMD;
switch (cmd) {
case PMIC_GOTO_STANDBY:
/* Disable LP mode on TPS6594 Family PMIC */
if (pfsm->chip_id != TPS65224) {
ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
TPS6594_BIT_LP_STANDBY_SEL);
if (ret)
return ret;
}
/* Force trigger */
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0));
break;
case PMIC_GOTO_LP_STANDBY:
/* TPS65224 does not support LP STANDBY */
if (pfsm->chip_id == TPS65224)
return ret;
/* Enable LP mode */
ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
TPS6594_BIT_LP_STANDBY_SEL);
if (ret)
return ret;
/* Force trigger */
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0));
break;
case PMIC_UPDATE_PGM:
/* Force trigger */
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(3), TPS6594_BIT_TRIGGER_I2C(3));
break;
case PMIC_SET_ACTIVE_STATE:
/* Modify NSLEEP1-2 bits */
ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
TPS6594_BIT_NSLEEP1B | TPS6594_BIT_NSLEEP2B);
break;
case PMIC_SET_MCU_ONLY_STATE:
/* TPS65224 does not support MCU_ONLY_STATE */
if (pfsm->chip_id == TPS65224)
return ret;
if (copy_from_user(&state_opt, argp, sizeof(state_opt)))
return -EFAULT;
/* Configure retention triggers */
ret = tps6594_pfsm_configure_ret_trig(pfsm->regmap, state_opt.gpio_retention,
state_opt.ddr_retention);
if (ret)
return ret;
/* Modify NSLEEP1-2 bits */
ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
TPS6594_BIT_NSLEEP1B);
if (ret)
return ret;
ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
TPS6594_BIT_NSLEEP2B);
break;
case PMIC_SET_RETENTION_STATE:
if (copy_from_user(&state_opt, argp, sizeof(state_opt)))
return -EFAULT;
/* Configure wake-up destination */
if (pfsm->chip_id == TPS65224) {
regmap_reg = TPS65224_REG_STARTUP_CTRL;
mask = TPS65224_MASK_STARTUP_DEST;
} else {
regmap_reg = TPS6594_REG_RTC_CTRL_2;
mask = TPS6594_MASK_STARTUP_DEST;
}
if (state_opt.mcu_only_startup_dest)
ret = regmap_write_bits(pfsm->regmap, regmap_reg,
mask, TPS6594_STARTUP_DEST_MCU_ONLY);
else
ret = regmap_write_bits(pfsm->regmap, regmap_reg,
mask, TPS6594_STARTUP_DEST_ACTIVE);
if (ret)
return ret;
/* Configure retention triggers */
ret = tps6594_pfsm_configure_ret_trig(pfsm->regmap, state_opt.gpio_retention,
state_opt.ddr_retention);
if (ret)
return ret;
/* Modify NSLEEP1-2 bits */
ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
pfsm->chip_id == TPS65224 ?
TPS6594_BIT_NSLEEP1B : TPS6594_BIT_NSLEEP2B);
break;
}
return ret;
}
static const struct file_operations tps6594_pfsm_fops = {
.owner = THIS_MODULE,
.llseek = generic_file_llseek,
.read = tps6594_pfsm_read,
.write = tps6594_pfsm_write,
.unlocked_ioctl = tps6594_pfsm_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
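/*
* Hypothetical userspace sketch for the ioctl interface declared in
* <linux/tps6594_pfsm.h>; the device path and retention settings are
* examples, not fixed values:
*
*   int fd = open("/dev/pfsm-0-0x48", O_RDWR);
*   struct pmic_state_opt opt = {
*           .gpio_retention = 1,
*           .ddr_retention = 1,
*           .mcu_only_startup_dest = 0,
*   };
*   if (ioctl(fd, PMIC_SET_RETENTION_STATE, &opt))
*           perror("PMIC_SET_RETENTION_STATE");
*   close(fd);
*/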
static irqreturn_t tps6594_pfsm_isr(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
int i;
for (i = 0 ; i < pdev->num_resources ; i++) {
if (irq == platform_get_irq_byname(pdev, pdev->resource[i].name)) {
dev_err(pdev->dev.parent, "%s event detected\n", pdev->resource[i].name);
return IRQ_HANDLED;
}
}
return IRQ_NONE;
}
static int tps6594_pfsm_probe(struct platform_device *pdev)
{
struct tps6594_pfsm *pfsm;
struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
int irq;
int ret;
int i;
pfsm = devm_kzalloc(dev, sizeof(struct tps6594_pfsm), GFP_KERNEL);
if (!pfsm)
return -ENOMEM;
pfsm->regmap = tps->regmap;
pfsm->miscdev.minor = MISC_DYNAMIC_MINOR;
pfsm->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x",
tps->chip_id, tps->reg);
pfsm->miscdev.fops = &tps6594_pfsm_fops;
pfsm->miscdev.parent = dev->parent;
pfsm->chip_id = tps->chip_id;
for (i = 0 ; i < pdev->num_resources ; i++) {
irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(dev, irq, NULL,
tps6594_pfsm_isr, IRQF_ONESHOT,
pdev->resource[i].name, pdev);
if (ret)
return dev_err_probe(dev, ret, "Failed to request irq\n");
}
platform_set_drvdata(pdev, pfsm);
return misc_register(&pfsm->miscdev);
}
static void tps6594_pfsm_remove(struct platform_device *pdev)
{
struct tps6594_pfsm *pfsm = platform_get_drvdata(pdev);
misc_deregister(&pfsm->miscdev);
}
static struct platform_driver tps6594_pfsm_driver = {
.driver = {
.name = "tps6594-pfsm",
},
.probe = tps6594_pfsm_probe,
.remove = tps6594_pfsm_remove,
};
module_platform_driver(tps6594_pfsm_driver);
MODULE_ALIAS("platform:tps6594-pfsm");
MODULE_AUTHOR("Julien Panis <[email protected]>");
MODULE_DESCRIPTION("TPS6594 Pre-configurable Finite State Machine Driver");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/fs_ttc.h"
#define MLX5_TTC_MAX_NUM_GROUPS 4
#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1)
struct mlx5_fs_ttc_groups {
bool use_l4_type;
int num_groups;
int group_size[MLX5_TTC_MAX_NUM_GROUPS];
};
static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
{
int i, sz = 0;
for (i = 0; i < groups->num_groups; i++)
sz += groups->group_size[i];
return sz;
}
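/*
* Worked example: for the default outer ttc_groups below this is
* BIT(3) + MLX5_NUM_TUNNEL_TT + BIT(1) + BIT(0) = 8 + 6 + 2 + 1 = 17
* flow table entries, the six tunnel types being the ones listed in
* ttc_tunnel_rules[].
*/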
/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
int num_groups;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
struct mlx5_ttc_rule rules[MLX5_NUM_TT];
struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
};
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
{
return ttc->t;
}
static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
{
int i;
for (i = 0; i < MLX5_NUM_TT; i++) {
if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
mlx5_del_flow_rules(ttc->rules[i].rule);
ttc->rules[i].rule = NULL;
}
}
for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) {
if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
mlx5_del_flow_rules(ttc->tunnel_rules[i]);
ttc->tunnel_rules[i] = NULL;
}
}
}
struct mlx5_etype_proto {
u16 etype;
u8 proto;
};
static struct mlx5_etype_proto ttc_rules[] = {
[MLX5_TT_IPV4_TCP] = {
.etype = ETH_P_IP,
.proto = IPPROTO_TCP,
},
[MLX5_TT_IPV6_TCP] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_TCP,
},
[MLX5_TT_IPV4_UDP] = {
.etype = ETH_P_IP,
.proto = IPPROTO_UDP,
},
[MLX5_TT_IPV6_UDP] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_UDP,
},
[MLX5_TT_IPV4_IPSEC_AH] = {
.etype = ETH_P_IP,
.proto = IPPROTO_AH,
},
[MLX5_TT_IPV6_IPSEC_AH] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_AH,
},
[MLX5_TT_IPV4_IPSEC_ESP] = {
.etype = ETH_P_IP,
.proto = IPPROTO_ESP,
},
[MLX5_TT_IPV6_IPSEC_ESP] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_ESP,
},
[MLX5_TT_IPV4] = {
.etype = ETH_P_IP,
.proto = 0,
},
[MLX5_TT_IPV6] = {
.etype = ETH_P_IPV6,
.proto = 0,
},
[MLX5_TT_ANY] = {
.etype = 0,
.proto = 0,
},
};
static struct mlx5_etype_proto ttc_tunnel_rules[] = {
[MLX5_TT_IPV4_GRE] = {
.etype = ETH_P_IP,
.proto = IPPROTO_GRE,
},
[MLX5_TT_IPV6_GRE] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_GRE,
},
[MLX5_TT_IPV4_IPIP] = {
.etype = ETH_P_IP,
.proto = IPPROTO_IPIP,
},
[MLX5_TT_IPV6_IPIP] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_IPIP,
},
[MLX5_TT_IPV4_IPV6] = {
.etype = ETH_P_IP,
.proto = IPPROTO_IPV6,
},
[MLX5_TT_IPV6_IPV6] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_IPV6,
},
};
enum TTC_GROUP_TYPE {
TTC_GROUPS_DEFAULT = 0,
TTC_GROUPS_USE_L4_TYPE = 1,
};
static const struct mlx5_fs_ttc_groups ttc_groups[] = {
[TTC_GROUPS_DEFAULT] = {
.num_groups = 3,
.group_size = {
BIT(3) + MLX5_NUM_TUNNEL_TT,
BIT(1),
BIT(0),
},
},
[TTC_GROUPS_USE_L4_TYPE] = {
.use_l4_type = true,
.num_groups = 4,
.group_size = {
MLX5_TTC_GROUP_TCPUDP_SIZE,
BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
BIT(1),
BIT(0),
},
},
};
static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
[TTC_GROUPS_DEFAULT] = {
.num_groups = 3,
.group_size = {
BIT(3),
BIT(1),
BIT(0),
},
},
[TTC_GROUPS_USE_L4_TYPE] = {
.use_l4_type = true,
.num_groups = 4,
.group_size = {
MLX5_TTC_GROUP_TCPUDP_SIZE,
BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE,
BIT(1),
BIT(0),
},
},
};
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
return ttc_tunnel_rules[tt].proto;
}
static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev,
u8 proto_type)
{
switch (proto_type) {
case IPPROTO_GRE:
return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
case IPPROTO_IPIP:
case IPPROTO_IPV6:
return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
default:
return false;
}
}
static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{
int tt;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
if (mlx5_tunnel_proto_supported_rx(mdev,
ttc_tunnel_rules[tt].proto))
return true;
}
return false;
}
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
return (mlx5_tunnel_any_rx_proto_supported(mdev) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
ft_field_support.inner_ip_version));
}
static u8 mlx5_etype_to_ipv(u16 ethertype)
{
if (ethertype == ETH_P_IP)
return 4;
if (ethertype == ETH_P_IPV6)
return 6;
return 0;
}
static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
u8 proto, bool use_l4_type)
{
int l4_type;
if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
if (proto == IPPROTO_TCP)
l4_type = MLX5_PACKET_L4_TYPE_TCP;
else
l4_type = MLX5_PACKET_L4_TYPE_UDP;
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type);
} else {
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
}
}
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest, u16 etype, u8 proto,
bool use_l4_type)
{
int match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
ft_field_support.outer_ip_version);
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
u8 ipv;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
spec->match_criteria,
outer_headers),
MLX5_ADDR_OF(fte_match_param,
spec->match_value,
outer_headers),
proto, use_l4_type);
}
ipv = mlx5_etype_to_ipv(etype);
if (match_ipv_outer && ipv) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
} else if (etype) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(dev, "%s: add rule failed\n", __func__);
}
kvfree(spec);
return err ? ERR_PTR(err) : rule;
}
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
struct ttc_params *params,
struct mlx5_ttc_table *ttc,
bool use_l4_type)
{
struct mlx5_flow_handle **trules;
struct mlx5_ttc_rule *rules;
struct mlx5_flow_table *ft;
int tt;
int err;
ft = ttc->t;
rules = ttc->rules;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto,
use_l4_type);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
goto del_rules;
}
rule->default_dest = params->dests[tt];
}
if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev))
return 0;
trules = ttc->tunnel_rules;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
if (!mlx5_tunnel_proto_supported_rx(dev,
ttc_tunnel_rules[tt].proto))
continue;
if (test_bit(tt, params->ignore_tunnel_dests))
continue;
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
ttc_tunnel_rules[tt].proto,
use_l4_type);
if (IS_ERR(trules[tt])) {
err = PTR_ERR(trules[tt]);
trules[tt] = NULL;
goto del_rules;
}
}
return 0;
del_rules:
mlx5_cleanup_ttc_rules(ttc);
return err;
}
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
bool use_ipv,
const struct mlx5_fs_ttc_groups *groups)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
u32 *in;
int err;
u8 *mc;
ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
if (!ttc->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ttc->g);
ttc->g = NULL;
return -ENOMEM;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
if (use_ipv)
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
else
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
/* TCP UDP group */
if (groups->use_l4_type) {
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0);
}
/* L4 Group */
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
/* L3 Group */
MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
/* Any Group */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
kvfree(in);
return 0;
err:
err = PTR_ERR(ttc->g[ttc->num_groups]);
ttc->g[ttc->num_groups] = NULL;
kvfree(in);
return err;
}
static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest,
u16 etype, u8 proto, bool use_l4_type)
{
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
u8 ipv;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
ipv = mlx5_etype_to_ipv(etype);
if (etype && ipv) {
spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
}
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
spec->match_criteria,
inner_headers),
MLX5_ADDR_OF(fte_match_param,
spec->match_value,
inner_headers),
proto, use_l4_type);
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__);
}
kvfree(spec);
return err ? ERR_PTR(err) : rule;
}
static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
struct ttc_params *params,
struct mlx5_ttc_table *ttc,
bool use_l4_type)
{
struct mlx5_ttc_rule *rules;
struct mlx5_flow_table *ft;
int err;
int tt;
ft = ttc->t;
rules = ttc->rules;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
&params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto,
use_l4_type);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
goto del_rules;
}
rule->default_dest = params->dests[tt];
}
return 0;
del_rules:
mlx5_cleanup_ttc_rules(ttc);
return err;
}
static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc,
const struct mlx5_fs_ttc_groups *groups)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
u32 *in;
int err;
u8 *mc;
ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
if (!ttc->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ttc->g);
ttc->g = NULL;
return -ENOMEM;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
/* TCP UDP group */
if (groups->use_l4_type) {
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0);
}
/* L4 Group */
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
/* L3 Group */
MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
/* Any Group */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
goto err;
ttc->num_groups++;
kvfree(in);
return 0;
err:
err = PTR_ERR(ttc->g[ttc->num_groups]);
ttc->g[ttc->num_groups] = NULL;
kvfree(in);
return err;
}
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
struct ttc_params *params)
{
const struct mlx5_fs_ttc_groups *groups;
struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
bool use_l4_type;
int err;
ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
if (!ttc)
return ERR_PTR(-ENOMEM);
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
break;
case MLX5_FLOW_NAMESPACE_KERNEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
break;
default:
return ERR_PTR(-EINVAL);
}
ns = mlx5_get_flow_namespace(dev, params->ns_type);
groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&inner_ttc_groups[TTC_GROUPS_DEFAULT];
WARN_ON_ONCE(params->ft_attr.max_fte);
params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
kvfree(ttc);
return ERR_PTR(err);
}
err = mlx5_create_inner_ttc_table_groups(ttc, groups);
if (err)
goto destroy_ft;
err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type);
if (err)
goto destroy_ft;
return ttc;
destroy_ft:
mlx5_destroy_ttc_table(ttc);
return ERR_PTR(err);
}
void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
{
int i;
mlx5_cleanup_ttc_rules(ttc);
for (i = ttc->num_groups - 1; i >= 0; i--) {
if (!IS_ERR_OR_NULL(ttc->g[i]))
mlx5_destroy_flow_group(ttc->g[i]);
ttc->g[i] = NULL;
}
kfree(ttc->g);
mlx5_destroy_flow_table(ttc->t);
kvfree(ttc);
}
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
struct ttc_params *params)
{
bool match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
ft_field_support.outer_ip_version);
const struct mlx5_fs_ttc_groups *groups;
struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
bool use_l4_type;
int err;
ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
if (!ttc)
return ERR_PTR(-ENOMEM);
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
break;
case MLX5_FLOW_NAMESPACE_KERNEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
break;
default:
return ERR_PTR(-EINVAL);
}
ns = mlx5_get_flow_namespace(dev, params->ns_type);
groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&ttc_groups[TTC_GROUPS_DEFAULT];
WARN_ON_ONCE(params->ft_attr.max_fte);
params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
kvfree(ttc);
return ERR_PTR(err);
}
err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
if (err)
goto destroy_ft;
err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type);
if (err)
goto destroy_ft;
return ttc;
destroy_ft:
mlx5_destroy_ttc_table(ttc);
return ERR_PTR(err);
}
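/*
* Minimal caller sketch (illustrative, not lifted from a particular
* driver): fill ttc_params with a namespace type and per-traffic-type
* destinations, then create and eventually destroy the table. The
* tirn[] array of TIR numbers is assumed to be caller-provided.
*
*   struct ttc_params params = {};
*   struct mlx5_ttc_table *ttc;
*   int tt;
*
*   params.ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
*   for (tt = 0; tt < MLX5_NUM_TT; tt++) {
*           params.dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
*           params.dests[tt].tir_num = tirn[tt];
*   }
*   ttc = mlx5_create_ttc_table(mdev, &params);
*   if (IS_ERR(ttc))
*           return PTR_ERR(ttc);
*   ...
*   mlx5_destroy_ttc_table(ttc);
*/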
int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
struct mlx5_flow_destination *new_dest)
{
return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest,
NULL);
}
struct mlx5_flow_destination
mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
enum mlx5_traffic_types type)
{
struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest;
WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
"TTC[%d] default dest is not setup yet", type);
return *dest;
}
int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
enum mlx5_traffic_types type)
{
struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type);
return mlx5_ttc_fwd_dest(ttc, type, &dest);
}
// SPDX-License-Identifier: GPL-2.0+
//
// max77802.c - Regulator driver for the Maxim 77802
//
// Copyright (C) 2013-2014 Google, Inc
// Simon Glass <[email protected]>
//
// Copyright (C) 2012 Samsung Electronics
// Chiwoong Byun <[email protected]>
// Jonghwa Lee <[email protected]>
//
// This driver is based on max8997.c
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/max77686.h>
#include <linux/mfd/max77686-private.h>
#include <dt-bindings/regulator/maxim,max77802.h>
/* Default ramp delay in case it is not manually set */
#define MAX77802_RAMP_DELAY 100000 /* uV/us */
#define MAX77802_OPMODE_SHIFT_LDO 6
#define MAX77802_OPMODE_BUCK234_SHIFT 4
#define MAX77802_OPMODE_MASK 0x3
#define MAX77802_VSEL_MASK 0x3F
#define MAX77802_DVS_VSEL_MASK 0xFF
#define MAX77802_RAMP_RATE_MASK_2BIT 0xC0
#define MAX77802_RAMP_RATE_SHIFT_2BIT 6
#define MAX77802_RAMP_RATE_MASK_4BIT 0xF0
#define MAX77802_RAMP_RATE_SHIFT_4BIT 4
#define MAX77802_STATUS_OFF 0x0
#define MAX77802_OFF_PWRREQ 0x1
#define MAX77802_LP_PWRREQ 0x2
static const unsigned int max77802_buck234_ramp_table[] = {
12500,
25000,
50000,
100000,
};
static const unsigned int max77802_buck16_ramp_table[] = {
1000, 2000, 3030, 4000,
5000, 5880, 7140, 8330,
9090, 10000, 11110, 12500,
16670, 25000, 50000, 100000,
};
struct max77802_regulator_prv {
/* Array indexed by regulator id */
unsigned int opmode[MAX77802_REG_MAX];
};
static inline unsigned int max77802_map_mode(unsigned int mode)
{
return mode == MAX77802_OPMODE_NORMAL ?
REGULATOR_MODE_NORMAL : REGULATOR_MODE_STANDBY;
}
static int max77802_get_opmode_shift(int id)
{
if (id == MAX77802_BUCK1 || (id >= MAX77802_BUCK5 &&
id <= MAX77802_BUCK10))
return 0;
if (id >= MAX77802_BUCK2 && id <= MAX77802_BUCK4)
return MAX77802_OPMODE_BUCK234_SHIFT;
if (id >= MAX77802_LDO1 && id <= MAX77802_LDO35)
return MAX77802_OPMODE_SHIFT_LDO;
return -EINVAL;
}
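/*
* Worked example: for BUCK2 this returns
* MAX77802_OPMODE_BUCK234_SHIFT (4), so the two MAX77802_OPMODE_MASK
* bits land at bits [5:4] of the control register, matching the
* MAX77802_OPMODE_MASK << MAX77802_OPMODE_BUCK234_SHIFT enable_mask
* used in the BUCK2-4 descriptors below.
*/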
/**
* max77802_set_suspend_disable - Disable the regulator during system suspend
* @rdev: regulator to mark as disabled
*
* All regulators except LDO 1, 3, 20 and 21 support OFF by PWRREQ.
* Configure the regulator so the PMIC will turn it OFF during system suspend.
*/
static int max77802_set_suspend_disable(struct regulator_dev *rdev)
{
unsigned int val = MAX77802_OFF_PWRREQ;
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
int shift = max77802_get_opmode_shift(id);
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
max77802->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
}
/*
* Some LDOs support Low Power Mode while the system is running.
*
* LDOs 1, 3, 20, 21.
*/
static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
unsigned int val;
int shift = max77802_get_opmode_shift(id);
switch (mode) {
case REGULATOR_MODE_STANDBY:
val = MAX77802_OPMODE_LP; /* ON in Low Power Mode */
break;
case REGULATOR_MODE_NORMAL:
val = MAX77802_OPMODE_NORMAL; /* ON in Normal Mode */
break;
default:
dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
rdev->desc->name, mode);
return -EINVAL;
}
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
max77802->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
}
static unsigned max77802_get_mode(struct regulator_dev *rdev)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
return max77802_map_mode(max77802->opmode[id]);
}
/**
* max77802_set_suspend_mode - set regulator opmode when the system is suspended
* @rdev: regulator to change mode
* @mode: operating mode to be set
*
* Will set the operating mode for the regulators during system suspend.
* This function is valid for the three different enable control logics:
*
* Enable Control Logic1 by PWRREQ (BUCK 2-4 and LDOs 2, 4-19, 22-35)
* Enable Control Logic2 by PWRREQ (LDOs 1, 20, 21)
* Enable Control Logic3 by PWRREQ (LDO 3)
*
* If setting the regulator mode fails, the function only warns and does
* not return a negative error number, so that the regulator core keeps
* setting the operating mode for the remaining regulators.
*/
static int max77802_set_suspend_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
unsigned int val;
int shift = max77802_get_opmode_shift(id);
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
/*
* If the regulator has been disabled for suspend
* then it is invalid to try setting a suspend mode.
*/
if (max77802->opmode[id] == MAX77802_OFF_PWRREQ) {
dev_warn(&rdev->dev, "%s: is disabled, mode: 0x%x not set\n",
rdev->desc->name, mode);
return 0;
}
switch (mode) {
case REGULATOR_MODE_STANDBY:
/*
* If the regulator opmode is Normal then switch it to
* ON in Low Power Mode by PWRREQ. If the mode is
* already Low Power then no action is required.
*/
if (max77802->opmode[id] == MAX77802_OPMODE_NORMAL)
val = MAX77802_LP_PWRREQ;
else
return 0;
break;
case REGULATOR_MODE_NORMAL:
/*
* If the regulator operating mode is Low Power then
* normal is not a valid opmode in suspend. If the
* mode is already normal then no action is required.
*/
if (max77802->opmode[id] == MAX77802_OPMODE_LP)
dev_warn(&rdev->dev, "%s: in Low Power: 0x%x invalid\n",
rdev->desc->name, mode);
return 0;
default:
dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n",
rdev->desc->name, mode);
return -EINVAL;
}
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
}
static int max77802_enable(struct regulator_dev *rdev)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
unsigned int id = rdev_get_id(rdev);
int shift = max77802_get_opmode_shift(id);
if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
return -EINVAL;
if (max77802->opmode[id] == MAX77802_OFF_PWRREQ)
max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask,
max77802->opmode[id] << shift);
}
/*
* LDOs 2, 4-19, 22-35
*/
static const struct regulator_ops max77802_ldo_ops_logic1 = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_disable = max77802_set_suspend_disable,
.set_suspend_mode = max77802_set_suspend_mode,
};
/*
* LDOs 1, 20, 21, 3
*/
static const struct regulator_ops max77802_ldo_ops_logic2 = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_mode = max77802_set_mode,
.get_mode = max77802_get_mode,
.set_suspend_mode = max77802_set_suspend_mode,
};
/* BUCKS 1, 6 */
static const struct regulator_ops max77802_buck_16_dvs_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77802_set_suspend_disable,
};
/* BUCKs 2-4 */
static const struct regulator_ops max77802_buck_234_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = regulator_set_ramp_delay_regmap,
.set_suspend_disable = max77802_set_suspend_disable,
.set_suspend_mode = max77802_set_suspend_mode,
};
/* BUCKs 5, 7-10 */
static const struct regulator_ops max77802_buck_dvs_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
.enable = max77802_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_suspend_disable = max77802_set_suspend_disable,
};
/* LDOs 3-7, 9-14, 18-26, 28, 29, 32-34 */
#define regulator_77802_desc_p_ldo(num, supply, log) { \
.name = "LDO"#num, \
.of_match = of_match_ptr("LDO"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_LDO##num, \
.supply_name = "inl"#supply, \
.ops = &max77802_ldo_ops_logic##log, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 800000, \
.uV_step = 50000, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 6, \
.vsel_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
.vsel_mask = MAX77802_VSEL_MASK, \
.enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
.enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \
.of_map_mode = max77802_map_mode, \
}
/* LDOs 1, 2, 8, 15, 17, 27, 30, 35 */
#define regulator_77802_desc_n_ldo(num, supply, log) { \
.name = "LDO"#num, \
.of_match = of_match_ptr("LDO"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_LDO##num, \
.supply_name = "inl"#supply, \
.ops = &max77802_ldo_ops_logic##log, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 800000, \
.uV_step = 25000, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 6, \
.vsel_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
.vsel_mask = MAX77802_VSEL_MASK, \
.enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \
.enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \
.of_map_mode = max77802_map_mode, \
}
/* BUCKs 1, 6 */
#define regulator_77802_desc_16_buck(num) { \
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_BUCK##num, \
.supply_name = "inb"#num, \
.ops = &max77802_buck_16_dvs_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 612500, \
.uV_step = 6250, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 8, \
.vsel_reg = MAX77802_REG_BUCK ## num ## DVS1, \
.vsel_mask = MAX77802_DVS_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK ## num ## CTRL, \
.enable_mask = MAX77802_OPMODE_MASK, \
.ramp_reg = MAX77802_REG_BUCK ## num ## CTRL, \
.ramp_mask = MAX77802_RAMP_RATE_MASK_4BIT, \
.ramp_delay_table = max77802_buck16_ramp_table, \
.n_ramp_values = ARRAY_SIZE(max77802_buck16_ramp_table), \
.of_map_mode = max77802_map_mode, \
}
/* BUCKS 2-4 */
#define regulator_77802_desc_234_buck(num) { \
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_BUCK##num, \
.supply_name = "inb"#num, \
.ops = &max77802_buck_234_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 600000, \
.uV_step = 6250, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 0x91, \
.vsel_reg = MAX77802_REG_BUCK ## num ## DVS1, \
.vsel_mask = MAX77802_DVS_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
.enable_mask = MAX77802_OPMODE_MASK << \
MAX77802_OPMODE_BUCK234_SHIFT, \
.ramp_reg = MAX77802_REG_BUCK ## num ## CTRL1, \
.ramp_mask = MAX77802_RAMP_RATE_MASK_2BIT, \
.ramp_delay_table = max77802_buck234_ramp_table, \
.n_ramp_values = ARRAY_SIZE(max77802_buck234_ramp_table), \
.of_map_mode = max77802_map_mode, \
}
/* BUCK 5 */
#define regulator_77802_desc_buck5(num) { \
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_BUCK##num, \
.supply_name = "inb"#num, \
.ops = &max77802_buck_dvs_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 750000, \
.uV_step = 50000, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 6, \
.vsel_reg = MAX77802_REG_BUCK5OUT, \
.vsel_mask = MAX77802_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK5CTRL, \
.enable_mask = MAX77802_OPMODE_MASK, \
.of_map_mode = max77802_map_mode, \
}
/* BUCKs 7-10 */
#define regulator_77802_desc_buck7_10(num) { \
.name = "BUCK"#num, \
.of_match = of_match_ptr("BUCK"#num), \
.regulators_node = of_match_ptr("regulators"), \
.id = MAX77802_BUCK##num, \
.supply_name = "inb"#num, \
.ops = &max77802_buck_dvs_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = 750000, \
.uV_step = 50000, \
.ramp_delay = MAX77802_RAMP_DELAY, \
.n_voltages = 1 << 6, \
.vsel_reg = MAX77802_REG_BUCK7OUT + (num - 7) * 3, \
.vsel_mask = MAX77802_VSEL_MASK, \
.enable_reg = MAX77802_REG_BUCK7CTRL + (num - 7) * 3, \
.enable_mask = MAX77802_OPMODE_MASK, \
.of_map_mode = max77802_map_mode, \
}
static const struct regulator_desc regulators[] = {
regulator_77802_desc_16_buck(1),
regulator_77802_desc_234_buck(2),
regulator_77802_desc_234_buck(3),
regulator_77802_desc_234_buck(4),
regulator_77802_desc_buck5(5),
regulator_77802_desc_16_buck(6),
regulator_77802_desc_buck7_10(7),
regulator_77802_desc_buck7_10(8),
regulator_77802_desc_buck7_10(9),
regulator_77802_desc_buck7_10(10),
regulator_77802_desc_n_ldo(1, 10, 2),
regulator_77802_desc_n_ldo(2, 10, 1),
regulator_77802_desc_p_ldo(3, 3, 2),
regulator_77802_desc_p_ldo(4, 6, 1),
regulator_77802_desc_p_ldo(5, 3, 1),
regulator_77802_desc_p_ldo(6, 3, 1),
regulator_77802_desc_p_ldo(7, 3, 1),
regulator_77802_desc_n_ldo(8, 1, 1),
regulator_77802_desc_p_ldo(9, 5, 1),
regulator_77802_desc_p_ldo(10, 4, 1),
regulator_77802_desc_p_ldo(11, 4, 1),
regulator_77802_desc_p_ldo(12, 9, 1),
regulator_77802_desc_p_ldo(13, 4, 1),
regulator_77802_desc_p_ldo(14, 4, 1),
regulator_77802_desc_n_ldo(15, 1, 1),
regulator_77802_desc_n_ldo(17, 2, 1),
regulator_77802_desc_p_ldo(18, 7, 1),
regulator_77802_desc_p_ldo(19, 5, 1),
regulator_77802_desc_p_ldo(20, 7, 2),
regulator_77802_desc_p_ldo(21, 6, 2),
regulator_77802_desc_p_ldo(23, 9, 1),
regulator_77802_desc_p_ldo(24, 6, 1),
regulator_77802_desc_p_ldo(25, 9, 1),
regulator_77802_desc_p_ldo(26, 9, 1),
regulator_77802_desc_n_ldo(27, 2, 1),
regulator_77802_desc_p_ldo(28, 7, 1),
regulator_77802_desc_p_ldo(29, 7, 1),
regulator_77802_desc_n_ldo(30, 2, 1),
regulator_77802_desc_p_ldo(32, 9, 1),
regulator_77802_desc_p_ldo(33, 6, 1),
regulator_77802_desc_p_ldo(34, 9, 1),
regulator_77802_desc_n_ldo(35, 2, 1),
};
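/*
* Hedged DT sketch: the descriptors above are matched by node name
* under a "regulators" child (via of_match/regulators_node), and
* of_map_mode translates modes from
* <dt-bindings/regulator/maxim,max77802.h>. A board file might contain
* something like this (voltages and the initial mode are illustrative):
*
*   regulators {
*           ldo1_reg: LDO1 {
*                   regulator-name = "vdd_1v0";
*                   regulator-min-microvolt = <1000000>;
*                   regulator-max-microvolt = <1000000>;
*                   regulator-initial-mode = <MAX77802_OPMODE_LP>;
*           };
*   };
*/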
static int max77802_pmic_probe(struct platform_device *pdev)
{
struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max77802_regulator_prv *max77802;
int i, val;
struct regulator_config config = { };
max77802 = devm_kzalloc(&pdev->dev,
sizeof(struct max77802_regulator_prv),
GFP_KERNEL);
if (!max77802)
return -ENOMEM;
config.dev = iodev->dev;
config.regmap = iodev->regmap;
config.driver_data = max77802;
platform_set_drvdata(pdev, max77802);
for (i = 0; i < MAX77802_REG_MAX; i++) {
struct regulator_dev *rdev;
unsigned int id = regulators[i].id;
int shift = max77802_get_opmode_shift(id);
int ret;
ret = regmap_read(iodev->regmap, regulators[i].enable_reg, &val);
if (ret < 0) {
dev_warn(&pdev->dev,
"cannot read current mode for %d\n", i);
val = MAX77802_OPMODE_NORMAL;
} else {
val = val >> shift & MAX77802_OPMODE_MASK;
}
/*
* If the regulator is disabled and the system warm rebooted,
* the hardware reports OFF as the regulator operating mode.
* Default to operating mode NORMAL in that case.
*/
if (id < ARRAY_SIZE(max77802->opmode)) {
if (val == MAX77802_STATUS_OFF)
max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
else
max77802->opmode[id] = val;
}
rdev = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
dev_err(&pdev->dev,
"regulator init failed for %d: %d\n", i, ret);
return ret;
}
}
return 0;
}
static const struct platform_device_id max77802_pmic_id[] = {
{"max77802-pmic", 0},
{ },
};
MODULE_DEVICE_TABLE(platform, max77802_pmic_id);
static struct platform_driver max77802_pmic_driver = {
.driver = {
.name = "max77802-pmic",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = max77802_pmic_probe,
.id_table = max77802_pmic_id,
};
module_platform_driver(max77802_pmic_driver);
MODULE_DESCRIPTION("MAXIM 77802 Regulator Driver");
MODULE_AUTHOR("Simon Glass <[email protected]>");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_panel.h"
struct panel_module {
struct tilcdc_module base;
struct tilcdc_panel_info *info;
struct display_timings *timings;
struct backlight_device *backlight;
struct gpio_desc *enable_gpio;
};
#define to_panel_module(x) container_of(x, struct panel_module, base)
/*
* Encoder:
*/
struct panel_encoder {
struct drm_encoder base;
struct panel_module *mod;
};
#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
struct backlight_device *backlight = panel_encoder->mod->backlight;
struct gpio_desc *gpio = panel_encoder->mod->enable_gpio;
if (backlight) {
backlight->props.power = mode == DRM_MODE_DPMS_ON ?
BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF;
backlight_update_status(backlight);
}
if (gpio)
gpiod_set_value_cansleep(gpio,
mode == DRM_MODE_DPMS_ON ? 1 : 0);
}
static void panel_encoder_prepare(struct drm_encoder *encoder)
{
panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void panel_encoder_commit(struct drm_encoder *encoder)
{
panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
static void panel_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* nothing needed */
}
static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
.dpms = panel_encoder_dpms,
.prepare = panel_encoder_prepare,
.commit = panel_encoder_commit,
.mode_set = panel_encoder_mode_set,
};
static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
struct panel_module *mod)
{
struct panel_encoder *panel_encoder;
struct drm_encoder *encoder;
int ret;
panel_encoder = devm_kzalloc(dev->dev, sizeof(*panel_encoder),
GFP_KERNEL);
if (!panel_encoder)
return NULL;
panel_encoder->mod = mod;
encoder = &panel_encoder->base;
encoder->possible_crtcs = 1;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0)
goto fail;
drm_encoder_helper_add(encoder, &panel_encoder_helper_funcs);
return encoder;
fail:
drm_encoder_cleanup(encoder);
return NULL;
}
/*
* Connector:
*/
struct panel_connector {
struct drm_connector base;
struct drm_encoder *encoder; /* our connected encoder */
struct panel_module *mod;
};
#define to_panel_connector(x) container_of(x, struct panel_connector, base)
static void panel_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static int panel_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct panel_connector *panel_connector = to_panel_connector(connector);
struct display_timings *timings = panel_connector->mod->timings;
int i;
for (i = 0; i < timings->num_timings; i++) {
struct drm_display_mode *mode;
struct videomode vm;
if (videomode_from_timings(timings, &vm, i))
break;
mode = drm_mode_create(dev);
if (!mode)
break;
drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER;
if (timings->native_mode == i)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
}
return i;
}
static struct drm_encoder *panel_connector_best_encoder(
struct drm_connector *connector)
{
struct panel_connector *panel_connector = to_panel_connector(connector);
return panel_connector->encoder;
}
static const struct drm_connector_funcs panel_connector_funcs = {
.destroy = panel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
.get_modes = panel_connector_get_modes,
.best_encoder = panel_connector_best_encoder,
};
static struct drm_connector *panel_connector_create(struct drm_device *dev,
struct panel_module *mod, struct drm_encoder *encoder)
{
struct panel_connector *panel_connector;
struct drm_connector *connector;
int ret;
panel_connector = devm_kzalloc(dev->dev, sizeof(*panel_connector),
GFP_KERNEL);
if (!panel_connector)
return NULL;
panel_connector->encoder = encoder;
panel_connector->mod = mod;
connector = &panel_connector->base;
drm_connector_init(dev, connector, &panel_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_connector_helper_add(connector, &panel_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
goto fail;
return connector;
fail:
panel_connector_destroy(connector);
return NULL;
}
/*
* Module:
*/
static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
{
struct panel_module *panel_mod = to_panel_module(mod);
struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_connector *connector;
encoder = panel_encoder_create(dev, panel_mod);
if (!encoder)
return -ENOMEM;
connector = panel_connector_create(dev, panel_mod, encoder);
if (!connector)
return -ENOMEM;
priv->encoders[priv->num_encoders++] = encoder;
priv->connectors[priv->num_connectors++] = connector;
tilcdc_crtc_set_panel_info(priv->crtc,
to_panel_encoder(encoder)->mod->info);
return 0;
}
static const struct tilcdc_module_ops panel_module_ops = {
.modeset_init = panel_modeset_init,
};
/*
* Device:
*/
/* maybe move this somewhere common if it is needed by other outputs? */
static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
{
struct device_node *info_np;
struct tilcdc_panel_info *info;
int ret = 0;
if (!np) {
pr_err("%s: no devicenode given\n", __func__);
return NULL;
}
info_np = of_get_child_by_name(np, "panel-info");
if (!info_np) {
pr_err("%s: could not find panel-info node\n", __func__);
return NULL;
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
goto put_node;
ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
ret |= of_property_read_u32(info_np, "dma-burst-sz", &info->dma_burst_sz);
ret |= of_property_read_u32(info_np, "bpp", &info->bpp);
ret |= of_property_read_u32(info_np, "fdd", &info->fdd);
ret |= of_property_read_u32(info_np, "sync-edge", &info->sync_edge);
ret |= of_property_read_u32(info_np, "sync-ctrl", &info->sync_ctrl);
ret |= of_property_read_u32(info_np, "raster-order", &info->raster_order);
ret |= of_property_read_u32(info_np, "fifo-th", &info->fifo_th);
/* optional: */
info->tft_alt_mode = of_property_read_bool(info_np, "tft-alt-mode");
info->invert_pxl_clk = of_property_read_bool(info_np, "invert-pxl-clk");
if (ret) {
pr_err("%s: error reading panel-info properties\n", __func__);
kfree(info);
info = NULL;
}
put_node:
of_node_put(info_np);
return info;
}
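/*
* Illustrative DT fragment matching the properties parsed above (the
* values are placeholders, not recommendations):
*
*   panel-info {
*           ac-bias = <255>;
*           ac-bias-intrpt = <0>;
*           dma-burst-sz = <16>;
*           bpp = <16>;
*           fdd = <0x80>;
*           sync-edge = <0>;
*           sync-ctrl = <1>;
*           raster-order = <0>;
*           fifo-th = <0>;
*   };
*
* tft-alt-mode and invert-pxl-clk remain optional booleans.
*/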
static int panel_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct backlight_device *backlight;
struct panel_module *panel_mod;
struct tilcdc_module *mod;
int ret;
/* bail out early if no DT data: */
if (!node) {
dev_err(&pdev->dev, "device-tree data is missing\n");
return -ENXIO;
}
panel_mod = devm_kzalloc(&pdev->dev, sizeof(*panel_mod), GFP_KERNEL);
if (!panel_mod)
return -ENOMEM;
backlight = devm_of_find_backlight(&pdev->dev);
if (IS_ERR(backlight))
return PTR_ERR(backlight);
panel_mod->backlight = backlight;
panel_mod->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(panel_mod->enable_gpio)) {
ret = PTR_ERR(panel_mod->enable_gpio);
dev_err(&pdev->dev, "failed to request enable GPIO\n");
goto fail_backlight;
}
if (panel_mod->enable_gpio)
dev_info(&pdev->dev, "found enable GPIO\n");
mod = &panel_mod->base;
pdev->dev.platform_data = mod;
tilcdc_module_init(mod, "panel", &panel_module_ops);
panel_mod->timings = of_get_display_timings(node);
if (!panel_mod->timings) {
dev_err(&pdev->dev, "could not get panel timings\n");
ret = -EINVAL;
goto fail_free;
}
panel_mod->info = of_get_panel_info(node);
if (!panel_mod->info) {
dev_err(&pdev->dev, "could not get panel info\n");
ret = -EINVAL;
goto fail_timings;
}
return 0;
fail_timings:
display_timings_release(panel_mod->timings);
fail_free:
tilcdc_module_cleanup(mod);
fail_backlight:
if (panel_mod->backlight)
put_device(&panel_mod->backlight->dev);
return ret;
}
static void panel_remove(struct platform_device *pdev)
{
struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
struct panel_module *panel_mod = to_panel_module(mod);
struct backlight_device *backlight = panel_mod->backlight;
if (backlight)
put_device(&backlight->dev);
display_timings_release(panel_mod->timings);
tilcdc_module_cleanup(mod);
kfree(panel_mod->info);
}
static const struct of_device_id panel_of_match[] = {
{ .compatible = "ti,tilcdc,panel", },
{ },
};
static struct platform_driver panel_driver = {
.probe = panel_probe,
.remove = panel_remove,
.driver = {
.name = "tilcdc-panel",
.of_match_table = panel_of_match,
},
};
int __init tilcdc_panel_init(void)
{
return platform_driver_register(&panel_driver);
}
void __exit tilcdc_panel_fini(void)
{
platform_driver_unregister(&panel_driver);
}
// SPDX-License-Identifier: GPL-2.0
/*
* xhci-dbgtty.c - tty glue for xHCI debug capability
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Lu Baolu <[email protected]>
*/
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/idr.h>
#include "xhci.h"
#include "xhci-dbgcap.h"
static struct tty_driver *dbc_tty_driver;
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);
static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
return dbc->priv;
}
static unsigned int
dbc_kfifo_to_req(struct dbc_port *port, char *packet)
{
unsigned int len;
len = kfifo_len(&port->port.xmit_fifo);
if (len == 0)
return 0;
len = min(len, DBC_MAX_PACKET);
if (port->tx_boundary)
len = min(port->tx_boundary, len);
len = kfifo_out(&port->port.xmit_fifo, packet, len);
if (port->tx_boundary)
port->tx_boundary -= len;
return len;
}
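/*
* Worked example (1500 is an arbitrary length): if a tty write queued
* 1500 bytes and set tx_boundary accordingly, the first call here
* returns at most DBC_MAX_PACKET bytes and decrements tx_boundary by
* the same amount; once tx_boundary reaches zero the original write is
* fully mapped onto requests and dbc_tty_write() accepts new data.
*/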
static int dbc_start_tx(struct dbc_port *port)
__releases(&port->port_lock)
__acquires(&port->port_lock)
{
int len;
struct dbc_request *req;
int status = 0;
bool do_tty_wake = false;
struct list_head *pool = &port->write_pool;
while (!list_empty(pool)) {
req = list_entry(pool->next, struct dbc_request, list_pool);
len = dbc_kfifo_to_req(port, req->buf);
if (len == 0)
break;
do_tty_wake = true;
req->length = len;
list_del(&req->list_pool);
spin_unlock(&port->port_lock);
status = dbc_ep_queue(req);
spin_lock(&port->port_lock);
if (status) {
list_add(&req->list_pool, pool);
break;
}
}
if (do_tty_wake && port->port.tty)
tty_wakeup(port->port.tty);
return status;
}
static void dbc_start_rx(struct dbc_port *port)
__releases(&port->port_lock)
__acquires(&port->port_lock)
{
struct dbc_request *req;
int status;
struct list_head *pool = &port->read_pool;
while (!list_empty(pool)) {
if (!port->port.tty)
break;
req = list_entry(pool->next, struct dbc_request, list_pool);
list_del(&req->list_pool);
req->length = DBC_MAX_PACKET;
spin_unlock(&port->port_lock);
status = dbc_ep_queue(req);
spin_lock(&port->port_lock);
if (status) {
list_add(&req->list_pool, pool);
break;
}
}
}
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
unsigned long flags;
struct dbc_port *port = dbc_to_port(dbc);
spin_lock_irqsave(&port->port_lock, flags);
list_add_tail(&req->list_pool, &port->read_queue);
tasklet_schedule(&port->push);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
unsigned long flags;
struct dbc_port *port = dbc_to_port(dbc);
spin_lock_irqsave(&port->port_lock, flags);
list_add(&req->list_pool, &port->write_pool);
switch (req->status) {
case 0:
dbc_start_tx(port);
break;
case -ESHUTDOWN:
break;
default:
dev_warn(dbc->dev, "unexpected write complete status %d\n",
req->status);
break;
}
spin_unlock_irqrestore(&port->port_lock, flags);
}
static void xhci_dbc_free_req(struct dbc_request *req)
{
kfree(req->buf);
dbc_free_request(req);
}
static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
struct list_head *head,
void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
int i;
struct dbc_request *req;
for (i = 0; i < DBC_QUEUE_SIZE; i++) {
req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
if (!req)
break;
req->length = DBC_MAX_PACKET;
req->buf = kmalloc(req->length, GFP_KERNEL);
if (!req->buf) {
dbc_free_request(req);
break;
}
req->complete = fn;
list_add_tail(&req->list_pool, head);
}
return list_empty(head) ? -ENOMEM : 0;
}
static void
xhci_dbc_free_requests(struct list_head *head)
{
struct dbc_request *req;
while (!list_empty(head)) {
req = list_entry(head->next, struct dbc_request, list_pool);
list_del(&req->list_pool);
xhci_dbc_free_req(req);
}
}
static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct dbc_port *port;
mutex_lock(&dbc_tty_minors_lock);
port = idr_find(&dbc_tty_minors, tty->index);
mutex_unlock(&dbc_tty_minors_lock);
if (!port)
return -ENXIO;
tty->driver_data = port;
return tty_port_install(&port->port, driver, tty);
}
static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
struct dbc_port *port = tty->driver_data;
return tty_port_open(&port->port, tty, file);
}
static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
struct dbc_port *port = tty->driver_data;
tty_port_close(&port->port, tty, file);
}
static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
size_t count)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
unsigned int written = 0;
spin_lock_irqsave(&port->port_lock, flags);
/*
 * Treat each tty write as one USB transfer. Make sure the writes are
 * turned into TRB requests having the same size boundaries as the tty
 * writes. Don't add data to the kfifo before the previous write has been
 * turned into TRBs.
 */
if (port->tx_boundary) {
spin_unlock_irqrestore(&port->port_lock, flags);
return 0;
}
if (count) {
written = kfifo_in(&port->port.xmit_fifo, buf, count);
if (written == count)
port->tx_boundary = kfifo_len(&port->port.xmit_fifo);
dbc_start_tx(port);
}
spin_unlock_irqrestore(&port->port_lock, flags);
return written;
}
static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
int status;
spin_lock_irqsave(&port->port_lock, flags);
status = kfifo_put(&port->port.xmit_fifo, ch);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
}
static void dbc_tty_flush_chars(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&port->port_lock, flags);
dbc_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
unsigned int room;
spin_lock_irqsave(&port->port_lock, flags);
room = kfifo_avail(&port->port.xmit_fifo);
if (port->tx_boundary)
room = 0;
spin_unlock_irqrestore(&port->port_lock, flags);
return room;
}
static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
unsigned int chars;
spin_lock_irqsave(&port->port_lock, flags);
chars = kfifo_len(&port->port.xmit_fifo);
spin_unlock_irqrestore(&port->port_lock, flags);
return chars;
}
static void dbc_tty_unthrottle(struct tty_struct *tty)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&port->port_lock, flags);
tasklet_schedule(&port->push);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static const struct tty_operations dbc_tty_ops = {
.install = dbc_tty_install,
.open = dbc_tty_open,
.close = dbc_tty_close,
.write = dbc_tty_write,
.put_char = dbc_tty_put_char,
.flush_chars = dbc_tty_flush_chars,
.write_room = dbc_tty_write_room,
.chars_in_buffer = dbc_tty_chars_in_buffer,
.unthrottle = dbc_tty_unthrottle,
};
static void dbc_rx_push(struct tasklet_struct *t)
{
struct dbc_request *req;
struct tty_struct *tty;
unsigned long flags;
bool do_push = false;
bool disconnect = false;
struct dbc_port *port = from_tasklet(port, t, push);
struct list_head *queue = &port->read_queue;
spin_lock_irqsave(&port->port_lock, flags);
tty = port->port.tty;
while (!list_empty(queue)) {
req = list_first_entry(queue, struct dbc_request, list_pool);
if (tty && tty_throttled(tty))
break;
switch (req->status) {
case 0:
break;
case -ESHUTDOWN:
disconnect = true;
break;
default:
pr_warn("ttyDBC0: unexpected RX status %d\n",
req->status);
break;
}
if (req->actual) {
char *packet = req->buf;
unsigned int n, size = req->actual;
int count;
n = port->n_read;
if (n) {
packet += n;
size -= n;
}
count = tty_insert_flip_string(&port->port, packet,
size);
if (count)
do_push = true;
if (count != size) {
port->n_read += count;
break;
}
port->n_read = 0;
}
list_move_tail(&req->list_pool, &port->read_pool);
}
if (do_push)
tty_flip_buffer_push(&port->port);
if (!list_empty(queue) && tty) {
if (!tty_throttled(tty)) {
if (do_push)
tasklet_schedule(&port->push);
else
pr_warn("ttyDBC0: RX not scheduled?\n");
}
}
if (!disconnect)
dbc_start_rx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
}
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
unsigned long flags;
struct dbc_port *port = container_of(_port, struct dbc_port, port);
spin_lock_irqsave(&port->port_lock, flags);
dbc_start_rx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return 0;
}
static const struct tty_port_operations dbc_port_ops = {
.activate = dbc_port_activate,
};
static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
tty_port_init(&port->port);
spin_lock_init(&port->port_lock);
tasklet_setup(&port->push, dbc_rx_push);
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
INIT_LIST_HEAD(&port->write_pool);
port->port.ops = &dbc_port_ops;
port->n_read = 0;
}
static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
tasklet_kill(&port->push);
tty_port_destroy(&port->port);
}
static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
int ret;
struct device *tty_dev;
struct dbc_port *port = dbc_to_port(dbc);
if (port->registered)
return -EBUSY;
xhci_dbc_tty_init_port(dbc, port);
mutex_lock(&dbc_tty_minors_lock);
port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
mutex_unlock(&dbc_tty_minors_lock);
if (port->minor < 0) {
ret = port->minor;
goto err_idr;
}
ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
GFP_KERNEL);
if (ret)
goto err_exit_port;
ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
dbc_read_complete);
if (ret)
goto err_free_fifo;
ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
dbc_write_complete);
if (ret)
goto err_free_requests;
tty_dev = tty_port_register_device(&port->port,
dbc_tty_driver, port->minor, NULL);
if (IS_ERR(tty_dev)) {
ret = PTR_ERR(tty_dev);
goto err_free_requests;
}
port->registered = true;
return 0;
err_free_requests:
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
kfifo_free(&port->port.xmit_fifo);
err_exit_port:
idr_remove(&dbc_tty_minors, port->minor);
err_idr:
xhci_dbc_tty_exit_port(port);
dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
return ret;
}
static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
struct dbc_port *port = dbc_to_port(dbc);
if (!port->registered)
return;
tty_unregister_device(dbc_tty_driver, port->minor);
xhci_dbc_tty_exit_port(port);
port->registered = false;
mutex_lock(&dbc_tty_minors_lock);
idr_remove(&dbc_tty_minors, port->minor);
mutex_unlock(&dbc_tty_minors_lock);
kfifo_free(&port->port.xmit_fifo);
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->read_queue);
xhci_dbc_free_requests(&port->write_pool);
}
static const struct dbc_driver dbc_driver = {
.configure = xhci_dbc_tty_register_device,
.disconnect = xhci_dbc_tty_unregister_device,
};
int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
{
struct xhci_dbc *dbc;
struct dbc_port *port;
int status;
if (!dbc_tty_driver)
return -ENODEV;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
if (!dbc) {
status = -ENOMEM;
goto out2;
}
dbc->priv = port;
/* get rid of xhci once this is a real driver binding to a device */
xhci->dbc = dbc;
return 0;
out2:
kfree(port);
return status;
}
/*
 * Undo what probe did; assume the DbC is already stopped.
 * We also assume tty_unregister_device() has been called before this.
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
struct dbc_port *port = dbc_to_port(dbc);
xhci_dbc_remove(dbc);
kfree(port);
}
int dbc_tty_init(void)
{
int ret;
idr_init(&dbc_tty_minors);
dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(dbc_tty_driver)) {
idr_destroy(&dbc_tty_minors);
return PTR_ERR(dbc_tty_driver);
}
dbc_tty_driver->driver_name = "dbc_serial";
dbc_tty_driver->name = "ttyDBC";
dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
dbc_tty_driver->init_termios = tty_std_termios;
dbc_tty_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
dbc_tty_driver->init_termios.c_ispeed = 9600;
dbc_tty_driver->init_termios.c_ospeed = 9600;
tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
ret = tty_register_driver(dbc_tty_driver);
if (ret) {
pr_err("Can't register dbc tty driver\n");
tty_driver_kref_put(dbc_tty_driver);
idr_destroy(&dbc_tty_minors);
}
return ret;
}
void dbc_tty_exit(void)
{
if (dbc_tty_driver) {
tty_unregister_driver(dbc_tty_driver);
tty_driver_kref_put(dbc_tty_driver);
dbc_tty_driver = NULL;
}
idr_destroy(&dbc_tty_minors);
}
|
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_ARCFB_H__
#define __LINUX_ARCFB_H__
#define FBIO_WAITEVENT _IO('F', 0x88)
#define FBIO_GETCONTROL2 _IOR('F', 0x89, size_t)
#endif
|
// SPDX-License-Identifier: GPL-2.0
//
// mt8186-audsys-clk.c -- MediaTek MT8186 audsys clock control
//
// Copyright (c) 2022 MediaTek Inc.
// Author: Jiaxin Yu <[email protected]>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include "mt8186-afe-common.h"
#include "mt8186-audsys-clk.h"
#include "mt8186-audsys-clkid.h"
#include "mt8186-reg.h"
struct afe_gate {
int id;
const char *name;
const char *parent_name;
int reg;
u8 bit;
const struct clk_ops *ops;
unsigned long flags;
u8 cg_flags;
};
#define GATE_AFE_FLAGS(_id, _name, _parent, _reg, _bit, _flags, _cgflags) {\
.id = _id, \
.name = _name, \
.parent_name = _parent, \
.reg = _reg, \
.bit = _bit, \
.flags = _flags, \
.cg_flags = _cgflags, \
}
#define GATE_AFE(_id, _name, _parent, _reg, _bit) \
GATE_AFE_FLAGS(_id, _name, _parent, _reg, _bit, \
CLK_SET_RATE_PARENT, CLK_GATE_SET_TO_DISABLE)
#define GATE_AUD0(_id, _name, _parent, _bit) \
GATE_AFE(_id, _name, _parent, AUDIO_TOP_CON0, _bit)
#define GATE_AUD1(_id, _name, _parent, _bit) \
GATE_AFE(_id, _name, _parent, AUDIO_TOP_CON1, _bit)
#define GATE_AUD2(_id, _name, _parent, _bit) \
GATE_AFE(_id, _name, _parent, AUDIO_TOP_CON2, _bit)
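/*
 * For reference, an entry such as
 *	GATE_AUD0(CLK_AUD_AFE, "aud_afe_clk", "top_audio", 2)
 * expands via GATE_AFE and GATE_AFE_FLAGS to an afe_gate with
 * .reg = AUDIO_TOP_CON0, .bit = 2, .flags = CLK_SET_RATE_PARENT and
 * .cg_flags = CLK_GATE_SET_TO_DISABLE, i.e. a gate that is enabled by
 * clearing bit 2 of AUDIO_TOP_CON0.
 */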
static const struct afe_gate aud_clks[CLK_AUD_NR_CLK] = {
/* AUD0 */
GATE_AUD0(CLK_AUD_AFE, "aud_afe_clk", "top_audio", 2),
GATE_AUD0(CLK_AUD_22M, "aud_apll22m_clk", "top_aud_engen1", 8),
GATE_AUD0(CLK_AUD_24M, "aud_apll24m_clk", "top_aud_engen2", 9),
GATE_AUD0(CLK_AUD_APLL2_TUNER, "aud_apll2_tuner_clk", "top_aud_engen2", 18),
GATE_AUD0(CLK_AUD_APLL_TUNER, "aud_apll_tuner_clk", "top_aud_engen1", 19),
GATE_AUD0(CLK_AUD_TDM, "aud_tdm_clk", "top_aud_1", 20),
GATE_AUD0(CLK_AUD_ADC, "aud_adc_clk", "top_audio", 24),
GATE_AUD0(CLK_AUD_DAC, "aud_dac_clk", "top_audio", 25),
GATE_AUD0(CLK_AUD_DAC_PREDIS, "aud_dac_predis_clk", "top_audio", 26),
GATE_AUD0(CLK_AUD_TML, "aud_tml_clk", "top_audio", 27),
GATE_AUD0(CLK_AUD_NLE, "aud_nle_clk", "top_audio", 28),
/* AUD1 */
GATE_AUD1(CLK_AUD_I2S1_BCLK, "aud_i2s1_bclk", "top_audio", 4),
GATE_AUD1(CLK_AUD_I2S2_BCLK, "aud_i2s2_bclk", "top_audio", 5),
GATE_AUD1(CLK_AUD_I2S3_BCLK, "aud_i2s3_bclk", "top_audio", 6),
GATE_AUD1(CLK_AUD_I2S4_BCLK, "aud_i2s4_bclk", "top_audio", 7),
GATE_AUD1(CLK_AUD_CONNSYS_I2S_ASRC, "aud_connsys_i2s_asrc", "top_audio", 12),
GATE_AUD1(CLK_AUD_GENERAL1_ASRC, "aud_general1_asrc", "top_audio", 13),
GATE_AUD1(CLK_AUD_GENERAL2_ASRC, "aud_general2_asrc", "top_audio", 14),
GATE_AUD1(CLK_AUD_DAC_HIRES, "aud_dac_hires_clk", "top_audio_h", 15),
GATE_AUD1(CLK_AUD_ADC_HIRES, "aud_adc_hires_clk", "top_audio_h", 16),
GATE_AUD1(CLK_AUD_ADC_HIRES_TML, "aud_adc_hires_tml", "top_audio_h", 17),
GATE_AUD1(CLK_AUD_ADDA6_ADC, "aud_adda6_adc", "top_audio", 20),
GATE_AUD1(CLK_AUD_ADDA6_ADC_HIRES, "aud_adda6_adc_hires", "top_audio_h", 21),
GATE_AUD1(CLK_AUD_3RD_DAC, "aud_3rd_dac", "top_audio", 28),
GATE_AUD1(CLK_AUD_3RD_DAC_PREDIS, "aud_3rd_dac_predis", "top_audio", 29),
GATE_AUD1(CLK_AUD_3RD_DAC_TML, "aud_3rd_dac_tml", "top_audio", 30),
GATE_AUD1(CLK_AUD_3RD_DAC_HIRES, "aud_3rd_dac_hires", "top_audio_h", 31),
/* AUD2 */
GATE_AUD2(CLK_AUD_ETDM_IN1_BCLK, "aud_etdm_in1_bclk", "top_audio", 23),
GATE_AUD2(CLK_AUD_ETDM_OUT1_BCLK, "aud_etdm_out1_bclk", "top_audio", 24),
};
static void mt8186_audsys_clk_unregister(void *data)
{
struct mtk_base_afe *afe = data;
struct mt8186_afe_private *afe_priv = afe->platform_priv;
struct clk *clk;
struct clk_lookup *cl;
int i;
if (!afe_priv)
return;
for (i = 0; i < CLK_AUD_NR_CLK; i++) {
cl = afe_priv->lookup[i];
if (!cl)
continue;
clk = cl->clk;
clk_unregister_gate(clk);
clkdev_drop(cl);
}
}
int mt8186_audsys_clk_register(struct mtk_base_afe *afe)
{
struct mt8186_afe_private *afe_priv = afe->platform_priv;
struct clk *clk;
struct clk_lookup *cl;
int i;
afe_priv->lookup = devm_kcalloc(afe->dev, CLK_AUD_NR_CLK,
sizeof(*afe_priv->lookup),
GFP_KERNEL);
if (!afe_priv->lookup)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(aud_clks); i++) {
const struct afe_gate *gate = &aud_clks[i];
clk = clk_register_gate(afe->dev, gate->name, gate->parent_name,
gate->flags, afe->base_addr + gate->reg,
gate->bit, gate->cg_flags, NULL);
if (IS_ERR(clk)) {
dev_err(afe->dev, "Failed to register clk %s: %ld\n",
gate->name, PTR_ERR(clk));
continue;
}
/* add clk_lookup for devm_clk_get(SND_SOC_DAPM_CLOCK_SUPPLY) */
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return -ENOMEM;
cl->clk = clk;
cl->con_id = gate->name;
cl->dev_id = dev_name(afe->dev);
clkdev_add(cl);
afe_priv->lookup[i] = cl;
}
return devm_add_action_or_reset(afe->dev, mt8186_audsys_clk_unregister, afe);
}
|
/*
* Broadcom specific AMBA
* GBIT MAC COMMON Core
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
{
mutex_init(&gc->phy_mutex);
}
|
/*
* Broadcom specific AMBA
* SPROM reading
*
* Copyright 2011, 2012, Hauke Mehrtens <[email protected]>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
static int(*get_fallback_sprom)(struct bcma_bus *dev, struct ssb_sprom *out);
/**
 * bcma_arch_register_fallback_sprom - Registers a method providing a
 * fallback SPROM if no SPROM is found.
 *
 * @sprom_callback: The callback function.
 *
 * With this function the architecture implementation may register a
 * callback handler which fills the SPROM data structure. The fallback is
 * used both for PCI-based BCMA devices, where no valid SPROM can be found
 * in the shadow registers, and to provide the SPROM for SoCs where BCMA
 * controls the system bus.
 *
 * This function is useful for unusual architectures that have a poorly
 * integrated BCMA device hardwired to their PCI bus.
 *
 * This function is available to architecture code only, so it is not
 * exported.
 */
int bcma_arch_register_fallback_sprom(int (*sprom_callback)(struct bcma_bus *bus,
struct ssb_sprom *out))
{
if (get_fallback_sprom)
return -EEXIST;
get_fallback_sprom = sprom_callback;
return 0;
}
static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
struct ssb_sprom *out)
{
int err;
if (!get_fallback_sprom) {
err = -ENOENT;
goto fail;
}
err = get_fallback_sprom(bus, out);
if (err)
goto fail;
bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
bus->sprom.revision);
return 0;
fail:
bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
return err;
}
/**************************************************
* R/W ops.
**************************************************/
static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom,
size_t words)
{
int i;
for (i = 0; i < words; i++)
sprom[i] = bcma_read16(bus->drv_cc.core, offset + (i * 2));
}
/**************************************************
* Validation.
**************************************************/
static inline u8 bcma_crc8(u8 crc, u8 data)
{
/* Polynomial: x^8 + x^7 + x^6 + x^4 + x^2 + 1 */
static const u8 t[] = {
0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F,
};
return t[crc ^ data];
}
static u8 bcma_sprom_crc(const u16 *sprom, size_t words)
{
int word;
u8 crc = 0xFF;
for (word = 0; word < words - 1; word++) {
crc = bcma_crc8(crc, sprom[word] & 0x00FF);
crc = bcma_crc8(crc, (sprom[word] & 0xFF00) >> 8);
}
crc = bcma_crc8(crc, sprom[words - 1] & 0x00FF);
crc ^= 0xFF;
return crc;
}
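/*
 * Note that the words are fed low byte first and the final word
 * contributes only its low byte: the last word's high byte holds the
 * stored CRC (its low byte the SPROM revision) and must not be covered by
 * the checksum itself.
 */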
static int bcma_sprom_check_crc(const u16 *sprom, size_t words)
{
u8 crc;
u8 expected_crc;
u16 tmp;
crc = bcma_sprom_crc(sprom, words);
tmp = sprom[words - 1] & SSB_SPROM_REVISION_CRC;
expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT;
if (crc != expected_crc)
return -EPROTO;
return 0;
}
static int bcma_sprom_valid(struct bcma_bus *bus, const u16 *sprom,
size_t words)
{
u16 revision;
int err;
err = bcma_sprom_check_crc(sprom, words);
if (err)
return err;
revision = sprom[words - 1] & SSB_SPROM_REVISION_REV;
if (revision < 8 || revision > 11) {
pr_err("Unsupported SPROM revision: %d\n", revision);
return -ENOENT;
}
bus->sprom.revision = revision;
bcma_debug(bus, "Found SPROM revision %d\n", revision);
return 0;
}
/**************************************************
* SPROM extraction.
**************************************************/
#define SPOFF(offset) ((offset) / sizeof(u16))
#define SPEX(_field, _offset, _mask, _shift) \
bus->sprom._field = ((sprom[SPOFF(_offset)] & (_mask)) >> (_shift))
#define SPEX32(_field, _offset, _mask, _shift) \
bus->sprom._field = ((((u32)sprom[SPOFF((_offset)+2)] << 16 | \
sprom[SPOFF(_offset)]) & (_mask)) >> (_shift))
#define SPEX_ARRAY8(_field, _offset, _mask, _shift) \
do { \
SPEX(_field[0], _offset + 0, _mask, _shift); \
SPEX(_field[1], _offset + 2, _mask, _shift); \
SPEX(_field[2], _offset + 4, _mask, _shift); \
SPEX(_field[3], _offset + 6, _mask, _shift); \
SPEX(_field[4], _offset + 8, _mask, _shift); \
SPEX(_field[5], _offset + 10, _mask, _shift); \
SPEX(_field[6], _offset + 12, _mask, _shift); \
SPEX(_field[7], _offset + 14, _mask, _shift); \
} while (0)
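/*
 * As an illustration, SPEX(board_rev, SSB_SPROM8_BOARDREV, ~0, 0) expands
 * to
 *	bus->sprom.board_rev =
 *		((sprom[SPOFF(SSB_SPROM8_BOARDREV)] & ~0) >> 0);
 * i.e. the byte offset is converted to a word index, the field is masked
 * and then shifted into place. SPEX32 does the same for a 32-bit value
 * split across two consecutive words.
 */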
static s8 sprom_extract_antgain(const u16 *in, u16 offset, u16 mask, u16 shift)
{
u16 v;
u8 gain;
v = in[SPOFF(offset)];
gain = (v & mask) >> shift;
if (gain == 0xFF) {
gain = 8; /* if unset, use 2 dBm (8 quarter-dB units) */
} else {
/* Q5.2 format: the two fraction bits are stored in 0xC0 */
gain = ((gain & 0xC0) >> 6) | ((gain & 0x3F) << 2);
}
return (s8)gain;
}
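/*
 * Example: a stored gain byte of 0x05 (integer part 5, fraction 0)
 * becomes 0x14 = 20 quarter-dB units, i.e. 5 dBm; the unset value 0xFF
 * falls back to 8 quarter-dB units = 2 dBm.
 */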
static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
{
u16 v, o;
int i;
static const u16 pwr_info_offset[] = {
SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1,
SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3
};
BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) !=
ARRAY_SIZE(bus->sprom.core_pwr_info));
for (i = 0; i < 3; i++) {
v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
}
SPEX(board_rev, SSB_SPROM8_BOARDREV, ~0, 0);
SPEX(board_type, SSB_SPROM1_SPID, ~0, 0);
SPEX(txpid2g[0], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G0,
SSB_SPROM4_TXPID2G0_SHIFT);
SPEX(txpid2g[1], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G1,
SSB_SPROM4_TXPID2G1_SHIFT);
SPEX(txpid2g[2], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G2,
SSB_SPROM4_TXPID2G2_SHIFT);
SPEX(txpid2g[3], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G3,
SSB_SPROM4_TXPID2G3_SHIFT);
SPEX(txpid5gl[0], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL0,
SSB_SPROM4_TXPID5GL0_SHIFT);
SPEX(txpid5gl[1], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL1,
SSB_SPROM4_TXPID5GL1_SHIFT);
SPEX(txpid5gl[2], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL2,
SSB_SPROM4_TXPID5GL2_SHIFT);
SPEX(txpid5gl[3], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL3,
SSB_SPROM4_TXPID5GL3_SHIFT);
SPEX(txpid5g[0], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G0,
SSB_SPROM4_TXPID5G0_SHIFT);
SPEX(txpid5g[1], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G1,
SSB_SPROM4_TXPID5G1_SHIFT);
SPEX(txpid5g[2], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G2,
SSB_SPROM4_TXPID5G2_SHIFT);
SPEX(txpid5g[3], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G3,
SSB_SPROM4_TXPID5G3_SHIFT);
SPEX(txpid5gh[0], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH0,
SSB_SPROM4_TXPID5GH0_SHIFT);
SPEX(txpid5gh[1], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH1,
SSB_SPROM4_TXPID5GH1_SHIFT);
SPEX(txpid5gh[2], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH2,
SSB_SPROM4_TXPID5GH2_SHIFT);
SPEX(txpid5gh[3], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH3,
SSB_SPROM4_TXPID5GH3_SHIFT);
SPEX(boardflags_lo, SSB_SPROM8_BFLLO, ~0, 0);
SPEX(boardflags_hi, SSB_SPROM8_BFLHI, ~0, 0);
SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, ~0, 0);
SPEX(boardflags2_hi, SSB_SPROM8_BFL2HI, ~0, 0);
SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
/* Extract core's power info */
for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
o = pwr_info_offset[i];
SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
SSB_SPROM8_2G_ITSSI, SSB_SPROM8_2G_ITSSI_SHIFT);
SPEX(core_pwr_info[i].maxpwr_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
SSB_SPROM8_2G_MAXP, 0);
SPEX(core_pwr_info[i].pa_2g[0], o + SSB_SROM8_2G_PA_0, ~0, 0);
SPEX(core_pwr_info[i].pa_2g[1], o + SSB_SROM8_2G_PA_1, ~0, 0);
SPEX(core_pwr_info[i].pa_2g[2], o + SSB_SROM8_2G_PA_2, ~0, 0);
SPEX(core_pwr_info[i].itssi_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
SSB_SPROM8_5G_ITSSI, SSB_SPROM8_5G_ITSSI_SHIFT);
SPEX(core_pwr_info[i].maxpwr_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
SSB_SPROM8_5G_MAXP, 0);
SPEX(core_pwr_info[i].maxpwr_5gh, o + SSB_SPROM8_5GHL_MAXP,
SSB_SPROM8_5GH_MAXP, 0);
SPEX(core_pwr_info[i].maxpwr_5gl, o + SSB_SPROM8_5GHL_MAXP,
SSB_SPROM8_5GL_MAXP, SSB_SPROM8_5GL_MAXP_SHIFT);
SPEX(core_pwr_info[i].pa_5gl[0], o + SSB_SROM8_5GL_PA_0, ~0, 0);
SPEX(core_pwr_info[i].pa_5gl[1], o + SSB_SROM8_5GL_PA_1, ~0, 0);
SPEX(core_pwr_info[i].pa_5gl[2], o + SSB_SROM8_5GL_PA_2, ~0, 0);
SPEX(core_pwr_info[i].pa_5g[0], o + SSB_SROM8_5G_PA_0, ~0, 0);
SPEX(core_pwr_info[i].pa_5g[1], o + SSB_SROM8_5G_PA_1, ~0, 0);
SPEX(core_pwr_info[i].pa_5g[2], o + SSB_SROM8_5G_PA_2, ~0, 0);
SPEX(core_pwr_info[i].pa_5gh[0], o + SSB_SROM8_5GH_PA_0, ~0, 0);
SPEX(core_pwr_info[i].pa_5gh[1], o + SSB_SROM8_5GH_PA_1, ~0, 0);
SPEX(core_pwr_info[i].pa_5gh[2], o + SSB_SROM8_5GH_PA_2, ~0, 0);
}
SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TSSIPOS,
SSB_SROM8_FEM_TSSIPOS_SHIFT);
SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_EXTPA_GAIN,
SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_PDET_RANGE,
SSB_SROM8_FEM_PDET_RANGE_SHIFT);
SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TR_ISO,
SSB_SROM8_FEM_TR_ISO_SHIFT);
SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_ANTSWLUT,
SSB_SROM8_FEM_ANTSWLUT_SHIFT);
SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TSSIPOS,
SSB_SROM8_FEM_TSSIPOS_SHIFT);
SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_EXTPA_GAIN,
SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_PDET_RANGE,
SSB_SROM8_FEM_PDET_RANGE_SHIFT);
SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TR_ISO,
SSB_SROM8_FEM_TR_ISO_SHIFT);
SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_ANTSWLUT,
SSB_SROM8_FEM_ANTSWLUT_SHIFT);
SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A,
SSB_SPROM8_ANTAVAIL_A_SHIFT);
SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG,
SSB_SPROM8_ANTAVAIL_BG_SHIFT);
SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0);
SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG,
SSB_SPROM8_ITSSI_BG_SHIFT);
SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0);
SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A,
SSB_SPROM8_ITSSI_A_SHIFT);
SPEX(maxpwr_ah, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AH_MASK, 0);
SPEX(maxpwr_al, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AL_MASK,
SSB_SPROM8_MAXP_AL_SHIFT);
SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0);
SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1,
SSB_SPROM8_GPIOA_P1_SHIFT);
SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0);
SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3,
SSB_SPROM8_GPIOB_P3_SHIFT);
SPEX(tri2g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI2G, 0);
SPEX(tri5g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI5G,
SSB_SPROM8_TRI5G_SHIFT);
SPEX(tri5gl, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GL, 0);
SPEX(tri5gh, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GH,
SSB_SPROM8_TRI5GH_SHIFT);
SPEX(rxpo2g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO2G,
SSB_SPROM8_RXPO2G_SHIFT);
SPEX(rxpo5g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO5G,
SSB_SPROM8_RXPO5G_SHIFT);
SPEX(rssismf2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMF2G, 0);
SPEX(rssismc2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMC2G,
SSB_SPROM8_RSSISMC2G_SHIFT);
SPEX(rssisav2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISAV2G,
SSB_SPROM8_RSSISAV2G_SHIFT);
SPEX(bxa2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_BXA2G,
SSB_SPROM8_BXA2G_SHIFT);
SPEX(rssismf5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMF5G, 0);
SPEX(rssismc5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMC5G,
SSB_SPROM8_RSSISMC5G_SHIFT);
SPEX(rssisav5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISAV5G,
SSB_SPROM8_RSSISAV5G_SHIFT);
SPEX(bxa5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_BXA5G,
SSB_SPROM8_BXA5G_SHIFT);
SPEX(pa0b0, SSB_SPROM8_PA0B0, ~0, 0);
SPEX(pa0b1, SSB_SPROM8_PA0B1, ~0, 0);
SPEX(pa0b2, SSB_SPROM8_PA0B2, ~0, 0);
SPEX(pa1b0, SSB_SPROM8_PA1B0, ~0, 0);
SPEX(pa1b1, SSB_SPROM8_PA1B1, ~0, 0);
SPEX(pa1b2, SSB_SPROM8_PA1B2, ~0, 0);
SPEX(pa1lob0, SSB_SPROM8_PA1LOB0, ~0, 0);
SPEX(pa1lob1, SSB_SPROM8_PA1LOB1, ~0, 0);
SPEX(pa1lob2, SSB_SPROM8_PA1LOB2, ~0, 0);
SPEX(pa1hib0, SSB_SPROM8_PA1HIB0, ~0, 0);
SPEX(pa1hib1, SSB_SPROM8_PA1HIB1, ~0, 0);
SPEX(pa1hib2, SSB_SPROM8_PA1HIB2, ~0, 0);
SPEX(cck2gpo, SSB_SPROM8_CCK2GPO, ~0, 0);
SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, ~0, 0);
SPEX32(ofdm5glpo, SSB_SPROM8_OFDM5GLPO, ~0, 0);
SPEX32(ofdm5gpo, SSB_SPROM8_OFDM5GPO, ~0, 0);
SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, ~0, 0);
/* Extract the antenna gain values. */
bus->sprom.antenna_gain.a0 = sprom_extract_antgain(sprom,
SSB_SPROM8_AGAIN01,
SSB_SPROM8_AGAIN0,
SSB_SPROM8_AGAIN0_SHIFT);
bus->sprom.antenna_gain.a1 = sprom_extract_antgain(sprom,
SSB_SPROM8_AGAIN01,
SSB_SPROM8_AGAIN1,
SSB_SPROM8_AGAIN1_SHIFT);
bus->sprom.antenna_gain.a2 = sprom_extract_antgain(sprom,
SSB_SPROM8_AGAIN23,
SSB_SPROM8_AGAIN2,
SSB_SPROM8_AGAIN2_SHIFT);
bus->sprom.antenna_gain.a3 = sprom_extract_antgain(sprom,
SSB_SPROM8_AGAIN23,
SSB_SPROM8_AGAIN3,
SSB_SPROM8_AGAIN3_SHIFT);
SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
SSB_SPROM8_LEDDC_ON_SHIFT);
SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
SSB_SPROM8_LEDDC_OFF_SHIFT);
SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
SSB_SPROM8_TXRXC_SWITCH_SHIFT);
SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);
SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);
SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);
SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);
SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
SSB_SPROM8_THERMAL_TRESH_SHIFT);
SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
SSB_SPROM8_THERMAL_OFFSET_SHIFT);
SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
SSB_SPROM8_TEMPDELTA_PHYCAL,
SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
SSB_SPROM8_TEMPDELTA_HYSTERESIS,
SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
}
/*
 * Indicates the presence of an external SPROM.
*/
static bool bcma_sprom_ext_available(struct bcma_bus *bus)
{
u32 chip_status;
u32 srom_control;
u32 present_mask;
if (bus->drv_cc.core->id.rev >= 31) {
if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
return false;
srom_control = bcma_read32(bus->drv_cc.core,
BCMA_CC_SROM_CONTROL);
return srom_control & BCMA_CC_SROM_CONTROL_PRESENT;
}
/* older chipcommon revisions use chip status register */
chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4313:
present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
break;
case BCMA_CHIP_ID_BCM4331:
present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
break;
default:
return true;
}
return chip_status & present_mask;
}
/*
* Indicates that on-chip OTP memory is present and enabled.
*/
static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
{
u32 chip_status;
u32 otpsize = 0;
bool present;
chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4313:
present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
break;
case BCMA_CHIP_ID_BCM4331:
present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
break;
case BCMA_CHIP_ID_BCM43142:
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43225:
/* for these chips OTP is always available */
present = true;
break;
case BCMA_CHIP_ID_BCM43131:
case BCMA_CHIP_ID_BCM43217:
case BCMA_CHIP_ID_BCM43227:
case BCMA_CHIP_ID_BCM43228:
case BCMA_CHIP_ID_BCM43428:
present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
break;
default:
present = false;
break;
}
if (present) {
otpsize = bus->drv_cc.capabilities & BCMA_CC_CAP_OTPS;
otpsize >>= BCMA_CC_CAP_OTPS_SHIFT;
}
return otpsize != 0;
}
/*
* Verify OTP is filled and determine the byte
* offset where SPROM data is located.
*
* On error, returns 0; byte offset otherwise.
*/
static int bcma_sprom_onchip_offset(struct bcma_bus *bus)
{
struct bcma_device *cc = bus->drv_cc.core;
u32 offset;
/* verify OTP status */
if ((bcma_read32(cc, BCMA_CC_OTPS) & BCMA_CC_OTPS_GU_PROG_HW) == 0)
return 0;
/* obtain bit offset from otplayout register */
offset = (bcma_read32(cc, BCMA_CC_OTPL) & BCMA_CC_OTPL_GURGN_OFFSET);
return BCMA_CC_SPROM + (offset >> 3);
}
int bcma_sprom_get(struct bcma_bus *bus)
{
u16 offset = BCMA_CC_SPROM;
u16 *sprom;
static const size_t sprom_sizes[] = {
SSB_SPROMSIZE_WORDS_R4,
SSB_SPROMSIZE_WORDS_R10,
SSB_SPROMSIZE_WORDS_R11,
};
int i, err = 0;
if (!bus->drv_cc.core)
return -EOPNOTSUPP;
if (!bcma_sprom_ext_available(bus)) {
bool sprom_onchip;
/*
* External SPROM takes precedence so check
* on-chip OTP only when no external SPROM
* is present.
*/
sprom_onchip = bcma_sprom_onchip_available(bus);
if (sprom_onchip) {
/* determine offset */
offset = bcma_sprom_onchip_offset(bus);
}
if (!offset || !sprom_onchip) {
/*
 * The device may have no SPROM at all. Ask the arch code
 * whether a fallback SPROM is available for this device
 * from some other storage.
 */
err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
return err;
}
}
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
bcma_debug(bus, "SPROM offset 0x%x\n", offset);
for (i = 0; i < ARRAY_SIZE(sprom_sizes); i++) {
size_t words = sprom_sizes[i];
sprom = kcalloc(words, sizeof(u16), GFP_KERNEL);
if (!sprom)
return -ENOMEM;
bcma_sprom_read(bus, offset, sprom, words);
err = bcma_sprom_valid(bus, sprom, words);
if (!err)
break;
kfree(sprom);
}
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
if (err) {
bcma_warn(bus, "Invalid SPROM read from the PCIe card, trying to use fallback SPROM\n");
err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
} else {
bcma_sprom_extract_r8(bus, sprom);
kfree(sprom);
}
return err;
}
|
#include "juno.dts"
#include "juno-scmi.dtsi"
&A57_0 {
clocks = <&scmi_dvfs 0>;
};
&A57_1 {
clocks = <&scmi_dvfs 0>;
};
|
/*
* linux/fs/nls/nls_base.c
*
 * Native language support--charsets and Unicode translations.
* By Gordon Chaffee 1996, 1997
*
* Unicode based case conversion 1999 by Wolfram Pienkoss
*
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
static struct nls_table default_table;
static struct nls_table *tables = &default_table;
static DEFINE_SPINLOCK(nls_lock);
/*
* Sample implementation from Unicode home page.
* http://www.stonehand.com/unicode/standard/fss-utf.html
*/
struct utf8_table {
int cmask;
int cval;
int shift;
long lmask;
long lval;
};
static const struct utf8_table utf8_table[] =
{
{0x80, 0x00, 0*6, 0x7F, 0, /* 1 byte sequence */},
{0xE0, 0xC0, 1*6, 0x7FF, 0x80, /* 2 byte sequence */},
{0xF0, 0xE0, 2*6, 0xFFFF, 0x800, /* 3 byte sequence */},
{0xF8, 0xF0, 3*6, 0x1FFFFF, 0x10000, /* 4 byte sequence */},
{0xFC, 0xF8, 4*6, 0x3FFFFFF, 0x200000, /* 5 byte sequence */},
{0xFE, 0xFC, 5*6, 0x7FFFFFFF, 0x4000000, /* 6 byte sequence */},
{0, /* end of table */}
};
#define UNICODE_MAX 0x0010ffff
#define PLANE_SIZE 0x00010000
#define SURROGATE_MASK 0xfffff800
#define SURROGATE_PAIR 0x0000d800
#define SURROGATE_LOW 0x00000400
#define SURROGATE_BITS 0x000003ff
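/*
 * Surrogate-pair example: U+1F600 lies above PLANE_SIZE, so UTF-16 stores
 * it as (0x1F600 - 0x10000) = 0xF600 split into the high surrogate
 * 0xD800 | (0xF600 >> 10) = 0xD83D and the low surrogate
 * 0xDC00 | (0xF600 & 0x3FF) = 0xDE00, matching the SURROGATE_* masks
 * used below.
 */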
int utf8_to_utf32(const u8 *s, int inlen, unicode_t *pu)
{
unsigned long l;
int c0, c, nc;
const struct utf8_table *t;
nc = 0;
c0 = *s;
l = c0;
for (t = utf8_table; t->cmask; t++) {
nc++;
if ((c0 & t->cmask) == t->cval) {
l &= t->lmask;
if (l < t->lval || l > UNICODE_MAX ||
(l & SURROGATE_MASK) == SURROGATE_PAIR)
return -1;
*pu = (unicode_t) l;
return nc;
}
if (inlen <= nc)
return -1;
s++;
c = (*s ^ 0x80) & 0xFF;
if (c & 0xC0)
return -1;
l = (l << 6) | c;
}
return -1;
}
EXPORT_SYMBOL(utf8_to_utf32);
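/*
 * Decoding example: for the two-byte sequence 0xC3 0xA9 the loop above
 * first folds in the continuation byte (l = (0xC3 << 6) | 0x29 = 0x30e9),
 * then matches the two-byte table entry (cmask 0xE0, cval 0xC0), masks
 * with lmask 0x7FF and returns *pu = 0xE9 (U+00E9) with length 2.
 */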
int utf32_to_utf8(unicode_t u, u8 *s, int maxout)
{
unsigned long l;
int c, nc;
const struct utf8_table *t;
if (!s)
return 0;
l = u;
if (l > UNICODE_MAX || (l & SURROGATE_MASK) == SURROGATE_PAIR)
return -1;
nc = 0;
for (t = utf8_table; t->cmask && maxout; t++, maxout--) {
nc++;
if (l <= t->lmask) {
c = t->shift;
*s = (u8) (t->cval | (l >> c));
while (c > 0) {
c -= 6;
s++;
*s = (u8) (0x80 | ((l >> c) & 0x3F));
}
return nc;
}
}
return -1;
}
EXPORT_SYMBOL(utf32_to_utf8);
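/*
 * Encoding example: utf32_to_utf8(0x20AC, s, 3) selects the three-byte
 * table entry (lmask 0xFFFF, shift 12) and emits
 *	0xE0 | (0x20AC >> 12) = 0xE2,
 *	0x80 | ((0x20AC >> 6) & 0x3F) = 0x82,
 *	0x80 | (0x20AC & 0x3F) = 0xAC,
 * the UTF-8 encoding of the euro sign, and returns 3.
 */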
static inline void put_utf16(wchar_t *s, unsigned c, enum utf16_endian endian)
{
switch (endian) {
default:
*s = (wchar_t) c;
break;
case UTF16_LITTLE_ENDIAN:
*s = __cpu_to_le16(c);
break;
case UTF16_BIG_ENDIAN:
*s = __cpu_to_be16(c);
break;
}
}
int utf8s_to_utf16s(const u8 *s, int inlen, enum utf16_endian endian,
wchar_t *pwcs, int maxout)
{
u16 *op;
int size;
unicode_t u;
op = pwcs;
while (inlen > 0 && maxout > 0 && *s) {
if (*s & 0x80) {
size = utf8_to_utf32(s, inlen, &u);
if (size < 0)
return -EINVAL;
s += size;
inlen -= size;
if (u >= PLANE_SIZE) {
if (maxout < 2)
break;
u -= PLANE_SIZE;
put_utf16(op++, SURROGATE_PAIR |
((u >> 10) & SURROGATE_BITS),
endian);
put_utf16(op++, SURROGATE_PAIR |
SURROGATE_LOW |
(u & SURROGATE_BITS),
endian);
maxout -= 2;
} else {
put_utf16(op++, u, endian);
maxout--;
}
} else {
put_utf16(op++, *s++, endian);
inlen--;
maxout--;
}
}
return op - pwcs;
}
EXPORT_SYMBOL(utf8s_to_utf16s);
static inline unsigned long get_utf16(unsigned c, enum utf16_endian endian)
{
switch (endian) {
default:
return c;
case UTF16_LITTLE_ENDIAN:
return __le16_to_cpu(c);
case UTF16_BIG_ENDIAN:
return __be16_to_cpu(c);
}
}
int utf16s_to_utf8s(const wchar_t *pwcs, int inlen, enum utf16_endian endian,
u8 *s, int maxout)
{
u8 *op;
int size;
unsigned long u, v;
op = s;
while (inlen > 0 && maxout > 0) {
u = get_utf16(*pwcs, endian);
if (!u)
break;
pwcs++;
inlen--;
if (u > 0x7f) {
if ((u & SURROGATE_MASK) == SURROGATE_PAIR) {
if (u & SURROGATE_LOW) {
/* Ignore character and move on */
continue;
}
if (inlen <= 0)
break;
v = get_utf16(*pwcs, endian);
if ((v & SURROGATE_MASK) != SURROGATE_PAIR ||
!(v & SURROGATE_LOW)) {
/* Ignore character and move on */
continue;
}
u = PLANE_SIZE + ((u & SURROGATE_BITS) << 10)
+ (v & SURROGATE_BITS);
pwcs++;
inlen--;
}
size = utf32_to_utf8(u, op, maxout);
if (size == -1) {
/* Ignore character and move on */
} else {
op += size;
maxout -= size;
}
} else {
*op++ = (u8) u;
maxout--;
}
}
return op - s;
}
EXPORT_SYMBOL(utf16s_to_utf8s);
int __register_nls(struct nls_table *nls, struct module *owner)
{
struct nls_table ** tmp = &tables;
if (nls->next)
return -EBUSY;
nls->owner = owner;
spin_lock(&nls_lock);
while (*tmp) {
if (nls == *tmp) {
spin_unlock(&nls_lock);
return -EBUSY;
}
tmp = &(*tmp)->next;
}
nls->next = tables;
tables = nls;
spin_unlock(&nls_lock);
return 0;
}
EXPORT_SYMBOL(__register_nls);
int unregister_nls(struct nls_table * nls)
{
struct nls_table ** tmp = &tables;
spin_lock(&nls_lock);
while (*tmp) {
if (nls == *tmp) {
*tmp = nls->next;
spin_unlock(&nls_lock);
return 0;
}
tmp = &(*tmp)->next;
}
spin_unlock(&nls_lock);
return -EINVAL;
}
static struct nls_table *find_nls(const char *charset)
{
struct nls_table *nls;
spin_lock(&nls_lock);
for (nls = tables; nls; nls = nls->next) {
if (!strcmp(nls->charset, charset))
break;
if (nls->alias && !strcmp(nls->alias, charset))
break;
}
if (nls && !try_module_get(nls->owner))
nls = NULL;
spin_unlock(&nls_lock);
return nls;
}
struct nls_table *load_nls(const char *charset)
{
return try_then_request_module(find_nls(charset), "nls_%s", charset);
}
void unload_nls(struct nls_table *nls)
{
if (nls)
module_put(nls->owner);
}
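/*
 * Typical filesystem usage (a sketch; the charset name normally comes
 * from a mount option):
 *
 *	struct nls_table *nls = load_nls("iso8859-1");
 *
 *	if (!nls)
 *		nls = load_nls_default();
 *	...
 *	unload_nls(nls);
 */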
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x0080, 0x0081, 0x0082, 0x0083,
0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b,
0x008c, 0x008d, 0x008e, 0x008f,
/* 0x90*/
0x0090, 0x0091, 0x0092, 0x0093,
0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b,
0x009c, 0x009d, 0x009e, 0x009f,
/* 0xa0*/
0x00a0, 0x00a1, 0x00a2, 0x00a3,
0x00a4, 0x00a5, 0x00a6, 0x00a7,
0x00a8, 0x00a9, 0x00aa, 0x00ab,
0x00ac, 0x00ad, 0x00ae, 0x00af,
/* 0xb0*/
0x00b0, 0x00b1, 0x00b2, 0x00b3,
0x00b4, 0x00b5, 0x00b6, 0x00b7,
0x00b8, 0x00b9, 0x00ba, 0x00bb,
0x00bc, 0x00bd, 0x00be, 0x00bf,
/* 0xc0*/
0x00c0, 0x00c1, 0x00c2, 0x00c3,
0x00c4, 0x00c5, 0x00c6, 0x00c7,
0x00c8, 0x00c9, 0x00ca, 0x00cb,
0x00cc, 0x00cd, 0x00ce, 0x00cf,
/* 0xd0*/
0x00d0, 0x00d1, 0x00d2, 0x00d3,
0x00d4, 0x00d5, 0x00d6, 0x00d7,
0x00d8, 0x00d9, 0x00da, 0x00db,
0x00dc, 0x00dd, 0x00de, 0x00df,
/* 0xe0*/
0x00e0, 0x00e1, 0x00e2, 0x00e3,
0x00e4, 0x00e5, 0x00e6, 0x00e7,
0x00e8, 0x00e9, 0x00ea, 0x00eb,
0x00ec, 0x00ed, 0x00ee, 0x00ef,
/* 0xf0*/
0x00f0, 0x00f1, 0x00f2, 0x00f3,
0x00f4, 0x00f5, 0x00f6, 0x00f7,
0x00f8, 0x00f9, 0x00fa, 0x00fb,
0x00fc, 0x00fd, 0x00fe, 0x00ff,
};
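/*
 * The default table is an identity mapping: byte 0xNN corresponds to
 * Unicode code point U+00NN (i.e. ISO 8859-1); page00 below is the
 * matching Unicode-to-charset inverse.
 */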
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static const unsigned char *const page_uni2charset[256] = {
page00
};
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
*uni = charset2uni[*rawstring];
if (*uni == 0x0000)
return -EINVAL;
return 1;
}
static struct nls_table default_table = {
.charset = "default",
.uni2char = uni2char,
.char2uni = char2uni,
.charset2lower = charset2lower,
.charset2upper = charset2upper,
};
/* Returns a simple default translation table */
struct nls_table *load_nls_default(void)
{
struct nls_table *default_nls;
default_nls = load_nls(CONFIG_NLS_DEFAULT);
if (default_nls != NULL)
return default_nls;
else
return &default_table;
}
EXPORT_SYMBOL(unregister_nls);
EXPORT_SYMBOL(unload_nls);
EXPORT_SYMBOL(load_nls);
EXPORT_SYMBOL(load_nls_default);
MODULE_DESCRIPTION("Base file system native language support");
MODULE_LICENSE("Dual BSD/GPL");
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* max98088.c -- MAX98088 ALSA SoC Audio driver
*
* Copyright 2010 Maxim Integrated Products
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <sound/max98088.h>
#include "max98088.h"
enum max98088_type {
MAX98088,
MAX98089,
};
struct max98088_cdata {
unsigned int rate;
unsigned int fmt;
int eq_sel;
};
struct max98088_priv {
struct regmap *regmap;
enum max98088_type devtype;
struct max98088_pdata *pdata;
struct clk *mclk;
unsigned char mclk_prescaler;
unsigned int sysclk;
struct max98088_cdata dai[2];
int eq_textcnt;
const char **eq_texts;
struct soc_enum eq_enum;
u8 ina_state;
u8 inb_state;
unsigned int ex_mode;
unsigned int digmic;
unsigned int mic1pre;
unsigned int mic2pre;
unsigned int extmic_mode;
};
static const struct reg_default max98088_reg[] = {
{ 0xf, 0x00 }, /* 0F interrupt enable */
{ 0x10, 0x00 }, /* 10 master clock */
{ 0x11, 0x00 }, /* 11 DAI1 clock mode */
{ 0x12, 0x00 }, /* 12 DAI1 clock control */
{ 0x13, 0x00 }, /* 13 DAI1 clock control */
{ 0x14, 0x00 }, /* 14 DAI1 format */
{ 0x15, 0x00 }, /* 15 DAI1 clock */
{ 0x16, 0x00 }, /* 16 DAI1 config */
{ 0x17, 0x00 }, /* 17 DAI1 TDM */
{ 0x18, 0x00 }, /* 18 DAI1 filters */
{ 0x19, 0x00 }, /* 19 DAI2 clock mode */
{ 0x1a, 0x00 }, /* 1A DAI2 clock control */
{ 0x1b, 0x00 }, /* 1B DAI2 clock control */
{ 0x1c, 0x00 }, /* 1C DAI2 format */
{ 0x1d, 0x00 }, /* 1D DAI2 clock */
{ 0x1e, 0x00 }, /* 1E DAI2 config */
{ 0x1f, 0x00 }, /* 1F DAI2 TDM */
{ 0x20, 0x00 }, /* 20 DAI2 filters */
{ 0x21, 0x00 }, /* 21 data config */
{ 0x22, 0x00 }, /* 22 DAC mixer */
{ 0x23, 0x00 }, /* 23 left ADC mixer */
{ 0x24, 0x00 }, /* 24 right ADC mixer */
{ 0x25, 0x00 }, /* 25 left HP mixer */
{ 0x26, 0x00 }, /* 26 right HP mixer */
{ 0x27, 0x00 }, /* 27 HP control */
{ 0x28, 0x00 }, /* 28 left REC mixer */
{ 0x29, 0x00 }, /* 29 right REC mixer */
{ 0x2a, 0x00 }, /* 2A REC control */
{ 0x2b, 0x00 }, /* 2B left SPK mixer */
{ 0x2c, 0x00 }, /* 2C right SPK mixer */
{ 0x2d, 0x00 }, /* 2D SPK control */
{ 0x2e, 0x00 }, /* 2E sidetone */
{ 0x2f, 0x00 }, /* 2F DAI1 playback level */
{ 0x30, 0x00 }, /* 30 DAI1 playback level */
{ 0x31, 0x00 }, /* 31 DAI2 playback level */
{ 0x32, 0x00 }, /* 32 DAI2 playback level */
{ 0x33, 0x00 }, /* 33 left ADC level */
{ 0x34, 0x00 }, /* 34 right ADC level */
{ 0x35, 0x00 }, /* 35 MIC1 level */
{ 0x36, 0x00 }, /* 36 MIC2 level */
{ 0x37, 0x00 }, /* 37 INA level */
{ 0x38, 0x00 }, /* 38 INB level */
{ 0x39, 0x00 }, /* 39 left HP volume */
{ 0x3a, 0x00 }, /* 3A right HP volume */
{ 0x3b, 0x00 }, /* 3B left REC volume */
{ 0x3c, 0x00 }, /* 3C right REC volume */
{ 0x3d, 0x00 }, /* 3D left SPK volume */
{ 0x3e, 0x00 }, /* 3E right SPK volume */
{ 0x3f, 0x00 }, /* 3F MIC config */
{ 0x40, 0x00 }, /* 40 MIC threshold */
{ 0x41, 0x00 }, /* 41 excursion limiter filter */
{ 0x42, 0x00 }, /* 42 excursion limiter threshold */
{ 0x43, 0x00 }, /* 43 ALC */
{ 0x44, 0x00 }, /* 44 power limiter threshold */
{ 0x45, 0x00 }, /* 45 power limiter config */
{ 0x46, 0x00 }, /* 46 distortion limiter config */
{ 0x47, 0x00 }, /* 47 audio input */
{ 0x48, 0x00 }, /* 48 microphone */
{ 0x49, 0x00 }, /* 49 level control */
{ 0x4a, 0x00 }, /* 4A bypass switches */
{ 0x4b, 0x00 }, /* 4B jack detect */
{ 0x4c, 0x00 }, /* 4C input enable */
{ 0x4d, 0x00 }, /* 4D output enable */
{ 0x4e, 0xF0 }, /* 4E bias control */
{ 0x4f, 0x00 }, /* 4F DAC power */
{ 0x50, 0x0F }, /* 50 DAC power */
{ 0x51, 0x00 }, /* 51 system */
{ 0x52, 0x00 }, /* 52 DAI1 EQ1 */
{ 0x53, 0x00 }, /* 53 DAI1 EQ1 */
{ 0x54, 0x00 }, /* 54 DAI1 EQ1 */
{ 0x55, 0x00 }, /* 55 DAI1 EQ1 */
{ 0x56, 0x00 }, /* 56 DAI1 EQ1 */
{ 0x57, 0x00 }, /* 57 DAI1 EQ1 */
{ 0x58, 0x00 }, /* 58 DAI1 EQ1 */
{ 0x59, 0x00 }, /* 59 DAI1 EQ1 */
{ 0x5a, 0x00 }, /* 5A DAI1 EQ1 */
{ 0x5b, 0x00 }, /* 5B DAI1 EQ1 */
{ 0x5c, 0x00 }, /* 5C DAI1 EQ2 */
{ 0x5d, 0x00 }, /* 5D DAI1 EQ2 */
{ 0x5e, 0x00 }, /* 5E DAI1 EQ2 */
{ 0x5f, 0x00 }, /* 5F DAI1 EQ2 */
{ 0x60, 0x00 }, /* 60 DAI1 EQ2 */
{ 0x61, 0x00 }, /* 61 DAI1 EQ2 */
{ 0x62, 0x00 }, /* 62 DAI1 EQ2 */
{ 0x63, 0x00 }, /* 63 DAI1 EQ2 */
{ 0x64, 0x00 }, /* 64 DAI1 EQ2 */
{ 0x65, 0x00 }, /* 65 DAI1 EQ2 */
{ 0x66, 0x00 }, /* 66 DAI1 EQ3 */
{ 0x67, 0x00 }, /* 67 DAI1 EQ3 */
{ 0x68, 0x00 }, /* 68 DAI1 EQ3 */
{ 0x69, 0x00 }, /* 69 DAI1 EQ3 */
{ 0x6a, 0x00 }, /* 6A DAI1 EQ3 */
{ 0x6b, 0x00 }, /* 6B DAI1 EQ3 */
{ 0x6c, 0x00 }, /* 6C DAI1 EQ3 */
{ 0x6d, 0x00 }, /* 6D DAI1 EQ3 */
{ 0x6e, 0x00 }, /* 6E DAI1 EQ3 */
{ 0x6f, 0x00 }, /* 6F DAI1 EQ3 */
{ 0x70, 0x00 }, /* 70 DAI1 EQ4 */
{ 0x71, 0x00 }, /* 71 DAI1 EQ4 */
{ 0x72, 0x00 }, /* 72 DAI1 EQ4 */
{ 0x73, 0x00 }, /* 73 DAI1 EQ4 */
{ 0x74, 0x00 }, /* 74 DAI1 EQ4 */
{ 0x75, 0x00 }, /* 75 DAI1 EQ4 */
{ 0x76, 0x00 }, /* 76 DAI1 EQ4 */
{ 0x77, 0x00 }, /* 77 DAI1 EQ4 */
{ 0x78, 0x00 }, /* 78 DAI1 EQ4 */
{ 0x79, 0x00 }, /* 79 DAI1 EQ4 */
{ 0x7a, 0x00 }, /* 7A DAI1 EQ5 */
{ 0x7b, 0x00 }, /* 7B DAI1 EQ5 */
{ 0x7c, 0x00 }, /* 7C DAI1 EQ5 */
{ 0x7d, 0x00 }, /* 7D DAI1 EQ5 */
{ 0x7e, 0x00 }, /* 7E DAI1 EQ5 */
{ 0x7f, 0x00 }, /* 7F DAI1 EQ5 */
{ 0x80, 0x00 }, /* 80 DAI1 EQ5 */
{ 0x81, 0x00 }, /* 81 DAI1 EQ5 */
{ 0x82, 0x00 }, /* 82 DAI1 EQ5 */
{ 0x83, 0x00 }, /* 83 DAI1 EQ5 */
{ 0x84, 0x00 }, /* 84 DAI2 EQ1 */
{ 0x85, 0x00 }, /* 85 DAI2 EQ1 */
{ 0x86, 0x00 }, /* 86 DAI2 EQ1 */
{ 0x87, 0x00 }, /* 87 DAI2 EQ1 */
{ 0x88, 0x00 }, /* 88 DAI2 EQ1 */
{ 0x89, 0x00 }, /* 89 DAI2 EQ1 */
{ 0x8a, 0x00 }, /* 8A DAI2 EQ1 */
{ 0x8b, 0x00 }, /* 8B DAI2 EQ1 */
{ 0x8c, 0x00 }, /* 8C DAI2 EQ1 */
{ 0x8d, 0x00 }, /* 8D DAI2 EQ1 */
{ 0x8e, 0x00 }, /* 8E DAI2 EQ2 */
{ 0x8f, 0x00 }, /* 8F DAI2 EQ2 */
{ 0x90, 0x00 }, /* 90 DAI2 EQ2 */
{ 0x91, 0x00 }, /* 91 DAI2 EQ2 */
{ 0x92, 0x00 }, /* 92 DAI2 EQ2 */
{ 0x93, 0x00 }, /* 93 DAI2 EQ2 */
{ 0x94, 0x00 }, /* 94 DAI2 EQ2 */
{ 0x95, 0x00 }, /* 95 DAI2 EQ2 */
{ 0x96, 0x00 }, /* 96 DAI2 EQ2 */
{ 0x97, 0x00 }, /* 97 DAI2 EQ2 */
{ 0x98, 0x00 }, /* 98 DAI2 EQ3 */
{ 0x99, 0x00 }, /* 99 DAI2 EQ3 */
{ 0x9a, 0x00 }, /* 9A DAI2 EQ3 */
{ 0x9b, 0x00 }, /* 9B DAI2 EQ3 */
{ 0x9c, 0x00 }, /* 9C DAI2 EQ3 */
{ 0x9d, 0x00 }, /* 9D DAI2 EQ3 */
{ 0x9e, 0x00 }, /* 9E DAI2 EQ3 */
{ 0x9f, 0x00 }, /* 9F DAI2 EQ3 */
{ 0xa0, 0x00 }, /* A0 DAI2 EQ3 */
{ 0xa1, 0x00 }, /* A1 DAI2 EQ3 */
{ 0xa2, 0x00 }, /* A2 DAI2 EQ4 */
{ 0xa3, 0x00 }, /* A3 DAI2 EQ4 */
{ 0xa4, 0x00 }, /* A4 DAI2 EQ4 */
{ 0xa5, 0x00 }, /* A5 DAI2 EQ4 */
{ 0xa6, 0x00 }, /* A6 DAI2 EQ4 */
{ 0xa7, 0x00 }, /* A7 DAI2 EQ4 */
{ 0xa8, 0x00 }, /* A8 DAI2 EQ4 */
{ 0xa9, 0x00 }, /* A9 DAI2 EQ4 */
{ 0xaa, 0x00 }, /* AA DAI2 EQ4 */
{ 0xab, 0x00 }, /* AB DAI2 EQ4 */
{ 0xac, 0x00 }, /* AC DAI2 EQ5 */
{ 0xad, 0x00 }, /* AD DAI2 EQ5 */
{ 0xae, 0x00 }, /* AE DAI2 EQ5 */
{ 0xaf, 0x00 }, /* AF DAI2 EQ5 */
{ 0xb0, 0x00 }, /* B0 DAI2 EQ5 */
{ 0xb1, 0x00 }, /* B1 DAI2 EQ5 */
{ 0xb2, 0x00 }, /* B2 DAI2 EQ5 */
{ 0xb3, 0x00 }, /* B3 DAI2 EQ5 */
{ 0xb4, 0x00 }, /* B4 DAI2 EQ5 */
{ 0xb5, 0x00 }, /* B5 DAI2 EQ5 */
{ 0xb6, 0x00 }, /* B6 DAI1 biquad */
{ 0xb7, 0x00 }, /* B7 DAI1 biquad */
{ 0xb8, 0x00 }, /* B8 DAI1 biquad */
{ 0xb9, 0x00 }, /* B9 DAI1 biquad */
{ 0xba, 0x00 }, /* BA DAI1 biquad */
{ 0xbb, 0x00 }, /* BB DAI1 biquad */
{ 0xbc, 0x00 }, /* BC DAI1 biquad */
{ 0xbd, 0x00 }, /* BD DAI1 biquad */
{ 0xbe, 0x00 }, /* BE DAI1 biquad */
{ 0xbf, 0x00 }, /* BF DAI1 biquad */
{ 0xc0, 0x00 }, /* C0 DAI2 biquad */
{ 0xc1, 0x00 }, /* C1 DAI2 biquad */
{ 0xc2, 0x00 }, /* C2 DAI2 biquad */
{ 0xc3, 0x00 }, /* C3 DAI2 biquad */
{ 0xc4, 0x00 }, /* C4 DAI2 biquad */
{ 0xc5, 0x00 }, /* C5 DAI2 biquad */
{ 0xc6, 0x00 }, /* C6 DAI2 biquad */
{ 0xc7, 0x00 }, /* C7 DAI2 biquad */
{ 0xc8, 0x00 }, /* C8 DAI2 biquad */
{ 0xc9, 0x00 }, /* C9 DAI2 biquad */
};
static bool max98088_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case M98088_REG_00_IRQ_STATUS ... 0xC9:
case M98088_REG_FF_REV_ID:
return true;
default:
return false;
}
}
static bool max98088_writeable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case M98088_REG_03_BATTERY_VOLTAGE ... 0xC9:
return true;
default:
return false;
}
}
static bool max98088_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case M98088_REG_00_IRQ_STATUS ... M98088_REG_03_BATTERY_VOLTAGE:
case M98088_REG_FF_REV_ID:
return true;
default:
return false;
}
}
static const struct regmap_config max98088_regmap = {
.reg_bits = 8,
.val_bits = 8,
.readable_reg = max98088_readable_register,
.writeable_reg = max98088_writeable_register,
.volatile_reg = max98088_volatile_register,
.max_register = 0xff,
.reg_defaults = max98088_reg,
.num_reg_defaults = ARRAY_SIZE(max98088_reg),
.cache_type = REGCACHE_RBTREE,
};
/*
 * Load the equalizer DSP coefficient configuration registers
 */
static void m98088_eq_band(struct snd_soc_component *component, unsigned int dai,
unsigned int band, u16 *coefs)
{
unsigned int eq_reg;
unsigned int i;
if (WARN_ON(band > 4) ||
WARN_ON(dai > 1))
return;
/* Load the base register address */
eq_reg = dai ? M98088_REG_84_DAI2_EQ_BASE : M98088_REG_52_DAI1_EQ_BASE;
/* Add the band address offset, note adjustment for word address */
eq_reg += band * (M98088_COEFS_PER_BAND << 1);
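/*
 * Each coefficient is a 16-bit word split across two 8-bit registers,
 * so a band spans 2 * M98088_COEFS_PER_BAND registers; e.g. with five
 * coefficients per band (matching the 10-register EQ blocks in the
 * register map above), DAI1 band 2 starts at 0x52 + 2 * 10 = 0x66.
 */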
/* Step through the registers and coefs */
for (i = 0; i < M98088_COEFS_PER_BAND; i++) {
snd_soc_component_write(component, eq_reg++, M98088_BYTE1(coefs[i]));
snd_soc_component_write(component, eq_reg++, M98088_BYTE0(coefs[i]));
}
}
/*
* Excursion limiter modes
*/
static const char *max98088_exmode_texts[] = {
"Off", "100Hz", "400Hz", "600Hz", "800Hz", "1000Hz", "200-400Hz",
"400-600Hz", "400-800Hz",
};
static const unsigned int max98088_exmode_values[] = {
0x00, 0x43, 0x10, 0x20, 0x30, 0x40, 0x11, 0x22, 0x32
};
static SOC_VALUE_ENUM_SINGLE_DECL(max98088_exmode_enum,
M98088_REG_41_SPKDHP, 0, 127,
max98088_exmode_texts,
max98088_exmode_values);
static const char *max98088_ex_thresh[] = { /* volts PP */
"0.6", "1.2", "1.8", "2.4", "3.0", "3.6", "4.2", "4.8"};
static SOC_ENUM_SINGLE_DECL(max98088_ex_thresh_enum,
M98088_REG_42_SPKDHP_THRESH, 0,
max98088_ex_thresh);
static const char *max98088_fltr_mode[] = {"Voice", "Music" };
static SOC_ENUM_SINGLE_DECL(max98088_filter_mode_enum,
M98088_REG_18_DAI1_FILTERS, 7,
max98088_fltr_mode);
static const char *max98088_extmic_text[] = { "None", "MIC1", "MIC2" };
static SOC_ENUM_SINGLE_DECL(max98088_extmic_enum,
M98088_REG_48_CFG_MIC, 0,
max98088_extmic_text);
static const struct snd_kcontrol_new max98088_extmic_mux =
SOC_DAPM_ENUM("External MIC Mux", max98088_extmic_enum);
static const char *max98088_dai1_fltr[] = {
"Off", "fc=258/fs=16k", "fc=500/fs=16k",
"fc=258/fs=8k", "fc=500/fs=8k", "fc=200"};
static SOC_ENUM_SINGLE_DECL(max98088_dai1_dac_filter_enum,
M98088_REG_18_DAI1_FILTERS, 0,
max98088_dai1_fltr);
static SOC_ENUM_SINGLE_DECL(max98088_dai1_adc_filter_enum,
M98088_REG_18_DAI1_FILTERS, 4,
max98088_dai1_fltr);
static int max98088_mic1pre_set(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
unsigned int sel = ucontrol->value.integer.value[0];
max98088->mic1pre = sel;
snd_soc_component_update_bits(component, M98088_REG_35_LVL_MIC1, M98088_MICPRE_MASK,
(1+sel)<<M98088_MICPRE_SHIFT);
return 0;
}
static int max98088_mic1pre_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = max98088->mic1pre;
return 0;
}
static int max98088_mic2pre_set(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
unsigned int sel = ucontrol->value.integer.value[0];
max98088->mic2pre = sel;
snd_soc_component_update_bits(component, M98088_REG_36_LVL_MIC2, M98088_MICPRE_MASK,
(1+sel)<<M98088_MICPRE_SHIFT);
return 0;
}
static int max98088_mic2pre_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = max98088->mic2pre;
return 0;
}
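/* MIC preamp boost steps: 0 dB, +20 dB and +30 dB */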
static const DECLARE_TLV_DB_RANGE(max98088_micboost_tlv,
0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
);
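/* Headphone gain: -67 dB to +3 dB, with finer steps toward the top of the range */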
static const DECLARE_TLV_DB_RANGE(max98088_hp_tlv,
0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
);
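/* Speaker and receiver gain: -62 dB to +8 dB, with finer steps toward the top */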
static const DECLARE_TLV_DB_RANGE(max98088_spk_tlv,
0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
);
static const struct snd_kcontrol_new max98088_snd_controls[] = {
SOC_DOUBLE_R_TLV("Headphone Volume", M98088_REG_39_LVL_HP_L,
M98088_REG_3A_LVL_HP_R, 0, 31, 0, max98088_hp_tlv),
SOC_DOUBLE_R_TLV("Speaker Volume", M98088_REG_3D_LVL_SPK_L,
M98088_REG_3E_LVL_SPK_R, 0, 31, 0, max98088_spk_tlv),
SOC_DOUBLE_R_TLV("Receiver Volume", M98088_REG_3B_LVL_REC_L,
M98088_REG_3C_LVL_REC_R, 0, 31, 0, max98088_spk_tlv),
SOC_DOUBLE_R("Headphone Switch", M98088_REG_39_LVL_HP_L,
M98088_REG_3A_LVL_HP_R, 7, 1, 1),
SOC_DOUBLE_R("Speaker Switch", M98088_REG_3D_LVL_SPK_L,
M98088_REG_3E_LVL_SPK_R, 7, 1, 1),
SOC_DOUBLE_R("Receiver Switch", M98088_REG_3B_LVL_REC_L,
M98088_REG_3C_LVL_REC_R, 7, 1, 1),
SOC_SINGLE("MIC1 Volume", M98088_REG_35_LVL_MIC1, 0, 31, 1),
SOC_SINGLE("MIC2 Volume", M98088_REG_36_LVL_MIC2, 0, 31, 1),
SOC_SINGLE_EXT_TLV("MIC1 Boost Volume",
M98088_REG_35_LVL_MIC1, 5, 2, 0,
max98088_mic1pre_get, max98088_mic1pre_set,
max98088_micboost_tlv),
SOC_SINGLE_EXT_TLV("MIC2 Boost Volume",
M98088_REG_36_LVL_MIC2, 5, 2, 0,
max98088_mic2pre_get, max98088_mic2pre_set,
max98088_micboost_tlv),
SOC_SINGLE("Noise Gate Threshold", M98088_REG_40_MICAGC_THRESH,
4, 15, 0),
SOC_SINGLE("INA Volume", M98088_REG_37_LVL_INA, 0, 7, 1),
SOC_SINGLE("INB Volume", M98088_REG_38_LVL_INB, 0, 7, 1),
SOC_SINGLE("DACL Volume", M98088_REG_2F_LVL_DAI1_PLAY, 0, 15, 1),
SOC_SINGLE("DACR Volume", M98088_REG_31_LVL_DAI2_PLAY, 0, 15, 1),
SOC_SINGLE("ADCL Volume", M98088_REG_33_LVL_ADC_L, 0, 15, 0),
SOC_SINGLE("ADCR Volume", M98088_REG_34_LVL_ADC_R, 0, 15, 0),
SOC_SINGLE("ADCL Boost Volume", M98088_REG_33_LVL_ADC_L, 4, 3, 0),
SOC_SINGLE("ADCR Boost Volume", M98088_REG_34_LVL_ADC_R, 4, 3, 0),
SOC_SINGLE("Left HP Output Mixer Switch", M98088_REG_27_MIX_HP_CNTL, 4, 1, 0),
SOC_SINGLE("Right HP Output Mixer Switch", M98088_REG_27_MIX_HP_CNTL, 5, 1, 0),
SOC_SINGLE("EQ1 Switch", M98088_REG_49_CFG_LEVEL, 0, 1, 0),
SOC_SINGLE("EQ2 Switch", M98088_REG_49_CFG_LEVEL, 1, 1, 0),
SOC_ENUM("EX Limiter Mode", max98088_exmode_enum),
SOC_ENUM("EX Limiter Threshold", max98088_ex_thresh_enum),
SOC_ENUM("DAI1 Filter Mode", max98088_filter_mode_enum),
SOC_ENUM("DAI1 DAC Filter", max98088_dai1_dac_filter_enum),
SOC_ENUM("DAI1 ADC Filter", max98088_dai1_adc_filter_enum),
SOC_SINGLE("DAI2 DC Block Switch", M98088_REG_20_DAI2_FILTERS,
0, 1, 0),
SOC_SINGLE("ALC Switch", M98088_REG_43_SPKALC_COMP, 7, 1, 0),
SOC_SINGLE("ALC Threshold", M98088_REG_43_SPKALC_COMP, 0, 7, 0),
SOC_SINGLE("ALC Multiband", M98088_REG_43_SPKALC_COMP, 3, 1, 0),
SOC_SINGLE("ALC Release Time", M98088_REG_43_SPKALC_COMP, 4, 7, 0),
SOC_SINGLE("PWR Limiter Threshold", M98088_REG_44_PWRLMT_CFG,
4, 15, 0),
SOC_SINGLE("PWR Limiter Weight", M98088_REG_44_PWRLMT_CFG, 0, 7, 0),
SOC_SINGLE("PWR Limiter Time1", M98088_REG_45_PWRLMT_TIME, 0, 15, 0),
SOC_SINGLE("PWR Limiter Time2", M98088_REG_45_PWRLMT_TIME, 4, 15, 0),
SOC_SINGLE("THD Limiter Threshold", M98088_REG_46_THDLMT_CFG, 4, 15, 0),
SOC_SINGLE("THD Limiter Time", M98088_REG_46_THDLMT_CFG, 0, 7, 0),
};
/* Left speaker mixer switch */
static const struct snd_kcontrol_new max98088_left_speaker_mixer_controls[] = {
SOC_DAPM_SINGLE("Left DAC Switch", M98088_REG_2B_MIX_SPK_LEFT, 0, 1, 0),
SOC_DAPM_SINGLE("Right DAC Switch", M98088_REG_2B_MIX_SPK_LEFT, 7, 1, 0),
SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 5, 1, 0),
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 6, 1, 0),
SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 1, 1, 0),
SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 2, 1, 0),
SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 3, 1, 0),
SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 4, 1, 0),
};
/* Right speaker mixer switch */
static const struct snd_kcontrol_new max98088_right_speaker_mixer_controls[] = {
SOC_DAPM_SINGLE("Left DAC Switch", M98088_REG_2C_MIX_SPK_RIGHT, 7, 1, 0),
SOC_DAPM_SINGLE("Right DAC Switch", M98088_REG_2C_MIX_SPK_RIGHT, 0, 1, 0),
SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 5, 1, 0),
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 6, 1, 0),
SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 1, 1, 0),
SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 2, 1, 0),
SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 3, 1, 0),
SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 4, 1, 0),
};
/* Left headphone mixer switch */
static const struct snd_kcontrol_new max98088_left_hp_mixer_controls[] = {
SOC_DAPM_SINGLE("Left DAC Switch", M98088_REG_25_MIX_HP_LEFT, 0, 1, 0),
SOC_DAPM_SINGLE("Right DAC Switch", M98088_REG_25_MIX_HP_LEFT, 7, 1, 0),
SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_25_MIX_HP_LEFT, 5, 1, 0),
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_25_MIX_HP_LEFT, 6, 1, 0),
SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_25_MIX_HP_LEFT, 1, 1, 0),
SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_25_MIX_HP_LEFT, 2, 1, 0),
SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_25_MIX_HP_LEFT, 3, 1, 0),
SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_25_MIX_HP_LEFT, 4, 1, 0),
};
/* Right headphone mixer switch */
static const struct snd_kcontrol_new max98088_right_hp_mixer_controls[] = {
SOC_DAPM_SINGLE("Left DAC Switch", M98088_REG_26_MIX_HP_RIGHT, 7, 1, 0),
SOC_DAPM_SINGLE("Right DAC Switch", M98088_REG_26_MIX_HP_RIGHT, 0, 1, 0),
SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_26_MIX_HP_RIGHT, 5, 1, 0),
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_26_MIX_HP_RIGHT, 6, 1, 0),
SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_26_MIX_HP_RIGHT, 1, 1, 0),
SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_26_MIX_HP_RIGHT, 2, 1, 0),
SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_26_MIX_HP_RIGHT, 3, 1, 0),
SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_26_MIX_HP_RIGHT, 4, 1, 0),
};
/* Left earpiece/receiver mixer switch */
static const struct snd_kcontrol_new max98088_left_rec_mixer_controls[] = {
SOC_DAPM_SINGLE("Left DAC Switch", M98088_REG_28_MIX_REC_LEFT, 0, 1, 0),
SOC_DAPM_SINGLE("Right DAC Switch", M98088_REG_28_MIX_REC_LEFT, 7, 1, 0),
SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_28_MIX_REC_LEFT, 5, 1, 0),
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_28_MIX_REC_LEFT, 6, 1, 0),
SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_28_MIX_REC_LEFT, 1, 1, 0),
SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_28_MIX_REC_LEFT, 2, 1, 0),
SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_28_MIX_REC_LEFT, 3, 1, 0),
SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_28_MIX_REC_LEFT, 4, 1, 0),
};
/* Right earpiece/receiver mixer switch */
static const struct snd_kcontrol_new max98088_right_rec_mixer_controls[] = {
SOC_DAPM_SINGLE("Left DAC Switch", M98088_REG_29_MIX_REC_RIGHT, 7, 1, 0),
SOC_DAPM_SINGLE("Right DAC Switch", M98088_REG_29_MIX_REC_RIGHT, 0, 1, 0),
SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_29_MIX_REC_RIGHT, 5, 1, 0),
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_29_MIX_REC_RIGHT, 6, 1, 0),
SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_29_MIX_REC_RIGHT, 1, 1, 0),
SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_29_MIX_REC_RIGHT, 2, 1, 0),
SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_29_MIX_REC_RIGHT, 3, 1, 0),
SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_29_MIX_REC_RIGHT, 4, 1, 0),
};
/* Left ADC mixer switch */
static const struct snd_kcontrol_new max98088_left_ADC_mixer_controls[] = {
SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_23_MIX_ADC_LEFT, 7, 1, 0),
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_23_MIX_ADC_LEFT, 6, 1, 0),
SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_23_MIX_ADC_LEFT, 3, 1, 0),
SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_23_MIX_ADC_LEFT, 2, 1, 0),
SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_23_MIX_ADC_LEFT, 1, 1, 0),
SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_23_MIX_ADC_LEFT, 0, 1, 0),
};
/* Right ADC mixer switch */
static const struct snd_kcontrol_new max98088_right_ADC_mixer_controls[] = {
SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_24_MIX_ADC_RIGHT, 7, 1, 0),
SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 6, 1, 0),
SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_24_MIX_ADC_RIGHT, 3, 1, 0),
SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 2, 1, 0),
SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_24_MIX_ADC_RIGHT, 1, 1, 0),
SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 0, 1, 0),
};
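/*
 * Reapply the stored MIC preamp gain when a MIC input powers up, and
 * clear the preamp bits again when it powers down.
 */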
static int max98088_mic_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
if (w->reg == M98088_REG_35_LVL_MIC1) {
snd_soc_component_update_bits(component, w->reg, M98088_MICPRE_MASK,
(1+max98088->mic1pre)<<M98088_MICPRE_SHIFT);
} else {
snd_soc_component_update_bits(component, w->reg, M98088_MICPRE_MASK,
(1+max98088->mic2pre)<<M98088_MICPRE_SHIFT);
}
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_component_update_bits(component, w->reg, M98088_MICPRE_MASK, 0);
break;
default:
return -EINVAL;
}
return 0;
}
/*
* The line inputs are 2-channel stereo inputs with the left
* and right channels sharing a common PGA power control signal.
*/
static int max98088_line_pga(struct snd_soc_dapm_widget *w,
int event, int line, u8 channel)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
u8 *state;
if (WARN_ON(!(channel == 1 || channel == 2)))
return -EINVAL;
switch (line) {
case LINE_INA:
state = &max98088->ina_state;
break;
case LINE_INB:
state = &max98088->inb_state;
break;
default:
return -EINVAL;
}
switch (event) {
case SND_SOC_DAPM_POST_PMU:
*state |= channel;
snd_soc_component_update_bits(component, w->reg,
(1 << w->shift), (1 << w->shift));
break;
case SND_SOC_DAPM_POST_PMD:
*state &= ~channel;
if (*state == 0) {
snd_soc_component_update_bits(component, w->reg,
(1 << w->shift), 0);
}
break;
default:
return -EINVAL;
}
return 0;
}
static int max98088_pga_ina1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
return max98088_line_pga(w, event, LINE_INA, 1);
}
static int max98088_pga_ina2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
return max98088_line_pga(w, event, LINE_INA, 2);
}
static int max98088_pga_inb1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
return max98088_line_pga(w, event, LINE_INB, 1);
}
static int max98088_pga_inb2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
return max98088_line_pga(w, event, LINE_INB, 2);
}
static const struct snd_soc_dapm_widget max98088_dapm_widgets[] = {
SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", M98088_REG_4C_PWR_EN_IN, 1, 0),
SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", M98088_REG_4C_PWR_EN_IN, 0, 0),
SND_SOC_DAPM_DAC("DACL", "HiFi Playback",
M98088_REG_4D_PWR_EN_OUT, 1, 0),
SND_SOC_DAPM_DAC("DACR", "HiFi Playback",
M98088_REG_4D_PWR_EN_OUT, 0, 0),
SND_SOC_DAPM_PGA("HP Left Out", M98088_REG_4D_PWR_EN_OUT,
7, 0, NULL, 0),
SND_SOC_DAPM_PGA("HP Right Out", M98088_REG_4D_PWR_EN_OUT,
6, 0, NULL, 0),
SND_SOC_DAPM_PGA("SPK Left Out", M98088_REG_4D_PWR_EN_OUT,
5, 0, NULL, 0),
SND_SOC_DAPM_PGA("SPK Right Out", M98088_REG_4D_PWR_EN_OUT,
4, 0, NULL, 0),
SND_SOC_DAPM_PGA("REC Left Out", M98088_REG_4D_PWR_EN_OUT,
3, 0, NULL, 0),
SND_SOC_DAPM_PGA("REC Right Out", M98088_REG_4D_PWR_EN_OUT,
2, 0, NULL, 0),
SND_SOC_DAPM_MUX("External MIC", SND_SOC_NOPM, 0, 0,
&max98088_extmic_mux),
SND_SOC_DAPM_MIXER("Left HP Mixer", SND_SOC_NOPM, 0, 0,
&max98088_left_hp_mixer_controls[0],
ARRAY_SIZE(max98088_left_hp_mixer_controls)),
SND_SOC_DAPM_MIXER("Right HP Mixer", SND_SOC_NOPM, 0, 0,
&max98088_right_hp_mixer_controls[0],
ARRAY_SIZE(max98088_right_hp_mixer_controls)),
SND_SOC_DAPM_MIXER("Left SPK Mixer", SND_SOC_NOPM, 0, 0,
&max98088_left_speaker_mixer_controls[0],
ARRAY_SIZE(max98088_left_speaker_mixer_controls)),
SND_SOC_DAPM_MIXER("Right SPK Mixer", SND_SOC_NOPM, 0, 0,
&max98088_right_speaker_mixer_controls[0],
ARRAY_SIZE(max98088_right_speaker_mixer_controls)),
SND_SOC_DAPM_MIXER("Left REC Mixer", SND_SOC_NOPM, 0, 0,
&max98088_left_rec_mixer_controls[0],
ARRAY_SIZE(max98088_left_rec_mixer_controls)),
SND_SOC_DAPM_MIXER("Right REC Mixer", SND_SOC_NOPM, 0, 0,
&max98088_right_rec_mixer_controls[0],
ARRAY_SIZE(max98088_right_rec_mixer_controls)),
SND_SOC_DAPM_MIXER("Left ADC Mixer", SND_SOC_NOPM, 0, 0,
&max98088_left_ADC_mixer_controls[0],
ARRAY_SIZE(max98088_left_ADC_mixer_controls)),
SND_SOC_DAPM_MIXER("Right ADC Mixer", SND_SOC_NOPM, 0, 0,
&max98088_right_ADC_mixer_controls[0],
ARRAY_SIZE(max98088_right_ADC_mixer_controls)),
SND_SOC_DAPM_PGA_E("MIC1 Input", M98088_REG_35_LVL_MIC1,
5, 0, NULL, 0, max98088_mic_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA_E("MIC2 Input", M98088_REG_36_LVL_MIC2,
5, 0, NULL, 0, max98088_mic_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA_E("INA1 Input", M98088_REG_4C_PWR_EN_IN,
7, 0, NULL, 0, max98088_pga_ina1_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA_E("INA2 Input", M98088_REG_4C_PWR_EN_IN,
7, 0, NULL, 0, max98088_pga_ina2_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA_E("INB1 Input", M98088_REG_4C_PWR_EN_IN,
6, 0, NULL, 0, max98088_pga_inb1_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA_E("INB2 Input", M98088_REG_4C_PWR_EN_IN,
6, 0, NULL, 0, max98088_pga_inb2_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MICBIAS("MICBIAS", M98088_REG_4C_PWR_EN_IN, 3, 0),
SND_SOC_DAPM_OUTPUT("HPL"),
SND_SOC_DAPM_OUTPUT("HPR"),
SND_SOC_DAPM_OUTPUT("SPKL"),
SND_SOC_DAPM_OUTPUT("SPKR"),
SND_SOC_DAPM_OUTPUT("RECL"),
SND_SOC_DAPM_OUTPUT("RECR"),
SND_SOC_DAPM_INPUT("MIC1"),
SND_SOC_DAPM_INPUT("MIC2"),
SND_SOC_DAPM_INPUT("INA1"),
SND_SOC_DAPM_INPUT("INA2"),
SND_SOC_DAPM_INPUT("INB1"),
SND_SOC_DAPM_INPUT("INB2"),
};
static const struct snd_soc_dapm_route max98088_audio_map[] = {
/* Left headphone output mixer */
{"Left HP Mixer", "Left DAC Switch", "DACL"},
{"Left HP Mixer", "Right DAC Switch", "DACR"},
{"Left HP Mixer", "MIC1 Switch", "MIC1 Input"},
{"Left HP Mixer", "MIC2 Switch", "MIC2 Input"},
{"Left HP Mixer", "INA1 Switch", "INA1 Input"},
{"Left HP Mixer", "INA2 Switch", "INA2 Input"},
{"Left HP Mixer", "INB1 Switch", "INB1 Input"},
{"Left HP Mixer", "INB2 Switch", "INB2 Input"},
/* Right headphone output mixer */
{"Right HP Mixer", "Left DAC Switch", "DACL"},
{"Right HP Mixer", "Right DAC Switch", "DACR"},
{"Right HP Mixer", "MIC1 Switch", "MIC1 Input"},
{"Right HP Mixer", "MIC2 Switch", "MIC2 Input"},
{"Right HP Mixer", "INA1 Switch", "INA1 Input"},
{"Right HP Mixer", "INA2 Switch", "INA2 Input"},
{"Right HP Mixer", "INB1 Switch", "INB1 Input"},
{"Right HP Mixer", "INB2 Switch", "INB2 Input"},
/* Left speaker output mixer */
{"Left SPK Mixer", "Left DAC Switch", "DACL"},
{"Left SPK Mixer", "Right DAC Switch", "DACR"},
{"Left SPK Mixer", "MIC1 Switch", "MIC1 Input"},
{"Left SPK Mixer", "MIC2 Switch", "MIC2 Input"},
{"Left SPK Mixer", "INA1 Switch", "INA1 Input"},
{"Left SPK Mixer", "INA2 Switch", "INA2 Input"},
{"Left SPK Mixer", "INB1 Switch", "INB1 Input"},
{"Left SPK Mixer", "INB2 Switch", "INB2 Input"},
/* Right speaker output mixer */
{"Right SPK Mixer", "Left DAC Switch", "DACL"},
{"Right SPK Mixer", "Right DAC Switch", "DACR"},
{"Right SPK Mixer", "MIC1 Switch", "MIC1 Input"},
{"Right SPK Mixer", "MIC2 Switch", "MIC2 Input"},
{"Right SPK Mixer", "INA1 Switch", "INA1 Input"},
{"Right SPK Mixer", "INA2 Switch", "INA2 Input"},
{"Right SPK Mixer", "INB1 Switch", "INB1 Input"},
{"Right SPK Mixer", "INB2 Switch", "INB2 Input"},
/* Earpiece/Receiver output mixer */
{"Left REC Mixer", "Left DAC Switch", "DACL"},
{"Left REC Mixer", "Right DAC Switch", "DACR"},
{"Left REC Mixer", "MIC1 Switch", "MIC1 Input"},
{"Left REC Mixer", "MIC2 Switch", "MIC2 Input"},
{"Left REC Mixer", "INA1 Switch", "INA1 Input"},
{"Left REC Mixer", "INA2 Switch", "INA2 Input"},
{"Left REC Mixer", "INB1 Switch", "INB1 Input"},
{"Left REC Mixer", "INB2 Switch", "INB2 Input"},
/* Earpiece/Receiver output mixer */
{"Right REC Mixer", "Left DAC Switch", "DACL"},
{"Right REC Mixer", "Right DAC Switch", "DACR"},
{"Right REC Mixer", "MIC1 Switch", "MIC1 Input"},
{"Right REC Mixer", "MIC2 Switch", "MIC2 Input"},
{"Right REC Mixer", "INA1 Switch", "INA1 Input"},
{"Right REC Mixer", "INA2 Switch", "INA2 Input"},
{"Right REC Mixer", "INB1 Switch", "INB1 Input"},
{"Right REC Mixer", "INB2 Switch", "INB2 Input"},
{"HP Left Out", NULL, "Left HP Mixer"},
{"HP Right Out", NULL, "Right HP Mixer"},
{"SPK Left Out", NULL, "Left SPK Mixer"},
{"SPK Right Out", NULL, "Right SPK Mixer"},
{"REC Left Out", NULL, "Left REC Mixer"},
{"REC Right Out", NULL, "Right REC Mixer"},
{"HPL", NULL, "HP Left Out"},
{"HPR", NULL, "HP Right Out"},
{"SPKL", NULL, "SPK Left Out"},
{"SPKR", NULL, "SPK Right Out"},
{"RECL", NULL, "REC Left Out"},
{"RECR", NULL, "REC Right Out"},
/* Left ADC input mixer */
{"Left ADC Mixer", "MIC1 Switch", "MIC1 Input"},
{"Left ADC Mixer", "MIC2 Switch", "MIC2 Input"},
{"Left ADC Mixer", "INA1 Switch", "INA1 Input"},
{"Left ADC Mixer", "INA2 Switch", "INA2 Input"},
{"Left ADC Mixer", "INB1 Switch", "INB1 Input"},
{"Left ADC Mixer", "INB2 Switch", "INB2 Input"},
/* Right ADC input mixer */
{"Right ADC Mixer", "MIC1 Switch", "MIC1 Input"},
{"Right ADC Mixer", "MIC2 Switch", "MIC2 Input"},
{"Right ADC Mixer", "INA1 Switch", "INA1 Input"},
{"Right ADC Mixer", "INA2 Switch", "INA2 Input"},
{"Right ADC Mixer", "INB1 Switch", "INB1 Input"},
{"Right ADC Mixer", "INB2 Switch", "INB2 Input"},
/* Inputs */
{"ADCL", NULL, "Left ADC Mixer"},
{"ADCR", NULL, "Right ADC Mixer"},
{"INA1 Input", NULL, "INA1"},
{"INA2 Input", NULL, "INA2"},
{"INB1 Input", NULL, "INB1"},
{"INB2 Input", NULL, "INB2"},
{"MIC1 Input", NULL, "MIC1"},
{"MIC2 Input", NULL, "MIC2"},
};
/* codec mclk clock divider coefficients */
static const struct {
u32 rate;
u8 sr;
} rate_table[] = {
{8000, 0x10},
{11025, 0x20},
{16000, 0x30},
{22050, 0x40},
{24000, 0x50},
{32000, 0x60},
{44100, 0x70},
{48000, 0x80},
{88200, 0x90},
{96000, 0xA0},
};
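/*
 * Map a requested sample rate to the codec's clock-mode divider field.
 * The first table entry with a rate >= the request wins, so e.g. 44100
 * selects 0x70, while a rate above 96000 falls back to the first entry
 * and returns -EINVAL.
 */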
static inline int rate_value(int rate, u8 *value)
{
int i;
for (i = 0; i < ARRAY_SIZE(rate_table); i++) {
if (rate_table[i].rate >= rate) {
*value = rate_table[i].sr;
return 0;
}
}
*value = rate_table[0].sr;
return -EINVAL;
}
static int max98088_dai1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_cdata *cdata;
unsigned long long ni;
unsigned int rate;
u8 regval;
cdata = &max98088->dai[0];
rate = params_rate(params);
switch (params_width(params)) {
case 16:
snd_soc_component_update_bits(component, M98088_REG_14_DAI1_FORMAT,
M98088_DAI_WS, 0);
break;
case 24:
snd_soc_component_update_bits(component, M98088_REG_14_DAI1_FORMAT,
M98088_DAI_WS, M98088_DAI_WS);
break;
default:
return -EINVAL;
}
snd_soc_component_update_bits(component, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, 0);
if (rate_value(rate, &regval))
return -EINVAL;
snd_soc_component_update_bits(component, M98088_REG_11_DAI1_CLKMODE,
M98088_CLKMODE_MASK, regval);
cdata->rate = rate;
/* Configure NI when operating as master */
if (snd_soc_component_read(component, M98088_REG_14_DAI1_FORMAT)
& M98088_DAI_MAS) {
unsigned long pclk;
if (max98088->sysclk == 0) {
dev_err(component->dev, "Invalid system clock frequency\n");
return -EINVAL;
}
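/*
 * NI = 65536 * (96 below 50 kHz, else 48) * rate / prescaled MCLK.
 * A worked example (figures illustrative): rate = 48000 with a 24 MHz
 * sysclk and prescaler 2 gives pclk = 12 MHz and
 * NI = 65536 * 96 * 48000 / 12000000 = 25166 (rounded to nearest).
 */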
ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
* (unsigned long long int)rate;
pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
snd_soc_component_write(component, M98088_REG_12_DAI1_CLKCFG_HI,
(ni >> 8) & 0x7F);
snd_soc_component_write(component, M98088_REG_13_DAI1_CLKCFG_LO,
ni & 0xFF);
}
/* Update sample rate mode */
if (rate < 50000)
snd_soc_component_update_bits(component, M98088_REG_18_DAI1_FILTERS,
M98088_DAI_DHF, 0);
else
snd_soc_component_update_bits(component, M98088_REG_18_DAI1_FILTERS,
M98088_DAI_DHF, M98088_DAI_DHF);
snd_soc_component_update_bits(component, M98088_REG_51_PWR_SYS, M98088_SHDNRUN,
M98088_SHDNRUN);
return 0;
}
static int max98088_dai2_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_cdata *cdata;
unsigned long long ni;
unsigned int rate;
u8 regval;
cdata = &max98088->dai[1];
rate = params_rate(params);
switch (params_width(params)) {
case 16:
snd_soc_component_update_bits(component, M98088_REG_1C_DAI2_FORMAT,
M98088_DAI_WS, 0);
break;
case 24:
snd_soc_component_update_bits(component, M98088_REG_1C_DAI2_FORMAT,
M98088_DAI_WS, M98088_DAI_WS);
break;
default:
return -EINVAL;
}
snd_soc_component_update_bits(component, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, 0);
if (rate_value(rate, &regval))
return -EINVAL;
snd_soc_component_update_bits(component, M98088_REG_19_DAI2_CLKMODE,
M98088_CLKMODE_MASK, regval);
cdata->rate = rate;
/* Configure NI when operating as master */
if (snd_soc_component_read(component, M98088_REG_1C_DAI2_FORMAT)
& M98088_DAI_MAS) {
unsigned long pclk;
if (max98088->sysclk == 0) {
dev_err(component->dev, "Invalid system clock frequency\n");
return -EINVAL;
}
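/* NI is derived exactly as for DAI1; see the worked example there. */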
ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
* (unsigned long long int)rate;
pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
snd_soc_component_write(component, M98088_REG_1A_DAI2_CLKCFG_HI,
(ni >> 8) & 0x7F);
snd_soc_component_write(component, M98088_REG_1B_DAI2_CLKCFG_LO,
ni & 0xFF);
}
/* Update sample rate mode */
if (rate < 50000)
snd_soc_component_update_bits(component, M98088_REG_20_DAI2_FILTERS,
M98088_DAI_DHF, 0);
else
snd_soc_component_update_bits(component, M98088_REG_20_DAI2_FILTERS,
M98088_DAI_DHF, M98088_DAI_DHF);
snd_soc_component_update_bits(component, M98088_REG_51_PWR_SYS, M98088_SHDNRUN,
M98088_SHDNRUN);
return 0;
}
static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_component *component = dai->component;
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
/* Requested clock frequency is already setup */
if (freq == max98088->sysclk)
return 0;
if (!IS_ERR(max98088->mclk)) {
freq = clk_round_rate(max98088->mclk, freq);
clk_set_rate(max98088->mclk, freq);
}
/* Set up the clocks for slave mode using the PLL.
 * PSCLK = 0x01 when the master clock is 10 MHz to 20 MHz,
 *         0x02 when the master clock is 20 MHz to 30 MHz.
 */
if ((freq >= 10000000) && (freq < 20000000)) {
snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x10);
max98088->mclk_prescaler = 1;
} else if ((freq >= 20000000) && (freq < 30000000)) {
snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x20);
max98088->mclk_prescaler = 2;
} else {
dev_err(component->dev, "Invalid master clock frequency\n");
return -EINVAL;
}
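/*
 * If the codec is already running, cycle SHDNRUN so the new clock
 * configuration takes effect.
 */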
if (snd_soc_component_read(component, M98088_REG_51_PWR_SYS) & M98088_SHDNRUN) {
snd_soc_component_update_bits(component, M98088_REG_51_PWR_SYS,
M98088_SHDNRUN, 0);
snd_soc_component_update_bits(component, M98088_REG_51_PWR_SYS,
M98088_SHDNRUN, M98088_SHDNRUN);
}
dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);
max98088->sysclk = freq;
return 0;
}
static int max98088_dai1_set_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_component *component = codec_dai->component;
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_cdata *cdata;
u8 reg15val;
u8 reg14val = 0;
cdata = &max98088->dai[0];
if (fmt != cdata->fmt) {
cdata->fmt = fmt;
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_CBC_CFC:
/* Consumer mode PLL */
snd_soc_component_write(component, M98088_REG_12_DAI1_CLKCFG_HI,
0x80);
snd_soc_component_write(component, M98088_REG_13_DAI1_CLKCFG_LO,
0x00);
break;
case SND_SOC_DAIFMT_CBP_CFP:
/* Set to provider mode */
reg14val |= M98088_DAI_MAS;
break;
default:
dev_err(component->dev, "Clock mode unsupported");
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
reg14val |= M98088_DAI_DLY;
break;
case SND_SOC_DAIFMT_LEFT_J:
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_NB_IF:
reg14val |= M98088_DAI_WCI;
break;
case SND_SOC_DAIFMT_IB_NF:
reg14val |= M98088_DAI_BCI;
break;
case SND_SOC_DAIFMT_IB_IF:
reg14val |= M98088_DAI_BCI|M98088_DAI_WCI;
break;
default:
return -EINVAL;
}
snd_soc_component_update_bits(component, M98088_REG_14_DAI1_FORMAT,
M98088_DAI_MAS | M98088_DAI_DLY | M98088_DAI_BCI |
M98088_DAI_WCI, reg14val);
reg15val = M98088_DAI_BSEL64;
if (max98088->digmic)
reg15val |= M98088_DAI_OSR64;
snd_soc_component_write(component, M98088_REG_15_DAI1_CLOCK, reg15val);
}
return 0;
}
static int max98088_dai2_set_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_component *component = codec_dai->component;
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_cdata *cdata;
u8 reg1Cval = 0;
cdata = &max98088->dai[1];
if (fmt != cdata->fmt) {
cdata->fmt = fmt;
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_CBC_CFC:
/* Consumer mode PLL */
snd_soc_component_write(component, M98088_REG_1A_DAI2_CLKCFG_HI,
0x80);
snd_soc_component_write(component, M98088_REG_1B_DAI2_CLKCFG_LO,
0x00);
break;
case SND_SOC_DAIFMT_CBP_CFP:
/* Set to provider mode */
reg1Cval |= M98088_DAI_MAS;
break;
default:
dev_err(component->dev, "Clock mode unsupported");
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
reg1Cval |= M98088_DAI_DLY;
break;
case SND_SOC_DAIFMT_LEFT_J:
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_NB_IF:
reg1Cval |= M98088_DAI_WCI;
break;
case SND_SOC_DAIFMT_IB_NF:
reg1Cval |= M98088_DAI_BCI;
break;
case SND_SOC_DAIFMT_IB_IF:
reg1Cval |= M98088_DAI_BCI|M98088_DAI_WCI;
break;
default:
return -EINVAL;
}
snd_soc_component_update_bits(component, M98088_REG_1C_DAI2_FORMAT,
M98088_DAI_MAS | M98088_DAI_DLY | M98088_DAI_BCI |
M98088_DAI_WCI, reg1Cval);
snd_soc_component_write(component, M98088_REG_1D_DAI2_CLOCK,
M98088_DAI_BSEL64);
}
return 0;
}
static int max98088_dai1_mute(struct snd_soc_dai *codec_dai, int mute,
int direction)
{
struct snd_soc_component *component = codec_dai->component;
int reg;
if (mute)
reg = M98088_DAI_MUTE;
else
reg = 0;
snd_soc_component_update_bits(component, M98088_REG_2F_LVL_DAI1_PLAY,
M98088_DAI_MUTE_MASK, reg);
return 0;
}
static int max98088_dai2_mute(struct snd_soc_dai *codec_dai, int mute,
int direction)
{
struct snd_soc_component *component = codec_dai->component;
int reg;
if (mute)
reg = M98088_DAI_MUTE;
else
reg = 0;
snd_soc_component_update_bits(component, M98088_REG_31_LVL_DAI2_PLAY,
M98088_DAI_MUTE_MASK, reg);
return 0;
}
static int max98088_set_bias_level(struct snd_soc_component *component,
enum snd_soc_bias_level level)
{
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
int ret;
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
/*
* SND_SOC_BIAS_PREPARE is called while preparing for a
* transition to ON or away from ON. If current bias_level
* is SND_SOC_BIAS_ON, then it is preparing for a transition
* away from ON. Disable the clock in that case, otherwise
* enable it.
*/
if (!IS_ERR(max98088->mclk)) {
if (snd_soc_component_get_bias_level(component) ==
SND_SOC_BIAS_ON) {
clk_disable_unprepare(max98088->mclk);
} else {
ret = clk_prepare_enable(max98088->mclk);
if (ret)
return ret;
}
}
break;
case SND_SOC_BIAS_STANDBY:
if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
regcache_sync(max98088->regmap);
snd_soc_component_update_bits(component, M98088_REG_4C_PWR_EN_IN,
M98088_MBEN, M98088_MBEN);
break;
case SND_SOC_BIAS_OFF:
snd_soc_component_update_bits(component, M98088_REG_4C_PWR_EN_IN,
M98088_MBEN, 0);
regcache_mark_dirty(max98088->regmap);
break;
}
return 0;
}
#define MAX98088_RATES SNDRV_PCM_RATE_8000_96000
#define MAX98088_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops max98088_dai1_ops = {
.set_sysclk = max98088_dai_set_sysclk,
.set_fmt = max98088_dai1_set_fmt,
.hw_params = max98088_dai1_hw_params,
.mute_stream = max98088_dai1_mute,
.no_capture_mute = 1,
};
static const struct snd_soc_dai_ops max98088_dai2_ops = {
.set_sysclk = max98088_dai_set_sysclk,
.set_fmt = max98088_dai2_set_fmt,
.hw_params = max98088_dai2_hw_params,
.mute_stream = max98088_dai2_mute,
.no_capture_mute = 1,
};
static struct snd_soc_dai_driver max98088_dai[] = {
{
.name = "HiFi",
.playback = {
.stream_name = "HiFi Playback",
.channels_min = 1,
.channels_max = 2,
.rates = MAX98088_RATES,
.formats = MAX98088_FORMATS,
},
.capture = {
.stream_name = "HiFi Capture",
.channels_min = 1,
.channels_max = 2,
.rates = MAX98088_RATES,
.formats = MAX98088_FORMATS,
},
.ops = &max98088_dai1_ops,
},
{
.name = "Aux",
.playback = {
.stream_name = "Aux Playback",
.channels_min = 1,
.channels_max = 2,
.rates = MAX98088_RATES,
.formats = MAX98088_FORMATS,
},
.ops = &max98088_dai2_ops,
}
};
static const char *eq_mode_name[] = {"EQ1 Mode", "EQ2 Mode"};
static int max98088_get_channel(struct snd_soc_component *component, const char *name)
{
int ret;
ret = match_string(eq_mode_name, ARRAY_SIZE(eq_mode_name), name);
if (ret < 0)
dev_err(component->dev, "Bad EQ channel name '%s'\n", name);
return ret;
}
static void max98088_setup_eq1(struct snd_soc_component *component)
{
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_pdata *pdata = max98088->pdata;
struct max98088_eq_cfg *coef_set;
int best, best_val, save, i, sel, fs;
struct max98088_cdata *cdata;
cdata = &max98088->dai[0];
if (!pdata || !max98088->eq_textcnt)
return;
/* Find the selected configuration with nearest sample rate */
fs = cdata->rate;
sel = cdata->eq_sel;
best = 0;
best_val = INT_MAX;
for (i = 0; i < pdata->eq_cfgcnt; i++) {
if (strcmp(pdata->eq_cfg[i].name, max98088->eq_texts[sel]) == 0 &&
abs(pdata->eq_cfg[i].rate - fs) < best_val) {
best = i;
best_val = abs(pdata->eq_cfg[i].rate - fs);
}
}
dev_dbg(component->dev, "Selected %s/%dHz for %dHz sample rate\n",
pdata->eq_cfg[best].name,
pdata->eq_cfg[best].rate, fs);
/* Disable EQ while configuring, and save current on/off state */
save = snd_soc_component_read(component, M98088_REG_49_CFG_LEVEL);
snd_soc_component_update_bits(component, M98088_REG_49_CFG_LEVEL, M98088_EQ1EN, 0);
/* Load the coefficient set found above for the nearest sample rate */
coef_set = &pdata->eq_cfg[best];
m98088_eq_band(component, 0, 0, coef_set->band1);
m98088_eq_band(component, 0, 1, coef_set->band2);
m98088_eq_band(component, 0, 2, coef_set->band3);
m98088_eq_band(component, 0, 3, coef_set->band4);
m98088_eq_band(component, 0, 4, coef_set->band5);
/* Restore the original on/off state */
snd_soc_component_update_bits(component, M98088_REG_49_CFG_LEVEL, M98088_EQ1EN, save);
}
static void max98088_setup_eq2(struct snd_soc_component *component)
{
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_pdata *pdata = max98088->pdata;
struct max98088_eq_cfg *coef_set;
int best, best_val, save, i, sel, fs;
struct max98088_cdata *cdata;
cdata = &max98088->dai[1];
if (!pdata || !max98088->eq_textcnt)
return;
/* Find the selected configuration with nearest sample rate */
fs = cdata->rate;
sel = cdata->eq_sel;
best = 0;
best_val = INT_MAX;
for (i = 0; i < pdata->eq_cfgcnt; i++) {
if (strcmp(pdata->eq_cfg[i].name, max98088->eq_texts[sel]) == 0 &&
abs(pdata->eq_cfg[i].rate - fs) < best_val) {
best = i;
best_val = abs(pdata->eq_cfg[i].rate - fs);
}
}
dev_dbg(component->dev, "Selected %s/%dHz for %dHz sample rate\n",
pdata->eq_cfg[best].name,
pdata->eq_cfg[best].rate, fs);
/* Disable EQ while configuring, and save current on/off state */
save = snd_soc_component_read(component, M98088_REG_49_CFG_LEVEL);
snd_soc_component_update_bits(component, M98088_REG_49_CFG_LEVEL, M98088_EQ2EN, 0);
/* Load the coefficient set found above for the nearest sample rate */
coef_set = &pdata->eq_cfg[best];
m98088_eq_band(component, 1, 0, coef_set->band1);
m98088_eq_band(component, 1, 1, coef_set->band2);
m98088_eq_band(component, 1, 2, coef_set->band3);
m98088_eq_band(component, 1, 3, coef_set->band4);
m98088_eq_band(component, 1, 4, coef_set->band5);
/* Restore the original on/off state */
snd_soc_component_update_bits(component, M98088_REG_49_CFG_LEVEL, M98088_EQ2EN,
save);
}
static int max98088_put_eq_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_pdata *pdata = max98088->pdata;
int channel = max98088_get_channel(component, kcontrol->id.name);
struct max98088_cdata *cdata;
int sel = ucontrol->value.enumerated.item[0];
if (channel < 0)
return channel;
cdata = &max98088->dai[channel];
if (sel >= pdata->eq_cfgcnt)
return -EINVAL;
cdata->eq_sel = sel;
switch (channel) {
case 0:
max98088_setup_eq1(component);
break;
case 1:
max98088_setup_eq2(component);
break;
}
return 0;
}
static int max98088_get_eq_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
int channel = max98088_get_channel(component, kcontrol->id.name);
struct max98088_cdata *cdata;
if (channel < 0)
return channel;
cdata = &max98088->dai[channel];
ucontrol->value.enumerated.item[0] = cdata->eq_sel;
return 0;
}
static void max98088_handle_eq_pdata(struct snd_soc_component *component)
{
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_pdata *pdata = max98088->pdata;
struct max98088_eq_cfg *cfg;
unsigned int cfgcnt;
int i, j;
const char **t;
int ret;
struct snd_kcontrol_new controls[] = {
SOC_ENUM_EXT((char *)eq_mode_name[0],
max98088->eq_enum,
max98088_get_eq_enum,
max98088_put_eq_enum),
SOC_ENUM_EXT((char *)eq_mode_name[1],
max98088->eq_enum,
max98088_get_eq_enum,
max98088_put_eq_enum),
};
BUILD_BUG_ON(ARRAY_SIZE(controls) != ARRAY_SIZE(eq_mode_name));
cfg = pdata->eq_cfg;
cfgcnt = pdata->eq_cfgcnt;
/* Setup an array of texts for the equalizer enum.
* This is based on Mark Brown's equalizer driver code.
*/
max98088->eq_textcnt = 0;
max98088->eq_texts = NULL;
for (i = 0; i < cfgcnt; i++) {
for (j = 0; j < max98088->eq_textcnt; j++) {
if (strcmp(cfg[i].name, max98088->eq_texts[j]) == 0)
break;
}
if (j != max98088->eq_textcnt)
continue;
/* Expand the array */
t = krealloc(max98088->eq_texts,
sizeof(char *) * (max98088->eq_textcnt + 1),
GFP_KERNEL);
if (t == NULL)
continue;
/* Store the new entry */
t[max98088->eq_textcnt] = cfg[i].name;
max98088->eq_textcnt++;
max98088->eq_texts = t;
}
/* Now point the soc_enum to .texts array items */
max98088->eq_enum.texts = max98088->eq_texts;
max98088->eq_enum.items = max98088->eq_textcnt;
ret = snd_soc_add_component_controls(component, controls, ARRAY_SIZE(controls));
if (ret != 0)
dev_err(component->dev, "Failed to add EQ control: %d\n", ret);
}
static void max98088_handle_pdata(struct snd_soc_component *component)
{
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_pdata *pdata = max98088->pdata;
u8 regval = 0;
if (!pdata) {
dev_dbg(component->dev, "No platform data\n");
return;
}
/* Configure mic for analog/digital mic mode */
if (pdata->digmic_left_mode)
regval |= M98088_DIGMIC_L;
if (pdata->digmic_right_mode)
regval |= M98088_DIGMIC_R;
max98088->digmic = (regval ? 1 : 0);
snd_soc_component_write(component, M98088_REG_48_CFG_MIC, regval);
/* Configure receiver output */
regval = ((pdata->receiver_mode) ? M98088_REC_LINEMODE : 0);
snd_soc_component_update_bits(component, M98088_REG_2A_MIC_REC_CNTL,
M98088_REC_LINEMODE_MASK, regval);
/* Configure equalizers */
if (pdata->eq_cfgcnt)
max98088_handle_eq_pdata(component);
}
static int max98088_probe(struct snd_soc_component *component)
{
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
struct max98088_cdata *cdata;
int ret = 0;
regcache_mark_dirty(max98088->regmap);
/* initialize private data */
max98088->sysclk = (unsigned)-1;
max98088->eq_textcnt = 0;
cdata = &max98088->dai[0];
cdata->rate = (unsigned)-1;
cdata->fmt = (unsigned)-1;
cdata->eq_sel = 0;
cdata = &max98088->dai[1];
cdata->rate = (unsigned)-1;
cdata->fmt = (unsigned)-1;
cdata->eq_sel = 0;
max98088->ina_state = 0;
max98088->inb_state = 0;
max98088->ex_mode = 0;
max98088->digmic = 0;
max98088->mic1pre = 0;
max98088->mic2pre = 0;
ret = snd_soc_component_read(component, M98088_REG_FF_REV_ID);
if (ret < 0) {
dev_err(component->dev, "Failed to read device revision: %d\n",
ret);
goto err_access;
}
dev_info(component->dev, "revision %c\n", ret - 0x40 + 'A');
snd_soc_component_write(component, M98088_REG_51_PWR_SYS, M98088_PWRSV);
snd_soc_component_write(component, M98088_REG_0F_IRQ_ENABLE, 0x00);
snd_soc_component_write(component, M98088_REG_22_MIX_DAC,
M98088_DAI1L_TO_DACL|M98088_DAI2L_TO_DACL|
M98088_DAI1R_TO_DACR|M98088_DAI2R_TO_DACR);
snd_soc_component_write(component, M98088_REG_4E_BIAS_CNTL, 0xF0);
snd_soc_component_write(component, M98088_REG_50_DAC_BIAS2, 0x0F);
snd_soc_component_write(component, M98088_REG_16_DAI1_IOCFG,
M98088_S1NORMAL|M98088_SDATA);
snd_soc_component_write(component, M98088_REG_1E_DAI2_IOCFG,
M98088_S2NORMAL|M98088_SDATA);
max98088_handle_pdata(component);
err_access:
return ret;
}
static void max98088_remove(struct snd_soc_component *component)
{
struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
kfree(max98088->eq_texts);
}
static const struct snd_soc_component_driver soc_component_dev_max98088 = {
.probe = max98088_probe,
.remove = max98088_remove,
.set_bias_level = max98088_set_bias_level,
.controls = max98088_snd_controls,
.num_controls = ARRAY_SIZE(max98088_snd_controls),
.dapm_widgets = max98088_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(max98088_dapm_widgets),
.dapm_routes = max98088_audio_map,
.num_dapm_routes = ARRAY_SIZE(max98088_audio_map),
.suspend_bias_off = 1,
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
};
static const struct i2c_device_id max98088_i2c_id[] = {
{ "max98088", MAX98088 },
{ "max98089", MAX98089 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);
static int max98088_i2c_probe(struct i2c_client *i2c)
{
struct max98088_priv *max98088;
const struct i2c_device_id *id;
max98088 = devm_kzalloc(&i2c->dev, sizeof(struct max98088_priv),
GFP_KERNEL);
if (max98088 == NULL)
return -ENOMEM;
max98088->regmap = devm_regmap_init_i2c(i2c, &max98088_regmap);
if (IS_ERR(max98088->regmap))
return PTR_ERR(max98088->regmap);
max98088->mclk = devm_clk_get(&i2c->dev, "mclk");
if (IS_ERR(max98088->mclk))
if (PTR_ERR(max98088->mclk) == -EPROBE_DEFER)
return PTR_ERR(max98088->mclk);
id = i2c_match_id(max98088_i2c_id, i2c);
max98088->devtype = id->driver_data;
i2c_set_clientdata(i2c, max98088);
max98088->pdata = i2c->dev.platform_data;
return devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_max98088,
&max98088_dai[0], 2);
}
#if defined(CONFIG_OF)
static const struct of_device_id max98088_of_match[] = {
{ .compatible = "maxim,max98088" },
{ .compatible = "maxim,max98089" },
{ }
};
MODULE_DEVICE_TABLE(of, max98088_of_match);
#endif
static struct i2c_driver max98088_i2c_driver = {
.driver = {
.name = "max98088",
.of_match_table = of_match_ptr(max98088_of_match),
},
.probe = max98088_i2c_probe,
.id_table = max98088_i2c_id,
};
module_i2c_driver(max98088_i2c_driver);
MODULE_DESCRIPTION("ALSA SoC MAX98088 driver");
MODULE_AUTHOR("Peter Hsiang, Jesse Marroquin");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-or-later
/* In-kernel rxperf server for testing purposes.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "rxperf: " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
#include <trace/events/rxrpc.h>
MODULE_DESCRIPTION("rxperf test server (afs)");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
#define RXPERF_PORT 7009
#define RX_PERF_SERVICE 147
#define RX_PERF_VERSION 3
#define RX_PERF_SEND 0
#define RX_PERF_RECV 1
#define RX_PERF_RPC 3
#define RX_PERF_FILE 4
#define RX_PERF_MAGIC_COOKIE 0x4711
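/*
 * Parameter block sent at the start of each call: four big-endian
 * words carrying the protocol version, the operation type and the
 * request and reply sizes.
 */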
struct rxperf_proto_params {
__be32 version;
__be32 type;
__be32 rsize;
__be32 wsize;
} __packed;
static const u8 rxperf_magic_cookie[] = { 0x00, 0x00, 0x47, 0x11 };
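/* Shared secret for the "rxrpc_s" server key installed by rxperf_add_key() */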
static const u8 secret[8] = { 0xa7, 0x83, 0x8a, 0xcb, 0xc7, 0x83, 0xec, 0x94 };
enum rxperf_call_state {
RXPERF_CALL_SV_AWAIT_PARAMS, /* Server: Awaiting parameter block */
RXPERF_CALL_SV_AWAIT_REQUEST, /* Server: Awaiting request data */
RXPERF_CALL_SV_REPLYING, /* Server: Replying */
RXPERF_CALL_SV_AWAIT_ACK, /* Server: Awaiting final ACK */
RXPERF_CALL_COMPLETE, /* Completed or failed */
};
struct rxperf_call {
struct rxrpc_call *rxcall;
struct iov_iter iter;
struct kvec kvec[1];
struct work_struct work;
const char *type;
size_t iov_len;
size_t req_len; /* Size of request blob */
size_t reply_len; /* Size of reply blob */
unsigned int debug_id;
unsigned int operation_id;
struct rxperf_proto_params params;
__be32 tmp[2];
s32 abort_code;
enum rxperf_call_state state;
short error;
unsigned short unmarshal;
u16 service_id;
int (*deliver)(struct rxperf_call *call);
void (*processor)(struct work_struct *work);
};
static struct socket *rxperf_socket;
static struct key *rxperf_sec_keyring; /* Ring of security/crypto keys */
static struct workqueue_struct *rxperf_workqueue;
static void rxperf_deliver_to_call(struct work_struct *work);
static int rxperf_deliver_param_block(struct rxperf_call *call);
static int rxperf_deliver_request(struct rxperf_call *call);
static int rxperf_process_call(struct rxperf_call *call);
static void rxperf_charge_preallocation(struct work_struct *work);
static DECLARE_WORK(rxperf_charge_preallocation_work,
rxperf_charge_preallocation);
static inline void rxperf_set_call_state(struct rxperf_call *call,
enum rxperf_call_state to)
{
call->state = to;
}
static inline void rxperf_set_call_complete(struct rxperf_call *call,
int error, s32 remote_abort)
{
if (call->state != RXPERF_CALL_COMPLETE) {
call->abort_code = remote_abort;
call->error = error;
call->state = RXPERF_CALL_COMPLETE;
}
}
static void rxperf_rx_discard_new_call(struct rxrpc_call *rxcall,
unsigned long user_call_ID)
{
kfree((struct rxperf_call *)user_call_ID);
}
static void rxperf_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long user_call_ID)
{
queue_work(rxperf_workqueue, &rxperf_charge_preallocation_work);
}
static void rxperf_queue_call_work(struct rxperf_call *call)
{
queue_work(rxperf_workqueue, &call->work);
}
static void rxperf_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long call_user_ID)
{
struct rxperf_call *call = (struct rxperf_call *)call_user_ID;
if (call->state != RXPERF_CALL_COMPLETE)
rxperf_queue_call_work(call);
}
static void rxperf_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID)
{
struct rxperf_call *call = (struct rxperf_call *)user_call_ID;
call->rxcall = rxcall;
}
static void rxperf_notify_end_reply_tx(struct sock *sock,
struct rxrpc_call *rxcall,
unsigned long call_user_ID)
{
rxperf_set_call_state((struct rxperf_call *)call_user_ID,
RXPERF_CALL_SV_AWAIT_ACK);
}
/*
* Charge the incoming call preallocation.
*/
static void rxperf_charge_preallocation(struct work_struct *work)
{
struct rxperf_call *call;
for (;;) {
call = kzalloc(sizeof(*call), GFP_KERNEL);
if (!call)
break;
call->type = "unset";
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
call->deliver = rxperf_deliver_param_block;
call->state = RXPERF_CALL_SV_AWAIT_PARAMS;
call->service_id = RX_PERF_SERVICE;
call->iov_len = sizeof(call->params);
call->kvec[0].iov_len = sizeof(call->params);
call->kvec[0].iov_base = &call->params;
iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
INIT_WORK(&call->work, rxperf_deliver_to_call);
if (rxrpc_kernel_charge_accept(rxperf_socket,
rxperf_notify_rx,
rxperf_rx_attach,
(unsigned long)call,
GFP_KERNEL,
call->debug_id) < 0)
break;
call = NULL;
}
kfree(call);
}
/*
 * Open an rxrpc socket and bind it to be a server for the rxperf service
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 */
static int rxperf_open_socket(void)
{
struct sockaddr_rxrpc srx;
struct socket *socket;
int ret;
ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET6,
&socket);
if (ret < 0)
goto error_1;
socket->sk->sk_allocation = GFP_NOFS;
/* bind the rxperf service address to make this a server socket */
memset(&srx, 0, sizeof(srx));
srx.srx_family = AF_RXRPC;
srx.srx_service = RX_PERF_SERVICE;
srx.transport_type = SOCK_DGRAM;
srx.transport_len = sizeof(srx.transport.sin6);
srx.transport.sin6.sin6_family = AF_INET6;
srx.transport.sin6.sin6_port = htons(RXPERF_PORT);
ret = rxrpc_sock_set_min_security_level(socket->sk,
RXRPC_SECURITY_ENCRYPT);
if (ret < 0)
goto error_2;
ret = rxrpc_sock_set_security_keyring(socket->sk, rxperf_sec_keyring);
if (ret < 0)
goto error_2;
ret = kernel_bind(socket, (struct sockaddr *)&srx, sizeof(srx));
if (ret < 0)
goto error_2;
rxrpc_kernel_new_call_notification(socket, rxperf_rx_new_call,
rxperf_rx_discard_new_call);
ret = kernel_listen(socket, INT_MAX);
if (ret < 0)
goto error_2;
rxperf_socket = socket;
rxperf_charge_preallocation(&rxperf_charge_preallocation_work);
return 0;
error_2:
sock_release(socket);
error_1:
pr_err("Can't set up rxperf socket: %d\n", ret);
return ret;
}
/*
* close the rxrpc socket rxperf was using
*/
static void rxperf_close_socket(void)
{
kernel_listen(rxperf_socket, 0);
kernel_sock_shutdown(rxperf_socket, SHUT_RDWR);
flush_workqueue(rxperf_workqueue);
sock_release(rxperf_socket);
}
/*
* Log remote abort codes that indicate that we have a protocol disagreement
* with the server.
*/
static void rxperf_log_error(struct rxperf_call *call, s32 remote_abort)
{
static int max = 0;
const char *msg;
int m;
switch (remote_abort) {
case RX_EOF: msg = "unexpected EOF"; break;
case RXGEN_CC_MARSHAL: msg = "client marshalling"; break;
case RXGEN_CC_UNMARSHAL: msg = "client unmarshalling"; break;
case RXGEN_SS_MARSHAL: msg = "server marshalling"; break;
case RXGEN_SS_UNMARSHAL: msg = "server unmarshalling"; break;
case RXGEN_DECODE: msg = "opcode decode"; break;
case RXGEN_SS_XDRFREE: msg = "server XDR cleanup"; break;
case RXGEN_CC_XDRFREE: msg = "client XDR cleanup"; break;
case -32: msg = "insufficient data"; break;
default:
return;
}
m = max;
if (m < 3) {
max = m + 1;
pr_info("Peer reported %s failure on %s\n", msg, call->type);
}
}
/*
* deliver messages to a call
*/
static void rxperf_deliver_to_call(struct work_struct *work)
{
struct rxperf_call *call = container_of(work, struct rxperf_call, work);
enum rxperf_call_state state;
u32 abort_code, remote_abort = 0;
int ret = 0;
if (call->state == RXPERF_CALL_COMPLETE)
return;
while (state = call->state,
state == RXPERF_CALL_SV_AWAIT_PARAMS ||
state == RXPERF_CALL_SV_AWAIT_REQUEST ||
state == RXPERF_CALL_SV_AWAIT_ACK
) {
if (state == RXPERF_CALL_SV_AWAIT_ACK) {
if (!rxrpc_kernel_check_life(rxperf_socket, call->rxcall))
goto call_complete;
return;
}
ret = call->deliver(call);
if (ret == 0)
ret = rxperf_process_call(call);
switch (ret) {
case 0:
continue;
case -EINPROGRESS:
case -EAGAIN:
return;
case -ECONNABORTED:
rxperf_log_error(call, call->abort_code);
goto call_complete;
case -EOPNOTSUPP:
abort_code = RXGEN_OPCODE;
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
abort_code, ret,
rxperf_abort_op_not_supported);
goto call_complete;
case -ENOTSUPP:
abort_code = RX_USER_ABORT;
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
abort_code, ret,
rxperf_abort_op_not_supported);
goto call_complete;
case -EIO:
pr_err("Call %u in bad state %u\n",
call->debug_id, call->state);
fallthrough;
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
case -ENOMEM:
case -EFAULT:
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
RXGEN_SS_UNMARSHAL, ret,
rxperf_abort_unmarshal_error);
goto call_complete;
default:
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
RX_CALL_DEAD, ret,
rxperf_abort_general_error);
goto call_complete;
}
}
call_complete:
rxperf_set_call_complete(call, ret, remote_abort);
/* The call may have been requeued */
rxrpc_kernel_shutdown_call(rxperf_socket, call->rxcall);
rxrpc_kernel_put_call(rxperf_socket, call->rxcall);
cancel_work(&call->work);
kfree(call);
}
/*
* Extract a piece of data from the received data socket buffers.
*/
static int rxperf_extract_data(struct rxperf_call *call, bool want_more)
{
u32 remote_abort = 0;
int ret;
ret = rxrpc_kernel_recv_data(rxperf_socket, call->rxcall, &call->iter,
&call->iov_len, want_more, &remote_abort,
&call->service_id);
pr_debug("Extract i=%zu l=%zu m=%u ret=%d\n",
iov_iter_count(&call->iter), call->iov_len, want_more, ret);
if (ret == 0 || ret == -EAGAIN)
return ret;
if (ret == 1) {
switch (call->state) {
case RXPERF_CALL_SV_AWAIT_REQUEST:
rxperf_set_call_state(call, RXPERF_CALL_SV_REPLYING);
break;
case RXPERF_CALL_COMPLETE:
pr_debug("premature completion %d", call->error);
return call->error;
default:
break;
}
return 0;
}
rxperf_set_call_complete(call, ret, remote_abort);
return ret;
}
/*
* Grab the operation ID from an incoming manager call.
*/
static int rxperf_deliver_param_block(struct rxperf_call *call)
{
u32 version;
int ret;
/* Extract the parameter block */
ret = rxperf_extract_data(call, true);
if (ret < 0)
return ret;
version = ntohl(call->params.version);
call->operation_id = ntohl(call->params.type);
call->deliver = rxperf_deliver_request;
if (version != RX_PERF_VERSION) {
pr_info("Version mismatch %x\n", version);
return -ENOTSUPP;
}
switch (call->operation_id) {
case RX_PERF_SEND:
call->type = "send";
call->reply_len = 0;
call->iov_len = 4; /* Expect req size */
break;
case RX_PERF_RECV:
call->type = "recv";
call->req_len = 0;
call->iov_len = 4; /* Expect reply size */
break;
case RX_PERF_RPC:
call->type = "rpc";
call->iov_len = 8; /* Expect req size and reply size */
break;
case RX_PERF_FILE:
call->type = "file";
fallthrough;
default:
return -EOPNOTSUPP;
}
rxperf_set_call_state(call, RXPERF_CALL_SV_AWAIT_REQUEST);
return call->deliver(call);
}
/*
* Deliver the request data.
*/
static int rxperf_deliver_request(struct rxperf_call *call)
{
int ret;
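/*
* ->unmarshal records progress: 0 = aim the iterator at the size
* word(s) for this op, 1 = read them and arrange to skip the
* request body, 2 = discard the request body itself.
*/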
switch (call->unmarshal) {
case 0:
call->kvec[0].iov_len = call->iov_len;
call->kvec[0].iov_base = call->tmp;
iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
call->unmarshal++;
fallthrough;
case 1:
ret = rxperf_extract_data(call, true);
if (ret < 0)
return ret;
switch (call->operation_id) {
case RX_PERF_SEND:
call->type = "send";
call->req_len = ntohl(call->tmp[0]);
call->reply_len = 0;
break;
case RX_PERF_RECV:
call->type = "recv";
call->req_len = 0;
call->reply_len = ntohl(call->tmp[0]);
break;
case RX_PERF_RPC:
call->type = "rpc";
call->req_len = ntohl(call->tmp[0]);
call->reply_len = ntohl(call->tmp[1]);
break;
default:
pr_info("Can't parse extra params\n");
return -EIO;
}
pr_debug("CALL op=%s rq=%zx rp=%zx\n",
call->type, call->req_len, call->reply_len);
call->iov_len = call->req_len;
iov_iter_discard(&call->iter, READ, call->req_len);
call->unmarshal++;
fallthrough;
case 2:
ret = rxperf_extract_data(call, false);
if (ret < 0)
return ret;
call->unmarshal++;
fallthrough;
default:
return 0;
}
}
/*
* Process a call for which we've received the request.
*/
static int rxperf_process_call(struct rxperf_call *call)
{
struct msghdr msg = {};
struct bio_vec bv;
struct kvec iov[1];
ssize_t n;
size_t reply_len = call->reply_len, len;
rxrpc_kernel_set_tx_length(rxperf_socket, call->rxcall,
reply_len + sizeof(rxperf_magic_cookie));
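/* Generate the body of the reply from the shared zero page, one
* page-sized chunk at a time.
*/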
while (reply_len > 0) {
len = min_t(size_t, reply_len, PAGE_SIZE);
bvec_set_page(&bv, ZERO_PAGE(0), len, 0);
iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, len);
msg.msg_flags = MSG_MORE;
n = rxrpc_kernel_send_data(rxperf_socket, call->rxcall, &msg,
len, rxperf_notify_end_reply_tx);
if (n < 0)
return n;
if (n == 0)
return -EIO;
reply_len -= n;
}
len = sizeof(rxperf_magic_cookie);
iov[0].iov_base = (void *)rxperf_magic_cookie;
iov[0].iov_len = len;
iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
msg.msg_flags = 0;
n = rxrpc_kernel_send_data(rxperf_socket, call->rxcall, &msg, len,
rxperf_notify_end_reply_tx);
if (n >= 0)
return 0; /* Success */
if (n == -ENOMEM)
rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
RXGEN_SS_MARSHAL, -ENOMEM,
rxperf_abort_oom);
return n;
}
/*
* Add a key to the security keyring.
*/
static int rxperf_add_key(struct key *keyring)
{
key_ref_t kref;
int ret;
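/* The "rxrpc_s" key description is "<service-id>:<security-index>";
* security index 2 selects rxkad.
*/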
kref = key_create_or_update(make_key_ref(keyring, true),
"rxrpc_s",
__stringify(RX_PERF_SERVICE) ":2",
secret,
sizeof(secret),
KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH
| KEY_USR_VIEW,
KEY_ALLOC_NOT_IN_QUOTA);
if (IS_ERR(kref)) {
pr_err("Can't allocate rxperf server key: %ld\n", PTR_ERR(kref));
return PTR_ERR(kref);
}
ret = key_link(keyring, key_ref_to_ptr(kref));
if (ret < 0)
pr_err("Can't link rxperf server key: %d\n", ret);
key_ref_put(kref);
return ret;
}
/*
* Initialise the rxperf server.
*/
static int __init rxperf_init(void)
{
struct key *keyring;
int ret = -ENOMEM;
pr_info("Server registering\n");
rxperf_workqueue = alloc_workqueue("rxperf", 0, 0);
if (!rxperf_workqueue)
goto error_workqueue;
keyring = keyring_alloc("rxperf_server",
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
KEY_POS_WRITE |
KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH |
KEY_USR_WRITE |
KEY_OTH_VIEW | KEY_OTH_READ | KEY_OTH_SEARCH,
KEY_ALLOC_NOT_IN_QUOTA,
NULL, NULL);
if (IS_ERR(keyring)) {
pr_err("Can't allocate rxperf server keyring: %ld\n",
PTR_ERR(keyring));
goto error_keyring;
}
rxperf_sec_keyring = keyring;
ret = rxperf_add_key(keyring);
if (ret < 0)
goto error_key;
ret = rxperf_open_socket();
if (ret < 0)
goto error_socket;
return 0;
error_socket:
error_key:
key_put(rxperf_sec_keyring);
error_keyring:
destroy_workqueue(rxperf_workqueue);
rcu_barrier();
error_workqueue:
pr_err("Failed to register: %d\n", ret);
return ret;
}
late_initcall(rxperf_init); /* Must be called after net/ to create socket */
static void __exit rxperf_exit(void)
{
pr_info("Server unregistering.\n");
rxperf_close_socket();
key_put(rxperf_sec_keyring);
destroy_workqueue(rxperf_workqueue);
rcu_barrier();
}
module_exit(rxperf_exit);
|
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <time.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <dlfcn.h>
#include "log.h"
#include "timens.h"
typedef int (*vgettime_t)(clockid_t, struct timespec *);
vgettime_t vdso_clock_gettime;
static void fill_function_pointers(void)
{
void *vdso = dlopen("linux-vdso.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso)
vdso = dlopen("linux-gate.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso)
vdso = dlopen("linux-vdso32.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso)
vdso = dlopen("linux-vdso64.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
if (!vdso) {
pr_err("[WARN]\tfailed to find vDSO\n");
return;
}
vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
if (!vdso_clock_gettime)
vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__kernel_clock_gettime");
if (!vdso_clock_gettime)
pr_err("Warning: failed to find clock_gettime in vDSO\n");
}
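/*
* Spin on the vDSO clock_gettime() for a fixed number of seconds and
* report how many calls completed as a crude performance figure.
*/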
static void test(clock_t clockid, char *clockstr, bool in_ns)
{
struct timespec tp, start;
long i = 0;
const int timeout = 3;
vdso_clock_gettime(clockid, &start);
for (tp = start; start.tv_sec + timeout > tp.tv_sec ||
(start.tv_sec + timeout == tp.tv_sec &&
start.tv_nsec > tp.tv_nsec); i++) {
vdso_clock_gettime(clockid, &tp);
}
ksft_test_result_pass("%s:\tclock: %10s\tcycles:\t%10ld\n",
in_ns ? "ns" : "host", clockstr, i);
}
int main(int argc, char *argv[])
{
time_t offset = 10;
int nsfd;
ksft_set_plan(8);
fill_function_pointers();
test(CLOCK_MONOTONIC, "monotonic", false);
test(CLOCK_MONOTONIC_COARSE, "monotonic-coarse", false);
test(CLOCK_MONOTONIC_RAW, "monotonic-raw", false);
test(CLOCK_BOOTTIME, "boottime", false);
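/* Re-run the measurements inside a new time namespace with clock
* offsets applied so that the cost of the vDSO timens path shows up.
*/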
nscheck();
if (unshare_timens())
return 1;
nsfd = open("/proc/self/ns/time_for_children", O_RDONLY);
if (nsfd < 0)
return pr_perror("Can't open a time namespace");
if (_settime(CLOCK_MONOTONIC, offset))
return 1;
if (_settime(CLOCK_BOOTTIME, offset))
return 1;
if (setns(nsfd, CLONE_NEWTIME))
return pr_perror("setns");
test(CLOCK_MONOTONIC, "monotonic", true);
test(CLOCK_MONOTONIC_COARSE, "monotonic-coarse", true);
test(CLOCK_MONOTONIC_RAW, "monotonic-raw", true);
test(CLOCK_BOOTTIME, "boottime", true);
ksft_exit_pass();
return 0;
}
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Defines for Mobile High-Definition Link (MHL) interface
*
* Copyright (C) 2015, Samsung Electronics, Co., Ltd.
* Andrzej Hajda <[email protected]>
*
* Based on MHL driver for Android devices.
* Copyright (C) 2013-2014 Silicon Image, Inc.
*/
#ifndef __MHL_H__
#define __MHL_H__
#include <linux/types.h>
/* Device Capabilities Registers */
enum {
MHL_DCAP_DEV_STATE,
MHL_DCAP_MHL_VERSION,
MHL_DCAP_CAT,
MHL_DCAP_ADOPTER_ID_H,
MHL_DCAP_ADOPTER_ID_L,
MHL_DCAP_VID_LINK_MODE,
MHL_DCAP_AUD_LINK_MODE,
MHL_DCAP_VIDEO_TYPE,
MHL_DCAP_LOG_DEV_MAP,
MHL_DCAP_BANDWIDTH,
MHL_DCAP_FEATURE_FLAG,
MHL_DCAP_DEVICE_ID_H,
MHL_DCAP_DEVICE_ID_L,
MHL_DCAP_SCRATCHPAD_SIZE,
MHL_DCAP_INT_STAT_SIZE,
MHL_DCAP_RESERVED,
MHL_DCAP_SIZE
};
#define MHL_DCAP_CAT_SINK 0x01
#define MHL_DCAP_CAT_SOURCE 0x02
#define MHL_DCAP_CAT_POWER 0x10
#define MHL_DCAP_CAT_PLIM(x) ((x) << 5)
#define MHL_DCAP_VID_LINK_RGB444 0x01
#define MHL_DCAP_VID_LINK_YCBCR444 0x02
#define MHL_DCAP_VID_LINK_YCBCR422 0x04
#define MHL_DCAP_VID_LINK_PPIXEL 0x08
#define MHL_DCAP_VID_LINK_ISLANDS 0x10
#define MHL_DCAP_VID_LINK_VGA 0x20
#define MHL_DCAP_VID_LINK_16BPP 0x40
#define MHL_DCAP_AUD_LINK_2CH 0x01
#define MHL_DCAP_AUD_LINK_8CH 0x02
#define MHL_DCAP_VT_GRAPHICS 0x00
#define MHL_DCAP_VT_PHOTO 0x02
#define MHL_DCAP_VT_CINEMA 0x04
#define MHL_DCAP_VT_GAMES 0x08
#define MHL_DCAP_SUPP_VT 0x80
#define MHL_DCAP_LD_DISPLAY 0x01
#define MHL_DCAP_LD_VIDEO 0x02
#define MHL_DCAP_LD_AUDIO 0x04
#define MHL_DCAP_LD_MEDIA 0x08
#define MHL_DCAP_LD_TUNER 0x10
#define MHL_DCAP_LD_RECORD 0x20
#define MHL_DCAP_LD_SPEAKER 0x40
#define MHL_DCAP_LD_GUI 0x80
#define MHL_DCAP_LD_ALL 0xFF
#define MHL_DCAP_FEATURE_RCP_SUPPORT 0x01
#define MHL_DCAP_FEATURE_RAP_SUPPORT 0x02
#define MHL_DCAP_FEATURE_SP_SUPPORT 0x04
#define MHL_DCAP_FEATURE_UCP_SEND_SUPPORT 0x08
#define MHL_DCAP_FEATURE_UCP_RECV_SUPPORT 0x10
#define MHL_DCAP_FEATURE_RBP_SUPPORT 0x40
/* Extended Device Capabilities Registers */
enum {
MHL_XDC_ECBUS_SPEEDS,
MHL_XDC_TMDS_SPEEDS,
MHL_XDC_ECBUS_ROLES,
MHL_XDC_LOG_DEV_MAPX,
MHL_XDC_SIZE
};
#define MHL_XDC_ECBUS_S_075 0x01
#define MHL_XDC_ECBUS_S_8BIT 0x02
#define MHL_XDC_ECBUS_S_12BIT 0x04
#define MHL_XDC_ECBUS_D_150 0x10
#define MHL_XDC_ECBUS_D_8BIT 0x20
#define MHL_XDC_TMDS_000 0x00
#define MHL_XDC_TMDS_150 0x01
#define MHL_XDC_TMDS_300 0x02
#define MHL_XDC_TMDS_600 0x04
/* MHL_XDC_ECBUS_ROLES flags */
#define MHL_XDC_DEV_HOST 0x01
#define MHL_XDC_DEV_DEVICE 0x02
#define MHL_XDC_DEV_CHARGER 0x04
#define MHL_XDC_HID_HOST 0x08
#define MHL_XDC_HID_DEVICE 0x10
/* MHL_XDC_LOG_DEV_MAPX flags */
#define MHL_XDC_LD_PHONE 0x01
/* Device Status Registers */
enum {
MHL_DST_CONNECTED_RDY,
MHL_DST_LINK_MODE,
MHL_DST_VERSION,
MHL_DST_SIZE
};
/* Offset of DEVSTAT registers */
#define MHL_DST_OFFSET 0x30
#define MHL_DST_REG(name) (MHL_DST_OFFSET + MHL_DST_##name)
#define MHL_DST_CONN_DCAP_RDY 0x01
#define MHL_DST_CONN_XDEVCAPP_SUPP 0x02
#define MHL_DST_CONN_POW_STAT 0x04
#define MHL_DST_CONN_PLIM_STAT_MASK 0x38
#define MHL_DST_LM_CLK_MODE_MASK 0x07
#define MHL_DST_LM_CLK_MODE_PACKED_PIXEL 0x02
#define MHL_DST_LM_CLK_MODE_NORMAL 0x03
#define MHL_DST_LM_PATH_EN_MASK 0x08
#define MHL_DST_LM_PATH_ENABLED 0x08
#define MHL_DST_LM_PATH_DISABLED 0x00
#define MHL_DST_LM_MUTED_MASK 0x10
/* Extended Device Status Registers */
enum {
MHL_XDS_CURR_ECBUS_MODE,
MHL_XDS_AVLINK_MODE_STATUS,
MHL_XDS_AVLINK_MODE_CONTROL,
MHL_XDS_MULTI_SINK_STATUS,
MHL_XDS_SIZE
};
/* Offset of XDEVSTAT registers */
#define MHL_XDS_OFFSET 0x90
#define MHL_XDS_REG(name) (MHL_XDS_OFFSET + MHL_XDS_##name)
/* MHL_XDS_REG_CURR_ECBUS_MODE flags */
#define MHL_XDS_SLOT_MODE_8BIT 0x00
#define MHL_XDS_SLOT_MODE_6BIT 0x01
#define MHL_XDS_ECBUS_S 0x04
#define MHL_XDS_ECBUS_D 0x08
#define MHL_XDS_LINK_CLOCK_75MHZ 0x00
#define MHL_XDS_LINK_CLOCK_150MHZ 0x10
#define MHL_XDS_LINK_CLOCK_300MHZ 0x20
#define MHL_XDS_LINK_CLOCK_600MHZ 0x30
#define MHL_XDS_LINK_STATUS_NO_SIGNAL 0x00
#define MHL_XDS_LINK_STATUS_CRU_LOCKED 0x01
#define MHL_XDS_LINK_STATUS_TMDS_NORMAL 0x02
#define MHL_XDS_LINK_STATUS_TMDS_RESERVED 0x03
#define MHL_XDS_LINK_RATE_1_5_GBPS 0x00
#define MHL_XDS_LINK_RATE_3_0_GBPS 0x01
#define MHL_XDS_LINK_RATE_6_0_GBPS 0x02
#define MHL_XDS_ATT_CAPABLE 0x08
#define MHL_XDS_SINK_STATUS_1_HPD_LOW 0x00
#define MHL_XDS_SINK_STATUS_1_HPD_HIGH 0x01
#define MHL_XDS_SINK_STATUS_2_HPD_LOW 0x00
#define MHL_XDS_SINK_STATUS_2_HPD_HIGH 0x04
#define MHL_XDS_SINK_STATUS_3_HPD_LOW 0x00
#define MHL_XDS_SINK_STATUS_3_HPD_HIGH 0x10
#define MHL_XDS_SINK_STATUS_4_HPD_LOW 0x00
#define MHL_XDS_SINK_STATUS_4_HPD_HIGH 0x40
/* Interrupt Registers */
enum {
MHL_INT_RCHANGE,
MHL_INT_DCHANGE,
MHL_INT_SIZE
};
/* Offset of interrupt registers */
#define MHL_INT_OFFSET 0x20
#define MHL_INT_REG(name) (MHL_INT_OFFSET + MHL_INT_##name)
#define MHL_INT_RC_DCAP_CHG 0x01
#define MHL_INT_RC_DSCR_CHG 0x02
#define MHL_INT_RC_REQ_WRT 0x04
#define MHL_INT_RC_GRT_WRT 0x08
#define MHL_INT_RC_3D_REQ 0x10
#define MHL_INT_RC_FEAT_REQ 0x20
#define MHL_INT_RC_FEAT_COMPLETE 0x40
#define MHL_INT_DC_EDID_CHG 0x02
enum {
MHL_ACK = 0x33, /* Command or Data byte acknowledged */
MHL_NACK = 0x34, /* Command or Data byte not acknowledged */
MHL_ABORT = 0x35, /* Transaction abort */
MHL_WRITE_STAT = 0xe0, /* Write one status register */
MHL_SET_INT = 0x60, /* Write one interrupt register */
MHL_READ_DEVCAP_REG = 0x61, /* Read one register */
MHL_GET_STATE = 0x62, /* Read CBUS revision level from follower */
MHL_GET_VENDOR_ID = 0x63, /* Read vendor ID value from follower */
MHL_SET_HPD = 0x64, /* Set Hot Plug Detect in follower */
MHL_CLR_HPD = 0x65, /* Clear Hot Plug Detect in follower */
MHL_SET_CAP_ID = 0x66, /* Set Capture ID for downstream device */
MHL_GET_CAP_ID = 0x67, /* Get Capture ID from downstream device */
MHL_MSC_MSG = 0x68, /* VS command to send RCP sub-commands */
MHL_GET_SC1_ERRORCODE = 0x69, /* Get Vendor-Specific error code */
MHL_GET_DDC_ERRORCODE = 0x6A, /* Get DDC channel command error code */
MHL_GET_MSC_ERRORCODE = 0x6B, /* Get MSC command error code */
MHL_WRITE_BURST = 0x6C, /* Write 1-16 bytes to responder's scratchpad */
MHL_GET_SC3_ERRORCODE = 0x6D, /* Get channel 3 command error code */
MHL_WRITE_XSTAT = 0x70, /* Write one extended status register */
MHL_READ_XDEVCAP_REG = 0x71, /* Read one extended devcap register */
/* Let the rest of these float; they are software-specific */
MHL_READ_EDID_BLOCK,
MHL_SEND_3D_REQ_OR_FEAT_REQ,
MHL_READ_DEVCAP,
MHL_READ_XDEVCAP
};
/* MSC message types */
enum {
MHL_MSC_MSG_RCP = 0x10, /* RCP sub-command */
MHL_MSC_MSG_RCPK = 0x11, /* RCP Acknowledge sub-command */
MHL_MSC_MSG_RCPE = 0x12, /* RCP Error sub-command */
MHL_MSC_MSG_RAP = 0x20, /* Mode Change Warning sub-command */
MHL_MSC_MSG_RAPK = 0x21, /* MCW Acknowledge sub-command */
MHL_MSC_MSG_RBP = 0x22, /* Remote Button Protocol sub-command */
MHL_MSC_MSG_RBPK = 0x23, /* RBP Acknowledge sub-command */
MHL_MSC_MSG_RBPE = 0x24, /* RBP Error sub-command */
MHL_MSC_MSG_UCP = 0x30, /* UCP sub-command */
MHL_MSC_MSG_UCPK = 0x31, /* UCP Acknowledge sub-command */
MHL_MSC_MSG_UCPE = 0x32, /* UCP Error sub-command */
MHL_MSC_MSG_RUSB = 0x40, /* Request USB host role */
MHL_MSC_MSG_RUSBK = 0x41, /* Acknowledge request for USB host role */
MHL_MSC_MSG_RHID = 0x42, /* Request HID host role */
MHL_MSC_MSG_RHIDK = 0x43, /* Acknowledge request for HID host role */
MHL_MSC_MSG_ATT = 0x50, /* Request attention sub-command */
MHL_MSC_MSG_ATTK = 0x51, /* ATT Acknowledge sub-command */
MHL_MSC_MSG_BIST_TRIGGER = 0x60,
MHL_MSC_MSG_BIST_REQUEST_STAT = 0x61,
MHL_MSC_MSG_BIST_READY = 0x62,
MHL_MSC_MSG_BIST_STOP = 0x63,
};
/* RAP action codes */
#define MHL_RAP_POLL 0x00 /* Just do an ack */
#define MHL_RAP_CONTENT_ON 0x10 /* Turn content stream ON */
#define MHL_RAP_CONTENT_OFF 0x11 /* Turn content stream OFF */
#define MHL_RAP_CBUS_MODE_DOWN 0x20
#define MHL_RAP_CBUS_MODE_UP 0x21
/* RAPK status codes */
#define MHL_RAPK_NO_ERR 0x00 /* RAP action recognized & supported */
#define MHL_RAPK_UNRECOGNIZED 0x01 /* Unknown RAP action code received */
#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */
#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */
/* Bit masks for RCP messages */
#define MHL_RCP_KEY_RELEASED_MASK 0x80
#define MHL_RCP_KEY_ID_MASK 0x7F
/*
* Error status codes for RCPE messages
*/
/* No error. (Not allowed in RCPE messages) */
#define MHL_RCPE_STATUS_NO_ERROR 0x00
/* Unsupported/unrecognized key code */
#define MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
/* Responder busy. Initiator may retry message */
#define MHL_RCPE_STATUS_BUSY 0x02
/*
* Error status codes for RBPE messages
*/
/* No error. (Not allowed in RBPE messages) */
#define MHL_RBPE_STATUS_NO_ERROR 0x00
/* Unsupported/unrecognized button code */
#define MHL_RBPE_STATUS_INEFFECTIVE_BUTTON_CODE 0x01
/* Responder busy. Initiator may retry message */
#define MHL_RBPE_STATUS_BUSY 0x02
/*
* Error status codes for UCPE messages
*/
/* No error. (Not allowed in UCPE messages) */
#define MHL_UCPE_STATUS_NO_ERROR 0x00
/* Unsupported/unrecognized key code */
#define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01
enum mhl_burst_id {
MHL_BURST_ID_3D_VIC = 0x10,
MHL_BURST_ID_3D_DTD = 0x11,
MHL_BURST_ID_HEV_VIC = 0x20,
MHL_BURST_ID_HEV_DTDA = 0x21,
MHL_BURST_ID_HEV_DTDB = 0x22,
MHL_BURST_ID_VC_ASSIGN = 0x38,
MHL_BURST_ID_VC_CONFIRM = 0x39,
MHL_BURST_ID_AUD_DELAY = 0x40,
MHL_BURST_ID_ADT_BURSTID = 0x41,
MHL_BURST_ID_BIST_SETUP = 0x51,
MHL_BURST_ID_BIST_RETURN_STAT = 0x52,
MHL_BURST_ID_EMSC_SUPPORT = 0x61,
MHL_BURST_ID_HID_PAYLOAD = 0x62,
MHL_BURST_ID_BLK_RCV_BUFFER_INFO = 0x63,
MHL_BURST_ID_BITS_PER_PIXEL_FMT = 0x64,
};
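/* Note the mixed endianness below: burst IDs are big-endian on the
* wire while the receive-buffer size is little-endian.
*/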
struct mhl_burst_blk_rcv_buffer_info {
__be16 id;
__le16 size;
} __packed;
struct mhl3_burst_header {
__be16 id;
u8 checksum;
u8 total_entries;
u8 sequence_index;
} __packed;
struct mhl_burst_bits_per_pixel_fmt {
struct mhl3_burst_header hdr;
u8 num_entries;
struct {
u8 stream_id;
u8 pixel_format;
} __packed desc[];
} __packed;
struct mhl_burst_emsc_support {
struct mhl3_burst_header hdr;
u8 num_entries;
__be16 burst_id[];
} __packed;
struct mhl_burst_audio_descr {
struct mhl3_burst_header hdr;
u8 flags;
u8 short_desc[9];
} __packed;
/*
* MHL3 infoframe related definitions
*/
#define MHL3_IEEE_OUI 0x7ca61d
#define MHL3_INFOFRAME_SIZE 15
enum mhl3_video_format {
MHL3_VIDEO_FORMAT_NONE,
MHL3_VIDEO_FORMAT_3D,
MHL3_VIDEO_FORMAT_MULTI_VIEW,
MHL3_VIDEO_FORMAT_DUAL_3D
};
enum mhl3_3d_format_type {
MHL3_3D_FORMAT_TYPE_FS, /* frame sequential */
MHL3_3D_FORMAT_TYPE_TB, /* top-bottom */
MHL3_3D_FORMAT_TYPE_LR, /* left-right */
MHL3_3D_FORMAT_TYPE_FS_TB, /* frame sequential, top-bottom */
MHL3_3D_FORMAT_TYPE_FS_LR, /* frame sequential, left-right */
MHL3_3D_FORMAT_TYPE_TB_LR /* top-bottom, left-right */
};
struct mhl3_infoframe {
unsigned char version;
enum mhl3_video_format video_format;
enum mhl3_3d_format_type format_type;
bool sep_audio;
int hev_format;
int av_delay;
};
#endif /* __MHL_H__ */
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Device Tree file for LX2160A-CEx7
//
// Copyright 2019 SolidRun Ltd.
/dts-v1/;
#include "fsl-lx2160a.dtsi"
/ {
model = "SolidRun LX2160A COM Express Type 7 module";
compatible = "solidrun,lx2160a-cex7", "fsl,lx2160a";
aliases {
crypto = &crypto;
};
sb_3v3: regulator-sb3v3 {
compatible = "regulator-fixed";
regulator-name = "RT7290";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
};
};
&crypto {
status = "okay";
};
&dpmac17 {
phy-handle = <&rgmii_phy1>;
phy-connection-type = "rgmii-id";
};
&emdio1 {
status = "okay";
rgmii_phy1: ethernet-phy@1 {
reg = <1>;
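/* Stretch the PHY's SmartEEE Tw timer for 1G links (microseconds) */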
qca,smarteee-tw-us-1g = <24>;
};
};
&esdhc1 {
mmc-hs200-1_8v;
mmc-hs400-1_8v;
bus-width = <8>;
status = "okay";
};
&i2c0 {
status = "okay";
i2c-mux@77 {
compatible = "nxp,pca9547";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x77>;
i2c@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
eeprom@50 {
compatible = "atmel,24c512";
reg = <0x50>;
};
eeprom@51 {
compatible = "atmel,spd";
reg = <0x51>;
};
eeprom@53 {
compatible = "atmel,spd";
reg = <0x53>;
};
eeprom@57 {
compatible = "atmel,24c02";
reg = <0x57>;
};
};
i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
fan-temperature-ctrlr@18 {
compatible = "ti,amc6821";
reg = <0x18>;
};
};
i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
regulator@5c {
compatible = "lltc,ltc3882";
reg = <0x5c>;
};
};
i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
temperature-sensor@48 {
compatible = "nxp,sa56004";
reg = <0x48>;
vcc-supply = <&sb_3v3>;
};
};
sfp0_i2c: i2c@4 {
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
};
sfp1_i2c: i2c@5 {
#address-cells = <1>;
#size-cells = <0>;
reg = <5>;
};
sfp2_i2c: i2c@6 {
#address-cells = <1>;
#size-cells = <0>;
reg = <6>;
};
sfp3_i2c: i2c@7 {
#address-cells = <1>;
#size-cells = <0>;
reg = <7>;
};
};
};
&i2c2 {
status = "okay";
};
&i2c4 {
status = "okay";
rtc@51 {
compatible = "nxp,pcf2129";
reg = <0x51>;
};
};
&fspi {
status = "okay";
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "micron,m25p80";
m25p,fast-read;
spi-max-frequency = <50000000>;
reg = <0>;
/* The following setting enables 1-1-8 (CMD-ADDR-DATA) mode */
spi-rx-bus-width = <8>;
spi-tx-bus-width = <1>;
};
};
&usb0 {
status = "okay";
};
&usb1 {
status = "okay";
};
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_EXTABLE_H
#define _ARCH_POWERPC_EXTABLE_H
/*
* The exception table consists of pairs of relative addresses: the first is
* the address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out what
* to do.
*
* All the routines below use bits of fixup code that are out of line with the
* main instruction path. This means when everything is well, we don't even
* have to jump over them. Further, they do not intrude on our cache or tlb
* entries.
*/
#define ARCH_HAS_RELATIVE_EXTABLE
#ifndef __ASSEMBLY__
struct exception_table_entry {
int insn;
int fixup;
};
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
}
#endif
/*
* Helper macro for exception table entries
*/
#define EX_TABLE(_fault, _target) \
stringify_in_c(.section __ex_table,"a";)\
stringify_in_c(.balign 4;) \
stringify_in_c(.long (_fault) - . ;) \
stringify_in_c(.long (_target) - . ;) \
stringify_in_c(.previous)
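/*
* Illustrative sketch (not a verbatim kernel call site; val and ptr
* are assumed locals): label the instruction that may fault and the
* point at which to resume, then emit the table entry:
*
*	asm volatile("1:	lwz	%0,0(%1)\n"
*		     "2:\n"
*		     EX_TABLE(1b, 2b)
*		     : "=r" (val) : "r" (ptr));
*
* A fault at 1: resumes at 2: with no registers modified.
*/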
#endif
|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <[email protected]>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*/
#ifndef __OMAP2_DSS_H
#define __OMAP2_DSS_H
#include <linux/interrupt.h>
#include "omapdss.h"
struct dispc_device;
struct dss_debugfs_entry;
struct platform_device;
struct seq_file;
#define MAX_DSS_LCD_MANAGERS 3
#define MAX_NUM_DSI 2
#ifdef pr_fmt
#undef pr_fmt
#endif
#ifdef DSS_SUBSYS_NAME
#define pr_fmt(fmt) DSS_SUBSYS_NAME ": " fmt
#else
#define pr_fmt(fmt) fmt
#endif
#define DSSDBG(format, ...) \
pr_debug(format, ## __VA_ARGS__)
#ifdef DSS_SUBSYS_NAME
#define DSSERR(format, ...) \
pr_err("omapdss " DSS_SUBSYS_NAME " error: " format, ##__VA_ARGS__)
#else
#define DSSERR(format, ...) \
pr_err("omapdss error: " format, ##__VA_ARGS__)
#endif
#ifdef DSS_SUBSYS_NAME
#define DSSINFO(format, ...) \
pr_info("omapdss " DSS_SUBSYS_NAME ": " format, ##__VA_ARGS__)
#else
#define DSSINFO(format, ...) \
pr_info("omapdss: " format, ## __VA_ARGS__)
#endif
#ifdef DSS_SUBSYS_NAME
#define DSSWARN(format, ...) \
pr_warn("omapdss " DSS_SUBSYS_NAME ": " format, ##__VA_ARGS__)
#else
#define DSSWARN(format, ...) \
pr_warn("omapdss: " format, ##__VA_ARGS__)
#endif
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
number. For example 7:0 */
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
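/*
* Worked example: FLD_MASK(7, 4) = 0xf0, FLD_GET(0x1234, 7, 4) = 0x3,
* FLD_VAL(0xa, 7, 4) = 0xa0 and FLD_MOD(0x1234, 0xa, 7, 4) = 0x12a4.
*/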
enum dss_model {
DSS_MODEL_OMAP2,
DSS_MODEL_OMAP3,
DSS_MODEL_OMAP4,
DSS_MODEL_OMAP5,
DSS_MODEL_DRA7,
};
enum dss_io_pad_mode {
DSS_IO_PAD_MODE_RESET,
DSS_IO_PAD_MODE_RFBI,
DSS_IO_PAD_MODE_BYPASS,
};
enum dss_hdmi_venc_clk_source_select {
DSS_VENC_TV_CLK = 0,
DSS_HDMI_M_PCLK = 1,
};
enum dss_dsi_content_type {
DSS_DSI_CONTENT_DCS,
DSS_DSI_CONTENT_GENERIC,
};
enum dss_clk_source {
DSS_CLK_SRC_FCK = 0,
DSS_CLK_SRC_PLL1_1,
DSS_CLK_SRC_PLL1_2,
DSS_CLK_SRC_PLL1_3,
DSS_CLK_SRC_PLL2_1,
DSS_CLK_SRC_PLL2_2,
DSS_CLK_SRC_PLL2_3,
DSS_CLK_SRC_HDMI_PLL,
};
enum dss_pll_id {
DSS_PLL_DSI1,
DSS_PLL_DSI2,
DSS_PLL_HDMI,
DSS_PLL_VIDEO1,
DSS_PLL_VIDEO2,
};
struct dss_pll;
#define DSS_PLL_MAX_HSDIVS 4
enum dss_pll_type {
DSS_PLL_TYPE_A,
DSS_PLL_TYPE_B,
};
/*
* Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
* Type-B PLLs: clkout[0] refers to m2.
*/
struct dss_pll_clock_info {
/* rates that we get with dividers below */
unsigned long fint;
unsigned long clkdco;
unsigned long clkout[DSS_PLL_MAX_HSDIVS];
/* dividers */
u16 n;
u16 m;
u32 mf;
u16 mX[DSS_PLL_MAX_HSDIVS];
u16 sd;
};
struct dss_pll_ops {
int (*enable)(struct dss_pll *pll);
void (*disable)(struct dss_pll *pll);
int (*set_config)(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
};
struct dss_pll_hw {
enum dss_pll_type type;
unsigned int n_max;
unsigned int m_min;
unsigned int m_max;
unsigned int mX_max;
unsigned long fint_min, fint_max;
unsigned long clkdco_min, clkdco_low, clkdco_max;
u8 n_msb, n_lsb;
u8 m_msb, m_lsb;
u8 mX_msb[DSS_PLL_MAX_HSDIVS], mX_lsb[DSS_PLL_MAX_HSDIVS];
bool has_stopmode;
bool has_freqsel;
bool has_selfreqdco;
bool has_refsel;
/* DRA7 errata i886: use high N & M to avoid jitter */
bool errata_i886;
/* DRA7 errata i932: retry pll lock on failure */
bool errata_i932;
};
struct dss_pll {
const char *name;
enum dss_pll_id id;
struct dss_device *dss;
struct clk *clkin;
struct regulator *regulator;
void __iomem *base;
const struct dss_pll_hw *hw;
const struct dss_pll_ops *ops;
struct dss_pll_clock_info cinfo;
};
/* Defines a generic omap register field */
struct dss_reg_field {
u8 start, end;
};
struct dispc_clock_info {
/* rates that we get with dividers below */
unsigned long lck;
unsigned long pck;
/* dividers */
u16 lck_div;
u16 pck_div;
};
struct dss_lcd_mgr_config {
enum dss_io_pad_mode io_pad_mode;
bool stallmode;
bool fifohandcheck;
struct dispc_clock_info clock_info;
int video_port_width;
int lcden_sig_polarity;
};
#define DSS_SZ_REGS SZ_512
struct dss_device {
struct platform_device *pdev;
void __iomem *base;
struct regmap *syscon_pll_ctrl;
u32 syscon_pll_ctrl_offset;
struct platform_device *drm_pdev;
struct clk *parent_clk;
struct clk *dss_clk;
unsigned long dss_clk_rate;
unsigned long cache_req_pck;
unsigned long cache_prate;
struct dispc_clock_info cache_dispc_cinfo;
enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
enum dss_clk_source dispc_clk_source;
enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
bool ctx_valid;
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
const struct dss_features *feat;
struct {
struct dentry *root;
struct dss_debugfs_entry *clk;
struct dss_debugfs_entry *dss;
} debugfs;
struct dss_pll *plls[4];
struct dss_pll *video1_pll;
struct dss_pll *video2_pll;
struct dispc_device *dispc;
struct omap_drm_private *mgr_ops_priv;
};
/* core */
static inline int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
{
/* To be implemented when the OMAP platform provides this feature */
return 0;
}
static inline bool dss_mgr_is_lcd(enum omap_channel id)
{
if (id == OMAP_DSS_CHANNEL_LCD || id == OMAP_DSS_CHANNEL_LCD2 ||
id == OMAP_DSS_CHANNEL_LCD3)
return true;
else
return false;
}
/* DSS */
#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
struct dss_debugfs_entry *
dss_debugfs_create_file(struct dss_device *dss, const char *name,
int (*show_fn)(struct seq_file *s, void *data),
void *data);
void dss_debugfs_remove_file(struct dss_debugfs_entry *entry);
#else
static inline struct dss_debugfs_entry *
dss_debugfs_create_file(struct dss_device *dss, const char *name,
int (*show_fn)(struct seq_file *s, void *data),
void *data)
{
return NULL;
}
static inline void dss_debugfs_remove_file(struct dss_debugfs_entry *entry)
{
}
#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
struct dss_device *dss_get_device(struct device *dev);
int dss_runtime_get(struct dss_device *dss);
void dss_runtime_put(struct dss_device *dss);
unsigned long dss_get_dispc_clk_rate(struct dss_device *dss);
unsigned long dss_get_max_fck_rate(struct dss_device *dss);
int dss_dpi_select_source(struct dss_device *dss, int port,
enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(struct dss_device *dss,
enum dss_hdmi_venc_clk_source_select src);
const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
/* DSS VIDEO PLL */
struct dss_pll *dss_video_pll_init(struct dss_device *dss,
struct platform_device *pdev, int id,
struct regulator *regulator);
void dss_video_pll_uninit(struct dss_pll *pll);
void dss_ctrl_pll_enable(struct dss_pll *pll, bool enable);
void dss_sdi_init(struct dss_device *dss, int datapairs);
int dss_sdi_enable(struct dss_device *dss);
void dss_sdi_disable(struct dss_device *dss);
void dss_select_dsi_clk_source(struct dss_device *dss, int dsi_module,
enum dss_clk_source clk_src);
void dss_select_lcd_clk_source(struct dss_device *dss,
enum omap_channel channel,
enum dss_clk_source clk_src);
enum dss_clk_source dss_get_dispc_clk_source(struct dss_device *dss);
enum dss_clk_source dss_get_dsi_clk_source(struct dss_device *dss,
int dsi_module);
enum dss_clk_source dss_get_lcd_clk_source(struct dss_device *dss,
enum omap_channel channel);
void dss_set_venc_output(struct dss_device *dss, enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(struct dss_device *dss, bool enable);
int dss_set_fck_rate(struct dss_device *dss, unsigned long rate);
typedef bool (*dss_div_calc_func)(unsigned long fck, void *data);
bool dss_div_calc(struct dss_device *dss, unsigned long pck,
unsigned long fck_min, dss_div_calc_func func, void *data);
/* SDI */
#ifdef CONFIG_OMAP2_DSS_SDI
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
struct device_node *port);
void sdi_uninit_port(struct device_node *port);
#else
static inline int sdi_init_port(struct dss_device *dss,
struct platform_device *pdev,
struct device_node *port)
{
return 0;
}
static inline void sdi_uninit_port(struct device_node *port)
{
}
#endif
/* DSI */
#ifdef CONFIG_OMAP2_DSS_DSI
void dsi_irq_handler(void);
#endif
/* DPI */
#ifdef CONFIG_OMAP2_DSS_DPI
int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
struct device_node *port, enum dss_model dss_model);
void dpi_uninit_port(struct device_node *port);
#else
static inline int dpi_init_port(struct dss_device *dss,
struct platform_device *pdev,
struct device_node *port,
enum dss_model dss_model)
{
return 0;
}
static inline void dpi_uninit_port(struct device_node *port)
{
}
#endif
/* DISPC */
void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s);
int dispc_runtime_get(struct dispc_device *dispc);
void dispc_runtime_put(struct dispc_device *dispc);
int dispc_get_num_ovls(struct dispc_device *dispc);
int dispc_get_num_mgrs(struct dispc_device *dispc);
const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
enum omap_plane_id plane);
void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height);
bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
enum omap_plane_id plane, u32 fourcc);
enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane);
u32 dispc_read_irqstatus(struct dispc_device *dispc);
void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);
int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
void *dev_id);
void dispc_free_irq(struct dispc_device *dispc, void *dev_id);
u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
enum omap_channel channel);
u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
enum omap_channel channel);
u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
enum omap_channel channel);
u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc);
void dispc_mgr_enable(struct dispc_device *dispc,
enum omap_channel channel, bool enable);
bool dispc_mgr_go_busy(struct dispc_device *dispc,
enum omap_channel channel);
void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel);
void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
void dispc_mgr_set_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm);
void dispc_mgr_setup(struct dispc_device *dispc,
enum omap_channel channel,
const struct omap_overlay_manager_info *info);
int dispc_mgr_check_timings(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm);
u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
enum omap_channel channel);
void dispc_mgr_set_gamma(struct dispc_device *dispc,
enum omap_channel channel,
const struct drm_color_lut *lut,
unsigned int length);
int dispc_ovl_setup(struct dispc_device *dispc,
enum omap_plane_id plane,
const struct omap_overlay_info *oi,
const struct videomode *vm, bool mem_to_mem,
enum omap_channel channel);
int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable);
void dispc_enable_sidle(struct dispc_device *dispc);
void dispc_disable_sidle(struct dispc_device *dispc);
void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable);
void dispc_pck_free_enable(struct dispc_device *dispc, bool enable);
typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data);
bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data);
int dispc_calc_clock_rates(struct dispc_device *dispc,
unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc,
enum omap_plane_id plane, u32 low, u32 high);
void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
enum omap_plane_id plane,
u32 *fifo_low, u32 *fifo_high,
bool use_fifomerge, bool manual_update);
void dispc_mgr_set_clock_div(struct dispc_device *dispc,
enum omap_channel channel,
const struct dispc_clock_info *cinfo);
void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static inline void dss_collect_irq_stats(u32 irqstatus, unsigned int *irq_arr)
{
int b;
for (b = 0; b < 32; ++b) {
if (irqstatus & (1 << b))
irq_arr[b]++;
}
}
#endif
/* PLL */
typedef bool (*dss_pll_calc_func)(int n, int m, unsigned long fint,
unsigned long clkdco, void *data);
typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
void *data);
int dss_pll_register(struct dss_device *dss, struct dss_pll *pll);
void dss_pll_unregister(struct dss_pll *pll);
struct dss_pll *dss_pll_find(struct dss_device *dss, const char *name);
struct dss_pll *dss_pll_find_by_src(struct dss_device *dss,
enum dss_clk_source src);
unsigned int dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
int dss_pll_enable(struct dss_pll *pll);
void dss_pll_disable(struct dss_pll *pll);
int dss_pll_set_config(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
unsigned long out_min, unsigned long out_max,
dss_hsdiv_calc_func func, void *data);
bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
unsigned long pll_min, unsigned long pll_max,
dss_pll_calc_func func, void *data);
bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
unsigned long target_clkout, struct dss_pll_clock_info *cinfo);
int dss_pll_write_config_type_a(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
int dss_pll_write_config_type_b(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
int dss_pll_wait_reset_done(struct dss_pll *pll);
extern struct platform_driver omap_dsshw_driver;
extern struct platform_driver omap_dispchw_driver;
#ifdef CONFIG_OMAP2_DSS_DSI
extern struct platform_driver omap_dsihw_driver;
#endif
#ifdef CONFIG_OMAP2_DSS_VENC
extern struct platform_driver omap_venchw_driver;
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
extern struct platform_driver omapdss_hdmi4hw_driver;
#endif
#ifdef CONFIG_OMAP5_DSS_HDMI
extern struct platform_driver omapdss_hdmi5hw_driver;
#endif
#endif
|
/*
* Copyright (C) 2016 Hans de Goede <[email protected]>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
* licensing only applies to this file, and not this project as a
* whole.
*
* a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Or, alternatively,
*
* b) Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/* The Orange Pi PC Plus is an extended version of the regular PC */
#include "sun8i-h3-orangepi-pc.dts"
/ {
model = "Xunlong Orange Pi PC Plus";
compatible = "xunlong,orangepi-pc-plus", "allwinner,sun8i-h3";
aliases {
/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
ethernet1 = &rtl8189ftv;
};
};
&mmc1 {
vmmc-supply = <®_vcc3v3>;
bus-width = <4>;
non-removable;
status = "okay";
/*
* Explicitly define the sdio device, so that we can add an ethernet
* alias for it (which e.g. makes u-boot set a mac-address).
*/
rtl8189ftv: wifi@1 {
reg = <1>;
};
};
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&mmc2_8bit_pins>;
vmmc-supply = <®_vcc3v3>;
bus-width = <8>;
non-removable;
cap-mmc-hw-reset;
status = "okay";
};
&mmc2_8bit_pins {
/* Increase drive strength for DDR modes */
drive-strength = <40>;
/* eMMC is missing pull-ups */
bias-pull-up;
};
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2016 Chen-Yu Tsai
*
* Chen-Yu Tsai <[email protected]>
*/
#ifndef _CCU_SUN9I_A80_H_
#define _CCU_SUN9I_A80_H_
#include <dt-bindings/clock/sun9i-a80-ccu.h>
#include <dt-bindings/reset/sun9i-a80-ccu.h>
#define CLK_PLL_C0CPUX 0
#define CLK_PLL_C1CPUX 1
/* pll-audio and pll-periph0 are exported to the PRCM block */
#define CLK_PLL_VE 4
#define CLK_PLL_DDR 5
#define CLK_PLL_VIDEO0 6
#define CLK_PLL_VIDEO1 7
#define CLK_PLL_GPU 8
#define CLK_PLL_DE 9
#define CLK_PLL_ISP 10
#define CLK_PLL_PERIPH1 11
/* The CPUX clocks are exported */
#define CLK_ATB0 14
#define CLK_AXI0 15
#define CLK_ATB1 16
#define CLK_AXI1 17
#define CLK_GTBUS 18
#define CLK_AHB0 19
#define CLK_AHB1 20
#define CLK_AHB2 21
#define CLK_APB0 22
#define CLK_APB1 23
#define CLK_CCI400 24
#define CLK_ATS 25
#define CLK_TRACE 26
/* module clocks and bus gates exported */
#define CLK_NUMBER (CLK_BUS_UART5 + 1)
#endif /* _CCU_SUN9I_A80_H_ */
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 1999 by Uros Bizjak <[email protected]>
* Takashi Iwai <[email protected]>
*
* SB16ASP/AWE32 CSP control
*
* CSP microcode loader:
* alsa-tools/sb16_csp/
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/sb16_csp.h>
#include <sound/initval.h>
MODULE_AUTHOR("Uros Bizjak <[email protected]>");
MODULE_DESCRIPTION("ALSA driver for SB16 Creative Signal Processor");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sb16/mulaw_main.csp");
MODULE_FIRMWARE("sb16/alaw_main.csp");
MODULE_FIRMWARE("sb16/ima_adpcm_init.csp");
MODULE_FIRMWARE("sb16/ima_adpcm_playback.csp");
MODULE_FIRMWARE("sb16/ima_adpcm_capture.csp");
#ifdef SNDRV_LITTLE_ENDIAN
#define CSP_HDR_VALUE(a,b,c,d) ((a) | ((b)<<8) | ((c)<<16) | ((d)<<24))
#else
#define CSP_HDR_VALUE(a,b,c,d) ((d) | ((c)<<8) | ((b)<<16) | ((a)<<24))
#endif
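/* Pack four chunk-name characters into the 32-bit tags used to match
* RIFF section headers in the microcode image.
*/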
#define RIFF_HEADER CSP_HDR_VALUE('R', 'I', 'F', 'F')
#define CSP__HEADER CSP_HDR_VALUE('C', 'S', 'P', ' ')
#define LIST_HEADER CSP_HDR_VALUE('L', 'I', 'S', 'T')
#define FUNC_HEADER CSP_HDR_VALUE('f', 'u', 'n', 'c')
#define CODE_HEADER CSP_HDR_VALUE('c', 'o', 'd', 'e')
#define INIT_HEADER CSP_HDR_VALUE('i', 'n', 'i', 't')
#define MAIN_HEADER CSP_HDR_VALUE('m', 'a', 'i', 'n')
/*
* RIFF data format
*/
struct riff_header {
__le32 name;
__le32 len;
};
struct desc_header {
struct riff_header info;
__le16 func_nr;
__le16 VOC_type;
__le16 flags_play_rec;
__le16 flags_16bit_8bit;
__le16 flags_stereo_mono;
__le16 flags_rates;
};
/*
* prototypes
*/
static void snd_sb_csp_free(struct snd_hwdep *hw);
static int snd_sb_csp_open(struct snd_hwdep * hw, struct file *file);
static int snd_sb_csp_ioctl(struct snd_hwdep * hw, struct file *file, unsigned int cmd, unsigned long arg);
static int snd_sb_csp_release(struct snd_hwdep * hw, struct file *file);
static int csp_detect(struct snd_sb *chip, int *version);
static int set_codec_parameter(struct snd_sb *chip, unsigned char par, unsigned char val);
static int set_register(struct snd_sb *chip, unsigned char reg, unsigned char val);
static int read_register(struct snd_sb *chip, unsigned char reg);
static int set_mode_register(struct snd_sb *chip, unsigned char mode);
static int get_version(struct snd_sb *chip);
static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
struct snd_sb_csp_microcode __user * code);
static int snd_sb_csp_unload(struct snd_sb_csp * p);
static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags);
static int snd_sb_csp_autoload(struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode);
static int snd_sb_csp_check_version(struct snd_sb_csp * p);
static int snd_sb_csp_use(struct snd_sb_csp * p);
static int snd_sb_csp_unuse(struct snd_sb_csp * p);
static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channels);
static int snd_sb_csp_stop(struct snd_sb_csp * p);
static int snd_sb_csp_pause(struct snd_sb_csp * p);
static int snd_sb_csp_restart(struct snd_sb_csp * p);
static int snd_sb_qsound_build(struct snd_sb_csp * p);
static void snd_sb_qsound_destroy(struct snd_sb_csp * p);
static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p);
static int init_proc_entry(struct snd_sb_csp * p, int device);
static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer);
/*
* Detect CSP chip and create a new instance
*/
int snd_sb_csp_new(struct snd_sb *chip, int device, struct snd_hwdep ** rhwdep)
{
struct snd_sb_csp *p;
int version;
int err;
struct snd_hwdep *hw;
if (rhwdep)
*rhwdep = NULL;
if (csp_detect(chip, &version))
return -ENODEV;
err = snd_hwdep_new(chip->card, "SB16-CSP", device, &hw);
if (err < 0)
return err;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
snd_device_free(chip->card, hw);
return -ENOMEM;
}
p->chip = chip;
p->version = version;
/* CSP operators */
p->ops.csp_use = snd_sb_csp_use;
p->ops.csp_unuse = snd_sb_csp_unuse;
p->ops.csp_autoload = snd_sb_csp_autoload;
p->ops.csp_start = snd_sb_csp_start;
p->ops.csp_stop = snd_sb_csp_stop;
p->ops.csp_qsound_transfer = snd_sb_csp_qsound_transfer;
mutex_init(&p->access_mutex);
sprintf(hw->name, "CSP v%d.%d", (version >> 4), (version & 0x0f));
hw->iface = SNDRV_HWDEP_IFACE_SB16CSP;
hw->private_data = p;
hw->private_free = snd_sb_csp_free;
/* operators - only write/ioctl */
hw->ops.open = snd_sb_csp_open;
hw->ops.ioctl = snd_sb_csp_ioctl;
hw->ops.release = snd_sb_csp_release;
/* create a proc entry */
init_proc_entry(p, device);
if (rhwdep)
*rhwdep = hw;
return 0;
}
/*
* free_private for hwdep instance
*/
static void snd_sb_csp_free(struct snd_hwdep *hwdep)
{
int i;
struct snd_sb_csp *p = hwdep->private_data;
if (p) {
if (p->running & SNDRV_SB_CSP_ST_RUNNING)
snd_sb_csp_stop(p);
for (i = 0; i < ARRAY_SIZE(p->csp_programs); ++i)
release_firmware(p->csp_programs[i]);
kfree(p);
}
}
/* ------------------------------ */
/*
* open the device exclusively
*/
static int snd_sb_csp_open(struct snd_hwdep * hw, struct file *file)
{
struct snd_sb_csp *p = hw->private_data;
return snd_sb_csp_use(p);
}
/*
* ioctl for hwdep device:
*/
static int snd_sb_csp_ioctl(struct snd_hwdep * hw, struct file *file, unsigned int cmd, unsigned long arg)
{
struct snd_sb_csp *p = hw->private_data;
struct snd_sb_csp_info info;
struct snd_sb_csp_start start_info;
int err;
if (snd_BUG_ON(!p))
return -EINVAL;
if (snd_sb_csp_check_version(p))
return -ENODEV;
switch (cmd) {
/* get information */
case SNDRV_SB_CSP_IOCTL_INFO:
memset(&info, 0, sizeof(info));
strscpy(info.codec_name, p->codec_name, sizeof(info.codec_name));
info.func_nr = p->func_nr;
info.acc_format = p->acc_format;
info.acc_channels = p->acc_channels;
info.acc_width = p->acc_width;
info.acc_rates = p->acc_rates;
info.csp_mode = p->mode;
info.run_channels = p->run_channels;
info.run_width = p->run_width;
info.version = p->version;
info.state = p->running;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
err = -EFAULT;
else
err = 0;
break;
/* load CSP microcode */
case SNDRV_SB_CSP_IOCTL_LOAD_CODE:
err = (p->running & SNDRV_SB_CSP_ST_RUNNING ?
-EBUSY : snd_sb_csp_riff_load(p, (struct snd_sb_csp_microcode __user *) arg));
break;
case SNDRV_SB_CSP_IOCTL_UNLOAD_CODE:
err = (p->running & SNDRV_SB_CSP_ST_RUNNING ?
-EBUSY : snd_sb_csp_unload(p));
break;
/* change CSP running state */
case SNDRV_SB_CSP_IOCTL_START:
if (copy_from_user(&start_info, (void __user *) arg, sizeof(start_info)))
err = -EFAULT;
else
err = snd_sb_csp_start(p, start_info.sample_width, start_info.channels);
break;
case SNDRV_SB_CSP_IOCTL_STOP:
err = snd_sb_csp_stop(p);
break;
case SNDRV_SB_CSP_IOCTL_PAUSE:
err = snd_sb_csp_pause(p);
break;
case SNDRV_SB_CSP_IOCTL_RESTART:
err = snd_sb_csp_restart(p);
break;
default:
err = -ENOTTY;
break;
}
return err;
}
/*
* close the device
*/
static int snd_sb_csp_release(struct snd_hwdep * hw, struct file *file)
{
struct snd_sb_csp *p = hw->private_data;
return snd_sb_csp_unuse(p);
}
/* ------------------------------ */
/*
* acquire device
*/
static int snd_sb_csp_use(struct snd_sb_csp * p)
{
mutex_lock(&p->access_mutex);
if (p->used) {
mutex_unlock(&p->access_mutex);
return -EAGAIN;
}
p->used++;
mutex_unlock(&p->access_mutex);
return 0;
}
/*
* release device
*/
static int snd_sb_csp_unuse(struct snd_sb_csp * p)
{
mutex_lock(&p->access_mutex);
p->used--;
mutex_unlock(&p->access_mutex);
return 0;
}
/*
* load microcode via ioctl:
* code is user-space pointer
*/
static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
struct snd_sb_csp_microcode __user * mcode)
{
struct snd_sb_csp_mc_header info;
struct device *dev = p->chip->card->dev;
unsigned char __user *data_ptr;
unsigned char __user *data_end;
unsigned short func_nr = 0;
struct riff_header file_h, item_h, code_h;
__le32 item_type;
struct desc_header funcdesc_h;
unsigned long flags;
int err;
if (copy_from_user(&info, mcode, sizeof(info)))
return -EFAULT;
data_ptr = mcode->data;
if (copy_from_user(&file_h, data_ptr, sizeof(file_h)))
return -EFAULT;
if ((le32_to_cpu(file_h.name) != RIFF_HEADER) ||
(le32_to_cpu(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) {
dev_dbg(dev, "%s: Invalid RIFF header\n", __func__);
return -EINVAL;
}
data_ptr += sizeof(file_h);
data_end = data_ptr + le32_to_cpu(file_h.len);
if (copy_from_user(&item_type, data_ptr, sizeof(item_type)))
return -EFAULT;
if (le32_to_cpu(item_type) != CSP__HEADER) {
dev_dbg(dev, "%s: Invalid RIFF file type\n", __func__);
return -EINVAL;
}
data_ptr += sizeof (item_type);
for (; data_ptr < data_end; data_ptr += le32_to_cpu(item_h.len)) {
if (copy_from_user(&item_h, data_ptr, sizeof(item_h)))
return -EFAULT;
data_ptr += sizeof(item_h);
if (le32_to_cpu(item_h.name) != LIST_HEADER)
continue;
if (copy_from_user(&item_type, data_ptr, sizeof(item_type)))
return -EFAULT;
switch (le32_to_cpu(item_type)) {
case FUNC_HEADER:
if (copy_from_user(&funcdesc_h, data_ptr + sizeof(item_type), sizeof(funcdesc_h)))
return -EFAULT;
func_nr = le16_to_cpu(funcdesc_h.func_nr);
break;
case CODE_HEADER:
if (func_nr != info.func_req)
break; /* not required function, try next */
data_ptr += sizeof(item_type);
/* destroy QSound mixer element */
if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
snd_sb_qsound_destroy(p);
}
/* Clear all flags */
p->running = 0;
p->mode = 0;
/* load microcode blocks */
for (;;) {
if (data_ptr >= data_end)
return -EINVAL;
if (copy_from_user(&code_h, data_ptr, sizeof(code_h)))
return -EFAULT;
/* init microcode blocks */
if (le32_to_cpu(code_h.name) != INIT_HEADER)
break;
data_ptr += sizeof(code_h);
err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len),
SNDRV_SB_CSP_LOAD_INITBLOCK);
if (err)
return err;
data_ptr += le32_to_cpu(code_h.len);
}
/* main microcode block */
if (copy_from_user(&code_h, data_ptr, sizeof(code_h)))
return -EFAULT;
if (le32_to_cpu(code_h.name) != MAIN_HEADER) {
dev_dbg(dev, "%s: Missing 'main' microcode\n", __func__);
return -EINVAL;
}
data_ptr += sizeof(code_h);
err = snd_sb_csp_load_user(p, data_ptr,
le32_to_cpu(code_h.len), 0);
if (err)
return err;
/* fill in codec header */
strscpy(p->codec_name, info.codec_name, sizeof(p->codec_name));
p->func_nr = func_nr;
p->mode = le16_to_cpu(funcdesc_h.flags_play_rec);
switch (le16_to_cpu(funcdesc_h.VOC_type)) {
case 0x0001: /* QSound decoder */
if (le16_to_cpu(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) {
if (snd_sb_qsound_build(p) == 0)
/* set QSound flag and clear all other mode flags */
p->mode = SNDRV_SB_CSP_MODE_QSOUND;
}
p->acc_format = 0;
break;
case 0x0006: /* A Law codec */
p->acc_format = SNDRV_PCM_FMTBIT_A_LAW;
break;
case 0x0007: /* Mu Law codec */
p->acc_format = SNDRV_PCM_FMTBIT_MU_LAW;
break;
case 0x0011: /* what Creative thinks is IMA ADPCM codec */
case 0x0200: /* Creative ADPCM codec */
p->acc_format = SNDRV_PCM_FMTBIT_IMA_ADPCM;
break;
case 0x0201: /* Text 2 Speech decoder */
/* TODO: Text2Speech handling routines */
p->acc_format = 0;
break;
case 0x0202: /* Fast Speech 8 codec */
case 0x0203: /* Fast Speech 10 codec */
p->acc_format = SNDRV_PCM_FMTBIT_SPECIAL;
break;
default: /* other codecs are unsupported */
p->acc_format = p->acc_width = p->acc_rates = 0;
p->mode = 0;
dev_dbg(dev, "%s: Unsupported CSP codec type: 0x%04x\n",
__func__,
le16_to_cpu(funcdesc_h.VOC_type));
return -EINVAL;
}
p->acc_channels = le16_to_cpu(funcdesc_h.flags_stereo_mono);
p->acc_width = le16_to_cpu(funcdesc_h.flags_16bit_8bit);
p->acc_rates = le16_to_cpu(funcdesc_h.flags_rates);
/* Decouple CSP from IRQ and DMAREQ lines */
spin_lock_irqsave(&p->chip->reg_lock, flags);
set_mode_register(p->chip, 0xfc);
set_mode_register(p->chip, 0x00);
spin_unlock_irqrestore(&p->chip->reg_lock, flags);
/* finished loading successfully */
p->running = SNDRV_SB_CSP_ST_LOADED; /* set LOADED flag */
return 0;
}
}
dev_dbg(dev, "%s: Function #%d not found\n", __func__, info.func_req);
return -EINVAL;
}
/*
* unload CSP microcode
*/
static int snd_sb_csp_unload(struct snd_sb_csp * p)
{
if (p->running & SNDRV_SB_CSP_ST_RUNNING)
return -EBUSY;
if (!(p->running & SNDRV_SB_CSP_ST_LOADED))
return -ENXIO;
/* clear supported formats */
p->acc_format = 0;
p->acc_channels = p->acc_width = p->acc_rates = 0;
/* destroy QSound mixer element */
if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
snd_sb_qsound_destroy(p);
}
/* clear all flags */
p->running = 0;
p->mode = 0;
return 0;
}
/*
* send command sequence to DSP
*/
static inline int command_seq(struct snd_sb *chip, const unsigned char *seq, int size)
{
int i;
for (i = 0; i < size; i++) {
if (!snd_sbdsp_command(chip, seq[i]))
return -EIO;
}
return 0;
}
/*
* set CSP codec parameter
*/
static int set_codec_parameter(struct snd_sb *chip, unsigned char par, unsigned char val)
{
unsigned char dsp_cmd[3];
dsp_cmd[0] = 0x05; /* CSP set codec parameter */
dsp_cmd[1] = val; /* Parameter value */
dsp_cmd[2] = par; /* Parameter */
command_seq(chip, dsp_cmd, 3);
snd_sbdsp_command(chip, 0x03); /* DSP read? */
if (snd_sbdsp_get_byte(chip) != par)
return -EIO;
return 0;
}
/*
* set CSP register
*/
static int set_register(struct snd_sb *chip, unsigned char reg, unsigned char val)
{
unsigned char dsp_cmd[3];
dsp_cmd[0] = 0x0e; /* CSP set register */
dsp_cmd[1] = reg; /* CSP Register */
dsp_cmd[2] = val; /* value */
return command_seq(chip, dsp_cmd, 3);
}
/*
* read CSP register
* return < 0 -> error
*/
static int read_register(struct snd_sb *chip, unsigned char reg)
{
unsigned char dsp_cmd[2];
dsp_cmd[0] = 0x0f; /* CSP read register */
dsp_cmd[1] = reg; /* CSP Register */
command_seq(chip, dsp_cmd, 2);
return snd_sbdsp_get_byte(chip); /* Read DSP value */
}
/*
* set CSP mode register
*/
static int set_mode_register(struct snd_sb *chip, unsigned char mode)
{
unsigned char dsp_cmd[2];
dsp_cmd[0] = 0x04; /* CSP set mode register */
dsp_cmd[1] = mode; /* mode */
return command_seq(chip, dsp_cmd, 2);
}
/*
* Detect CSP
* return 0 if CSP exists.
*/
static int csp_detect(struct snd_sb *chip, int *version)
{
unsigned char csp_test1, csp_test2;
unsigned long flags;
int result = -ENODEV;
spin_lock_irqsave(&chip->reg_lock, flags);
set_codec_parameter(chip, 0x00, 0x00);
set_mode_register(chip, 0xfc); /* 0xfc = ?? */
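/* Register 0x83 must hold an inverted and then a restored test
* pattern, otherwise no CSP is present.
*/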
csp_test1 = read_register(chip, 0x83);
set_register(chip, 0x83, ~csp_test1);
csp_test2 = read_register(chip, 0x83);
if (csp_test2 != (csp_test1 ^ 0xff))
goto __fail;
set_register(chip, 0x83, csp_test1);
csp_test2 = read_register(chip, 0x83);
if (csp_test2 != csp_test1)
goto __fail;
set_mode_register(chip, 0x00); /* 0x00 = ? */
*version = get_version(chip);
snd_sbdsp_reset(chip); /* reset DSP after getversion! */
if (*version >= 0x10 && *version <= 0x1f)
result = 0; /* valid version id */
__fail:
spin_unlock_irqrestore(&chip->reg_lock, flags);
return result;
}
/*
* get CSP version number
*/
static int get_version(struct snd_sb *chip)
{
unsigned char dsp_cmd[2];
dsp_cmd[0] = 0x08; /* SB_DSP_!something! */
dsp_cmd[1] = 0x03; /* get chip version id? */
command_seq(chip, dsp_cmd, 2);
return snd_sbdsp_get_byte(chip);
}
/*
* check if the CSP version is valid
*/
static int snd_sb_csp_check_version(struct snd_sb_csp * p)
{
if (p->version < 0x10 || p->version > 0x1f) {
dev_dbg(p->chip->card->dev,
"%s: Invalid CSP version: 0x%x\n",
__func__, p->version);
return 1;
}
return 0;
}
/*
* download microcode to CSP (microcode should have one "main" block).
*/
static int snd_sb_csp_load(struct snd_sb_csp * p, const unsigned char *buf, int size, int load_flags)
{
int status, i;
int err;
int result = -EIO;
unsigned long flags;
spin_lock_irqsave(&p->chip->reg_lock, flags);
snd_sbdsp_command(p->chip, 0x01); /* CSP download command */
if (snd_sbdsp_get_byte(p->chip)) {
dev_dbg(p->chip->card->dev, "%s: Download command failed\n", __func__);
goto __fail;
}
/* Send CSP low byte (size - 1) */
snd_sbdsp_command(p->chip, (unsigned char)(size - 1));
/* Send high byte */
snd_sbdsp_command(p->chip, (unsigned char)((size - 1) >> 8));
/* send microcode sequence */
/* load from kernel space */
while (size--) {
if (!snd_sbdsp_command(p->chip, *buf++))
goto __fail;
}
if (snd_sbdsp_get_byte(p->chip))
goto __fail;
if (load_flags & SNDRV_SB_CSP_LOAD_INITBLOCK) {
i = 0;
/* some codecs (FastSpeech) take some time to initialize */
while (1) {
snd_sbdsp_command(p->chip, 0x03);
status = snd_sbdsp_get_byte(p->chip);
if (status == 0x55 || ++i >= 10)
break;
udelay (10);
}
if (status != 0x55) {
dev_dbg(p->chip->card->dev,
"%s: Microcode initialization failed\n",
__func__);
goto __fail;
}
} else {
/*
* Read mixer register SB_DSP4_DMASETUP after loading 'main' code.
* Start CSP chip if no 16bit DMA channel is set - some kind
* of autorun or perhaps a bugfix?
*/
spin_lock(&p->chip->mixer_lock);
status = snd_sbmixer_read(p->chip, SB_DSP4_DMASETUP);
spin_unlock(&p->chip->mixer_lock);
if (!(status & (SB_DMASETUP_DMA7 | SB_DMASETUP_DMA6 | SB_DMASETUP_DMA5))) {
err = (set_codec_parameter(p->chip, 0xaa, 0x00) ||
set_codec_parameter(p->chip, 0xff, 0x00));
snd_sbdsp_reset(p->chip); /* really! */
if (err)
goto __fail;
set_mode_register(p->chip, 0xc0); /* c0 = STOP */
set_mode_register(p->chip, 0x70); /* 70 = RUN */
}
}
result = 0;
__fail:
spin_unlock_irqrestore(&p->chip->reg_lock, flags);
return result;
}
static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags)
{
int err;
unsigned char *kbuf;
kbuf = memdup_user(buf, size);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
err = snd_sb_csp_load(p, kbuf, size, load_flags);
kfree(kbuf);
return err;
}
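/*
 * Firmware images are fetched once with request_firmware() and cached
 * in p->csp_programs[], so repeated autoloads reuse the already
 * downloaded image instead of hitting the filesystem again.
 */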
static int snd_sb_csp_firmware_load(struct snd_sb_csp *p, int index, int flags)
{
static const char *const names[] = {
"sb16/mulaw_main.csp",
"sb16/alaw_main.csp",
"sb16/ima_adpcm_init.csp",
"sb16/ima_adpcm_playback.csp",
"sb16/ima_adpcm_capture.csp",
};
const struct firmware *program;
BUILD_BUG_ON(ARRAY_SIZE(names) != CSP_PROGRAM_COUNT);
program = p->csp_programs[index];
if (!program) {
int err = request_firmware(&program, names[index],
p->chip->card->dev);
if (err < 0)
return err;
p->csp_programs[index] = program;
}
return snd_sb_csp_load(p, program->data, program->size, flags);
}
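/*
 * Note: p->acc_format is a bitmask of SNDRV_PCM_FMTBIT_* values, so
 * the "(1U << pcm_sfmt) & p->acc_format" test below checks whether the
 * currently loaded program already handles the requested sample format.
 */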
/*
* autoload hardware codec if necessary
* return 0 if CSP is loaded and ready to run (p->running != 0)
*/
static int snd_sb_csp_autoload(struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode)
{
unsigned long flags;
int err = 0;
/* if CSP is running or manually loaded then exit */
if (p->running & (SNDRV_SB_CSP_ST_RUNNING | SNDRV_SB_CSP_ST_LOADED))
return -EBUSY;
/* autoload microcode only if requested hardware codec is not already loaded */
if (((1U << (__force int)pcm_sfmt) & p->acc_format) && (play_rec_mode & p->mode)) {
p->running = SNDRV_SB_CSP_ST_AUTO;
} else {
switch (pcm_sfmt) {
case SNDRV_PCM_FORMAT_MU_LAW:
err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_MULAW, 0);
p->acc_format = SNDRV_PCM_FMTBIT_MU_LAW;
p->mode = SNDRV_SB_CSP_MODE_DSP_READ | SNDRV_SB_CSP_MODE_DSP_WRITE;
break;
case SNDRV_PCM_FORMAT_A_LAW:
err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_ALAW, 0);
p->acc_format = SNDRV_PCM_FMTBIT_A_LAW;
p->mode = SNDRV_SB_CSP_MODE_DSP_READ | SNDRV_SB_CSP_MODE_DSP_WRITE;
break;
case SNDRV_PCM_FORMAT_IMA_ADPCM:
err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_ADPCM_INIT,
SNDRV_SB_CSP_LOAD_INITBLOCK);
if (err)
break;
if (play_rec_mode == SNDRV_SB_CSP_MODE_DSP_WRITE) {
err = snd_sb_csp_firmware_load
(p, CSP_PROGRAM_ADPCM_PLAYBACK, 0);
p->mode = SNDRV_SB_CSP_MODE_DSP_WRITE;
} else {
err = snd_sb_csp_firmware_load
(p, CSP_PROGRAM_ADPCM_CAPTURE, 0);
p->mode = SNDRV_SB_CSP_MODE_DSP_READ;
}
p->acc_format = SNDRV_PCM_FMTBIT_IMA_ADPCM;
break;
default:
/* Decouple CSP from IRQ and DMAREQ lines */
if (p->running & SNDRV_SB_CSP_ST_AUTO) {
spin_lock_irqsave(&p->chip->reg_lock, flags);
set_mode_register(p->chip, 0xfc);
set_mode_register(p->chip, 0x00);
spin_unlock_irqrestore(&p->chip->reg_lock, flags);
p->running = 0; /* clear autoloaded flag */
}
return -EINVAL;
}
if (err) {
p->acc_format = 0;
p->acc_channels = p->acc_width = p->acc_rates = 0;
p->running = 0; /* clear autoloaded flag */
p->mode = 0;
return err;
} else {
p->running = SNDRV_SB_CSP_ST_AUTO; /* set autoloaded flag */
p->acc_width = SNDRV_SB_CSP_SAMPLE_16BIT; /* only 16 bit data */
p->acc_channels = SNDRV_SB_CSP_MONO | SNDRV_SB_CSP_STEREO;
p->acc_rates = SNDRV_SB_CSP_RATE_ALL; /* HW codecs accept all rates */
}
}
return (p->running & SNDRV_SB_CSP_ST_AUTO) ? 0 : -ENXIO;
}
/*
* start CSP
*/
static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channels)
{
struct device *dev = p->chip->card->dev;
unsigned char s_type; /* sample type */
unsigned char mixL, mixR;
int result = -EIO;
unsigned long flags;
if (!(p->running & (SNDRV_SB_CSP_ST_LOADED | SNDRV_SB_CSP_ST_AUTO))) {
dev_dbg(dev, "%s: Microcode not loaded\n", __func__);
return -ENXIO;
}
if (p->running & SNDRV_SB_CSP_ST_RUNNING) {
dev_dbg(dev, "%s: CSP already running\n", __func__);
return -EBUSY;
}
if (!(sample_width & p->acc_width)) {
dev_dbg(dev, "%s: Unsupported PCM sample width\n", __func__);
return -EINVAL;
}
if (!(channels & p->acc_channels)) {
dev_dbg(dev, "%s: Invalid number of channels\n", __func__);
return -EINVAL;
}
/* Mute PCM volume */
spin_lock_irqsave(&p->chip->mixer_lock, flags);
mixL = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV);
mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock);
set_mode_register(p->chip, 0xc0); /* c0 = STOP */
set_mode_register(p->chip, 0x70); /* 70 = RUN */
s_type = 0x00;
if (channels == SNDRV_SB_CSP_MONO)
s_type = 0x11; /* 000n 000n (n = 1 if mono) */
if (sample_width == SNDRV_SB_CSP_SAMPLE_8BIT)
s_type |= 0x22; /* 00dX 00dX (d = 1 if 8 bit samples) */
if (set_codec_parameter(p->chip, 0x81, s_type)) {
dev_dbg(dev, "%s: Set sample type command failed\n", __func__);
goto __fail;
}
if (set_codec_parameter(p->chip, 0x80, 0x00)) {
dev_dbg(dev, "%s: Codec start command failed\n", __func__);
goto __fail;
}
p->run_width = sample_width;
p->run_channels = channels;
p->running |= SNDRV_SB_CSP_ST_RUNNING;
if (p->mode & SNDRV_SB_CSP_MODE_QSOUND) {
set_codec_parameter(p->chip, 0xe0, 0x01);
/* enable QSound decoder */
set_codec_parameter(p->chip, 0x00, 0xff);
set_codec_parameter(p->chip, 0x01, 0xff);
p->running |= SNDRV_SB_CSP_ST_QSOUND;
/* set QSound startup value */
snd_sb_csp_qsound_transfer(p);
}
result = 0;
__fail:
spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */
spin_lock_irqsave(&p->chip->mixer_lock, flags);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
return result;
}
/*
* stop CSP
*/
static int snd_sb_csp_stop(struct snd_sb_csp * p)
{
int result;
unsigned char mixL, mixR;
unsigned long flags;
if (!(p->running & SNDRV_SB_CSP_ST_RUNNING))
return 0;
/* Mute PCM volume */
spin_lock_irqsave(&p->chip->mixer_lock, flags);
mixL = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV);
mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock);
if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
set_codec_parameter(p->chip, 0xe0, 0x01);
/* disable QSound decoder */
set_codec_parameter(p->chip, 0x00, 0x00);
set_codec_parameter(p->chip, 0x01, 0x00);
p->running &= ~SNDRV_SB_CSP_ST_QSOUND;
}
result = set_mode_register(p->chip, 0xc0); /* c0 = STOP */
spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */
spin_lock_irqsave(&p->chip->mixer_lock, flags);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
if (!result)
p->running &= ~(SNDRV_SB_CSP_ST_PAUSED | SNDRV_SB_CSP_ST_RUNNING);
return result;
}
/*
* pause CSP codec and hold DMA transfer
*/
static int snd_sb_csp_pause(struct snd_sb_csp * p)
{
int result;
unsigned long flags;
if (!(p->running & SNDRV_SB_CSP_ST_RUNNING))
return -EBUSY;
spin_lock_irqsave(&p->chip->reg_lock, flags);
result = set_codec_parameter(p->chip, 0x80, 0xff);
spin_unlock_irqrestore(&p->chip->reg_lock, flags);
if (!result)
p->running |= SNDRV_SB_CSP_ST_PAUSED;
return result;
}
/*
* restart CSP codec and resume DMA transfer
*/
static int snd_sb_csp_restart(struct snd_sb_csp * p)
{
int result;
unsigned long flags;
if (!(p->running & SNDRV_SB_CSP_ST_PAUSED))
return -EBUSY;
spin_lock_irqsave(&p->chip->reg_lock, flags);
result = set_codec_parameter(p->chip, 0x80, 0x00);
spin_unlock_irqrestore(&p->chip->reg_lock, flags);
if (!result)
p->running &= ~SNDRV_SB_CSP_ST_PAUSED;
return result;
}
/* ------------------------------ */
/*
* QSound mixer control for PCM
*/
#define snd_sb_qsound_switch_info snd_ctl_boolean_mono_info
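/*
 * The controls below only update p->q_enabled and p->qpos_left/right
 * under q_lock; the new position is pushed to the chip later by
 * snd_sb_csp_qsound_transfer(), which runs from the interrupt path and
 * clears qpos_changed once the parameters have been written.
 */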
static int snd_sb_qsound_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = p->q_enabled ? 1 : 0;
return 0;
}
static int snd_sb_qsound_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int change;
unsigned char nval;
nval = ucontrol->value.integer.value[0] & 0x01;
spin_lock_irqsave(&p->q_lock, flags);
change = p->q_enabled != nval;
p->q_enabled = nval;
spin_unlock_irqrestore(&p->q_lock, flags);
return change;
}
static int snd_sb_qsound_space_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = SNDRV_SB_CSP_QSOUND_MAX_RIGHT;
return 0;
}
static int snd_sb_qsound_space_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
unsigned long flags;
spin_lock_irqsave(&p->q_lock, flags);
ucontrol->value.integer.value[0] = p->qpos_left;
ucontrol->value.integer.value[1] = p->qpos_right;
spin_unlock_irqrestore(&p->q_lock, flags);
return 0;
}
static int snd_sb_qsound_space_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int change;
unsigned char nval1, nval2;
nval1 = ucontrol->value.integer.value[0];
if (nval1 > SNDRV_SB_CSP_QSOUND_MAX_RIGHT)
nval1 = SNDRV_SB_CSP_QSOUND_MAX_RIGHT;
nval2 = ucontrol->value.integer.value[1];
if (nval2 > SNDRV_SB_CSP_QSOUND_MAX_RIGHT)
nval2 = SNDRV_SB_CSP_QSOUND_MAX_RIGHT;
spin_lock_irqsave(&p->q_lock, flags);
change = p->qpos_left != nval1 || p->qpos_right != nval2;
p->qpos_left = nval1;
p->qpos_right = nval2;
p->qpos_changed = change;
spin_unlock_irqrestore(&p->q_lock, flags);
return change;
}
static const struct snd_kcontrol_new snd_sb_qsound_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "3D Control - Switch",
.info = snd_sb_qsound_switch_info,
.get = snd_sb_qsound_switch_get,
.put = snd_sb_qsound_switch_put
};
static const struct snd_kcontrol_new snd_sb_qsound_space = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "3D Control - Space",
.info = snd_sb_qsound_space_info,
.get = snd_sb_qsound_space_get,
.put = snd_sb_qsound_space_put
};
static int snd_sb_qsound_build(struct snd_sb_csp * p)
{
struct snd_card *card;
struct snd_kcontrol *kctl;
int err;
if (snd_BUG_ON(!p))
return -EINVAL;
card = p->chip->card;
p->qpos_left = p->qpos_right = SNDRV_SB_CSP_QSOUND_MAX_RIGHT / 2;
p->qpos_changed = 0;
spin_lock_init(&p->q_lock);
kctl = snd_ctl_new1(&snd_sb_qsound_switch, p);
err = snd_ctl_add(card, kctl);
if (err < 0)
goto __error;
p->qsound_switch = kctl;
kctl = snd_ctl_new1(&snd_sb_qsound_space, p);
err = snd_ctl_add(card, kctl);
if (err < 0)
goto __error;
p->qsound_space = kctl;
return 0;
__error:
snd_sb_qsound_destroy(p);
return err;
}
static void snd_sb_qsound_destroy(struct snd_sb_csp * p)
{
struct snd_card *card;
unsigned long flags;
if (snd_BUG_ON(!p))
return;
card = p->chip->card;
snd_ctl_remove(card, p->qsound_switch);
p->qsound_switch = NULL;
snd_ctl_remove(card, p->qsound_space);
p->qsound_space = NULL;
/* cancel pending transfer of QSound parameters */
spin_lock_irqsave(&p->q_lock, flags);
p->qpos_changed = 0;
spin_unlock_irqrestore(&p->q_lock, flags);
}
/*
 * Transfer QSound parameters to the CSP; this function should be
 * called from the interrupt routine.
 */
static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p)
{
int err = -ENXIO;
spin_lock(&p->q_lock);
if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
set_codec_parameter(p->chip, 0xe0, 0x01);
/* left channel */
set_codec_parameter(p->chip, 0x00, p->qpos_left);
set_codec_parameter(p->chip, 0x02, 0x00);
/* right channel */
set_codec_parameter(p->chip, 0x00, p->qpos_right);
set_codec_parameter(p->chip, 0x03, 0x00);
err = 0;
}
p->qpos_changed = 0;
spin_unlock(&p->q_lock);
return err;
}
/* ------------------------------ */
/*
* proc interface
*/
static int init_proc_entry(struct snd_sb_csp * p, int device)
{
char name[16];
sprintf(name, "cspD%d", device);
snd_card_ro_proc_new(p->chip->card, name, p, info_read);
return 0;
}
static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
struct snd_sb_csp *p = entry->private_data;
snd_iprintf(buffer, "Creative Signal Processor [v%d.%d]\n", (p->version >> 4), (p->version & 0x0f));
snd_iprintf(buffer, "State: %cx%c%c%c\n", ((p->running & SNDRV_SB_CSP_ST_QSOUND) ? 'Q' : '-'),
((p->running & SNDRV_SB_CSP_ST_PAUSED) ? 'P' : '-'),
((p->running & SNDRV_SB_CSP_ST_RUNNING) ? 'R' : '-'),
((p->running & SNDRV_SB_CSP_ST_LOADED) ? 'L' : '-'));
if (p->running & SNDRV_SB_CSP_ST_LOADED) {
snd_iprintf(buffer, "Codec: %s [func #%d]\n", p->codec_name, p->func_nr);
snd_iprintf(buffer, "Sample rates: ");
if (p->acc_rates == SNDRV_SB_CSP_RATE_ALL) {
snd_iprintf(buffer, "All\n");
} else {
snd_iprintf(buffer, "%s%s%s%s\n",
((p->acc_rates & SNDRV_SB_CSP_RATE_8000) ? "8000Hz " : ""),
((p->acc_rates & SNDRV_SB_CSP_RATE_11025) ? "11025Hz " : ""),
((p->acc_rates & SNDRV_SB_CSP_RATE_22050) ? "22050Hz " : ""),
((p->acc_rates & SNDRV_SB_CSP_RATE_44100) ? "44100Hz" : ""));
}
if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
snd_iprintf(buffer, "QSound decoder %sabled\n",
p->q_enabled ? "en" : "dis");
} else {
snd_iprintf(buffer, "PCM format ID: 0x%x (%s/%s) [%s/%s] [%s/%s]\n",
p->acc_format,
((p->acc_width & SNDRV_SB_CSP_SAMPLE_16BIT) ? "16bit" : "-"),
((p->acc_width & SNDRV_SB_CSP_SAMPLE_8BIT) ? "8bit" : "-"),
((p->acc_channels & SNDRV_SB_CSP_MONO) ? "mono" : "-"),
((p->acc_channels & SNDRV_SB_CSP_STEREO) ? "stereo" : "-"),
((p->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) ? "playback" : "-"),
((p->mode & SNDRV_SB_CSP_MODE_DSP_READ) ? "capture" : "-"));
}
}
if (p->running & SNDRV_SB_CSP_ST_AUTO) {
snd_iprintf(buffer, "Autoloaded Mu-Law, A-Law or Ima-ADPCM hardware codec\n");
}
if (p->running & SNDRV_SB_CSP_ST_RUNNING) {
snd_iprintf(buffer, "Processing %dbit %s PCM samples\n",
((p->run_width & SNDRV_SB_CSP_SAMPLE_16BIT) ? 16 : 8),
((p->run_channels & SNDRV_SB_CSP_MONO) ? "mono" : "stereo"));
}
if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
snd_iprintf(buffer, "Qsound position: left = 0x%x, right = 0x%x\n",
p->qpos_left, p->qpos_right);
}
}
EXPORT_SYMBOL(snd_sb_csp_new);
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/regulator/lp872x.h>
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
/* Registers : LP8720/8725 shared */
#define LP872X_GENERAL_CFG 0x00
#define LP872X_LDO1_VOUT 0x01
#define LP872X_LDO2_VOUT 0x02
#define LP872X_LDO3_VOUT 0x03
#define LP872X_LDO4_VOUT 0x04
#define LP872X_LDO5_VOUT 0x05
/* Registers : LP8720 */
#define LP8720_BUCK_VOUT1 0x06
#define LP8720_BUCK_VOUT2 0x07
#define LP8720_ENABLE 0x08
/* Registers : LP8725 */
#define LP8725_LILO1_VOUT 0x06
#define LP8725_LILO2_VOUT 0x07
#define LP8725_BUCK1_VOUT1 0x08
#define LP8725_BUCK1_VOUT2 0x09
#define LP8725_BUCK2_VOUT1 0x0A
#define LP8725_BUCK2_VOUT2 0x0B
#define LP8725_BUCK_CTRL 0x0C
#define LP8725_LDO_CTRL 0x0D
/* Mask/shift : LP8720/LP8725 shared */
#define LP872X_VOUT_M 0x1F
#define LP872X_START_DELAY_M 0xE0
#define LP872X_START_DELAY_S 5
#define LP872X_EN_LDO1_M BIT(0)
#define LP872X_EN_LDO2_M BIT(1)
#define LP872X_EN_LDO3_M BIT(2)
#define LP872X_EN_LDO4_M BIT(3)
#define LP872X_EN_LDO5_M BIT(4)
/* Mask/shift : LP8720 */
#define LP8720_TIMESTEP_S 0 /* Addr 00h */
#define LP8720_TIMESTEP_M BIT(0)
#define LP8720_EXT_DVS_M BIT(2)
#define LP8720_BUCK_FPWM_S 5 /* Addr 07h */
#define LP8720_BUCK_FPWM_M BIT(5)
#define LP8720_EN_BUCK_M BIT(5) /* Addr 08h */
#define LP8720_DVS_SEL_M BIT(7)
/* Mask/shift : LP8725 */
#define LP8725_TIMESTEP_M 0xC0 /* Addr 00h */
#define LP8725_TIMESTEP_S 6
#define LP8725_BUCK1_EN_M BIT(0)
#define LP8725_DVS1_M BIT(2)
#define LP8725_DVS2_M BIT(3)
#define LP8725_BUCK2_EN_M BIT(4)
#define LP8725_BUCK_CL_M 0xC0 /* Addr 09h, 0Bh */
#define LP8725_BUCK_CL_S 6
#define LP8725_BUCK1_FPWM_S 1 /* Addr 0Ch */
#define LP8725_BUCK1_FPWM_M BIT(1)
#define LP8725_BUCK2_FPWM_S 5
#define LP8725_BUCK2_FPWM_M BIT(5)
#define LP8725_EN_LILO1_M BIT(5) /* Addr 0Dh */
#define LP8725_EN_LILO2_M BIT(6)
/* PWM mode */
#define LP872X_FORCE_PWM 1
#define LP872X_AUTO_PWM 0
#define LP8720_NUM_REGULATORS 6
#define LP8725_NUM_REGULATORS 9
#define EXTERN_DVS_USED 0
#define MAX_DELAY 6
/* Default DVS Mode */
#define LP8720_DEFAULT_DVS 0
#define LP8725_DEFAULT_DVS BIT(2)
/* dump registers in regmap-debugfs */
#define MAX_REGISTERS 0x0F
enum lp872x_id {
LP8720,
LP8725,
};
struct lp872x {
struct regmap *regmap;
struct device *dev;
enum lp872x_id chipid;
struct lp872x_platform_data *pdata;
int num_regulators;
enum gpiod_flags dvs_pin;
};
/* LP8720/LP8725 shared voltage table for LDOs */
static const unsigned int lp872x_ldo_vtbl[] = {
1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 2000000,
2100000, 2200000, 2300000, 2400000, 2500000, 2600000, 2650000, 2700000,
2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3100000, 3300000,
};
/* LP8720 LDO4 voltage table */
static const unsigned int lp8720_ldo4_vtbl[] = {
800000, 850000, 900000, 1000000, 1100000, 1200000, 1250000, 1300000,
1350000, 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000,
1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000,
2400000, 2500000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000,
};
/* LP8725 LILO(Low Input Low Output) voltage table */
static const unsigned int lp8725_lilo_vtbl[] = {
800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000,
1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
2600000, 2700000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
};
/* LP8720 BUCK voltage table */
#define EXT_R 0 /* external resistor divider */
static const unsigned int lp8720_buck_vtbl[] = {
EXT_R, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000,
1950000, 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000,
};
/* LP8725 BUCK voltage table */
static const unsigned int lp8725_buck_vtbl[] = {
800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000,
1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000,
2400000, 2500000, 2600000, 2700000, 2800000, 2850000, 2900000, 3000000,
};
/* LP8725 BUCK current limit */
static const unsigned int lp8725_buck_uA[] = {
460000, 780000, 1050000, 1370000,
};
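/*
 * The limit is selected through the LP8725_BUCK_CL_M field (bits 7:6
 * of the BUCKx_VOUT2 registers); the regulator core maps an index into
 * this table via the csel_reg/csel_mask entries in the descriptors
 * below.
 */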
static int lp872x_read_byte(struct lp872x *lp, u8 addr, u8 *data)
{
int ret;
unsigned int val;
ret = regmap_read(lp->regmap, addr, &val);
if (ret < 0) {
dev_err(lp->dev, "failed to read 0x%.2x\n", addr);
return ret;
}
*data = (u8)val;
return 0;
}
static inline int lp872x_write_byte(struct lp872x *lp, u8 addr, u8 data)
{
return regmap_write(lp->regmap, addr, data);
}
static inline int lp872x_update_bits(struct lp872x *lp, u8 addr,
unsigned int mask, u8 data)
{
return regmap_update_bits(lp->regmap, addr, mask, data);
}
static int lp872x_get_timestep_usec(struct lp872x *lp)
{
enum lp872x_id chip = lp->chipid;
u8 val, mask, shift;
int *time_usec, size, ret;
int lp8720_time_usec[] = { 25, 50 };
int lp8725_time_usec[] = { 32, 64, 128, 256 };
switch (chip) {
case LP8720:
mask = LP8720_TIMESTEP_M;
shift = LP8720_TIMESTEP_S;
time_usec = &lp8720_time_usec[0];
size = ARRAY_SIZE(lp8720_time_usec);
break;
case LP8725:
mask = LP8725_TIMESTEP_M;
shift = LP8725_TIMESTEP_S;
time_usec = &lp8725_time_usec[0];
size = ARRAY_SIZE(lp8725_time_usec);
break;
default:
return -EINVAL;
}
ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
if (ret)
return ret;
val = (val & mask) >> shift;
if (val >= size)
return -EINVAL;
return *(time_usec + val);
}
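/*
 * Worked example for the enable-time calculation below: with an LP8725
 * timestep of 32us and a start-delay field of 3 in the VOUT register,
 * enable_time() reports 3 * 32 = 96us. Delay fields above MAX_DELAY
 * are treated as "no delay" and report 0.
 */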
static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
enum lp872x_regulator_id rid = rdev_get_id(rdev);
int time_step_us = lp872x_get_timestep_usec(lp);
int ret;
u8 addr, val;
if (time_step_us < 0)
return time_step_us;
switch (rid) {
case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
addr = LP872X_LDO1_VOUT + rid;
break;
case LP8725_ID_LDO1 ... LP8725_ID_BUCK1:
addr = LP872X_LDO1_VOUT + rid - LP8725_ID_BASE;
break;
case LP8725_ID_BUCK2:
addr = LP8725_BUCK2_VOUT1;
break;
default:
return -EINVAL;
}
ret = lp872x_read_byte(lp, addr, &val);
if (ret)
return ret;
val = (val & LP872X_START_DELAY_M) >> LP872X_START_DELAY_S;
return val > MAX_DELAY ? 0 : val * time_step_us;
}
static void lp872x_set_dvs(struct lp872x *lp, enum lp872x_dvs_sel dvs_sel,
struct gpio_desc *gpio)
{
enum gpiod_flags state;
state = dvs_sel == SEL_V1 ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
gpiod_set_value(gpio, state);
lp->dvs_pin = state;
}
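/*
 * The DVS pin level picks one of the two buck VOUT registers; the
 * driver mirrors the last driven level in lp->dvs_pin so that
 * lp872x_select_buck_vout_addr() below can tell which register is
 * currently active.
 */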
static u8 lp872x_select_buck_vout_addr(struct lp872x *lp,
enum lp872x_regulator_id buck)
{
u8 val, addr;
if (lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val))
return 0;
switch (buck) {
case LP8720_ID_BUCK:
if (val & LP8720_EXT_DVS_M) {
addr = (lp->dvs_pin == GPIOD_OUT_HIGH) ?
LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
} else {
if (lp872x_read_byte(lp, LP8720_ENABLE, &val))
return 0;
addr = val & LP8720_DVS_SEL_M ?
LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
}
break;
case LP8725_ID_BUCK1:
if (val & LP8725_DVS1_M)
addr = LP8725_BUCK1_VOUT1;
else
addr = (lp->dvs_pin == GPIOD_OUT_HIGH) ?
LP8725_BUCK1_VOUT1 : LP8725_BUCK1_VOUT2;
break;
case LP8725_ID_BUCK2:
addr = val & LP8725_DVS2_M ?
LP8725_BUCK2_VOUT1 : LP8725_BUCK2_VOUT2;
break;
default:
return 0;
}
return addr;
}
static bool lp872x_is_valid_buck_addr(u8 addr)
{
switch (addr) {
case LP8720_BUCK_VOUT1:
case LP8720_BUCK_VOUT2:
case LP8725_BUCK1_VOUT1:
case LP8725_BUCK1_VOUT2:
case LP8725_BUCK2_VOUT1:
case LP8725_BUCK2_VOUT2:
return true;
default:
return false;
}
}
static int lp872x_buck_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
enum lp872x_regulator_id buck = rdev_get_id(rdev);
u8 addr, mask = LP872X_VOUT_M;
struct lp872x_dvs *dvs = lp->pdata ? lp->pdata->dvs : NULL;
if (dvs && dvs->gpio)
lp872x_set_dvs(lp, dvs->vsel, dvs->gpio);
addr = lp872x_select_buck_vout_addr(lp, buck);
if (!lp872x_is_valid_buck_addr(addr))
return -EINVAL;
return lp872x_update_bits(lp, addr, mask, selector);
}
static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
enum lp872x_regulator_id buck = rdev_get_id(rdev);
u8 addr, val;
int ret;
addr = lp872x_select_buck_vout_addr(lp, buck);
if (!lp872x_is_valid_buck_addr(addr))
return -EINVAL;
ret = lp872x_read_byte(lp, addr, &val);
if (ret)
return ret;
return val & LP872X_VOUT_M;
}
static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
enum lp872x_regulator_id buck = rdev_get_id(rdev);
u8 addr, mask, shift, val;
switch (buck) {
case LP8720_ID_BUCK:
addr = LP8720_BUCK_VOUT2;
mask = LP8720_BUCK_FPWM_M;
shift = LP8720_BUCK_FPWM_S;
break;
case LP8725_ID_BUCK1:
addr = LP8725_BUCK_CTRL;
mask = LP8725_BUCK1_FPWM_M;
shift = LP8725_BUCK1_FPWM_S;
break;
case LP8725_ID_BUCK2:
addr = LP8725_BUCK_CTRL;
mask = LP8725_BUCK2_FPWM_M;
shift = LP8725_BUCK2_FPWM_S;
break;
default:
return -EINVAL;
}
if (mode == REGULATOR_MODE_FAST)
val = LP872X_FORCE_PWM << shift;
else if (mode == REGULATOR_MODE_NORMAL)
val = LP872X_AUTO_PWM << shift;
else
return -EINVAL;
return lp872x_update_bits(lp, addr, mask, val);
}
static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
enum lp872x_regulator_id buck = rdev_get_id(rdev);
u8 addr, mask, val;
int ret;
switch (buck) {
case LP8720_ID_BUCK:
addr = LP8720_BUCK_VOUT2;
mask = LP8720_BUCK_FPWM_M;
break;
case LP8725_ID_BUCK1:
addr = LP8725_BUCK_CTRL;
mask = LP8725_BUCK1_FPWM_M;
break;
case LP8725_ID_BUCK2:
addr = LP8725_BUCK_CTRL;
mask = LP8725_BUCK2_FPWM_M;
break;
default:
return -EINVAL;
}
ret = lp872x_read_byte(lp, addr, &val);
if (ret)
return ret;
return val & mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
static const struct regulator_ops lp872x_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable_time = lp872x_regulator_enable_time,
};
static const struct regulator_ops lp8720_buck_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp872x_buck_set_voltage_sel,
.get_voltage_sel = lp872x_buck_get_voltage_sel,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable_time = lp872x_regulator_enable_time,
.set_mode = lp872x_buck_set_mode,
.get_mode = lp872x_buck_get_mode,
};
static const struct regulator_ops lp8725_buck_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp872x_buck_set_voltage_sel,
.get_voltage_sel = lp872x_buck_get_voltage_sel,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable_time = lp872x_regulator_enable_time,
.set_mode = lp872x_buck_set_mode,
.get_mode = lp872x_buck_get_mode,
.set_current_limit = regulator_set_current_limit_regmap,
.get_current_limit = regulator_get_current_limit_regmap,
};
static const struct regulator_desc lp8720_regulator_desc[] = {
{
.name = "ldo1",
.of_match = of_match_ptr("ldo1"),
.id = LP8720_ID_LDO1,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO1_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8720_ENABLE,
.enable_mask = LP872X_EN_LDO1_M,
},
{
.name = "ldo2",
.of_match = of_match_ptr("ldo2"),
.id = LP8720_ID_LDO2,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO2_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8720_ENABLE,
.enable_mask = LP872X_EN_LDO2_M,
},
{
.name = "ldo3",
.of_match = of_match_ptr("ldo3"),
.id = LP8720_ID_LDO3,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO3_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8720_ENABLE,
.enable_mask = LP872X_EN_LDO3_M,
},
{
.name = "ldo4",
.of_match = of_match_ptr("ldo4"),
.id = LP8720_ID_LDO4,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp8720_ldo4_vtbl),
.volt_table = lp8720_ldo4_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO4_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8720_ENABLE,
.enable_mask = LP872X_EN_LDO4_M,
},
{
.name = "ldo5",
.of_match = of_match_ptr("ldo5"),
.id = LP8720_ID_LDO5,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO5_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8720_ENABLE,
.enable_mask = LP872X_EN_LDO5_M,
},
{
.name = "buck",
.of_match = of_match_ptr("buck"),
.id = LP8720_ID_BUCK,
.ops = &lp8720_buck_ops,
.n_voltages = ARRAY_SIZE(lp8720_buck_vtbl),
.volt_table = lp8720_buck_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = LP8720_ENABLE,
.enable_mask = LP8720_EN_BUCK_M,
},
};
static const struct regulator_desc lp8725_regulator_desc[] = {
{
.name = "ldo1",
.of_match = of_match_ptr("ldo1"),
.id = LP8725_ID_LDO1,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO1_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8725_LDO_CTRL,
.enable_mask = LP872X_EN_LDO1_M,
},
{
.name = "ldo2",
.of_match = of_match_ptr("ldo2"),
.id = LP8725_ID_LDO2,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO2_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8725_LDO_CTRL,
.enable_mask = LP872X_EN_LDO2_M,
},
{
.name = "ldo3",
.of_match = of_match_ptr("ldo3"),
.id = LP8725_ID_LDO3,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO3_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8725_LDO_CTRL,
.enable_mask = LP872X_EN_LDO3_M,
},
{
.name = "ldo4",
.of_match = of_match_ptr("ldo4"),
.id = LP8725_ID_LDO4,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO4_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8725_LDO_CTRL,
.enable_mask = LP872X_EN_LDO4_M,
},
{
.name = "ldo5",
.of_match = of_match_ptr("ldo5"),
.id = LP8725_ID_LDO5,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
.volt_table = lp872x_ldo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP872X_LDO5_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8725_LDO_CTRL,
.enable_mask = LP872X_EN_LDO5_M,
},
{
.name = "lilo1",
.of_match = of_match_ptr("lilo1"),
.id = LP8725_ID_LILO1,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl),
.volt_table = lp8725_lilo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP8725_LILO1_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8725_LDO_CTRL,
.enable_mask = LP8725_EN_LILO1_M,
},
{
.name = "lilo2",
.of_match = of_match_ptr("lilo2"),
.id = LP8725_ID_LILO2,
.ops = &lp872x_ldo_ops,
.n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl),
.volt_table = lp8725_lilo_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP8725_LILO2_VOUT,
.vsel_mask = LP872X_VOUT_M,
.enable_reg = LP8725_LDO_CTRL,
.enable_mask = LP8725_EN_LILO2_M,
},
{
.name = "buck1",
.of_match = of_match_ptr("buck1"),
.id = LP8725_ID_BUCK1,
.ops = &lp8725_buck_ops,
.n_voltages = ARRAY_SIZE(lp8725_buck_vtbl),
.volt_table = lp8725_buck_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = LP872X_GENERAL_CFG,
.enable_mask = LP8725_BUCK1_EN_M,
.curr_table = lp8725_buck_uA,
.n_current_limits = ARRAY_SIZE(lp8725_buck_uA),
.csel_reg = LP8725_BUCK1_VOUT2,
.csel_mask = LP8725_BUCK_CL_M,
},
{
.name = "buck2",
.of_match = of_match_ptr("buck2"),
.id = LP8725_ID_BUCK2,
.ops = &lp8725_buck_ops,
.n_voltages = ARRAY_SIZE(lp8725_buck_vtbl),
.volt_table = lp8725_buck_vtbl,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = LP872X_GENERAL_CFG,
.enable_mask = LP8725_BUCK2_EN_M,
.curr_table = lp8725_buck_uA,
.n_current_limits = ARRAY_SIZE(lp8725_buck_uA),
.csel_reg = LP8725_BUCK2_VOUT2,
.csel_mask = LP8725_BUCK_CL_M,
},
};
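/*
 * The LDOs use the generic regmap voltage helpers because their vsel
 * register is fixed and named in the descriptor. The bucks omit
 * vsel_reg on purpose: the active VOUT register depends on the DVS
 * state, so they go through the custom set/get callbacks above.
 */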
static int lp872x_init_dvs(struct lp872x *lp)
{
struct lp872x_dvs *dvs = lp->pdata ? lp->pdata->dvs : NULL;
enum gpiod_flags pinstate;
u8 mask[] = { LP8720_EXT_DVS_M, LP8725_DVS1_M | LP8725_DVS2_M };
u8 default_dvs_mode[] = { LP8720_DEFAULT_DVS, LP8725_DEFAULT_DVS };
if (!dvs)
goto set_default_dvs_mode;
if (!dvs->gpio)
goto set_default_dvs_mode;
pinstate = dvs->init_state;
dvs->gpio = devm_gpiod_get_optional(lp->dev, "ti,dvs", pinstate);
if (IS_ERR(dvs->gpio)) {
dev_err(lp->dev, "gpio request err: %ld\n", PTR_ERR(dvs->gpio));
return PTR_ERR(dvs->gpio);
}
lp->dvs_pin = pinstate;
return 0;
set_default_dvs_mode:
return lp872x_update_bits(lp, LP872X_GENERAL_CFG, mask[lp->chipid],
default_dvs_mode[lp->chipid]);
}
static int lp872x_hw_enable(struct lp872x *lp)
{
if (!lp->pdata)
return -EINVAL;
if (!lp->pdata->enable_gpio)
return 0;
/* Always set enable GPIO high. */
lp->pdata->enable_gpio = devm_gpiod_get_optional(lp->dev, "enable", GPIOD_OUT_HIGH);
if (IS_ERR(lp->pdata->enable_gpio)) {
dev_err(lp->dev, "gpio request err: %ld\n", PTR_ERR(lp->pdata->enable_gpio));
return PTR_ERR(lp->pdata->enable_gpio);
}
/* Each chip has a different enable delay. */
if (lp->chipid == LP8720)
usleep_range(LP8720_ENABLE_DELAY, 1.5 * LP8720_ENABLE_DELAY);
else
usleep_range(LP8725_ENABLE_DELAY, 1.5 * LP8725_ENABLE_DELAY);
return 0;
}
static int lp872x_config(struct lp872x *lp)
{
struct lp872x_platform_data *pdata = lp->pdata;
int ret;
if (!pdata || !pdata->update_config)
goto init_dvs;
ret = lp872x_write_byte(lp, LP872X_GENERAL_CFG, pdata->general_config);
if (ret)
return ret;
init_dvs:
return lp872x_init_dvs(lp);
}
static struct regulator_init_data
*lp872x_find_regulator_init_data(int id, struct lp872x *lp)
{
struct lp872x_platform_data *pdata = lp->pdata;
int i;
if (!pdata)
return NULL;
for (i = 0; i < lp->num_regulators; i++) {
if (pdata->regulator_data[i].id == id)
return pdata->regulator_data[i].init_data;
}
return NULL;
}
static int lp872x_regulator_register(struct lp872x *lp)
{
const struct regulator_desc *desc;
struct regulator_config cfg = { };
struct regulator_dev *rdev;
int i;
for (i = 0; i < lp->num_regulators; i++) {
desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] :
&lp8725_regulator_desc[i];
cfg.dev = lp->dev;
cfg.init_data = lp872x_find_regulator_init_data(desc->id, lp);
cfg.driver_data = lp;
cfg.regmap = lp->regmap;
rdev = devm_regulator_register(lp->dev, desc, &cfg);
if (IS_ERR(rdev)) {
dev_err(lp->dev, "regulator register err\n");
return PTR_ERR(rdev);
}
}
return 0;
}
static const struct regmap_config lp872x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = MAX_REGISTERS,
};
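/*
 * The regmap config above uses 8-bit register addresses and values;
 * max_register bounds the register dump exposed through regmap-debugfs
 * (see MAX_REGISTERS above).
 */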
#ifdef CONFIG_OF
#define LP872X_VALID_OPMODE (REGULATOR_MODE_FAST | REGULATOR_MODE_NORMAL)
static struct of_regulator_match lp8720_matches[] = {
{ .name = "ldo1", .driver_data = (void *)LP8720_ID_LDO1, },
{ .name = "ldo2", .driver_data = (void *)LP8720_ID_LDO2, },
{ .name = "ldo3", .driver_data = (void *)LP8720_ID_LDO3, },
{ .name = "ldo4", .driver_data = (void *)LP8720_ID_LDO4, },
{ .name = "ldo5", .driver_data = (void *)LP8720_ID_LDO5, },
{ .name = "buck", .driver_data = (void *)LP8720_ID_BUCK, },
};
static struct of_regulator_match lp8725_matches[] = {
{ .name = "ldo1", .driver_data = (void *)LP8725_ID_LDO1, },
{ .name = "ldo2", .driver_data = (void *)LP8725_ID_LDO2, },
{ .name = "ldo3", .driver_data = (void *)LP8725_ID_LDO3, },
{ .name = "ldo4", .driver_data = (void *)LP8725_ID_LDO4, },
{ .name = "ldo5", .driver_data = (void *)LP8725_ID_LDO5, },
{ .name = "lilo1", .driver_data = (void *)LP8725_ID_LILO1, },
{ .name = "lilo2", .driver_data = (void *)LP8725_ID_LILO2, },
{ .name = "buck1", .driver_data = (void *)LP8725_ID_BUCK1, },
{ .name = "buck2", .driver_data = (void *)LP8725_ID_BUCK2, },
};
static struct lp872x_platform_data
*lp872x_populate_pdata_from_dt(struct device *dev, enum lp872x_id which)
{
struct device_node *np = dev->of_node;
struct lp872x_platform_data *pdata;
struct of_regulator_match *match;
int num_matches;
int count;
int i;
u8 dvs_state;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
of_property_read_u8(np, "ti,general-config", &pdata->general_config);
pdata->update_config = of_property_read_bool(np, "ti,update-config");
pdata->dvs = devm_kzalloc(dev, sizeof(struct lp872x_dvs), GFP_KERNEL);
if (!pdata->dvs)
return ERR_PTR(-ENOMEM);
of_property_read_u8(np, "ti,dvs-vsel", (u8 *)&pdata->dvs->vsel);
of_property_read_u8(np, "ti,dvs-state", &dvs_state);
pdata->dvs->init_state = dvs_state ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
if (of_get_child_count(np) == 0)
goto out;
switch (which) {
case LP8720:
match = lp8720_matches;
num_matches = ARRAY_SIZE(lp8720_matches);
break;
case LP8725:
match = lp8725_matches;
num_matches = ARRAY_SIZE(lp8725_matches);
break;
default:
goto out;
}
count = of_regulator_match(dev, np, match, num_matches);
if (count <= 0)
goto out;
for (i = 0; i < num_matches; i++) {
pdata->regulator_data[i].id =
(uintptr_t)match[i].driver_data;
pdata->regulator_data[i].init_data = match[i].init_data;
}
out:
return pdata;
}
#else
static struct lp872x_platform_data
*lp872x_populate_pdata_from_dt(struct device *dev, enum lp872x_id which)
{
return NULL;
}
#endif
static int lp872x_probe(struct i2c_client *cl)
{
const struct i2c_device_id *id = i2c_client_get_device_id(cl);
struct lp872x *lp;
struct lp872x_platform_data *pdata;
int ret;
static const int lp872x_num_regulators[] = {
[LP8720] = LP8720_NUM_REGULATORS,
[LP8725] = LP8725_NUM_REGULATORS,
};
if (cl->dev.of_node) {
pdata = lp872x_populate_pdata_from_dt(&cl->dev,
(enum lp872x_id)id->driver_data);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
} else {
pdata = dev_get_platdata(&cl->dev);
}
lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
if (!lp)
return -ENOMEM;
lp->num_regulators = lp872x_num_regulators[id->driver_data];
lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
if (IS_ERR(lp->regmap)) {
ret = PTR_ERR(lp->regmap);
dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
return ret;
}
lp->dev = &cl->dev;
lp->pdata = pdata;
lp->chipid = id->driver_data;
i2c_set_clientdata(cl, lp);
ret = lp872x_hw_enable(lp);
if (ret)
return ret;
ret = lp872x_config(lp);
if (ret)
return ret;
return lp872x_regulator_register(lp);
}
static const struct of_device_id lp872x_dt_ids[] __maybe_unused = {
{ .compatible = "ti,lp8720", },
{ .compatible = "ti,lp8725", },
{ }
};
MODULE_DEVICE_TABLE(of, lp872x_dt_ids);
static const struct i2c_device_id lp872x_ids[] = {
{"lp8720", LP8720},
{"lp8725", LP8725},
{ }
};
MODULE_DEVICE_TABLE(i2c, lp872x_ids);
static struct i2c_driver lp872x_driver = {
.driver = {
.name = "lp872x",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(lp872x_dt_ids),
},
.probe = lp872x_probe,
.id_table = lp872x_ids,
};
module_i2c_driver(lp872x_driver);
MODULE_DESCRIPTION("TI/National Semiconductor LP872x PMU Regulator Driver");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Copyright (c) 2018 Theobroma Systems Design und Consulting GmbH
*/
/dts-v1/;
#include "rk3368-lion.dtsi"
/ {
model = "Theobroma Systems RK3368-uQ7 Baseboard";
compatible = "tsd,rk3368-lion-haikou", "rockchip,rk3368";
aliases {
mmc1 = &sdmmc;
};
chosen {
stdout-path = "serial0:115200n8";
};
i2cmux2 {
i2c@0 {
eeprom: eeprom@50 {
compatible = "atmel,24c01";
pagesize = <8>;
reg = <0x50>;
};
};
};
leds {
pinctrl-0 = <&module_led_pins>, <&sd_card_led_pin>;
sd_card_led: led-3 {
label = "sd_card_led";
gpios = <&gpio0 RK_PD2 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "mmc0";
};
};
dc_12v: regulator-dc-12v {
compatible = "regulator-fixed";
regulator-name = "dc_12v";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <12000000>;
regulator-max-microvolt = <12000000>;
};
vcc3v3_baseboard: regulator-vcc3v3-baseboard {
compatible = "regulator-fixed";
regulator-name = "vcc3v3_baseboard";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&dc_12v>;
};
vcc5v0_otg: regulator-vcc5v0-otg {
compatible = "regulator-fixed";
enable-active-high;
gpio = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&otg_vbus_drv>;
regulator-name = "vcc5v0_otg";
regulator-always-on;
};
};
&sdmmc {
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
cd-gpios = <&gpio2 RK_PB3 GPIO_ACTIVE_LOW>;
disable-wp;
max-frequency = <25000000>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
rockchip,default-sample-phase = <90>;
vmmc-supply = <&vcc3v3_baseboard>;
status = "okay";
};
&spi2 {
cs-gpios = <0>, <&gpio2 RK_PC3 GPIO_ACTIVE_LOW>;
status = "okay";
};
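/*
 * Per the generic SPI binding, an all-zero cs-gpios entry means the
 * controller's native chip-select is used for that slot, so CS0 in the
 * spi2 node above stays on the native line while CS1 is driven through
 * GPIO2_C3.
 */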
&usb_otg {
dr_mode = "otg";
status = "okay";
};
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
status = "okay";
};
&uart1 {
/* alternate function of GPIO5/6 */
status = "disabled";
};
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&haikou_pin_hog>;
hog {
haikou_pin_hog: haikou-pin-hog {
rockchip,pins =
/* LID_BTN */
<3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>,
/* BATLOW# */
<0 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>,
/* SLP_BTN# */
<3 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>,
/* BIOS_DISABLE# */
<3 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
leds {
sd_card_led_pin: sd-card-led-pin {
rockchip,pins =
<0 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sdmmc {
sdmmc_cd_pin: sdmmc-cd-pin {
rockchip,pins =
<2 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb_otg {
otg_vbus_drv: otg-vbus-drv {
rockchip,pins =
<0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
// SPDX-License-Identifier: GPL-2.0-only
/*
* IEEE 802.1Q Multiple Registration Protocol (MRP)
*
* Copyright (c) 2012 Massachusetts Institute of Technology
*
* Adapted from code in net/802/garp.c
* Copyright (c) 2008 Patrick McHardy <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <linux/unaligned.h>
static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
MODULE_DESCRIPTION("IEEE 802.1Q Multiple Registration Protocol (MRP)");
MODULE_LICENSE("GPL");
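/*
 * Applicant state machine from IEEE 802.1Q: the table below is indexed
 * as [current state][event] and yields the next state. Entries that
 * are never listed decay to MRP_APPLICANT_INVALID (value 0), which
 * mrp_attr_event() treats as a bug and warns about.
 */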
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
[MRP_APPLICANT_VO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
[MRP_EVENT_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_TX] = MRP_APPLICANT_VO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
},
[MRP_APPLICANT_VP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
[MRP_EVENT_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_TX] = MRP_APPLICANT_AA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
},
[MRP_APPLICANT_VN] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_AN,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
},
[MRP_APPLICANT_AN] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_AN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
},
[MRP_APPLICANT_AA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
},
[MRP_APPLICANT_QA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
},
[MRP_APPLICANT_LA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_VO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
},
[MRP_APPLICANT_AO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
[MRP_EVENT_LV] = MRP_APPLICANT_AO,
[MRP_EVENT_TX] = MRP_APPLICANT_AO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
},
[MRP_APPLICANT_QO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
[MRP_EVENT_LV] = MRP_APPLICANT_QO,
[MRP_EVENT_TX] = MRP_APPLICANT_QO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
},
[MRP_APPLICANT_AP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
[MRP_EVENT_LV] = MRP_APPLICANT_AO,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
},
[MRP_APPLICANT_QP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
[MRP_EVENT_LV] = MRP_APPLICANT_QO,
[MRP_EVENT_TX] = MRP_APPLICANT_QP,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
},
};
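/*
 * Transmit actions per applicant state: when a TX event fires,
 * mrp_attr_event() looks the current state up in the table below to
 * decide which vector event (New, JoinIn, Lv, ...) to encode into the
 * outgoing PDU; the *_OPTIONAL actions transmit nothing in this
 * implementation.
 */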
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
static void mrp_attrvalue_inc(void *value, u8 len)
{
u8 *v = (u8 *)value;
/* Add 1 to the last byte. If it becomes zero, go to the previous
 * byte and repeat: a big-endian multi-byte increment, e.g.
 * {0x00, 0xff} becomes {0x01, 0x00}.
 */
while (len > 0 && !++v[--len])
;
}
static int mrp_attr_cmp(const struct mrp_attr *attr,
const void *value, u8 len, u8 type)
{
if (attr->type != type)
return attr->type - type;
if (attr->len != len)
return attr->len - len;
return memcmp(attr->value, value, len);
}
static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
const void *value, u8 len, u8 type)
{
struct rb_node *parent = app->mad.rb_node;
struct mrp_attr *attr;
int d;
while (parent) {
attr = rb_entry(parent, struct mrp_attr, node);
d = mrp_attr_cmp(attr, value, len, type);
if (d > 0)
parent = parent->rb_left;
else if (d < 0)
parent = parent->rb_right;
else
return attr;
}
return NULL;
}
static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
const void *value, u8 len, u8 type)
{
struct rb_node *parent = NULL, **p = &app->mad.rb_node;
struct mrp_attr *attr;
int d;
while (*p) {
parent = *p;
attr = rb_entry(parent, struct mrp_attr, node);
d = mrp_attr_cmp(attr, value, len, type);
if (d > 0)
p = &parent->rb_left;
else if (d < 0)
p = &parent->rb_right;
else {
/* The attribute already exists; re-use it. */
return attr;
}
}
attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
if (!attr)
return attr;
attr->state = MRP_APPLICANT_VO;
attr->type = type;
attr->len = len;
memcpy(attr->value, value, len);
rb_link_node(&attr->node, parent, p);
rb_insert_color(&attr->node, &app->mad);
return attr;
}
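/*
 * Attributes live in an rbtree keyed by (type, len, value) via
 * mrp_attr_cmp(); mrp_attr_create() above re-uses an existing node
 * instead of inserting a duplicate.
 */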
static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
rb_erase(&attr->node, &app->mad);
kfree(attr);
}
static void mrp_attr_destroy_all(struct mrp_applicant *app)
{
struct rb_node *node, *next;
struct mrp_attr *attr;
for (node = rb_first(&app->mad);
next = node ? rb_next(node) : NULL, node != NULL;
node = next) {
attr = rb_entry(node, struct mrp_attr, node);
mrp_attr_destroy(app, attr);
}
}
static int mrp_pdu_init(struct mrp_applicant *app)
{
struct sk_buff *skb;
struct mrp_pdu_hdr *ph;
skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
GFP_ATOMIC);
if (!skb)
return -ENOMEM;
skb->dev = app->dev;
skb->protocol = app->app->pkttype.type;
skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
ph = __skb_put(skb, sizeof(*ph));
ph->version = app->app->version;
app->pdu = skb;
return 0;
}
static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
__be16 *endmark;
if (skb_tailroom(app->pdu) < sizeof(*endmark))
return -1;
endmark = __skb_put(app->pdu, sizeof(*endmark));
put_unaligned(MRP_END_MARK, endmark);
return 0;
}
static void mrp_pdu_queue(struct mrp_applicant *app)
{
if (!app->pdu)
return;
if (mrp_cb(app->pdu)->mh)
mrp_pdu_append_end_mark(app);
mrp_pdu_append_end_mark(app);
dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
app->app->group_address, app->dev->dev_addr,
app->pdu->len);
skb_queue_tail(&app->queue, app->pdu);
app->pdu = NULL;
}
static void mrp_queue_xmit(struct mrp_applicant *app)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&app->queue)))
dev_queue_xmit(skb);
}
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
u8 attrtype, u8 attrlen)
{
struct mrp_msg_hdr *mh;
if (mrp_cb(app->pdu)->mh) {
if (mrp_pdu_append_end_mark(app) < 0)
return -1;
mrp_cb(app->pdu)->mh = NULL;
mrp_cb(app->pdu)->vah = NULL;
}
if (skb_tailroom(app->pdu) < sizeof(*mh))
return -1;
mh = __skb_put(app->pdu, sizeof(*mh));
mh->attrtype = attrtype;
mh->attrlen = attrlen;
mrp_cb(app->pdu)->mh = mh;
return 0;
}
static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
const void *firstattrvalue, u8 attrlen)
{
struct mrp_vecattr_hdr *vah;
if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
return -1;
vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
put_unaligned(0, &vah->lenflags);
memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
mrp_cb(app->pdu)->vah = vah;
memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
return 0;
}
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
const struct mrp_attr *attr,
enum mrp_vecattr_event vaevent)
{
u16 len, pos;
u8 *vaevents;
int err;
again:
if (!app->pdu) {
err = mrp_pdu_init(app);
if (err < 0)
return err;
}
/* If there is no Message header in the PDU, or the Message header is
* for a different attribute type, add an EndMark (if necessary) and a
* new Message header to the PDU.
*/
if (!mrp_cb(app->pdu)->mh ||
mrp_cb(app->pdu)->mh->attrtype != attr->type ||
mrp_cb(app->pdu)->mh->attrlen != attr->len) {
if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
goto queue;
}
/* If there is no VectorAttribute header for this Message in the PDU,
* or this attribute's value does not sequentially follow the previous
* attribute's value, add a new VectorAttribute header to the PDU.
*/
if (!mrp_cb(app->pdu)->vah ||
memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
goto queue;
}
len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
pos = len % 3;
/* Events are packed into Vectors in the PDU, three to a byte. Add a
* byte to the end of the Vector if necessary.
*/
if (!pos) {
if (skb_tailroom(app->pdu) < sizeof(u8))
goto queue;
vaevents = __skb_put(app->pdu, sizeof(u8));
} else {
vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
}
switch (pos) {
case 0:
*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
break;
case 1:
*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
break;
case 2:
*vaevents += vaevent;
break;
default:
WARN_ON(1);
}
/* Increment the length of the VectorAttribute in the PDU, as well as
* the value of the next attribute that would continue its Vector.
*/
put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);
return 0;
queue:
mrp_pdu_queue(app);
goto again;
}
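/*
 * Vector packing used above and in the parser below: with the six
 * vector events defined in <net/mrp.h> (__MRP_VECATTR_EVENT_MAX == 6),
 * three events fit in one byte as e1 * 36 + e2 * 6 + e3, and
 * "len % 3" selects which of the three slots the next event occupies.
 */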
static void mrp_attr_event(struct mrp_applicant *app,
struct mrp_attr *attr, enum mrp_event event)
{
enum mrp_applicant_state state;
state = mrp_applicant_state_table[attr->state][event];
if (state == MRP_APPLICANT_INVALID) {
WARN_ON(1);
return;
}
if (event == MRP_EVENT_TX) {
/* When appending the attribute fails, don't update its state
* in order to retry at the next TX event.
*/
switch (mrp_tx_action_table[attr->state]) {
case MRP_TX_ACTION_NONE:
case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
case MRP_TX_ACTION_S_IN_OPTIONAL:
break;
case MRP_TX_ACTION_S_NEW:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_NEW) < 0)
return;
break;
case MRP_TX_ACTION_S_JOIN_IN:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
return;
break;
case MRP_TX_ACTION_S_LV:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_LV) < 0)
return;
/* As a pure applicant, sending a leave message
* implies that the attribute was unregistered and
* can be destroyed.
*/
mrp_attr_destroy(app, attr);
return;
default:
WARN_ON(1);
}
}
attr->state = state;
}
int mrp_request_join(const struct net_device *dev,
const struct mrp_application *appl,
const void *value, u8 len, u8 type)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
struct mrp_attr *attr;
if (sizeof(struct mrp_skb_cb) + len >
sizeof_field(struct sk_buff, cb))
return -ENOMEM;
spin_lock_bh(&app->lock);
attr = mrp_attr_create(app, value, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return -ENOMEM;
}
mrp_attr_event(app, attr, MRP_EVENT_JOIN);
spin_unlock_bh(&app->lock);
return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);
void mrp_request_leave(const struct net_device *dev,
const struct mrp_application *appl,
const void *value, u8 len, u8 type)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
struct mrp_attr *attr;
if (sizeof(struct mrp_skb_cb) + len >
sizeof_field(struct sk_buff, cb))
return;
spin_lock_bh(&app->lock);
attr = mrp_attr_lookup(app, value, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return;
}
mrp_attr_event(app, attr, MRP_EVENT_LV);
spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);
static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
struct rb_node *node, *next;
struct mrp_attr *attr;
for (node = rb_first(&app->mad);
next = node ? rb_next(node) : NULL, node != NULL;
node = next) {
attr = rb_entry(node, struct mrp_attr, node);
mrp_attr_event(app, attr, event);
}
}
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
unsigned long delay;
delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
static void mrp_join_timer(struct timer_list *t)
{
struct mrp_applicant *app = from_timer(app, t, join_timer);
spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_pdu_queue(app);
spin_unlock(&app->lock);
mrp_queue_xmit(app);
spin_lock(&app->lock);
if (likely(app->active))
mrp_join_timer_arm(app);
spin_unlock(&app->lock);
}
static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
mod_timer(&app->periodic_timer,
jiffies + msecs_to_jiffies(mrp_periodic_time));
}
static void mrp_periodic_timer(struct timer_list *t)
{
struct mrp_applicant *app = from_timer(app, t, periodic_timer);
spin_lock(&app->lock);
if (likely(app->active)) {
mrp_mad_event(app, MRP_EVENT_PERIODIC);
mrp_pdu_queue(app);
mrp_periodic_timer_arm(app);
}
spin_unlock(&app->lock);
}
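/* Returns -1 whenever parsing of the current level must stop: either the
 * end mark was found (the offset is advanced past it) or the PDU is
 * truncated. Returns 0 if more attribute data follows.
 */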
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
__be16 endmark;
if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
return -1;
if (endmark == MRP_END_MARK) {
*offset += sizeof(endmark);
return -1;
}
return 0;
}
static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
struct sk_buff *skb,
enum mrp_vecattr_event vaevent)
{
struct mrp_attr *attr;
enum mrp_event event;
attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen,
mrp_cb(skb)->mh->attrtype);
if (attr == NULL)
return;
switch (vaevent) {
case MRP_VECATTR_EVENT_NEW:
event = MRP_EVENT_R_NEW;
break;
case MRP_VECATTR_EVENT_JOIN_IN:
event = MRP_EVENT_R_JOIN_IN;
break;
case MRP_VECATTR_EVENT_IN:
event = MRP_EVENT_R_IN;
break;
case MRP_VECATTR_EVENT_JOIN_MT:
event = MRP_EVENT_R_JOIN_MT;
break;
case MRP_VECATTR_EVENT_MT:
event = MRP_EVENT_R_MT;
break;
case MRP_VECATTR_EVENT_LV:
event = MRP_EVENT_R_LV;
break;
default:
return;
}
mrp_attr_event(app, attr, event);
}
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
struct sk_buff *skb, int *offset)
{
struct mrp_vecattr_hdr _vah;
u16 valen;
u8 vaevents, vaevent;
mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
&_vah);
if (!mrp_cb(skb)->vah)
return -1;
*offset += sizeof(_vah);
if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
MRP_VECATTR_HDR_FLAG_LA)
mrp_mad_event(app, MRP_EVENT_R_LA);
valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
MRP_VECATTR_HDR_LEN_MASK);
/* The VectorAttribute structure in a PDU carries event information
* about one or more attributes having consecutive values. Only the
* value for the first attribute is contained in the structure. So
* we make a copy of that value, and then increment it each time we
* advance to the next event in its Vector.
*/
if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
sizeof_field(struct sk_buff, cb))
return -1;
if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen) < 0)
return -1;
*offset += mrp_cb(skb)->mh->attrlen;
/* In a VectorAttribute, the Vector contains events which are packed
* three to a byte. We process one byte of the Vector at a time.
*/
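/* The loop below is the inverse of the packing done on transmit: with
 * __MRP_VECATTR_EVENT_MAX == 6, a Vector byte of 11 decodes as
 * 11 / 36 = 0 (NEW), (11 % 36) / 6 = 1 (JOIN_IN) and 11 % 6 = 5 (LV).
 */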
while (valen > 0) {
if (skb_copy_bits(skb, *offset, &vaevents,
sizeof(vaevents)) < 0)
return -1;
*offset += sizeof(vaevents);
/* Extract and process the first event. */
vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
/* The byte is malformed; stop processing. */
return -1;
}
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
/* If present, extract and process the second event. */
if (!--valen)
break;
mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen);
vaevents %= (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
/* If present, extract and process the third event. */
if (!--valen)
break;
mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen);
vaevents %= __MRP_VECATTR_EVENT_MAX;
vaevent = vaevents;
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
}
return 0;
}
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
int *offset)
{
struct mrp_msg_hdr _mh;
mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
if (!mrp_cb(skb)->mh)
return -1;
*offset += sizeof(_mh);
if (mrp_cb(skb)->mh->attrtype == 0 ||
mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
mrp_cb(skb)->mh->attrlen == 0)
return -1;
while (skb->len > *offset) {
if (mrp_pdu_parse_end_mark(skb, offset) < 0)
break;
if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
return -1;
}
return 0;
}
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct mrp_application *appl = container_of(pt, struct mrp_application,
pkttype);
struct mrp_port *port;
struct mrp_applicant *app;
struct mrp_pdu_hdr _ph;
const struct mrp_pdu_hdr *ph;
int offset = skb_network_offset(skb);
/* If the interface is in promiscuous mode, drop the packet if
* it was unicast to another host.
*/
if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
goto out;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
goto out;
port = rcu_dereference(dev->mrp_port);
if (unlikely(!port))
goto out;
app = rcu_dereference(port->applicants[appl->type]);
if (unlikely(!app))
goto out;
ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
if (!ph)
goto out;
offset += sizeof(_ph);
if (ph->version != app->app->version)
goto out;
spin_lock(&app->lock);
while (skb->len > offset) {
if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
break;
if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
break;
}
spin_unlock(&app->lock);
out:
kfree_skb(skb);
return 0;
}
static int mrp_init_port(struct net_device *dev)
{
struct mrp_port *port;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
rcu_assign_pointer(dev->mrp_port, port);
return 0;
}
static void mrp_release_port(struct net_device *dev)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
unsigned int i;
for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
if (rtnl_dereference(port->applicants[i]))
return;
}
RCU_INIT_POINTER(dev->mrp_port, NULL);
kfree_rcu(port, rcu);
}
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
struct mrp_applicant *app;
int err;
ASSERT_RTNL();
if (!rtnl_dereference(dev->mrp_port)) {
err = mrp_init_port(dev);
if (err < 0)
goto err1;
}
err = -ENOMEM;
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
goto err2;
err = dev_mc_add(dev, appl->group_address);
if (err < 0)
goto err3;
app->dev = dev;
app->app = appl;
app->mad = RB_ROOT;
app->active = true;
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
timer_setup(&app->join_timer, mrp_join_timer, 0);
mrp_join_timer_arm(app);
timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
mrp_periodic_timer_arm(app);
return 0;
err3:
kfree(app);
err2:
mrp_release_port(dev);
err1:
return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
ASSERT_RTNL();
RCU_INIT_POINTER(port->applicants[appl->type], NULL);
spin_lock_bh(&app->lock);
app->active = false;
spin_unlock_bh(&app->lock);
/* Delete the timers and generate a final TX event to flush out
* all pending messages before the applicant is gone.
*/
timer_shutdown_sync(&app->join_timer);
timer_shutdown_sync(&app->periodic_timer);
spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_attr_destroy_all(app);
mrp_pdu_queue(app);
spin_unlock_bh(&app->lock);
mrp_queue_xmit(app);
dev_mc_del(dev, appl->group_address);
kfree_rcu(app, rcu);
mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
int mrp_register_application(struct mrp_application *appl)
{
appl->pkttype.func = mrp_rcv;
dev_add_pack(&appl->pkttype);
return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);
void mrp_unregister_application(struct mrp_application *appl)
{
dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* emc6w201.c - Hardware monitoring driver for the SMSC EMC6W201
* Copyright (C) 2011 Jean Delvare <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/*
* Addresses to scan
*/
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
/*
* The EMC6W201 registers
*/
#define EMC6W201_REG_IN(nr) (0x20 + (nr))
#define EMC6W201_REG_TEMP(nr) (0x26 + (nr))
#define EMC6W201_REG_FAN(nr) (0x2C + (nr) * 2)
#define EMC6W201_REG_COMPANY 0x3E
#define EMC6W201_REG_VERSTEP 0x3F
#define EMC6W201_REG_CONFIG 0x40
#define EMC6W201_REG_IN_LOW(nr) (0x4A + (nr) * 2)
#define EMC6W201_REG_IN_HIGH(nr) (0x4B + (nr) * 2)
#define EMC6W201_REG_TEMP_LOW(nr) (0x56 + (nr) * 2)
#define EMC6W201_REG_TEMP_HIGH(nr) (0x57 + (nr) * 2)
#define EMC6W201_REG_FAN_MIN(nr) (0x62 + (nr) * 2)
enum subfeature { input, min, max };
/*
* Per-device data
*/
struct emc6w201_data {
struct i2c_client *client;
struct mutex update_lock;
bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
u8 in[3][6];
s8 temp[3][6];
u16 fan[2][5];
};
/*
* Combine LSB and MSB registers in a single value
* Locking: must be called with data->update_lock held
*/
static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
{
int lsb, msb;
lsb = i2c_smbus_read_byte_data(client, reg);
msb = i2c_smbus_read_byte_data(client, reg + 1);
if (unlikely(lsb < 0 || msb < 0)) {
dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
16, "read", reg);
return 0xFFFF; /* Arbitrary value */
}
return (msb << 8) | lsb;
}
/*
* Write 16-bit value to LSB and MSB registers
* Locking: must be called with data->update_lock held
*/
static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
{
int err;
err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
if (likely(!err))
err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
if (unlikely(err < 0))
dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
16, "write", reg);
return err;
}
/* Read 8-bit value from register */
static u8 emc6w201_read8(struct i2c_client *client, u8 reg)
{
int val;
val = i2c_smbus_read_byte_data(client, reg);
if (unlikely(val < 0)) {
dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
8, "read", reg);
return 0x00; /* Arbitrary value */
}
return val;
}
/* Write 8-bit value to register */
static int emc6w201_write8(struct i2c_client *client, u8 reg, u8 val)
{
int err;
err = i2c_smbus_write_byte_data(client, reg, val);
if (unlikely(err < 0))
dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
8, "write", reg);
return err;
}
static struct emc6w201_data *emc6w201_update_device(struct device *dev)
{
struct emc6w201_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
int nr;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
for (nr = 0; nr < 6; nr++) {
data->in[input][nr] =
emc6w201_read8(client,
EMC6W201_REG_IN(nr));
data->in[min][nr] =
emc6w201_read8(client,
EMC6W201_REG_IN_LOW(nr));
data->in[max][nr] =
emc6w201_read8(client,
EMC6W201_REG_IN_HIGH(nr));
}
for (nr = 0; nr < 6; nr++) {
data->temp[input][nr] =
emc6w201_read8(client,
EMC6W201_REG_TEMP(nr));
data->temp[min][nr] =
emc6w201_read8(client,
EMC6W201_REG_TEMP_LOW(nr));
data->temp[max][nr] =
emc6w201_read8(client,
EMC6W201_REG_TEMP_HIGH(nr));
}
for (nr = 0; nr < 5; nr++) {
data->fan[input][nr] =
emc6w201_read16(client,
EMC6W201_REG_FAN(nr));
data->fan[min][nr] =
emc6w201_read16(client,
EMC6W201_REG_FAN_MIN(nr));
}
data->last_updated = jiffies;
data->valid = true;
}
mutex_unlock(&data->update_lock);
return data;
}
/*
* Sysfs callback functions
*/
static const s16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
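/*
 * A raw reading of 0xC0 corresponds to the nominal input voltage, so
 * in_show() below scales by nominal_mv[nr] / 0xC0. For example, a raw
 * in0 value of 0x60 reports 0x60 * 2500 / 0xC0 = 1250 mV.
 */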
static ssize_t in_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct emc6w201_data *data = emc6w201_update_device(dev);
int sf = to_sensor_dev_attr_2(devattr)->index;
int nr = to_sensor_dev_attr_2(devattr)->nr;
return sprintf(buf, "%u\n",
(unsigned)data->in[sf][nr] * nominal_mv[nr] / 0xC0);
}
static ssize_t in_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct emc6w201_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
int sf = to_sensor_dev_attr_2(devattr)->index;
int nr = to_sensor_dev_attr_2(devattr)->nr;
int err;
long val;
u8 reg;
err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
val = clamp_val(val, 0, 255 * nominal_mv[nr] / 192);
val = DIV_ROUND_CLOSEST(val * 192, nominal_mv[nr]);
reg = (sf == min) ? EMC6W201_REG_IN_LOW(nr)
: EMC6W201_REG_IN_HIGH(nr);
mutex_lock(&data->update_lock);
data->in[sf][nr] = val;
err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
}
static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct emc6w201_data *data = emc6w201_update_device(dev);
int sf = to_sensor_dev_attr_2(devattr)->index;
int nr = to_sensor_dev_attr_2(devattr)->nr;
return sprintf(buf, "%d\n", (int)data->temp[sf][nr] * 1000);
}
static ssize_t temp_store(struct device *dev,
struct device_attribute *devattr, const char *buf,
size_t count)
{
struct emc6w201_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
int sf = to_sensor_dev_attr_2(devattr)->index;
int nr = to_sensor_dev_attr_2(devattr)->nr;
int err;
long val;
u8 reg;
err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
val = clamp_val(val, -127000, 127000);
val = DIV_ROUND_CLOSEST(val, 1000);
reg = (sf == min) ? EMC6W201_REG_TEMP_LOW(nr)
: EMC6W201_REG_TEMP_HIGH(nr);
mutex_lock(&data->update_lock);
data->temp[sf][nr] = val;
err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
}
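/*
 * The fan registers hold tachometer counts, converted below as
 * 5400000 / count RPM (e.g. a count of 5400 reads as 1000 RPM);
 * 0 and 0xFFFF both mean no valid reading.
 */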
static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct emc6w201_data *data = emc6w201_update_device(dev);
int sf = to_sensor_dev_attr_2(devattr)->index;
int nr = to_sensor_dev_attr_2(devattr)->nr;
unsigned rpm;
if (data->fan[sf][nr] == 0 || data->fan[sf][nr] == 0xFFFF)
rpm = 0;
else
rpm = 5400000U / data->fan[sf][nr];
return sprintf(buf, "%u\n", rpm);
}
static ssize_t fan_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct emc6w201_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
int sf = to_sensor_dev_attr_2(devattr)->index;
int nr = to_sensor_dev_attr_2(devattr)->nr;
int err;
unsigned long val;
err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
if (val == 0) {
val = 0xFFFF;
} else {
val = DIV_ROUND_CLOSEST(5400000U, val);
val = clamp_val(val, 0, 0xFFFE);
}
mutex_lock(&data->update_lock);
data->fan[sf][nr] = val;
err = emc6w201_write16(client, EMC6W201_REG_FAN_MIN(nr),
data->fan[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
}
static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, input);
static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, min);
static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, max);
static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, input);
static SENSOR_DEVICE_ATTR_2_RW(in1_min, in, 1, min);
static SENSOR_DEVICE_ATTR_2_RW(in1_max, in, 1, max);
static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, input);
static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, min);
static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, max);
static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, input);
static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, min);
static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, max);
static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, input);
static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, min);
static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, max);
static SENSOR_DEVICE_ATTR_2_RO(in5_input, in, 5, input);
static SENSOR_DEVICE_ATTR_2_RW(in5_min, in, 5, min);
static SENSOR_DEVICE_ATTR_2_RW(in5_max, in, 5, max);
static SENSOR_DEVICE_ATTR_2_RO(temp1_input, temp, 0, input);
static SENSOR_DEVICE_ATTR_2_RW(temp1_min, temp, 0, min);
static SENSOR_DEVICE_ATTR_2_RW(temp1_max, temp, 0, max);
static SENSOR_DEVICE_ATTR_2_RO(temp2_input, temp, 1, input);
static SENSOR_DEVICE_ATTR_2_RW(temp2_min, temp, 1, min);
static SENSOR_DEVICE_ATTR_2_RW(temp2_max, temp, 1, max);
static SENSOR_DEVICE_ATTR_2_RO(temp3_input, temp, 2, input);
static SENSOR_DEVICE_ATTR_2_RW(temp3_min, temp, 2, min);
static SENSOR_DEVICE_ATTR_2_RW(temp3_max, temp, 2, max);
static SENSOR_DEVICE_ATTR_2_RO(temp4_input, temp, 3, input);
static SENSOR_DEVICE_ATTR_2_RW(temp4_min, temp, 3, min);
static SENSOR_DEVICE_ATTR_2_RW(temp4_max, temp, 3, max);
static SENSOR_DEVICE_ATTR_2_RO(temp5_input, temp, 4, input);
static SENSOR_DEVICE_ATTR_2_RW(temp5_min, temp, 4, min);
static SENSOR_DEVICE_ATTR_2_RW(temp5_max, temp, 4, max);
static SENSOR_DEVICE_ATTR_2_RO(temp6_input, temp, 5, input);
static SENSOR_DEVICE_ATTR_2_RW(temp6_min, temp, 5, min);
static SENSOR_DEVICE_ATTR_2_RW(temp6_max, temp, 5, max);
static SENSOR_DEVICE_ATTR_2_RO(fan1_input, fan, 0, input);
static SENSOR_DEVICE_ATTR_2_RW(fan1_min, fan, 0, min);
static SENSOR_DEVICE_ATTR_2_RO(fan2_input, fan, 1, input);
static SENSOR_DEVICE_ATTR_2_RW(fan2_min, fan, 1, min);
static SENSOR_DEVICE_ATTR_2_RO(fan3_input, fan, 2, input);
static SENSOR_DEVICE_ATTR_2_RW(fan3_min, fan, 2, min);
static SENSOR_DEVICE_ATTR_2_RO(fan4_input, fan, 3, input);
static SENSOR_DEVICE_ATTR_2_RW(fan4_min, fan, 3, min);
static SENSOR_DEVICE_ATTR_2_RO(fan5_input, fan, 4, input);
static SENSOR_DEVICE_ATTR_2_RW(fan5_min, fan, 4, min);
static struct attribute *emc6w201_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp4_input.dev_attr.attr,
&sensor_dev_attr_temp4_min.dev_attr.attr,
&sensor_dev_attr_temp4_max.dev_attr.attr,
&sensor_dev_attr_temp5_input.dev_attr.attr,
&sensor_dev_attr_temp5_min.dev_attr.attr,
&sensor_dev_attr_temp5_max.dev_attr.attr,
&sensor_dev_attr_temp6_input.dev_attr.attr,
&sensor_dev_attr_temp6_min.dev_attr.attr,
&sensor_dev_attr_temp6_max.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan4_input.dev_attr.attr,
&sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan5_input.dev_attr.attr,
&sensor_dev_attr_fan5_min.dev_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(emc6w201);
/*
* Driver interface
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
static int emc6w201_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int company, verstep, config;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* Identification */
company = i2c_smbus_read_byte_data(client, EMC6W201_REG_COMPANY);
if (company != 0x5C)
return -ENODEV;
verstep = i2c_smbus_read_byte_data(client, EMC6W201_REG_VERSTEP);
if (verstep < 0 || (verstep & 0xF0) != 0xB0)
return -ENODEV;
if ((verstep & 0x0F) > 2) {
dev_dbg(&client->dev, "Unknown EMC6W201 stepping %d\n",
verstep & 0x0F);
return -ENODEV;
}
/* Check configuration */
config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
if (config < 0 || (config & 0xF4) != 0x04)
return -ENODEV;
if (!(config & 0x01)) {
dev_err(&client->dev, "Monitoring not enabled\n");
return -ENODEV;
}
strscpy(info->type, "emc6w201", I2C_NAME_SIZE);
return 0;
}
static int emc6w201_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct emc6w201_data *data;
struct device *hwmon_dev;
data = devm_kzalloc(dev, sizeof(struct emc6w201_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
mutex_init(&data->update_lock);
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data,
emc6w201_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct i2c_device_id emc6w201_id[] = {
{ "emc6w201" },
{ }
};
MODULE_DEVICE_TABLE(i2c, emc6w201_id);
static struct i2c_driver emc6w201_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "emc6w201",
},
.probe = emc6w201_probe,
.id_table = emc6w201_id,
.detect = emc6w201_detect,
.address_list = normal_i2c,
};
module_i2c_driver(emc6w201_driver);
MODULE_AUTHOR("Jean Delvare <[email protected]>");
MODULE_DESCRIPTION("SMSC EMC6W201 hardware monitoring driver");
MODULE_LICENSE("GPL");
|
// SPDX-License-Identifier: GPL-2.0
/*
* Common code to handle map devices which are simple RAM
* (C) 2000 Red Hat.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int mapram_erase (struct mtd_info *, struct erase_info *);
static void mapram_nop (struct mtd_info *);
static struct mtd_info *map_ram_probe(struct map_info *map);
static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver mapram_chipdrv = {
.probe = map_ram_probe,
.name = "map_ram",
.module = THIS_MODULE
};
static struct mtd_info *map_ram_probe(struct map_info *map)
{
struct mtd_info *mtd;
/* Check the first byte is RAM */
#if 0
map_write8(map, 0x55, 0);
if (map_read8(map, 0) != 0x55)
return NULL;
map_write8(map, 0xAA, 0);
if (map_read8(map, 0) != 0xAA)
return NULL;
/* Check the last byte is RAM */
map_write8(map, 0x55, map->size-1);
if (map_read8(map, map->size-1) != 0x55)
return NULL;
map_write8(map, 0xAA, map->size-1);
if (map_read8(map, map->size-1) != 0xAA)
return NULL;
#endif
/* OK. It seems to be RAM. */
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
return NULL;
map->fldrv = &mapram_chipdrv;
mtd->priv = map;
mtd->name = map->name;
mtd->type = MTD_RAM;
mtd->size = map->size;
mtd->_erase = mapram_erase;
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
mtd->_sync = mapram_nop;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
/* Disable direct access when NO_XIP is set */
if (map->phys != NO_XIP) {
mtd->_point = mapram_point;
mtd->_unpoint = mapram_unpoint;
}
mtd->erasesize = PAGE_SIZE;
while(mtd->size & (mtd->erasesize - 1))
mtd->erasesize >>= 1;
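/* Example: with 4 KiB pages and a 10 KiB map, 10240 & 4095 == 2048, so
 * erasesize is halved once to 2048, which divides the size evenly.
 */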
__module_get(THIS_MODULE);
return mtd;
}
static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
if (!map->virt)
return -EINVAL;
*virt = map->virt + from;
if (phys)
*phys = map->phys + from;
*retlen = len;
return 0;
}
static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
return 0;
}
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
map_copy_from(map, buf, from, len);
*retlen = len;
return 0;
}
static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
map_copy_to(map, to, buf, len);
*retlen = len;
return 0;
}
static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
{
/*
 * Yeah, it's inefficient. Who cares? It's faster than a _real_
 * flash erase.
 */
struct map_info *map = mtd->priv;
map_word allff;
unsigned long i;
allff = map_word_ff(map);
for (i=0; i<instr->len; i += map_bankwidth(map))
map_write(map, allff, instr->addr + i);
return 0;
}
static void mapram_nop(struct mtd_info *mtd)
{
/* Nothing to see here */
}
static int __init map_ram_init(void)
{
register_mtd_chip_driver(&mapram_chipdrv);
return 0;
}
static void __exit map_ram_exit(void)
{
unregister_mtd_chip_driver(&mapram_chipdrv);
}
module_init(map_ram_init);
module_exit(map_ram_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("MTD chip driver for RAM chips");
|
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfd_priv.h"
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/idr.h>
/*
* This extension supports a kernel level doorbells management for the
* kernel queues using the first doorbell page reserved for the kernel.
*/
/*
* Each device exposes a doorbell aperture, a PCI MMIO aperture that
* receives 32-bit writes that are passed to queues as wptr values.
* The doorbells are intended to be written by applications as part
* of queueing work on user-mode queues.
* We assign doorbells to applications in PAGE_SIZE-sized and aligned chunks.
* We map the doorbell address space into user-mode when a process creates
* its first queue on each device.
* Although the mapping is done by KFD, it is equivalent to an mmap of
* the /dev/kfd with the particular device encoded in the mmap offset.
* There will be other uses for mmap of /dev/kfd, so only a range of
* offsets (KFD_MMAP_DOORBELL_START-END) is used for doorbells.
*/
/* # of doorbell bytes allocated for each process. */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
{
if (!kfd->shared_resources.enable_mes)
return roundup(kfd->device_info.doorbell_size *
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
PAGE_SIZE);
else
return amdgpu_mes_doorbell_process_slice(
(struct amdgpu_device *)kfd->adev);
}
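/* For example, with 8-byte doorbells and the usual 1024 queues per
 * process, the non-MES slice is roundup(8 * 1024, PAGE_SIZE), i.e.
 * 8 KiB with 4 KiB pages.
 */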
/* Doorbell calculations for device init. */
int kfd_doorbell_init(struct kfd_dev *kfd)
{
int size = PAGE_SIZE;
int r;
/*
* Todo: KFD kernel level operations need only one doorbell for
* ring test/HWS. So instead of reserving a whole page here for
* kernel, reserve and consume a doorbell from existing KGD kernel
* doorbell page.
*/
/* Bitmap to dynamically allocate doorbells from kernel page */
kfd->doorbell_bitmap = bitmap_zalloc(size / sizeof(u32), GFP_KERNEL);
if (!kfd->doorbell_bitmap) {
DRM_ERROR("Failed to allocate kernel doorbell bitmap\n");
return -ENOMEM;
}
/* Alloc a doorbell page for KFD kernel usages */
r = amdgpu_bo_create_kernel(kfd->adev,
size,
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_DOORBELL,
&kfd->doorbells,
NULL,
(void **)&kfd->doorbell_kernel_ptr);
if (r) {
pr_err("failed to allocate kernel doorbells\n");
bitmap_free(kfd->doorbell_bitmap);
return r;
}
pr_debug("Doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
return 0;
}
void kfd_doorbell_fini(struct kfd_dev *kfd)
{
bitmap_free(kfd->doorbell_bitmap);
amdgpu_bo_free_kernel(&kfd->doorbells, NULL,
(void **)&kfd->doorbell_kernel_ptr);
}
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
struct kfd_process_device *pdd;
/*
* For simplicity we only allow mapping of the entire doorbell
* allocation of a single device & process.
*/
if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev->kfd))
return -EINVAL;
pdd = kfd_get_process_device_data(dev, process);
if (!pdd)
return -EINVAL;
/* Calculate physical address of doorbell */
address = kfd_get_process_doorbells(pdd);
if (!address)
return -ENOMEM;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pr_debug("Mapping doorbell page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
(unsigned long long) vma->vm_start, address, vma->vm_flags,
kfd_doorbell_process_slice(dev->kfd));
return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
kfd_doorbell_process_slice(dev->kfd),
vma->vm_page_prot);
}
/* get kernel iomem pointer for a doorbell */
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
unsigned int *doorbell_off)
{
u32 inx;
mutex_lock(&kfd->doorbell_mutex);
inx = find_first_zero_bit(kfd->doorbell_bitmap, PAGE_SIZE / sizeof(u32));
__set_bit(inx, kfd->doorbell_bitmap);
mutex_unlock(&kfd->doorbell_mutex);
if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
return NULL;
*doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev,
kfd->doorbells,
inx,
kfd->device_info.doorbell_size);
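/* Each bitmap bit covers one 64-bit doorbell slot, so double the slot
 * index to convert it into u32 units for the pointer arithmetic on
 * doorbell_kernel_ptr below.
 */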
inx *= 2;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
" doorbell index == 0x%x\n",
*doorbell_off, inx);
return kfd->doorbell_kernel_ptr + inx;
}
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
inx /= 2;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_bitmap);
mutex_unlock(&kfd->doorbell_mutex);
}
void write_kernel_doorbell(void __iomem *db, u32 value)
{
if (db) {
writel(value, db);
pr_debug("Writing %d to doorbell address %p\n", value, db);
}
}
void write_kernel_doorbell64(void __iomem *db, u64 value)
{
if (db) {
WARN(((unsigned long)db & 7) != 0,
"Unaligned 64-bit doorbell");
writeq(value, (u64 __iomem *)db);
pr_debug("writing %llu to doorbell address %p\n", value, db);
}
}
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_dev *dev)
{
unsigned int i;
int range_start = dev->shared_resources.non_cp_doorbells_start;
int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!KFD_IS_SOC15(dev))
return 0;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
if (i >= range_start && i <= range_end) {
__set_bit(i, qpd->doorbell_bitmap);
__set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
}
}
return 0;
}
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
{
struct amdgpu_device *adev = pdd->dev->adev;
uint32_t first_db_index;
if (!pdd->qpd.proc_doorbells) {
if (kfd_alloc_process_doorbells(pdd->dev->kfd, pdd))
/* phys_addr_t 0 is error */
return 0;
}
first_db_index = amdgpu_doorbell_index_on_bar(adev,
pdd->qpd.proc_doorbells,
0,
pdd->dev->kfd->device_info.doorbell_size);
return adev->doorbell.base + first_db_index * sizeof(uint32_t);
}
int kfd_alloc_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd)
{
int r;
struct qcm_process_device *qpd = &pdd->qpd;
/* Allocate bitmap for dynamic doorbell allocation */
qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
GFP_KERNEL);
if (!qpd->doorbell_bitmap) {
DRM_ERROR("Failed to allocate process doorbell bitmap\n");
return -ENOMEM;
}
r = init_doorbell_bitmap(&pdd->qpd, kfd);
if (r) {
DRM_ERROR("Failed to initialize process doorbells\n");
r = -ENOMEM;
goto err;
}
/* Allocate doorbells for this process */
r = amdgpu_bo_create_kernel(kfd->adev,
kfd_doorbell_process_slice(kfd),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_DOORBELL,
&qpd->proc_doorbells,
NULL,
NULL);
if (r) {
DRM_ERROR("Failed to allocate process doorbells\n");
goto err;
}
return 0;
err:
bitmap_free(qpd->doorbell_bitmap);
qpd->doorbell_bitmap = NULL;
return r;
}
void kfd_free_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
if (qpd->doorbell_bitmap) {
bitmap_free(qpd->doorbell_bitmap);
qpd->doorbell_bitmap = NULL;
}
amdgpu_bo_free_kernel(&qpd->proc_doorbells, NULL, NULL);
}
|
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
#define __ICP_QAT_FW_LOADER_HANDLE_H__
#include "icp_qat_uclo.h"
struct icp_qat_fw_loader_ae_data {
unsigned int state;
unsigned int ustore_size;
unsigned int free_addr;
unsigned int free_size;
unsigned int live_ctx_mask;
};
struct icp_qat_fw_loader_hal_handle {
struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
unsigned int ae_mask;
unsigned int admin_ae_mask;
unsigned int slice_mask;
unsigned int revision_id;
unsigned int ae_max_num;
unsigned int upc_mask;
unsigned int max_ustore;
};
struct icp_qat_fw_loader_chip_info {
int mmp_sram_size;
bool nn;
bool lm2lm3;
u32 lm_size;
u32 icp_rst_csr;
u32 icp_rst_mask;
u32 glb_clk_enable_csr;
u32 misc_ctl_csr;
u32 wakeup_event_val;
bool fw_auth;
bool css_3k;
bool tgroup_share_ustore;
u32 fcu_ctl_csr;
u32 fcu_sts_csr;
u32 fcu_dram_addr_hi;
u32 fcu_dram_addr_lo;
u32 fcu_loaded_ae_csr;
u8 fcu_loaded_ae_pos;
};
struct icp_qat_fw_loader_handle {
struct icp_qat_fw_loader_hal_handle *hal_handle;
struct icp_qat_fw_loader_chip_info *chip_info;
struct pci_dev *pci_dev;
void *obj_handle;
void *sobj_handle;
void *mobj_handle;
unsigned int cfg_ae_mask;
void __iomem *hal_sram_addr_v;
void __iomem *hal_cap_g_ctl_csr_addr_v;
void __iomem *hal_cap_ae_xfer_csr_addr_v;
void __iomem *hal_cap_ae_local_csr_addr_v;
void __iomem *hal_ep_csr_addr_v;
};
struct icp_firml_dram_desc {
void __iomem *dram_base_addr;
void *dram_base_addr_v;
dma_addr_t dram_bus_addr;
u64 dram_size;
};
#endif
|
#include "aes-glue.c"
|
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RIIC driver
*
* Copyright (C) 2013 Wolfram Sang <[email protected]>
* Copyright (C) 2013 Renesas Solutions Corp.
*/
/*
* This i2c core has a lot of interrupts, namely 8. We use their chaining as
* some kind of state machine.
*
* 1) The main xfer routine kicks off a transmission by putting the start bit
* (or repeated start) on the bus and enabling the transmit interrupt (TIE)
* since we need to send the target address + RW bit in every case.
*
* 2) TIE sends target address + RW bit and selects how to continue.
*
* 3a) Write case: We keep utilizing TIE as long as we have data to send. If we
* are done, we switch over to the transmission done interrupt (TEIE) and mark
* the message as completed (includes sending STOP) there.
*
* 3b) Read case: We switch over to receive interrupt (RIE). One dummy read is
* needed to start clocking, then we keep receiving until we are done. Note
* that we use the RDRFS mode all the time, i.e. we ACK/NACK every byte by
* writing to the ACKBT bit. I tried using the RDRFS mode only at the end of a
* message to create the final NACK as sketched in the datasheet. This caused
* some subtle races (when byte n was processed and byte n+1 was already
* waiting), though, so I settled on the safe approach.
*
* 4) If we got a NACK somewhere, we flag the error and stop the transmission
* via NAKIE.
*
* Also check the comments in the interrupt routines for some gory details.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#define ICCR1_ICE 0x80
#define ICCR1_IICRST 0x40
#define ICCR1_SOWP 0x10
#define ICCR2_BBSY 0x80
#define ICCR2_SP 0x08
#define ICCR2_RS 0x04
#define ICCR2_ST 0x02
#define ICMR1_CKS_MASK 0x70
#define ICMR1_BCWP 0x08
#define ICMR1_CKS(_x) ((((_x) << 4) & ICMR1_CKS_MASK) | ICMR1_BCWP)
#define ICMR3_RDRFS 0x20
#define ICMR3_ACKWP 0x10
#define ICMR3_ACKBT 0x08
#define ICFER_FMPE 0x80
#define ICIER_TIE 0x80
#define ICIER_TEIE 0x40
#define ICIER_RIE 0x20
#define ICIER_NAKIE 0x10
#define ICIER_SPIE 0x08
#define ICSR2_NACKF 0x10
#define ICBR_RESERVED 0xe0 /* Should be 1 on writes */
#define RIIC_INIT_MSG -1
enum riic_reg_list {
RIIC_ICCR1 = 0,
RIIC_ICCR2,
RIIC_ICMR1,
RIIC_ICMR3,
RIIC_ICFER,
RIIC_ICSER,
RIIC_ICIER,
RIIC_ICSR2,
RIIC_ICBRL,
RIIC_ICBRH,
RIIC_ICDRT,
RIIC_ICDRR,
RIIC_REG_END,
};
struct riic_of_data {
const u8 *regs;
bool fast_mode_plus;
};
struct riic_dev {
void __iomem *base;
u8 *buf;
struct i2c_msg *msg;
int bytes_left;
int err;
int is_last;
const struct riic_of_data *info;
struct completion msg_done;
struct i2c_adapter adapter;
struct clk *clk;
struct reset_control *rstc;
struct i2c_timings i2c_t;
};
struct riic_irq_desc {
int res_num;
irq_handler_t isr;
char *name;
};
static inline void riic_writeb(struct riic_dev *riic, u8 val, u8 offset)
{
writeb(val, riic->base + riic->info->regs[offset]);
}
static inline u8 riic_readb(struct riic_dev *riic, u8 offset)
{
return readb(riic->base + riic->info->regs[offset]);
}
static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u8 reg)
{
riic_writeb(riic, (riic_readb(riic, reg) & ~clear) | set, reg);
}
static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct riic_dev *riic = i2c_get_adapdata(adap);
struct device *dev = adap->dev.parent;
unsigned long time_left;
int i, ret;
u8 start_bit;
ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
if (riic_readb(riic, RIIC_ICCR2) & ICCR2_BBSY) {
riic->err = -EBUSY;
goto out;
}
reinit_completion(&riic->msg_done);
riic->err = 0;
riic_writeb(riic, 0, RIIC_ICSR2);
for (i = 0, start_bit = ICCR2_ST; i < num; i++) {
riic->bytes_left = RIIC_INIT_MSG;
riic->buf = msgs[i].buf;
riic->msg = &msgs[i];
riic->is_last = (i == num - 1);
riic_writeb(riic, ICIER_NAKIE | ICIER_TIE, RIIC_ICIER);
riic_writeb(riic, start_bit, RIIC_ICCR2);
time_left = wait_for_completion_timeout(&riic->msg_done, riic->adapter.timeout);
if (time_left == 0)
riic->err = -ETIMEDOUT;
if (riic->err)
break;
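/* Subsequent messages of this transfer begin with a repeated START */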
start_bit = ICCR2_RS;
}
out:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return riic->err ?: num;
}
static irqreturn_t riic_tdre_isr(int irq, void *data)
{
struct riic_dev *riic = data;
u8 val;
if (!riic->bytes_left)
return IRQ_NONE;
if (riic->bytes_left == RIIC_INIT_MSG) {
if (riic->msg->flags & I2C_M_RD)
/* On read, switch over to receive interrupt */
riic_clear_set_bit(riic, ICIER_TIE, ICIER_RIE, RIIC_ICIER);
else
/* On write, initialize length */
riic->bytes_left = riic->msg->len;
val = i2c_8bit_addr_from_msg(riic->msg);
} else {
val = *riic->buf;
riic->buf++;
riic->bytes_left--;
}
/*
* Switch to transmission ended interrupt when done. Do check here
* after bytes_left was initialized to support SMBUS_QUICK (new msg has
* 0 length then)
*/
if (riic->bytes_left == 0)
riic_clear_set_bit(riic, ICIER_TIE, ICIER_TEIE, RIIC_ICIER);
/*
* This acks the TIE interrupt. We get another TIE immediately if our
* value could be moved to the shadow shift register right away. So
* this must be after updates to ICIER (where we want to disable TIE)!
*/
riic_writeb(riic, val, RIIC_ICDRT);
return IRQ_HANDLED;
}
static irqreturn_t riic_tend_isr(int irq, void *data)
{
struct riic_dev *riic = data;
if (riic_readb(riic, RIIC_ICSR2) & ICSR2_NACKF) {
/* We got a NACK */
riic_readb(riic, RIIC_ICDRR); /* dummy read */
riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
riic->err = -ENXIO;
} else if (riic->bytes_left) {
return IRQ_NONE;
}
if (riic->is_last || riic->err) {
riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
riic_writeb(riic, ICCR2_SP, RIIC_ICCR2);
} else {
/* Transfer is complete, but do not send STOP */
riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
complete(&riic->msg_done);
}
return IRQ_HANDLED;
}
static irqreturn_t riic_rdrf_isr(int irq, void *data)
{
struct riic_dev *riic = data;
if (!riic->bytes_left)
return IRQ_NONE;
if (riic->bytes_left == RIIC_INIT_MSG) {
riic->bytes_left = riic->msg->len;
riic_readb(riic, RIIC_ICDRR); /* dummy read */
return IRQ_HANDLED;
}
if (riic->bytes_left == 1) {
/* STOP must come before we set ACKBT! */
if (riic->is_last) {
riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
riic_writeb(riic, ICCR2_SP, RIIC_ICCR2);
}
riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
} else {
riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
}
/* Reading acks the RIE interrupt */
*riic->buf = riic_readb(riic, RIIC_ICDRR);
riic->buf++;
riic->bytes_left--;
return IRQ_HANDLED;
}
static irqreturn_t riic_stop_isr(int irq, void *data)
{
struct riic_dev *riic = data;
/* read back registers to confirm writes have fully propagated */
riic_writeb(riic, 0, RIIC_ICSR2);
riic_readb(riic, RIIC_ICSR2);
riic_writeb(riic, 0, RIIC_ICIER);
riic_readb(riic, RIIC_ICIER);
complete(&riic->msg_done);
return IRQ_HANDLED;
}
static u32 riic_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm riic_algo = {
.xfer = riic_xfer,
.functionality = riic_func,
};
static int riic_init_hw(struct riic_dev *riic)
{
int ret;
unsigned long rate;
int total_ticks, cks, brl, brh;
struct i2c_timings *t = &riic->i2c_t;
struct device *dev = riic->adapter.dev.parent;
bool fast_mode_plus = riic->info->fast_mode_plus;
u32 max_freq = fast_mode_plus ? I2C_MAX_FAST_MODE_PLUS_FREQ
: I2C_MAX_FAST_MODE_FREQ;
if (t->bus_freq_hz > max_freq)
return dev_err_probe(&riic->adapter.dev, -EINVAL,
"unsupported bus speed %uHz (%u max)\n",
t->bus_freq_hz, max_freq);
rate = clk_get_rate(riic->clk);
/*
* Assume the default register settings:
* FER.SCLE = 1 (SCL sync circuit enabled, adds 2 or 3 cycles)
* FER.NFE = 1 (noise circuit enabled)
* MR3.NF = 0 (1 cycle of noise filtered out)
*
* Freq (CKS=000) = I2CCLK / ((BRH + 3 + 1) + (BRL + 3 + 1) + tr + tf)
* Freq (CKS!=000) = I2CCLK / ((BRH + 2 + 1) + (BRL + 2 + 1) + tr + tf)
* (tr and tf expressed in I2CCLK cycles)
*/
/*
* Determine reference clock rate. We must be able to get the desired
* frequency with only 62 clock ticks max (31 high, 31 low).
* Aim for a duty of 60% LOW, 40% HIGH.
*/
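/*
 * Illustration only: with a 50 MHz reference clock and a 100 kHz bus,
 * total_ticks starts at 500 and is halved along with the rate until
 * cks = 4 and total_ticks = 31, giving brl = 18 and brh = 13 before
 * the sync-circuit and rise/fall corrections below.
 */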
total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
for (cks = 0; cks < 7; cks++) {
/*
* 60% low time must be less than BRL + 2 + 1
* BRL max register value is 0x1F.
*/
brl = ((total_ticks * 6) / 10);
if (brl <= (0x1F + 3))
break;
total_ticks /= 2;
rate /= 2;
}
if (brl > (0x1F + 3)) {
dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
(unsigned long)t->bus_freq_hz);
return -EINVAL;
}
brh = total_ticks - brl;
/* Remove automatic clock ticks for sync circuit and NF */
if (cks == 0) {
brl -= 4;
brh -= 4;
} else {
brl -= 3;
brh -= 3;
}
/*
* Remove clock ticks for rise and fall times. Convert ns to clock
* ticks.
*/
brl -= t->scl_fall_ns / (1000000000 / rate);
brh -= t->scl_rise_ns / (1000000000 / rate);
/* Adjust for min register values for when SCLE=1 and NFE=1 */
if (brl < 1)
brl = 1;
if (brh < 1)
brh = 1;
pr_debug("i2c-riic: freq=%lu, duty=%d, fall=%lu, rise=%lu, cks=%d, brl=%d, brh=%d\n",
rate / total_ticks, ((brl + 3) * 100) / (brl + brh + 6),
t->scl_fall_ns / (1000000000 / rate),
t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
/* Changing the order of accessing IICRST and ICE may break things! */
riic_writeb(riic, ICCR1_IICRST | ICCR1_SOWP, RIIC_ICCR1);
riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1);
riic_writeb(riic, ICMR1_CKS(cks), RIIC_ICMR1);
riic_writeb(riic, brh | ICBR_RESERVED, RIIC_ICBRH);
riic_writeb(riic, brl | ICBR_RESERVED, RIIC_ICBRL);
riic_writeb(riic, 0, RIIC_ICSER);
riic_writeb(riic, ICMR3_ACKWP | ICMR3_RDRFS, RIIC_ICMR3);
if (fast_mode_plus && t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ)
riic_clear_set_bit(riic, 0, ICFER_FMPE, RIIC_ICFER);
riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
static struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
{ .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
};
static void riic_reset_control_assert(void *data)
{
reset_control_assert(data);
}
static int riic_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct riic_dev *riic;
struct i2c_adapter *adap;
int i, ret;
riic = devm_kzalloc(dev, sizeof(*riic), GFP_KERNEL);
if (!riic)
return -ENOMEM;
riic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(riic->base))
return PTR_ERR(riic->base);
riic->clk = devm_clk_get(dev, NULL);
if (IS_ERR(riic->clk)) {
dev_err(dev, "missing controller clock");
return PTR_ERR(riic->clk);
}
riic->rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(riic->rstc))
return dev_err_probe(dev, PTR_ERR(riic->rstc),
"Error: missing reset ctrl\n");
ret = reset_control_deassert(riic->rstc);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, riic_reset_control_assert, riic->rstc);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
ret = platform_get_irq(pdev, riic_irqs[i].res_num);
if (ret < 0)
return ret;
ret = devm_request_irq(dev, ret, riic_irqs[i].isr,
0, riic_irqs[i].name, riic);
if (ret) {
dev_err(dev, "failed to request irq %s\n", riic_irqs[i].name);
return ret;
}
}
riic->info = of_device_get_match_data(dev);
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &riic_algo;
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
init_completion(&riic->msg_done);
i2c_parse_fw_timings(dev, &riic->i2c_t, true);
/* Default 0 to save power. Can be overridden via sysfs for lower latency. */
pm_runtime_set_autosuspend_delay(dev, 0);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
ret = riic_init_hw(riic);
if (ret)
goto out;
ret = i2c_add_adapter(adap);
if (ret)
goto out;
platform_set_drvdata(pdev, riic);
dev_info(dev, "registered with %uHz bus speed\n", riic->i2c_t.bus_freq_hz);
return 0;
out:
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
return ret;
}
static void riic_i2c_remove(struct platform_device *pdev)
{
struct riic_dev *riic = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int ret;
ret = pm_runtime_resume_and_get(dev);
if (!ret) {
riic_writeb(riic, 0, RIIC_ICIER);
pm_runtime_put(dev);
}
i2c_del_adapter(&riic->adapter);
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
}
static const u8 riic_rz_a_regs[RIIC_REG_END] = {
[RIIC_ICCR1] = 0x00,
[RIIC_ICCR2] = 0x04,
[RIIC_ICMR1] = 0x08,
[RIIC_ICMR3] = 0x10,
[RIIC_ICFER] = 0x14,
[RIIC_ICSER] = 0x18,
[RIIC_ICIER] = 0x1c,
[RIIC_ICSR2] = 0x24,
[RIIC_ICBRL] = 0x34,
[RIIC_ICBRH] = 0x38,
[RIIC_ICDRT] = 0x3c,
[RIIC_ICDRR] = 0x40,
};
static const struct riic_of_data riic_rz_a_info = {
.regs = riic_rz_a_regs,
.fast_mode_plus = true,
};
static const struct riic_of_data riic_rz_a1h_info = {
.regs = riic_rz_a_regs,
};
static const u8 riic_rz_v2h_regs[RIIC_REG_END] = {
[RIIC_ICCR1] = 0x00,
[RIIC_ICCR2] = 0x01,
[RIIC_ICMR1] = 0x02,
[RIIC_ICMR3] = 0x04,
[RIIC_ICFER] = 0x05,
[RIIC_ICSER] = 0x06,
[RIIC_ICIER] = 0x07,
[RIIC_ICSR2] = 0x09,
[RIIC_ICBRL] = 0x10,
[RIIC_ICBRH] = 0x11,
[RIIC_ICDRT] = 0x12,
[RIIC_ICDRR] = 0x13,
};
static const struct riic_of_data riic_rz_v2h_info = {
.regs = riic_rz_v2h_regs,
.fast_mode_plus = true,
};
static int riic_i2c_suspend(struct device *dev)
{
struct riic_dev *riic = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
i2c_mark_adapter_suspended(&riic->adapter);
/* Disable output on SDA, SCL pins. */
riic_clear_set_bit(riic, ICCR1_ICE, 0, RIIC_ICCR1);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync(dev);
return reset_control_assert(riic->rstc);
}
static int riic_i2c_resume(struct device *dev)
{
struct riic_dev *riic = dev_get_drvdata(dev);
int ret;
ret = reset_control_deassert(riic->rstc);
if (ret)
return ret;
ret = riic_init_hw(riic);
if (ret) {
/*
* In case this happens there is no way to recover from this
* state. The driver will remain loaded. We want to avoid
* keeping the reset line de-asserted for no reason.
*/
reset_control_assert(riic->rstc);
return ret;
}
i2c_mark_adapter_resumed(&riic->adapter);
return 0;
}
static const struct dev_pm_ops riic_i2c_pm_ops = {
SYSTEM_SLEEP_PM_OPS(riic_i2c_suspend, riic_i2c_resume)
};
static const struct of_device_id riic_i2c_dt_ids[] = {
{ .compatible = "renesas,riic-rz", .data = &riic_rz_a_info },
{ .compatible = "renesas,riic-r7s72100", .data = &riic_rz_a1h_info, },
{ .compatible = "renesas,riic-r9a09g057", .data = &riic_rz_v2h_info },
{ /* Sentinel */ },
};
static struct platform_driver riic_i2c_driver = {
.probe = riic_i2c_probe,
.remove = riic_i2c_remove,
.driver = {
.name = "i2c-riic",
.of_match_table = riic_i2c_dt_ids,
.pm = pm_ptr(&riic_i2c_pm_ops),
},
};
module_platform_driver(riic_i2c_driver);
MODULE_DESCRIPTION("Renesas RIIC adapter");
MODULE_AUTHOR("Wolfram Sang <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, riic_i2c_dt_ids);
|
/*
* Copyright © 2008 Intel Corporation
* 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_test.h"
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
#include "skl_scaler.h"
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc)
return INT_MAX;
/*
* DSC->DPT interface width:
* ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used)
* LNL+: 144 bits (not a bottleneck in any config)
*
* Bspec/49259 suggests that the FEC overhead needs to be
* applied here, though HW people claim that neither this FEC
* nor any other overhead is applicable here (that is, the actual
* available_bw is just symbol_clock * 72). However, based on
* testing on MTL-P the
* - DELL U3224KBA display
* - Unigraf UCD-500 CTS test sink
* devices the
* - 5120x2880/995.59Mhz
* - 6016x3384/1357.23Mhz
* - 6144x3456/1413.39Mhz
* modes (all the ones having a DPT limit on the above devices),
* both the channel coding efficiency and an additional 3%
* overhead need to be accounted for.
*/
return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
drm_dp_bw_channel_coding_efficiency(true)),
mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
const struct intel_connector *connector,
bool ssc, int dsc_slice_count, int bpp_x16)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
int overhead;
flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
if (dsc_slice_count)
flags |= DRM_DP_BW_OVERHEAD_DSC;
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
adjusted_mode->hdisplay,
dsc_slice_count,
bpp_x16,
flags);
/*
* TODO: clarify whether a minimum required by the fixed FEC overhead
* in the bspec audio programming sequence is required here.
*/
return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
}
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_connector *connector,
int overhead,
int bpp_x16,
struct intel_link_m_n *m_n)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
overhead,
m_n);
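/* An MTP carries 64 time slots, so scaling the data M/N ratio by 64
 * yields the number of slots (TU) this stream occupies per MTP.
 */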
m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
}
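/* One PBN unit is 54/64 MBytes/s per the DP spec, hence the 64 / 54000
 * scaling below: an effective rate of 540000 kBytes/s, for instance,
 * needs DIV_ROUND_UP(540000 * 64, 54 * 1000) = 640 PBN.
 */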
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
int effective_data_rate =
intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);
/*
* TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
* to calculate PBN with the BW overhead passed to it.
*/
return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}
static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
return intel_dp_dsc_get_slice_count(connector,
adjusted_mode->clock,
adjusted_mode->hdisplay,
num_joined_pipes);
}
static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int max_bpp,
int min_bpp,
struct link_config_limits *limits,
struct drm_connector_state *conn_state,
int step,
bool dsc)
{
struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct drm_dp_mst_topology_state *mst_state;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp, slots = -EINVAL;
int dsc_slice_count = 0;
int max_dpt_bpp;
int ret = 0;
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
if (dsc) {
if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
return -EINVAL;
crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
}
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
crtc_state->port_clock,
crtc_state->lane_count);
max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
if (max_bpp > max_dpt_bpp) {
drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
max_bpp, max_dpt_bpp);
max_bpp = max_dpt_bpp;
}
drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);
if (dsc) {
dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
if (!dsc_slice_count) {
drm_dbg_kms(&i915->drm, "Can't get valid DSC slice count\n");
return -ENOSPC;
}
}
for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
int local_bw_overhead;
int remote_bw_overhead;
int link_bpp_x16;
int remote_tu;
fixed20_12 pbn;
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
intel_dp_output_bpp(crtc_state->output_format, bpp));
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
false, dsc_slice_count, link_bpp_x16);
remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
true, dsc_slice_count, link_bpp_x16);
intel_dp_mst_compute_m_n(crtc_state, connector,
local_bw_overhead,
link_bpp_x16,
&crtc_state->dp_m_n);
/*
* The TU size programmed to the HW determines which slots in
* an MTP frame are used for this stream, which needs to match
* the payload size programmed to the first downstream branch
* device's payload table.
*
		 * Note that, at the moment, the payload's PBN value the DRM
		 * core sends via the ALLOCATE_PAYLOAD side-band message
		 * matches the payload size (which it calculates from the PBN
		 * value) it programs to the first branch device's payload
		 * table. The allocation
* in the payload table could be reduced though (to
* crtc_state->dp_m_n.tu), provided that the driver doesn't
* enable SSC on the corresponding link.
*/
pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
link_bpp_x16,
remote_bw_overhead));
remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);
/*
* Aligning the TUs ensures that symbols consisting of multiple
* (4) symbol cycles don't get split between two consecutive
* MTPs, as required by Bspec.
* TODO: remove the alignment restriction for 128b/132b links
* on some platforms, where Bspec allows this.
*/
remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
/*
* Also align PBNs accordingly, since MST core will derive its
* own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
* The above comment about the difference between the PBN
* allocated for the whole path and the TUs allocated for the
* first branch device's link also applies here.
*/
pbn.full = remote_tu * mst_state->pbn_div.full;
crtc_state->pbn = dfixed_trunc(pbn);
drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
crtc_state->dp_m_n.tu = remote_tu;
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
connector->port,
crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
if (slots >= 0) {
drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
break;
}
}
/* We failed to find a proper bpp/timeslots, return error */
if (ret)
slots = ret;
if (slots < 0) {
drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
slots);
} else {
if (!dsc)
crtc_state->pipe_bpp = bpp;
else
crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}
return slots;
}
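/*
 * A short sketch of the TU alignment done in the loop above (illustrative
 * values): ALIGN(remote_tu, 4 / lane_count) rounds the TU up so that a
 * 4-cycle symbol never straddles an MTP boundary:
 *
 *   lane_count == 1: ALIGN(tu, 4) -> 9 becomes 12
 *   lane_count == 2: ALIGN(tu, 2) -> 9 becomes 10
 *   lane_count == 4: ALIGN(tu, 1) -> 9 stays 9
 *
 * The PBN is then recomputed from the aligned TU, so the MST core derives
 * the same TU from it in drm_dp_atomic_find_time_slots().
 */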
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
int slots = -EINVAL;
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
* YUV420 is only half of the pipe bpp value.
*/
slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
fxp_q4_to_int(limits->link.max_bpp_x16),
fxp_q4_to_int(limits->link.min_bpp_x16),
limits,
conn_state, 2 * 3, false);
if (slots < 0)
return slots;
return 0;
}
static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
int slots = -EINVAL;
int i, num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
u8 dsc_max_bpc;
int min_compressed_bpp, max_compressed_bpp;
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
if (DISPLAY_VER(i915) >= 12)
dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
else
dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);
max_bpp = min_t(u8, dsc_max_bpc * 3, limits->pipe.max_bpp);
min_bpp = limits->pipe.min_bpp;
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
dsc_bpc);
drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n",
min_bpp, max_bpp);
sink_max_bpp = dsc_bpc[0] * 3;
sink_min_bpp = sink_max_bpp;
for (i = 1; i < num_bpc; i++) {
if (sink_min_bpp > dsc_bpc[i] * 3)
sink_min_bpp = dsc_bpc[i] * 3;
if (sink_max_bpp < dsc_bpc[i] * 3)
sink_max_bpp = dsc_bpc[i] * 3;
}
drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n",
sink_min_bpp, sink_max_bpp);
if (min_bpp < sink_min_bpp)
min_bpp = sink_min_bpp;
if (max_bpp > sink_max_bpp)
max_bpp = sink_max_bpp;
crtc_state->pipe_bpp = max_bpp;
max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
crtc_state,
max_bpp / 3);
max_compressed_bpp = min(max_compressed_bpp,
fxp_q4_to_int(limits->link.max_bpp_x16));
min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
min_compressed_bpp = max(min_compressed_bpp,
fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
min_compressed_bpp, max_compressed_bpp);
/* Align compressed bpps according to our own constraints */
max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
crtc_state->pipe_bpp);
min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
crtc_state->pipe_bpp);
slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
min_compressed_bpp, limits,
conn_state, 1, true);
if (slots < 0)
return slots;
return 0;
}
static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_topology_state *topology_state;
u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
if (IS_ERR(topology_state)) {
drm_dbg_kms(&i915->drm, "slot update failed\n");
return PTR_ERR(topology_state);
}
drm_dp_mst_update_slots(topology_state, link_coding_cap);
return 0;
}
static int mode_hblank_period_ns(const struct drm_display_mode *mode)
{
return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
NSEC_PER_SEC / 1000),
mode->crtc_clock);
}
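/*
 * Worked example for the helper above (illustrative mode values): a mode
 * with htotal - hdisplay == 80 pixels and a 594,000 kHz pixel clock has an
 * hblank period of
 *
 *   80 * 1000000 / 594000 ~= 135 ns
 *
 * which is below both the 300 ns (non-UHBR) and 500 ns (UHBR) limits used
 * by the quirk check below.
 */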
static bool
hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
const struct link_config_limits *limits)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
bool is_uhbr_sink = connector->mst_port &&
drm_dp_128b132b_supported(connector->mst_port->dpcd);
int hblank_limit = is_uhbr_sink ? 500 : 300;
if (!connector->dp.dsc_hblank_expansion_quirk)
return false;
if (is_uhbr_sink && !drm_dp_is_uhbr_rate(limits->max_rate))
return false;
if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
return false;
if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
return false;
return true;
}
static bool
adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
struct link_config_limits *limits,
bool dsc)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int min_bpp_x16 = limits->link.min_bpp_x16;
if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits))
return true;
if (!dsc) {
if (intel_dp_supports_dsc(connector, crtc_state)) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
return false;
}
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
if (limits->link.max_bpp_x16 < fxp_q4_from_int(24))
return false;
limits->link.min_bpp_x16 = fxp_q4_from_int(24);
return true;
}
drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
if (limits->max_rate < 540000)
min_bpp_x16 = fxp_q4_from_int(13);
else if (limits->max_rate < 810000)
min_bpp_x16 = fxp_q4_from_int(10);
if (limits->link.min_bpp_x16 >= min_bpp_x16)
return true;
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name,
FXP_Q4_ARGS(min_bpp_x16));
if (limits->link.max_bpp_x16 < min_bpp_x16)
return false;
limits->link.min_bpp_x16 = min_bpp_x16;
return true;
}
static bool
intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
const struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
limits->min_rate = limits->max_rate =
intel_dp_max_link_rate(intel_dp);
limits->min_lane_count = limits->max_lane_count =
intel_dp_max_lane_count(intel_dp);
limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
/*
* FIXME: If all the streams can't fit into the link with
* their current pipe_bpp we should reduce pipe_bpp across
* the board until things start to fit. Until then we
* limit to <= 8bpc since that's what was hardcoded for all
* MST streams previously. This hack should be removed once
* we have the proper retry logic in place.
*/
limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
intel_dp_test_compute_config(intel_dp, crtc_state, limits);
if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
crtc_state,
dsc,
limits))
return false;
return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
crtc_state,
limits,
dsc);
}
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
bool dsc_needed, joiner_needs_dsc;
int num_joined_pipes;
int ret = 0;
if (pipe_config->fec_enable &&
!intel_dp_supports_fec(intel_dp, connector, pipe_config))
return -EINVAL;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_clock);
if (num_joined_pipes > 1)
pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_mst_compute_config_limits(intel_dp,
connector,
pipe_config,
false,
&limits);
if (!dsc_needed) {
ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
conn_state, &limits);
if (ret == -EDEADLK)
return ret;
if (ret)
dsc_needed = true;
}
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_supports_dsc(connector, pipe_config))
return -EINVAL;
if (!intel_dp_mst_compute_config_limits(intel_dp,
connector,
pipe_config,
true,
&limits))
return -EINVAL;
/*
* FIXME: As bpc is hardcoded to 8, as mentioned above,
* WARN and ignore the debug flag force_dsc_bpc for now.
*/
drm_WARN(&dev_priv->drm, intel_dp->force_dsc_bpc, "Cannot Force BPC for MST\n");
/*
* Try to get at least some timeslots and then see, if
* we can fit there with DSC.
*/
drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n");
ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config,
conn_state, &limits);
if (ret < 0)
return ret;
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits,
pipe_config->dp_m_n.tu, false);
}
if (ret)
return ret;
ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
if (ret)
return ret;
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
intel_ddi_compute_min_voltage_level(pipe_config);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
pipe_config);
}
/*
* Iterate over all connectors and return a mask of
* all CPU transcoders streaming over the same DP link.
*/
static unsigned int
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
struct intel_dp *mst_port)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_digital_connector_state *conn_state;
struct intel_connector *connector;
u8 transcoders = 0;
int i;
if (DISPLAY_VER(dev_priv) < 12)
return 0;
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
if (connector->mst_port != mst_port || !conn_state->base.crtc)
continue;
crtc = to_intel_crtc(conn_state->base.crtc);
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->hw.active)
continue;
transcoders |= BIT(crtc_state->cpu_transcoder);
}
return transcoders;
}
static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct drm_dp_mst_port *parent_port)
{
const struct intel_digital_connector_state *conn_state;
struct intel_connector *connector;
u8 mask = 0;
int i;
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
if (!conn_state->base.crtc)
continue;
if (&connector->mst_port->mst_mgr != mst_mgr)
continue;
if (connector->port != parent_port &&
!drm_dp_mst_port_downstream_of_parent(mst_mgr,
connector->port,
parent_port))
continue;
mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
}
return mask;
}
static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct intel_link_bw_limits *limits)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc *crtc;
u8 mst_pipe_mask;
u8 fec_pipe_mask = 0;
int ret;
mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
/* Atomic connector check should've added all the MST CRTCs. */
if (drm_WARN_ON(&i915->drm, !crtc_state))
return -EINVAL;
if (crtc_state->fec_enable)
fec_pipe_mask |= BIT(crtc->pipe);
}
if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
return 0;
limits->force_fec_pipes |= mst_pipe_mask;
ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
mst_pipe_mask);
return ret ? : -EAGAIN;
}
static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct drm_dp_mst_topology_state *mst_state,
struct intel_link_bw_limits *limits)
{
struct drm_dp_mst_port *mst_port;
u8 mst_port_pipes;
int ret;
ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
if (ret != -ENOSPC)
return ret;
mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);
ret = intel_link_bw_reduce_bpp(state, limits,
mst_port_pipes, "MST link BW");
return ret ? : -EAGAIN;
}
/**
* intel_dp_mst_atomic_check_link - check all modeset MST link configuration
* @state: intel atomic state
* @limits: link BW limits
*
* Check the link configuration for all modeset MST outputs. If the
* configuration is invalid @limits will be updated if possible to
* reduce the total BW, after which the configuration for all CRTCs in
* @state must be recomputed with the updated @limits.
*
* Returns:
 * - 0 if the configuration is valid
* - %-EAGAIN, if the configuration is invalid and @limits got updated
* with fallback values with which the configuration of all CRTCs in
* @state must be recomputed
* - Other negative error, if the configuration is invalid without a
* fallback possibility, or the check failed for another reason
*/
int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits)
{
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
int ret;
int i;
for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
ret = intel_dp_mst_check_fec_change(state, mgr, limits);
if (ret)
return ret;
ret = intel_dp_mst_check_bw(state, mgr, mst_state,
limits);
if (ret)
return ret;
}
return 0;
}
static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
/* lowest numbered transcoder will be designated master */
crtc_state->mst_master_transcoder =
ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
return 0;
}
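/*
 * A small sketch of the master selection above (hypothetical mask): with
 * streams on transcoders B and C the mask is 0b0110, ffs() returns 2 (the
 * 1-based index of the lowest set bit), and the master transcoder becomes
 * 2 - 1 == 1, i.e. transcoder B.
 */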
/*
 * If one of the connectors in an MST stream needs a modeset, mark all CRTCs
 * that share the same MST stream as mode changed;
* intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
* a fastset when possible.
*
 * On TGL+ this is required since each stream goes through a master transcoder,
* so if the master transcoder needs modeset, all other streams in the
* topology need a modeset. All platforms need to add the atomic state
* for all streams in the topology, since a modeset on one may require
* changing the MST link BW usage of the others, which in turn needs a
* recomputation of the corresponding CRTC states.
*/
static int
intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
int ret = 0;
if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
struct intel_digital_connector_state *conn_iter_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
if (connector_iter->mst_port != connector->mst_port ||
connector_iter == connector)
continue;
conn_iter_state = intel_atomic_get_digital_connector_state(state,
connector_iter);
if (IS_ERR(conn_iter_state)) {
ret = PTR_ERR(conn_iter_state);
break;
}
if (!conn_iter_state->base.crtc)
continue;
crtc = to_intel_crtc(conn_iter_state->base.crtc);
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
break;
}
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret)
break;
crtc_state->uapi.mode_changed = true;
}
drm_connector_list_iter_end(&connector_list_iter);
return ret;
}
static int
intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_connector *intel_connector =
to_intel_connector(connector);
int ret;
ret = intel_digital_connector_atomic_check(connector, &state->base);
if (ret)
return ret;
ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
if (ret)
return ret;
if (intel_connector_needs_modeset(state, connector)) {
ret = intel_dp_tunnel_atomic_check_state(state,
intel_connector->mst_port,
intel_connector);
if (ret)
return ret;
}
return drm_dp_atomic_release_time_slots(&state->base,
&intel_connector->mst_port->mst_mgr,
intel_connector->port);
}
static void clear_act_sent(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_ACT_SENT);
}
static void wait_for_act_sent(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_ACT_SENT, 1))
drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
}
static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "active links %d\n",
intel_dp->active_mst_links);
if (intel_dp->active_mst_links == 1)
intel_dp->link_trained = false;
intel_hdcp_disable(intel_mst->connector);
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_dp_mst_topology_state *old_mst_state =
drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
struct drm_dp_mst_topology_state *new_mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
const struct drm_dp_mst_atomic_payload *old_payload =
drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
struct drm_dp_mst_atomic_payload *new_payload =
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_crtc *pipe_crtc;
bool last_mst_stream;
int i;
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
drm_WARN_ON(&dev_priv->drm,
DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
intel_crtc_vblank_off(old_pipe_crtc_state);
}
intel_disable_transcoder(old_crtc_state);
drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
clear_act_sent(encoder, old_crtc_state);
intel_de_rmw(dev_priv,
TRANS_DDI_FUNC_CTL(dev_priv, old_crtc_state->cpu_transcoder),
TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
wait_for_act_sent(encoder, old_crtc_state);
drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
old_payload, new_payload);
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
intel_dsc_disable(old_pipe_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
}
/*
* Power down mst path before disabling the port, otherwise we end
* up getting interrupts from the sink upon detecting link loss.
*/
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
false);
/*
* BSpec 4287: disable DIP after the transcoder is disabled and before
* the transcoder clock select is set to none.
*/
intel_dp_set_infoframes(&dig_port->base, false,
old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
*
* From older GENs spec: "Configure Transcoder Clock Select to direct
* no clock to the transcoder"
*/
if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL;
if (last_mst_stream)
dig_port->base.post_disable(state, &dig_port->base,
old_crtc_state, NULL);
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
intel_dp->active_mst_links);
}
static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &dig_port->dp;
if (intel_dp->active_mst_links == 0 &&
dig_port->base.post_pll_disable)
dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state);
}
static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &dig_port->dp;
if (intel_dp->active_mst_links == 0)
dig_port->base.pre_pll_enable(state, &dig_port->base,
pipe_config, NULL);
else
/*
		 * The port PLL state needs to be updated for secondary
		 * streams just as for the primary stream.
*/
intel_ddi_update_active_dpll(state, &dig_port->base,
to_intel_crtc(pipe_config->uapi.crtc));
}
static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
return intel_dp->link.mst_probed_rate == link_rate &&
intel_dp->link.mst_probed_lane_count == lane_count;
}
static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
intel_dp->link.mst_probed_rate = link_rate;
intel_dp->link.mst_probed_lane_count = lane_count;
}
static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
if (intel_mst_probed_link_params_valid(intel_dp,
crtc_state->port_clock, crtc_state->lane_count))
return;
drm_dp_mst_topology_queue_probe(&intel_dp->mst_mgr);
intel_mst_set_probed_link_params(intel_dp,
crtc_state->port_clock, crtc_state->lane_count);
}
static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
int ret;
bool first_mst_stream;
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
*/
connector->encoder = encoder;
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
drm_WARN_ON(&dev_priv->drm,
DISPLAY_VER(dev_priv) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
intel_dp->active_mst_links);
if (first_mst_stream)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
intel_dp_sink_enable_decompression(state, connector, pipe_config);
if (first_mst_stream) {
dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
intel_dp->active_mst_links++;
ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (ret < 0)
intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
/*
* Before Gen 12 this is not done as part of
* dig_port->base.pre_enable() and should be done here. For
* Gen 12+ the step in which this should be done is different for the
* first MST stream, so it's done on the DDI for the first stream and
* here for the following ones.
*/
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 clear = 0;
u32 set = 0;
if (!IS_ALDERLAKE_P(i915))
return;
if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
return;
/* Wa_14013163432:adlp */
if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
/* Wa_14014143976:adlp */
if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
if (intel_dp_is_uhbr(crtc_state))
set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
else if (crtc_state->fec_enable)
clear |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
if (crtc_state->fec_enable || intel_dp_is_uhbr(crtc_state))
set |= DP_MST_DPT_DPTP_ALIGN_WA(crtc_state->cpu_transcoder);
}
if (!clear && !set)
return;
intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
}
static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
bool first_mst_stream = intel_dp->active_mst_links == 1;
struct intel_crtc *pipe_crtc;
int ret, i;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
if (intel_dp_is_uhbr(pipe_config)) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
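	/*
	 * A short note on the split above (inferred from the code, not
	 * bspec): the VFREQ fields are 24 bits wide, while the pixel clock
	 * in Hz (e.g. 1,357,230 kHz -> 1,357,230,000 Hz, needing 31 bits)
	 * doesn't fit, so it is programmed as crtc_clock_hz & 0xffffff into
	 * VFREQLOW and crtc_clock_hz >> 24 into VFREQHIGH.
	 */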
enable_bs_jitter_was(pipe_config);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
clear_act_sent(encoder, pipe_config);
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
intel_dp->active_mst_links);
wait_for_act_sent(encoder, pipe_config);
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
ret = drm_dp_add_payload_part2(&intel_dp->mst_mgr,
drm_atomic_get_mst_payload_state(mst_state,
connector->port));
if (ret < 0)
intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
if (DISPLAY_VER(dev_priv) >= 12)
intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
FECSTALL_DIS_DPTSTREAM_DPTTG,
pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
intel_audio_sdp_split_update(pipe_config);
intel_enable_transcoder(pipe_config);
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
intel_crtc_vblank_on(pipe_crtc_state);
}
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
*pipe = intel_mst->pipe;
if (intel_mst->connector)
return true;
return false;
}
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
dig_port->base.get_config(&dig_port->base, pipe_config);
}
static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
return intel_dp_initial_fastset_check(&dig_port->base, crtc_state);
}
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
struct intel_dp *intel_dp = intel_connector->mst_port;
const struct drm_edid *drm_edid;
int ret;
if (drm_connector_is_unregistered(connector))
return intel_connector_update_modes(connector, NULL);
if (!intel_display_driver_check_access(i915))
return drm_edid_connector_add_modes(connector);
drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
ret = intel_connector_update_modes(connector, drm_edid);
drm_edid_free(drm_edid);
return ret;
}
static int
intel_dp_mst_connector_late_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
int ret;
ret = drm_dp_mst_connector_late_register(connector,
intel_connector->port);
if (ret < 0)
return ret;
ret = intel_connector_register(connector);
if (ret < 0)
drm_dp_mst_connector_early_unregister(connector,
intel_connector->port);
return ret;
}
static void
intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
intel_connector_unregister(connector);
drm_dp_mst_connector_early_unregister(connector,
intel_connector->port);
}
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_dp_mst_connector_late_register,
.early_unregister = intel_dp_mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
{
return intel_dp_mst_get_ddc_modes(connector);
}
static int
intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_port *port = intel_connector->port;
const int min_bpp = 18;
int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
bool dsc = false;
u16 dsc_max_compressed_bpp = 0;
u8 dsc_slice_count = 0;
int target_clock = mode->clock;
int num_joined_pipes;
if (drm_connector_is_unregistered(connector)) {
*status = MODE_ERROR;
return 0;
}
*status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
if (*status != MODE_OK)
return 0;
if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
*status = MODE_H_ILLEGAL;
return 0;
}
if (mode->clock < 10000) {
*status = MODE_CLOCK_LOW;
return 0;
}
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_link_data_rate(intel_dp,
max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, min_bpp);
/*
* TODO:
* - Also check if compression would allow for the mode
* - Calculate the overhead using drm_dp_bw_overhead() /
* drm_dp_bw_channel_coding_efficiency(), similarly to the
* compute config code, as drm_dp_calc_pbn_mode() doesn't
* account with all the overheads.
* - Check here and during compute config the BW reported by
* DFP_Link_Available_Payload_Bandwidth_Number (or the
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, intel_connector,
mode->hdisplay, target_clock);
max_dotclk *= num_joined_pipes;
ret = drm_modeset_lock(&mgr->base.lock, ctx);
if (ret)
return ret;
if (mode_rate > max_rate || mode->clock > max_dotclk ||
drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
return 0;
}
if (intel_dp_has_dsc(intel_connector)) {
/*
		 * TBD: pass the connector BPC; for now use U8_MAX so that the
		 * max BPC supported on the platform is picked.
*/
int pipe_bpp = intel_dp_dsc_compute_max_bpp(intel_connector, U8_MAX);
if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
intel_dp_dsc_get_max_compressed_bpp(dev_priv,
max_link_clock,
max_lanes,
target_clock,
mode->hdisplay,
num_joined_pipes,
INTEL_OUTPUT_FORMAT_RGB,
pipe_bpp, 64);
dsc_slice_count =
intel_dp_dsc_get_slice_count(intel_connector,
target_clock,
mode->hdisplay,
num_joined_pipes);
}
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
if (intel_dp_joiner_needs_dsc(dev_priv, num_joined_pipes) && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
if (mode_rate > max_rate && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
*status = intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
return 0;
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
static int
intel_dp_mst_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
if (!intel_display_device_enabled(i915))
return connector_status_disconnected;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
if (!intel_display_driver_check_access(i915))
return connector->status;
intel_dp_flush_connector_commits(intel_connector);
return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
intel_connector->port);
}
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
.detect_ctx = intel_dp_mst_detect,
};
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
drm_encoder_cleanup(encoder);
kfree(intel_mst);
}
static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
.destroy = intel_dp_mst_encoder_destroy,
};
static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
{
if (intel_attached_encoder(connector) && connector->base.state->crtc) {
enum pipe pipe;
if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
return false;
return true;
}
return false;
}
static int intel_dp_mst_add_properties(struct intel_dp *intel_dp,
struct drm_connector *connector,
const char *pathprop)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.path_property, 0);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tile_property, 0);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
/*
* Reuse the prop from the SST connector because we're
* not allowed to create new props after device registration.
*/
connector->max_bpc_property =
intel_dp->attached_connector->base.max_bpc_property;
if (connector->max_bpc_property)
drm_connector_attach_max_bpc_property(connector, 6, 12);
return drm_connector_set_path_property(connector, pathprop);
}
static void
intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
u8 dpcd_caps[DP_RECEIVER_CAP_SIZE];
if (!connector->dp.dsc_decompression_aux)
return;
if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0)
return;
intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
}
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
struct drm_dp_desc desc;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (!aux)
return false;
/*
* A logical port's OUI (at least for affected sinks) is all 0, so
* instead of that the parent port's OUI is used for identification.
*/
if (drm_dp_mst_port_is_logical(connector->port)) {
aux = drm_dp_mst_aux_for_parent(connector->port);
if (!aux)
aux = &connector->mst_port->aux;
}
if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
return false;
if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
return false;
if (!drm_dp_has_quirk(&desc,
DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
return false;
/*
* UHBR (MST sink) devices requiring this quirk don't advertise the
	 * HBLANK expansion support. Presumably they either perform HBLANK
	 * expansion internally, or are affected by this issue on modes with a
	 * short HBLANK for other reasons.
*/
if (!drm_dp_128b132b_supported(dpcd) &&
!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
return false;
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
connector->base.base.id, connector->base.name);
return true;
}
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
enum pipe pipe;
int ret;
intel_connector = intel_connector_alloc();
if (!intel_connector)
return NULL;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
drm_dp_mst_get_port_malloc(port);
intel_dp_init_modeset_retry_work(intel_connector);
intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
intel_connector->dp.dsc_hblank_expansion_quirk =
detect_dsc_hblank_expansion_quirk(intel_connector);
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
for_each_pipe(dev_priv, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
ret = drm_connector_attach_encoder(&intel_connector->base, enc);
if (ret)
goto err;
}
ret = intel_dp_mst_add_properties(intel_dp, connector, pathprop);
if (ret)
goto err;
ret = intel_dp_hdcp_init(dig_port, intel_connector);
if (ret)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
connector->name, connector->base.id);
return connector;
err:
drm_connector_cleanup(connector);
return NULL;
}
static void
intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
};
static struct intel_dp_mst_encoder *
intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
{
struct intel_dp_mst_encoder *intel_mst;
struct intel_encoder *intel_encoder;
struct drm_device *dev = dig_port->base.base.dev;
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
if (!intel_mst)
return NULL;
intel_mst->pipe = pipe;
intel_encoder = &intel_mst->base;
intel_mst->primary = dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = dig_port->base.power_domain;
intel_encoder->port = dig_port->base.port;
intel_encoder->cloneable = 0;
/*
* This is wrong, but broken userspace uses the intersection
* of possible_crtcs of all the encoders of a given connector
* to figure out which crtcs can drive said connector. What
* should be used instead is the union of possible_crtcs.
* To keep such userspace functioning we must misconfigure
* this to make sure the intersection is not empty :(
*/
intel_encoder->pipe_mask = ~0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->update_pipe = intel_ddi_update_pipe;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->audio_enable = intel_audio_codec_enable;
intel_encoder->audio_disable = intel_audio_codec_disable;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
intel_encoder->get_config = intel_dp_mst_enc_get_config;
intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
return intel_mst;
}
static bool
intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
{
struct intel_dp *intel_dp = &dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe)
intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
return true;
}
int
intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
{
return dig_port->dp.active_mst_links;
}
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
enum port port = dig_port->base.port;
int ret;
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
return 0;
if (DISPLAY_VER(i915) < 12 && port == PORT_A)
return 0;
if (DISPLAY_VER(i915) < 11 && port == PORT_E)
return 0;
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
intel_dp_create_fake_mst_encoders(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->mst_mgr.cbs = NULL;
return ret;
}
return 0;
}
bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
{
return intel_dp->mst_mgr.cbs;
}
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
struct intel_dp *intel_dp = &dig_port->dp;
if (!intel_dp_mst_source_support(intel_dp))
return;
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
intel_dp->mst_mgr.cbs = NULL;
}
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
{
return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
}
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
{
return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
}
/**
* intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
* @state: atomic state
* @connector: connector to add the state for
* @crtc: the CRTC @connector is attached to
*
* Add the MST topology state for @connector to @state.
*
* Returns 0 on success, negative error code on failure.
*/
static int
intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
struct intel_connector *connector,
struct intel_crtc *crtc)
{
struct drm_dp_mst_topology_state *mst_state;
if (!connector->mst_port)
return 0;
mst_state = drm_atomic_get_mst_topology_state(&state->base,
&connector->mst_port->mst_mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
return 0;
}
/**
* intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
* @state: atomic state
* @crtc: CRTC to add the state for
*
* Add the MST topology state for @crtc to @state.
*
* Returns 0 on success, negative error code on failure.
*/
int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_connector *_connector;
struct drm_connector_state *conn_state;
int i;
for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
struct intel_connector *connector = to_intel_connector(_connector);
int ret;
if (conn_state->crtc != &crtc->base)
continue;
ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
if (ret)
return ret;
}
return 0;
}
static struct intel_connector *
get_connector_in_state_for_crtc(struct intel_atomic_state *state,
const struct intel_crtc *crtc)
{
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_connector *_connector;
int i;
for_each_oldnew_connector_in_state(&state->base, _connector,
old_conn_state, new_conn_state, i) {
struct intel_connector *connector =
to_intel_connector(_connector);
if (old_conn_state->crtc == &crtc->base ||
new_conn_state->crtc == &crtc->base)
return connector;
}
return NULL;
}
/**
* intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
* @state: atomic state
* @crtc: CRTC for which to check the modeset requirement
*
* Check if any change in a MST topology requires a forced modeset on @crtc in
* this topology. One such change is enabling/disabling the DSC decompression
* state in the first branch device's UFP DPCD as required by one CRTC, while
 * other CRTCs in the same topology are still active, requiring a full
 * modeset on @crtc.
*/
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
const struct intel_connector *crtc_connector;
const struct drm_connector_state *conn_state;
const struct drm_connector *_connector;
int i;
if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
INTEL_OUTPUT_DP_MST))
return false;
crtc_connector = get_connector_in_state_for_crtc(state, crtc);
if (!crtc_connector)
/* None of the connectors in the topology needs modeset */
return false;
for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
const struct intel_connector *connector =
to_intel_connector(_connector);
const struct intel_crtc_state *new_crtc_state;
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc_iter;
if (connector->mst_port != crtc_connector->mst_port ||
!conn_state->crtc)
continue;
crtc_iter = to_intel_crtc(conn_state->crtc);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
if (old_crtc_state->dsc.compression_enable ==
new_crtc_state->dsc.compression_enable)
continue;
/*
* Toggling the decompression flag because of this stream in
* the first downstream branch device's UFP DPCD may reset the
* whole branch device. To avoid the reset while other streams
* are also active modeset the whole MST topology in this
* case.
*/
if (connector->dp.dsc_decompression_aux ==
&connector->mst_port->aux)
return true;
}
return false;
}
/**
* intel_dp_mst_prepare_probe - Prepare an MST link for topology probing
* @intel_dp: DP port object
*
* Prepare an MST link for topology probing, programming the target
 * link parameters to DPCD. This step is a requirement of the enumeration
* of path resources during probing.
*/
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
{
int link_rate = intel_dp_max_link_rate(intel_dp);
int lane_count = intel_dp_max_lane_count(intel_dp);
u8 rate_select;
u8 link_bw;
if (intel_dp->link_trained)
return;
if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
return;
intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);
intel_dp_link_training_set_mode(intel_dp, link_rate, false);
intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
drm_dp_enhanced_frame_cap(intel_dp->dpcd));
intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
}
/**
* intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
* @intel_dp: DP port object
*
* Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
* state. A long HPD pulse - not long enough to be detected as a disconnected
* state - could've reset the DPCD state, which requires tearing
* down/recreating the MST topology.
*
* Returns %true if the SW MST enabled and DPCD states match, %false
* otherwise.
*/
bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
int ret;
u8 val;
if (!intel_dp->is_mst)
return true;
ret = drm_dp_dpcd_readb(intel_dp->mst_mgr.aux, DP_MSTM_CTRL, &val);
/* Adjust the expected register value for SST + SideBand. */
if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
ret, val);
return false;
}
return true;
}
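/*
 * For reference, the expected DP_MSTM_CTRL value checked above is the OR of
 * three DPCD bits (per the DRM DP helper definitions):
 *
 *   DP_MST_EN (1 << 0) | DP_UP_REQ_EN (1 << 1) | DP_UPSTREAM_IS_SRC (1 << 2)
 *
 * i.e. 0x07; any other readback means the sink's MST configuration was
 * reset behind the driver's back.
 */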
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004-2009 Dominik Brodowski <[email protected]>
* (C) 2011 Thomas Renninger <[email protected]> Novell Inc.
*/
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include "cpuidle.h"
#include "cpupower_intern.h"
/*
 * helper function to check whether a file under the
 * "../cpuX/cpuidle/stateX/" dir exists.
 * For example, the ability to disable C-states was only introduced in
 * later kernel versions, so this function can be used to explicitly
 * check for that feature.
 *
 * returns 1 if the file exists, 0 otherwise.
 */
static
unsigned int cpuidle_state_file_exists(unsigned int cpu,
unsigned int idlestate,
const char *fname)
{
char path[SYSFS_PATH_MAX];
struct stat statbuf;
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s",
cpu, idlestate, fname);
if (stat(path, &statbuf) != 0)
return 0;
return 1;
}
/*
 * helper function to read a file from /sys into the given buffer.
 * fname is a relative path under the "cpuX/cpuidle/stateX/" dir.
 * Idle states are numbered starting with 0; C0 is not counted as a
 * C-state, so to get C1 info pass 0 as the idlestate param.
 */
static
unsigned int cpuidle_state_read_file(unsigned int cpu,
unsigned int idlestate,
const char *fname, char *buf,
size_t buflen)
{
char path[SYSFS_PATH_MAX];
int fd;
ssize_t numread;
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s",
cpu, idlestate, fname);
fd = open(path, O_RDONLY);
if (fd == -1)
return 0;
numread = read(fd, buf, buflen - 1);
if (numread < 1) {
close(fd);
return 0;
}
buf[numread] = '\0';
close(fd);
return (unsigned int) numread;
}
/*
 * helper function to write a new value to a /sys file.
 * fname is a relative path under the "../cpuX/cpuidle/stateY/" dir.
 *
 * Returns the number of bytes written or 0 on error
 */
static
unsigned int cpuidle_state_write_file(unsigned int cpu,
unsigned int idlestate,
const char *fname,
const char *value, size_t len)
{
char path[SYSFS_PATH_MAX];
int fd;
ssize_t numwrite;
snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s",
cpu, idlestate, fname);
fd = open(path, O_WRONLY);
if (fd == -1)
return 0;
numwrite = write(fd, value, len);
if (numwrite < 1) {
close(fd);
return 0;
}
close(fd);
return (unsigned int) numwrite;
}
/* read access to files which contain one numeric value */
enum idlestate_value {
IDLESTATE_USAGE,
IDLESTATE_POWER,
IDLESTATE_LATENCY,
IDLESTATE_RESIDENCY,
IDLESTATE_TIME,
IDLESTATE_DISABLE,
MAX_IDLESTATE_VALUE_FILES
};
static const char *idlestate_value_files[MAX_IDLESTATE_VALUE_FILES] = {
[IDLESTATE_USAGE] = "usage",
[IDLESTATE_POWER] = "power",
[IDLESTATE_LATENCY] = "latency",
[IDLESTATE_RESIDENCY] = "residency",
[IDLESTATE_TIME] = "time",
[IDLESTATE_DISABLE] = "disable",
};
static
unsigned long long cpuidle_state_get_one_value(unsigned int cpu,
unsigned int idlestate,
enum idlestate_value which)
{
unsigned long long value;
unsigned int len;
char linebuf[MAX_LINE_LEN];
char *endp;
if (which >= MAX_IDLESTATE_VALUE_FILES)
return 0;
len = cpuidle_state_read_file(cpu, idlestate,
idlestate_value_files[which],
linebuf, sizeof(linebuf));
if (len == 0)
return 0;
errno = 0;
value = strtoull(linebuf, &endp, 0);
if (endp == linebuf || errno == ERANGE)
return 0;
return value;
}
/* read access to files which contain one string */
enum idlestate_string {
IDLESTATE_DESC,
IDLESTATE_NAME,
MAX_IDLESTATE_STRING_FILES
};
static const char *idlestate_string_files[MAX_IDLESTATE_STRING_FILES] = {
[IDLESTATE_DESC] = "desc",
[IDLESTATE_NAME] = "name",
};
static char *cpuidle_state_get_one_string(unsigned int cpu,
unsigned int idlestate,
enum idlestate_string which)
{
char linebuf[MAX_LINE_LEN];
char *result;
unsigned int len;
if (which >= MAX_IDLESTATE_STRING_FILES)
return NULL;
len = cpuidle_state_read_file(cpu, idlestate,
idlestate_string_files[which],
linebuf, sizeof(linebuf));
if (len == 0)
return NULL;
result = strdup(linebuf);
if (result == NULL)
return NULL;
if (result[strlen(result) - 1] == '\n')
result[strlen(result) - 1] = '\0';
return result;
}
/*
* Returns:
* 1 if disabled
* 0 if enabled
* -1 if idlestate is not available
* -2 if disabling is not supported by the kernel
*/
int cpuidle_is_state_disabled(unsigned int cpu,
unsigned int idlestate)
{
if (cpuidle_state_count(cpu) <= idlestate)
return -1;
if (!cpuidle_state_file_exists(cpu, idlestate,
idlestate_value_files[IDLESTATE_DISABLE]))
return -2;
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_DISABLE);
}
/*
 * Pass 1 as the last argument to disable the state or 0 to enable it.
 * Returns:
 * 0 on success
 * negative values on error, for example:
 * -1 if idlestate is not available
 * -2 if disabling is not supported by the kernel
 * -3 if there is no write access to disable/enable C-states
 */
int cpuidle_state_disable(unsigned int cpu,
unsigned int idlestate,
unsigned int disable)
{
char value[SYSFS_PATH_MAX];
int bytes_written;
if (cpuidle_state_count(cpu) <= idlestate)
return -1;
if (!cpuidle_state_file_exists(cpu, idlestate,
idlestate_value_files[IDLESTATE_DISABLE]))
return -2;
snprintf(value, SYSFS_PATH_MAX, "%u", disable);
/* Write the formatted string; sizeof(disable) would pass raw buffer bytes. */
bytes_written = cpuidle_state_write_file(cpu, idlestate, "disable",
value, strlen(value));
if (bytes_written)
return 0;
return -3;
}
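/*
 * Usage sketch (hypothetical helper, not part of the library): disable
 * idle state 2 on CPU 0 and decode the return convention documented
 * above. Writing the "disable" file requires root.
 */
static inline int cpuidle_demo_disable_state(void)
{
	unsigned int cpu = 0, idlestate = 2;
	int ret = cpuidle_state_disable(cpu, idlestate, 1);

	switch (ret) {
	case 0:
		printf("state%u on cpu%u disabled\n", idlestate, cpu);
		break;
	case -1:
		printf("state%u does not exist on cpu%u\n", idlestate, cpu);
		break;
	case -2:
		printf("kernel does not support disabling C-states\n");
		break;
	default:
		printf("no write access to the disable file\n");
		break;
	}
	return ret;
}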
unsigned long cpuidle_state_latency(unsigned int cpu,
unsigned int idlestate)
{
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_LATENCY);
}
unsigned long cpuidle_state_residency(unsigned int cpu,
unsigned int idlestate)
{
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_RESIDENCY);
}
unsigned long cpuidle_state_usage(unsigned int cpu,
unsigned int idlestate)
{
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_USAGE);
}
unsigned long long cpuidle_state_time(unsigned int cpu,
unsigned int idlestate)
{
return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_TIME);
}
char *cpuidle_state_name(unsigned int cpu, unsigned int idlestate)
{
return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_NAME);
}
char *cpuidle_state_desc(unsigned int cpu, unsigned int idlestate)
{
return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_DESC);
}
/*
 * Returns the number of supported C-states of CPU core cpu.
 * Negative in error case.
 * Zero if cpuidle does not export any C-states.
 */
unsigned int cpuidle_state_count(unsigned int cpu)
{
char file[SYSFS_PATH_MAX];
struct stat statbuf;
int idlestates = 1;
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
return 0;
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
return 0;
while (stat(file, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) {
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU
"cpu%u/cpuidle/state%d", cpu, idlestates);
idlestates++;
}
idlestates--;
return idlestates;
}
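/*
 * Usage sketch (hypothetical helper, not part of the library):
 * enumerate all C-states of a CPU with the accessors above. The name
 * and desc strings are strdup()ed and must be freed by the caller.
 */
static inline void cpuidle_demo_list_states(unsigned int cpu)
{
	unsigned int i, count = cpuidle_state_count(cpu);

	for (i = 0; i < count; i++) {
		char *name = cpuidle_state_name(cpu, i);
		char *desc = cpuidle_state_desc(cpu, i);

		printf("state%u: %s (%s), latency %luus\n", i,
		       name ? name : "?", desc ? desc : "?",
		       cpuidle_state_latency(cpu, i));
		free(name);
		free(desc);
	}
}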
/* CPUidle general /sys/devices/system/cpu/cpuidle/ sysfs access ********/
/*
* helper function to read file from /sys into given buffer
* fname is a relative path under "cpu/cpuidle/" dir
*/
static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
size_t buflen)
{
char path[SYSFS_PATH_MAX];
snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
return cpupower_read_sysfs(path, buf, buflen);
}
/* read access to files which contain one string */
enum cpuidle_string {
CPUIDLE_GOVERNOR,
CPUIDLE_GOVERNOR_RO,
CPUIDLE_DRIVER,
MAX_CPUIDLE_STRING_FILES
};
static const char *cpuidle_string_files[MAX_CPUIDLE_STRING_FILES] = {
[CPUIDLE_GOVERNOR] = "current_governor",
[CPUIDLE_GOVERNOR_RO] = "current_governor_ro",
[CPUIDLE_DRIVER] = "current_driver",
};
static char *sysfs_cpuidle_get_one_string(enum cpuidle_string which)
{
char linebuf[MAX_LINE_LEN];
char *result;
unsigned int len;
if (which >= MAX_CPUIDLE_STRING_FILES)
return NULL;
len = sysfs_cpuidle_read_file(cpuidle_string_files[which],
linebuf, sizeof(linebuf));
if (len == 0)
return NULL;
result = strdup(linebuf);
if (result == NULL)
return NULL;
if (result[strlen(result) - 1] == '\n')
result[strlen(result) - 1] = '\0';
return result;
}
char *cpuidle_get_governor(void)
{
char *tmp = sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR_RO);
if (!tmp)
return sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR);
else
return tmp;
}
char *cpuidle_get_driver(void)
{
return sysfs_cpuidle_get_one_string(CPUIDLE_DRIVER);
}
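/*
 * Usage sketch (hypothetical helper, not part of the library): report
 * the global cpuidle driver and governor. Both strings are
 * heap-allocated and must be freed; cpuidle_get_governor() falls back
 * to the writable current_governor file when current_governor_ro is
 * not readable.
 */
static inline void cpuidle_demo_print_driver_info(void)
{
	char *driver = cpuidle_get_driver();
	char *governor = cpuidle_get_governor();

	printf("driver: %s, governor: %s\n",
	       driver ? driver : "unknown",
	       governor ? governor : "unknown");
	free(driver);
	free(governor);
}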
/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: exresolv - AML Interpreter object resolution
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#include <acpi/acpi.h>
#include "accommon.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exresolv")
/* Local prototypes */
static acpi_status
acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
struct acpi_walk_state *walk_state);
/*******************************************************************************
*
* FUNCTION: acpi_ex_resolve_to_value
*
* PARAMETERS: **stack_ptr - Points to entry on obj_stack, which can
* be either an (union acpi_operand_object *)
* or an acpi_handle.
* walk_state - Current method state
*
* RETURN: Status
*
* DESCRIPTION: Convert Reference objects to values
*
******************************************************************************/
acpi_status
acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
struct acpi_walk_state *walk_state)
{
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);
if (!stack_ptr || !*stack_ptr) {
ACPI_ERROR((AE_INFO, "Internal - null pointer"));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
/*
* The entity pointed to by the stack_ptr can be either
* 1) A valid union acpi_operand_object, or
* 2) A struct acpi_namespace_node (named_obj)
*/
if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_OPERAND) {
status = acpi_ex_resolve_object_to_value(stack_ptr, walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (!*stack_ptr) {
ACPI_ERROR((AE_INFO, "Internal - null pointer"));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
}
/*
* Object on the stack may have changed if acpi_ex_resolve_object_to_value()
* was called (i.e., we can't use an _else_ here.)
*/
if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_NAMED) {
status =
acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
(struct acpi_namespace_node,
stack_ptr), walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Resolved object %p\n", *stack_ptr));
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_resolve_object_to_value
*
* PARAMETERS: stack_ptr - Pointer to an internal object
* walk_state - Current method state
*
* RETURN: Status
*
* DESCRIPTION: Retrieve the value from an internal object. The Reference type
* uses the associated AML opcode to determine the value.
*
******************************************************************************/
static acpi_status
acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
union acpi_operand_object *stack_desc;
union acpi_operand_object *obj_desc = NULL;
u8 ref_type;
ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
stack_desc = *stack_ptr;
/* This is an object of type union acpi_operand_object */
switch (stack_desc->common.type) {
case ACPI_TYPE_LOCAL_REFERENCE:
ref_type = stack_desc->reference.class;
switch (ref_type) {
case ACPI_REFCLASS_LOCAL:
case ACPI_REFCLASS_ARG:
/*
* Get the local from the method's state info
* Note: this increments the local's object reference count
*/
status = acpi_ds_method_data_get_value(ref_type,
stack_desc->
reference.value,
walk_state,
&obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Arg/Local %X] ValueObj is %p\n",
stack_desc->reference.value,
obj_desc));
/*
* Now we can delete the original Reference Object and
* replace it with the resolved value
*/
acpi_ut_remove_reference(stack_desc);
*stack_ptr = obj_desc;
break;
case ACPI_REFCLASS_INDEX:
switch (stack_desc->reference.target_type) {
case ACPI_TYPE_BUFFER_FIELD:
/* Just return - do not dereference */
break;
case ACPI_TYPE_PACKAGE:
/* If method call or copy_object - do not dereference */
if ((walk_state->opcode ==
AML_INT_METHODCALL_OP)
|| (walk_state->opcode ==
AML_COPY_OBJECT_OP)) {
break;
}
/* Otherwise, dereference the package_index to a package element */
obj_desc = *stack_desc->reference.where;
if (obj_desc) {
/*
* Valid object descriptor, copy pointer to return value
* (i.e., dereference the package index)
* Delete the ref object, increment the returned object
*/
acpi_ut_add_reference(obj_desc);
*stack_ptr = obj_desc;
} else {
/*
* A NULL object descriptor means an uninitialized element of
* the package, can't dereference it
*/
ACPI_ERROR((AE_INFO,
"Attempt to dereference an Index to "
"NULL package element Idx=%p",
stack_desc));
status = AE_AML_UNINITIALIZED_ELEMENT;
}
break;
default:
/* Invalid reference object */
ACPI_ERROR((AE_INFO,
"Unknown TargetType 0x%X in Index/Reference object %p",
stack_desc->reference.target_type,
stack_desc));
status = AE_AML_INTERNAL;
break;
}
break;
case ACPI_REFCLASS_REFOF:
case ACPI_REFCLASS_DEBUG:
case ACPI_REFCLASS_TABLE:
/* Just leave the object as-is, do not dereference */
break;
case ACPI_REFCLASS_NAME: /* Reference to a named object */
/* Dereference the name */
if ((stack_desc->reference.node->type ==
ACPI_TYPE_DEVICE)
|| (stack_desc->reference.node->type ==
ACPI_TYPE_THERMAL)) {
/* These node types do not have 'real' subobjects */
*stack_ptr = (void *)stack_desc->reference.node;
} else {
/* Get the object pointed to by the namespace node */
*stack_ptr =
(stack_desc->reference.node)->object;
acpi_ut_add_reference(*stack_ptr);
}
acpi_ut_remove_reference(stack_desc);
break;
default:
ACPI_ERROR((AE_INFO,
"Unknown Reference type 0x%X in %p",
ref_type, stack_desc));
status = AE_AML_INTERNAL;
break;
}
break;
case ACPI_TYPE_BUFFER:
status = acpi_ds_get_buffer_arguments(stack_desc);
break;
case ACPI_TYPE_PACKAGE:
status = acpi_ds_get_package_arguments(stack_desc);
break;
case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"FieldRead SourceDesc=%p Type=%X\n",
stack_desc, stack_desc->common.type));
status =
acpi_ex_read_data_from_field(walk_state, stack_desc,
&obj_desc);
/* Remove a reference to the original operand, then override */
acpi_ut_remove_reference(*stack_ptr);
*stack_ptr = (void *)obj_desc;
break;
default:
break;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_resolve_multiple
*
* PARAMETERS: walk_state - Current state (contains AML opcode)
* operand - Starting point for resolution
* return_type - Where the object type is returned
* return_desc - Where the resolved object is returned
*
* RETURN: Status
*
* DESCRIPTION: Return the base object and type. Traverse a reference list if
* necessary to get to the base object.
*
******************************************************************************/
acpi_status
acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
union acpi_operand_object *operand,
acpi_object_type *return_type,
union acpi_operand_object **return_desc)
{
union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
struct acpi_namespace_node *node =
ACPI_CAST_PTR(struct acpi_namespace_node, operand);
acpi_object_type type;
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);
/* Operand can be either a namespace node or an operand descriptor */
switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
case ACPI_DESC_TYPE_OPERAND:
type = obj_desc->common.type;
break;
case ACPI_DESC_TYPE_NAMED:
type = ((struct acpi_namespace_node *)obj_desc)->type;
obj_desc = acpi_ns_get_attached_object(node);
/* If we had an Alias node, use the attached object for type info */
if (type == ACPI_TYPE_LOCAL_ALIAS) {
type = ((struct acpi_namespace_node *)obj_desc)->type;
obj_desc = acpi_ns_get_attached_object((struct
acpi_namespace_node
*)obj_desc);
}
switch (type) {
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_THERMAL:
/* These types have no attached subobject */
break;
default:
/* All other types require a subobject */
if (!obj_desc) {
ACPI_ERROR((AE_INFO,
"[%4.4s] Node is unresolved or uninitialized",
acpi_ut_get_node_name(node)));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
}
break;
}
break;
default:
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
/* If type is anything other than a reference, we are done */
if (type != ACPI_TYPE_LOCAL_REFERENCE) {
goto exit;
}
/*
* For reference objects created via the ref_of, Index, or Load/load_table
* operators, we need to get to the base object (as per the ACPI
* specification of the object_type and size_of operators). This means
* traversing the list of possibly many nested references.
*/
while (obj_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
switch (obj_desc->reference.class) {
case ACPI_REFCLASS_REFOF:
case ACPI_REFCLASS_NAME:
/* Dereference the reference pointer */
if (obj_desc->reference.class == ACPI_REFCLASS_REFOF) {
node = obj_desc->reference.object;
} else { /* AML_INT_NAMEPATH_OP */
node = obj_desc->reference.node;
}
/* All "References" point to a NS node */
if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
ACPI_DESC_TYPE_NAMED) {
ACPI_ERROR((AE_INFO,
"Not a namespace node %p [%s]",
node,
acpi_ut_get_descriptor_name(node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
/* Get the attached object */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
/* No object, use the NS node type */
type = acpi_ns_get_type(node);
goto exit;
}
/* Check for circular references */
if (obj_desc == operand) {
return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
}
break;
case ACPI_REFCLASS_INDEX:
/* Get the type of this reference (index into another object) */
type = obj_desc->reference.target_type;
if (type != ACPI_TYPE_PACKAGE) {
goto exit;
}
/*
* The main object is a package, we want to get the type
* of the individual package element that is referenced by
* the index.
*
* This could of course in turn be another reference object.
*/
obj_desc = *(obj_desc->reference.where);
if (!obj_desc) {
/* NULL package elements are allowed */
type = 0; /* Uninitialized */
goto exit;
}
break;
case ACPI_REFCLASS_TABLE:
type = ACPI_TYPE_DDB_HANDLE;
goto exit;
case ACPI_REFCLASS_LOCAL:
case ACPI_REFCLASS_ARG:
if (return_desc) {
status =
acpi_ds_method_data_get_value(obj_desc->
reference.
class,
obj_desc->
reference.
value,
walk_state,
&obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
acpi_ut_remove_reference(obj_desc);
} else {
status =
acpi_ds_method_data_get_node(obj_desc->
reference.
class,
obj_desc->
reference.
value,
walk_state,
&node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
type = ACPI_TYPE_ANY;
goto exit;
}
}
break;
case ACPI_REFCLASS_DEBUG:
/* The Debug Object is of type "DebugObject" */
type = ACPI_TYPE_DEBUG_OBJECT;
goto exit;
default:
ACPI_ERROR((AE_INFO,
"Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
}
/*
* Now we are guaranteed to have an object that has not been created
* via the ref_of or Index operators.
*/
type = obj_desc->common.type;
exit:
/* Convert internal types to external types */
switch (type) {
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
type = ACPI_TYPE_FIELD_UNIT;
break;
case ACPI_TYPE_LOCAL_SCOPE:
/* Per ACPI Specification, Scope is untyped */
type = ACPI_TYPE_ANY;
break;
default:
/* No change to Type required */
break;
}
*return_type = type;
if (return_desc) {
*return_desc = obj_desc;
}
return_ACPI_STATUS(AE_OK);
}
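/*
 * Illustrative ASL fragment (not from this file) showing the kind of
 * reference chain acpi_ex_resolve_multiple() walks when evaluating
 * object_type/size_of:
 *
 *	Name (PKG0, Package () { 0x1234 })
 *	Local0 = RefOf (PKG0)       // ACPI_REFCLASS_REFOF -> NS node
 *	Local1 = Index (PKG0, 0)    // ACPI_REFCLASS_INDEX -> element
 *
 * object_type (Local1) must report the type of the referenced package
 * element, so the interpreter follows the Index reference down to the
 * Integer before converting the internal type to an external one.
 */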
/*
* Cryptographic API.
*
* Khazad Algorithm
*
* The Khazad algorithm was developed by Paulo S. L. M. Barreto and
* Vincent Rijmen. It was a finalist in the NESSIE encryption contest.
*
* The original authors have disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* By Aaron Grothe [email protected], August 1, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16
#define KHAZAD_BLOCK_SIZE 8
#define KHAZAD_ROUNDS 8
struct khazad_ctx {
u64 E[KHAZAD_ROUNDS + 1];
u64 D[KHAZAD_ROUNDS + 1];
};
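/*
 * How these tables are used (illustrative sketch; tables T6/T7 and the
 * key schedule continue below this excerpt): each inner round replaces
 * the 64-bit state with eight byte-indexed lookups XORed with a round
 * key, fusing the S-box with the linear diffusion layer (the final
 * round keeps only the S-box part of each table entry):
 *
 *	state = T0[(state >> 56) & 0xff] ^ T1[(state >> 48) & 0xff] ^
 *		T2[(state >> 40) & 0xff] ^ T3[(state >> 32) & 0xff] ^
 *		T4[(state >> 24) & 0xff] ^ T5[(state >> 16) & 0xff] ^
 *		T6[(state >>  8) & 0xff] ^ T7[state & 0xff] ^ E[r];
 *
 * E[] holds the encryption round keys; D[] holds the matching
 * decryption round keys, so the same loop serves both directions.
 */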
static const u64 T0[256] = {
0xbad3d268bbb96a01ULL, 0x54fc4d19e59a66b1ULL, 0x2f71bc93e26514cdULL,
0x749ccdb925871b51ULL, 0x53f55102f7a257a4ULL, 0xd3686bb8d0d6be03ULL,
0xd26b6fbdd6deb504ULL, 0x4dd72964b35285feULL, 0x50f05d0dfdba4aadULL,
0xace98a26cf09e063ULL, 0x8d8a0e83091c9684ULL, 0xbfdcc679a5914d1aULL,
0x7090ddad3da7374dULL, 0x52f65507f1aa5ca3ULL, 0x9ab352c87ba417e1ULL,
0x4cd42d61b55a8ef9ULL, 0xea238f65460320acULL, 0xd56273a6c4e68411ULL,
0x97a466f155cc68c2ULL, 0xd16e63b2dcc6a80dULL, 0x3355ccffaa85d099ULL,
0x51f35908fbb241aaULL, 0x5bed712ac7e20f9cULL, 0xa6f7a204f359ae55ULL,
0xde7f5f81febec120ULL, 0x48d83d75ad7aa2e5ULL, 0xa8e59a32d729cc7fULL,
0x99b65ec771bc0ae8ULL, 0xdb704b90e096e63bULL, 0x3256c8faac8ddb9eULL,
0xb7c4e65195d11522ULL, 0xfc19d72b32b3aaceULL, 0xe338ab48704b7393ULL,
0x9ebf42dc63843bfdULL, 0x91ae7eef41fc52d0ULL, 0x9bb056cd7dac1ce6ULL,
0xe23baf4d76437894ULL, 0xbbd0d66dbdb16106ULL, 0x41c319589b32f1daULL,
0x6eb2a5cb7957e517ULL, 0xa5f2ae0bf941b35cULL, 0xcb400bc08016564bULL,
0x6bbdb1da677fc20cULL, 0x95a26efb59dc7eccULL, 0xa1febe1fe1619f40ULL,
0xf308eb1810cbc3e3ULL, 0xb1cefe4f81e12f30ULL, 0x0206080a0c10160eULL,
0xcc4917db922e675eULL, 0xc45137f3a26e3f66ULL, 0x1d2774694ee8cf53ULL,
0x143c504478a09c6cULL, 0xc3582be8b0560e73ULL, 0x63a591f2573f9a34ULL,
0xda734f95e69eed3cULL, 0x5de76934d3d2358eULL, 0x5fe1613edfc22380ULL,
0xdc79578bf2aed72eULL, 0x7d87e99413cf486eULL, 0xcd4a13de94266c59ULL,
0x7f81e19e1fdf5e60ULL, 0x5aee752fc1ea049bULL, 0x6cb4adc17547f319ULL,
0x5ce46d31d5da3e89ULL, 0xf704fb0c08ebefffULL, 0x266a98bed42d47f2ULL,
0xff1cdb2438abb7c7ULL, 0xed2a937e543b11b9ULL, 0xe825876f4a1336a2ULL,
0x9dba4ed3699c26f4ULL, 0x6fb1a1ce7f5fee10ULL, 0x8e8f028c03048b8dULL,
0x192b647d56c8e34fULL, 0xa0fdba1ae7699447ULL, 0xf00de7171ad3deeaULL,
0x89861e97113cba98ULL, 0x0f113c332278692dULL, 0x07091c1b12383115ULL,
0xafec8629c511fd6aULL, 0xfb10cb30208b9bdbULL, 0x0818202830405838ULL,
0x153f54417ea8976bULL, 0x0d1734392e687f23ULL, 0x040c101418202c1cULL,
0x0103040506080b07ULL, 0x64ac8de94507ab21ULL, 0xdf7c5b84f8b6ca27ULL,
0x769ac5b329970d5fULL, 0x798bf9800bef6472ULL, 0xdd7a538ef4a6dc29ULL,
0x3d47f4c98ef5b2b3ULL, 0x163a584e74b08a62ULL, 0x3f41fcc382e5a4bdULL,
0x3759dcebb2a5fc85ULL, 0x6db7a9c4734ff81eULL, 0x3848e0d890dd95a8ULL,
0xb9d6de67b1a17708ULL, 0x7395d1a237bf2a44ULL, 0xe926836a4c1b3da5ULL,
0x355fd4e1beb5ea8bULL, 0x55ff491ce3926db6ULL, 0x7193d9a83baf3c4aULL,
0x7b8df18a07ff727cULL, 0x8c890a860f149d83ULL, 0x7296d5a731b72143ULL,
0x88851a921734b19fULL, 0xf607ff090ee3e4f8ULL, 0x2a7ea882fc4d33d6ULL,
0x3e42f8c684edafbaULL, 0x5ee2653bd9ca2887ULL, 0x27699cbbd2254cf5ULL,
0x46ca0543890ac0cfULL, 0x0c14303c28607424ULL, 0x65af89ec430fa026ULL,
0x68b8bdd56d67df05ULL, 0x61a399f85b2f8c3aULL, 0x03050c0f0a181d09ULL,
0xc15e23e2bc46187dULL, 0x57f94116ef827bb8ULL, 0xd6677fa9cefe9918ULL,
0xd976439aec86f035ULL, 0x58e87d25cdfa1295ULL, 0xd875479fea8efb32ULL,
0x66aa85e34917bd2fULL, 0xd7647bacc8f6921fULL, 0x3a4ee8d29ccd83a6ULL,
0xc84507cf8a0e4b42ULL, 0x3c44f0cc88fdb9b4ULL, 0xfa13cf35268390dcULL,
0x96a762f453c463c5ULL, 0xa7f4a601f551a552ULL, 0x98b55ac277b401efULL,
0xec29977b52331abeULL, 0xb8d5da62b7a97c0fULL, 0xc7543bfca876226fULL,
0xaeef822cc319f66dULL, 0x69bbb9d06b6fd402ULL, 0x4bdd317aa762bfecULL,
0xabe0963ddd31d176ULL, 0xa9e69e37d121c778ULL, 0x67a981e64f1fb628ULL,
0x0a1e28223c504e36ULL, 0x47c901468f02cbc8ULL, 0xf20bef1d16c3c8e4ULL,
0xb5c2ee5b99c1032cULL, 0x226688aacc0d6beeULL, 0xe532b356647b4981ULL,
0xee2f9f715e230cb0ULL, 0xbedfc27ca399461dULL, 0x2b7dac87fa4538d1ULL,
0x819e3ebf217ce2a0ULL, 0x1236485a6c90a67eULL, 0x839836b52d6cf4aeULL,
0x1b2d6c775ad8f541ULL, 0x0e1238362470622aULL, 0x23658cafca0560e9ULL,
0xf502f30604fbf9f1ULL, 0x45cf094c8312ddc6ULL, 0x216384a5c61576e7ULL,
0xce4f1fd19e3e7150ULL, 0x49db3970ab72a9e2ULL, 0x2c74b09ce87d09c4ULL,
0xf916c33a2c9b8dd5ULL, 0xe637bf596e635488ULL, 0xb6c7e25493d91e25ULL,
0x2878a088f05d25d8ULL, 0x17395c4b72b88165ULL, 0x829b32b02b64ffa9ULL,
0x1a2e68725cd0fe46ULL, 0x8b80169d1d2cac96ULL, 0xfe1fdf213ea3bcc0ULL,
0x8a8312981b24a791ULL, 0x091b242d3648533fULL, 0xc94603ca8c064045ULL,
0x879426a1354cd8b2ULL, 0x4ed2256bb94a98f7ULL, 0xe13ea3427c5b659dULL,
0x2e72b896e46d1fcaULL, 0xe431b75362734286ULL, 0xe03da7477a536e9aULL,
0xeb208b60400b2babULL, 0x90ad7aea47f459d7ULL, 0xa4f1aa0eff49b85bULL,
0x1e22786644f0d25aULL, 0x85922eab395ccebcULL, 0x60a09dfd5d27873dULL,
0x0000000000000000ULL, 0x256f94b1de355afbULL, 0xf401f70302f3f2f6ULL,
0xf10ee3121cdbd5edULL, 0x94a16afe5fd475cbULL, 0x0b1d2c273a584531ULL,
0xe734bb5c686b5f8fULL, 0x759fc9bc238f1056ULL, 0xef2c9b74582b07b7ULL,
0x345cd0e4b8bde18cULL, 0x3153c4f5a695c697ULL, 0xd46177a3c2ee8f16ULL,
0xd06d67b7dacea30aULL, 0x869722a43344d3b5ULL, 0x7e82e59b19d75567ULL,
0xadea8e23c901eb64ULL, 0xfd1ad32e34bba1c9ULL, 0x297ba48df6552edfULL,
0x3050c0f0a09dcd90ULL, 0x3b4decd79ac588a1ULL, 0x9fbc46d9658c30faULL,
0xf815c73f2a9386d2ULL, 0xc6573ff9ae7e2968ULL, 0x13354c5f6a98ad79ULL,
0x060a181e14303a12ULL, 0x050f14111e28271bULL, 0xc55233f6a4663461ULL,
0x113344556688bb77ULL, 0x7799c1b62f9f0658ULL, 0x7c84ed9115c74369ULL,
0x7a8ef58f01f7797bULL, 0x7888fd850de76f75ULL, 0x365ad8eeb4adf782ULL,
0x1c24706c48e0c454ULL, 0x394be4dd96d59eafULL, 0x59eb7920cbf21992ULL,
0x1828607850c0e848ULL, 0x56fa4513e98a70bfULL, 0xb3c8f6458df1393eULL,
0xb0cdfa4a87e92437ULL, 0x246c90b4d83d51fcULL, 0x206080a0c01d7de0ULL,
0xb2cbf2408bf93239ULL, 0x92ab72e04be44fd9ULL, 0xa3f8b615ed71894eULL,
0xc05d27e7ba4e137aULL, 0x44cc0d49851ad6c1ULL, 0x62a695f751379133ULL,
0x103040506080b070ULL, 0xb4c1ea5e9fc9082bULL, 0x84912aae3f54c5bbULL,
0x43c511529722e7d4ULL, 0x93a876e54dec44deULL, 0xc25b2fedb65e0574ULL,
0x4ade357fa16ab4ebULL, 0xbddace73a9815b14ULL, 0x8f8c0689050c808aULL,
0x2d77b499ee7502c3ULL, 0xbcd9ca76af895013ULL, 0x9cb94ad66f942df3ULL,
0x6abeb5df6177c90bULL, 0x40c01d5d9d3afaddULL, 0xcf4c1bd498367a57ULL,
0xa2fbb210eb798249ULL, 0x809d3aba2774e9a7ULL, 0x4fd1216ebf4293f0ULL,
0x1f217c6342f8d95dULL, 0xca430fc5861e5d4cULL, 0xaae39238db39da71ULL,
0x42c61557912aecd3ULL
};
static const u64 T1[256] = {
0xd3ba68d2b9bb016aULL, 0xfc54194d9ae5b166ULL, 0x712f93bc65e2cd14ULL,
0x9c74b9cd8725511bULL, 0xf5530251a2f7a457ULL, 0x68d3b86bd6d003beULL,
0x6bd2bd6fded604b5ULL, 0xd74d642952b3fe85ULL, 0xf0500d5dbafdad4aULL,
0xe9ac268a09cf63e0ULL, 0x8a8d830e1c098496ULL, 0xdcbf79c691a51a4dULL,
0x9070addda73d4d37ULL, 0xf6520755aaf1a35cULL, 0xb39ac852a47be117ULL,
0xd44c612d5ab5f98eULL, 0x23ea658f0346ac20ULL, 0x62d5a673e6c41184ULL,
0xa497f166cc55c268ULL, 0x6ed1b263c6dc0da8ULL, 0x5533ffcc85aa99d0ULL,
0xf3510859b2fbaa41ULL, 0xed5b2a71e2c79c0fULL, 0xf7a604a259f355aeULL,
0x7fde815fbefe20c1ULL, 0xd848753d7aade5a2ULL, 0xe5a8329a29d77fccULL,
0xb699c75ebc71e80aULL, 0x70db904b96e03be6ULL, 0x5632fac88dac9edbULL,
0xc4b751e6d1952215ULL, 0x19fc2bd7b332ceaaULL, 0x38e348ab4b709373ULL,
0xbf9edc428463fd3bULL, 0xae91ef7efc41d052ULL, 0xb09bcd56ac7de61cULL,
0x3be24daf43769478ULL, 0xd0bb6dd6b1bd0661ULL, 0xc3415819329bdaf1ULL,
0xb26ecba5577917e5ULL, 0xf2a50bae41f95cb3ULL, 0x40cbc00b16804b56ULL,
0xbd6bdab17f670cc2ULL, 0xa295fb6edc59cc7eULL, 0xfea11fbe61e1409fULL,
0x08f318ebcb10e3c3ULL, 0xceb14ffee181302fULL, 0x06020a08100c0e16ULL,
0x49ccdb172e925e67ULL, 0x51c4f3376ea2663fULL, 0x271d6974e84e53cfULL,
0x3c144450a0786c9cULL, 0x58c3e82b56b0730eULL, 0xa563f2913f57349aULL,
0x73da954f9ee63cedULL, 0xe75d3469d2d38e35ULL, 0xe15f3e61c2df8023ULL,
0x79dc8b57aef22ed7ULL, 0x877d94e9cf136e48ULL, 0x4acdde132694596cULL,
0x817f9ee1df1f605eULL, 0xee5a2f75eac19b04ULL, 0xb46cc1ad477519f3ULL,
0xe45c316ddad5893eULL, 0x04f70cfbeb08ffefULL, 0x6a26be982dd4f247ULL,
0x1cff24dbab38c7b7ULL, 0x2aed7e933b54b911ULL, 0x25e86f87134aa236ULL,
0xba9dd34e9c69f426ULL, 0xb16fcea15f7f10eeULL, 0x8f8e8c0204038d8bULL,
0x2b197d64c8564fe3ULL, 0xfda01aba69e74794ULL, 0x0df017e7d31aeadeULL,
0x8689971e3c1198baULL, 0x110f333c78222d69ULL, 0x09071b1c38121531ULL,
0xecaf298611c56afdULL, 0x10fb30cb8b20db9bULL, 0x1808282040303858ULL,
0x3f154154a87e6b97ULL, 0x170d3934682e237fULL, 0x0c04141020181c2cULL,
0x030105040806070bULL, 0xac64e98d074521abULL, 0x7cdf845bb6f827caULL,
0x9a76b3c597295f0dULL, 0x8b7980f9ef0b7264ULL, 0x7add8e53a6f429dcULL,
0x473dc9f4f58eb3b2ULL, 0x3a164e58b074628aULL, 0x413fc3fce582bda4ULL,
0x5937ebdca5b285fcULL, 0xb76dc4a94f731ef8ULL, 0x4838d8e0dd90a895ULL,
0xd6b967dea1b10877ULL, 0x9573a2d1bf37442aULL, 0x26e96a831b4ca53dULL,
0x5f35e1d4b5be8beaULL, 0xff551c4992e3b66dULL, 0x9371a8d9af3b4a3cULL,
0x8d7b8af1ff077c72ULL, 0x898c860a140f839dULL, 0x9672a7d5b7314321ULL,
0x8588921a34179fb1ULL, 0x07f609ffe30ef8e4ULL, 0x7e2a82a84dfcd633ULL,
0x423ec6f8ed84baafULL, 0xe25e3b65cad98728ULL, 0x6927bb9c25d2f54cULL,
0xca4643050a89cfc0ULL, 0x140c3c3060282474ULL, 0xaf65ec890f4326a0ULL,
0xb868d5bd676d05dfULL, 0xa361f8992f5b3a8cULL, 0x05030f0c180a091dULL,
0x5ec1e22346bc7d18ULL, 0xf957164182efb87bULL, 0x67d6a97ffece1899ULL,
0x76d99a4386ec35f0ULL, 0xe858257dfacd9512ULL, 0x75d89f478eea32fbULL,
0xaa66e38517492fbdULL, 0x64d7ac7bf6c81f92ULL, 0x4e3ad2e8cd9ca683ULL,
0x45c8cf070e8a424bULL, 0x443cccf0fd88b4b9ULL, 0x13fa35cf8326dc90ULL,
0xa796f462c453c563ULL, 0xf4a701a651f552a5ULL, 0xb598c25ab477ef01ULL,
0x29ec7b973352be1aULL, 0xd5b862daa9b70f7cULL, 0x54c7fc3b76a86f22ULL,
0xefae2c8219c36df6ULL, 0xbb69d0b96f6b02d4ULL, 0xdd4b7a3162a7ecbfULL,
0xe0ab3d9631dd76d1ULL, 0xe6a9379e21d178c7ULL, 0xa967e6811f4f28b6ULL,
0x1e0a2228503c364eULL, 0xc9474601028fc8cbULL, 0x0bf21defc316e4c8ULL,
0xc2b55beec1992c03ULL, 0x6622aa880dccee6bULL, 0x32e556b37b648149ULL,
0x2fee719f235eb00cULL, 0xdfbe7cc299a31d46ULL, 0x7d2b87ac45fad138ULL,
0x9e81bf3e7c21a0e2ULL, 0x36125a48906c7ea6ULL, 0x9883b5366c2daef4ULL,
0x2d1b776cd85a41f5ULL, 0x120e363870242a62ULL, 0x6523af8c05cae960ULL,
0x02f506f3fb04f1f9ULL, 0xcf454c091283c6ddULL, 0x6321a58415c6e776ULL,
0x4fced11f3e9e5071ULL, 0xdb49703972abe2a9ULL, 0x742c9cb07de8c409ULL,
0x16f93ac39b2cd58dULL, 0x37e659bf636e8854ULL, 0xc7b654e2d993251eULL,
0x782888a05df0d825ULL, 0x39174b5cb8726581ULL, 0x9b82b032642ba9ffULL,
0x2e1a7268d05c46feULL, 0x808b9d162c1d96acULL, 0x1ffe21dfa33ec0bcULL,
0x838a9812241b91a7ULL, 0x1b092d2448363f53ULL, 0x46c9ca03068c4540ULL,
0x9487a1264c35b2d8ULL, 0xd24e6b254ab9f798ULL, 0x3ee142a35b7c9d65ULL,
0x722e96b86de4ca1fULL, 0x31e453b773628642ULL, 0x3de047a7537a9a6eULL,
0x20eb608b0b40ab2bULL, 0xad90ea7af447d759ULL, 0xf1a40eaa49ff5bb8ULL,
0x221e6678f0445ad2ULL, 0x9285ab2e5c39bcceULL, 0xa060fd9d275d3d87ULL,
0x0000000000000000ULL, 0x6f25b19435defb5aULL, 0x01f403f7f302f6f2ULL,
0x0ef112e3db1cedd5ULL, 0xa194fe6ad45fcb75ULL, 0x1d0b272c583a3145ULL,
0x34e75cbb6b688f5fULL, 0x9f75bcc98f235610ULL, 0x2cef749b2b58b707ULL,
0x5c34e4d0bdb88ce1ULL, 0x5331f5c495a697c6ULL, 0x61d4a377eec2168fULL,
0x6dd0b767ceda0aa3ULL, 0x9786a4224433b5d3ULL, 0x827e9be5d7196755ULL,
0xeaad238e01c964ebULL, 0x1afd2ed3bb34c9a1ULL, 0x7b298da455f6df2eULL,
0x5030f0c09da090cdULL, 0x4d3bd7ecc59aa188ULL, 0xbc9fd9468c65fa30ULL,
0x15f83fc7932ad286ULL, 0x57c6f93f7eae6829ULL, 0x35135f4c986a79adULL,
0x0a061e183014123aULL, 0x0f051114281e1b27ULL, 0x52c5f63366a46134ULL,
0x33115544886677bbULL, 0x9977b6c19f2f5806ULL, 0x847c91edc7156943ULL,
0x8e7a8ff5f7017b79ULL, 0x887885fde70d756fULL, 0x5a36eed8adb482f7ULL,
0x241c6c70e04854c4ULL, 0x4b39dde4d596af9eULL, 0xeb592079f2cb9219ULL,
0x28187860c05048e8ULL, 0xfa5613458ae9bf70ULL, 0xc8b345f6f18d3e39ULL,
0xcdb04afae9873724ULL, 0x6c24b4903dd8fc51ULL, 0x6020a0801dc0e07dULL,
0xcbb240f2f98b3932ULL, 0xab92e072e44bd94fULL, 0xf8a315b671ed4e89ULL,
0x5dc0e7274eba7a13ULL, 0xcc44490d1a85c1d6ULL, 0xa662f79537513391ULL,
0x30105040806070b0ULL, 0xc1b45eeac99f2b08ULL, 0x9184ae2a543fbbc5ULL,
0xc54352112297d4e7ULL, 0xa893e576ec4dde44ULL, 0x5bc2ed2f5eb67405ULL,
0xde4a7f356aa1ebb4ULL, 0xdabd73ce81a9145bULL, 0x8c8f89060c058a80ULL,
0x772d99b475eec302ULL, 0xd9bc76ca89af1350ULL, 0xb99cd64a946ff32dULL,
0xbe6adfb577610bc9ULL, 0xc0405d1d3a9dddfaULL, 0x4ccfd41b3698577aULL,
0xfba210b279eb4982ULL, 0x9d80ba3a7427a7e9ULL, 0xd14f6e2142bff093ULL,
0x211f637cf8425dd9ULL, 0x43cac50f1e864c5dULL, 0xe3aa389239db71daULL,
0xc64257152a91d3ecULL
};
static const u64 T2[256] = {
0xd268bad36a01bbb9ULL, 0x4d1954fc66b1e59aULL, 0xbc932f7114cde265ULL,
0xcdb9749c1b512587ULL, 0x510253f557a4f7a2ULL, 0x6bb8d368be03d0d6ULL,
0x6fbdd26bb504d6deULL, 0x29644dd785feb352ULL, 0x5d0d50f04aadfdbaULL,
0x8a26ace9e063cf09ULL, 0x0e838d8a9684091cULL, 0xc679bfdc4d1aa591ULL,
0xddad7090374d3da7ULL, 0x550752f65ca3f1aaULL, 0x52c89ab317e17ba4ULL,
0x2d614cd48ef9b55aULL, 0x8f65ea2320ac4603ULL, 0x73a6d5628411c4e6ULL,
0x66f197a468c255ccULL, 0x63b2d16ea80ddcc6ULL, 0xccff3355d099aa85ULL,
0x590851f341aafbb2ULL, 0x712a5bed0f9cc7e2ULL, 0xa204a6f7ae55f359ULL,
0x5f81de7fc120febeULL, 0x3d7548d8a2e5ad7aULL, 0x9a32a8e5cc7fd729ULL,
0x5ec799b60ae871bcULL, 0x4b90db70e63be096ULL, 0xc8fa3256db9eac8dULL,
0xe651b7c4152295d1ULL, 0xd72bfc19aace32b3ULL, 0xab48e3387393704bULL,
0x42dc9ebf3bfd6384ULL, 0x7eef91ae52d041fcULL, 0x56cd9bb01ce67dacULL,
0xaf4de23b78947643ULL, 0xd66dbbd06106bdb1ULL, 0x195841c3f1da9b32ULL,
0xa5cb6eb2e5177957ULL, 0xae0ba5f2b35cf941ULL, 0x0bc0cb40564b8016ULL,
0xb1da6bbdc20c677fULL, 0x6efb95a27ecc59dcULL, 0xbe1fa1fe9f40e161ULL,
0xeb18f308c3e310cbULL, 0xfe4fb1ce2f3081e1ULL, 0x080a0206160e0c10ULL,
0x17dbcc49675e922eULL, 0x37f3c4513f66a26eULL, 0x74691d27cf534ee8ULL,
0x5044143c9c6c78a0ULL, 0x2be8c3580e73b056ULL, 0x91f263a59a34573fULL,
0x4f95da73ed3ce69eULL, 0x69345de7358ed3d2ULL, 0x613e5fe12380dfc2ULL,
0x578bdc79d72ef2aeULL, 0xe9947d87486e13cfULL, 0x13decd4a6c599426ULL,
0xe19e7f815e601fdfULL, 0x752f5aee049bc1eaULL, 0xadc16cb4f3197547ULL,
0x6d315ce43e89d5daULL, 0xfb0cf704efff08ebULL, 0x98be266a47f2d42dULL,
0xdb24ff1cb7c738abULL, 0x937eed2a11b9543bULL, 0x876fe82536a24a13ULL,
0x4ed39dba26f4699cULL, 0xa1ce6fb1ee107f5fULL, 0x028c8e8f8b8d0304ULL,
0x647d192be34f56c8ULL, 0xba1aa0fd9447e769ULL, 0xe717f00ddeea1ad3ULL,
0x1e978986ba98113cULL, 0x3c330f11692d2278ULL, 0x1c1b070931151238ULL,
0x8629afecfd6ac511ULL, 0xcb30fb109bdb208bULL, 0x2028081858383040ULL,
0x5441153f976b7ea8ULL, 0x34390d177f232e68ULL, 0x1014040c2c1c1820ULL,
0x040501030b070608ULL, 0x8de964acab214507ULL, 0x5b84df7cca27f8b6ULL,
0xc5b3769a0d5f2997ULL, 0xf980798b64720befULL, 0x538edd7adc29f4a6ULL,
0xf4c93d47b2b38ef5ULL, 0x584e163a8a6274b0ULL, 0xfcc33f41a4bd82e5ULL,
0xdceb3759fc85b2a5ULL, 0xa9c46db7f81e734fULL, 0xe0d8384895a890ddULL,
0xde67b9d67708b1a1ULL, 0xd1a273952a4437bfULL, 0x836ae9263da54c1bULL,
0xd4e1355fea8bbeb5ULL, 0x491c55ff6db6e392ULL, 0xd9a871933c4a3bafULL,
0xf18a7b8d727c07ffULL, 0x0a868c899d830f14ULL, 0xd5a77296214331b7ULL,
0x1a928885b19f1734ULL, 0xff09f607e4f80ee3ULL, 0xa8822a7e33d6fc4dULL,
0xf8c63e42afba84edULL, 0x653b5ee22887d9caULL, 0x9cbb27694cf5d225ULL,
0x054346cac0cf890aULL, 0x303c0c1474242860ULL, 0x89ec65afa026430fULL,
0xbdd568b8df056d67ULL, 0x99f861a38c3a5b2fULL, 0x0c0f03051d090a18ULL,
0x23e2c15e187dbc46ULL, 0x411657f97bb8ef82ULL, 0x7fa9d6679918cefeULL,
0x439ad976f035ec86ULL, 0x7d2558e81295cdfaULL, 0x479fd875fb32ea8eULL,
0x85e366aabd2f4917ULL, 0x7bacd764921fc8f6ULL, 0xe8d23a4e83a69ccdULL,
0x07cfc8454b428a0eULL, 0xf0cc3c44b9b488fdULL, 0xcf35fa1390dc2683ULL,
0x62f496a763c553c4ULL, 0xa601a7f4a552f551ULL, 0x5ac298b501ef77b4ULL,
0x977bec291abe5233ULL, 0xda62b8d57c0fb7a9ULL, 0x3bfcc754226fa876ULL,
0x822caeeff66dc319ULL, 0xb9d069bbd4026b6fULL, 0x317a4bddbfeca762ULL,
0x963dabe0d176dd31ULL, 0x9e37a9e6c778d121ULL, 0x81e667a9b6284f1fULL,
0x28220a1e4e363c50ULL, 0x014647c9cbc88f02ULL, 0xef1df20bc8e416c3ULL,
0xee5bb5c2032c99c1ULL, 0x88aa22666beecc0dULL, 0xb356e5324981647bULL,
0x9f71ee2f0cb05e23ULL, 0xc27cbedf461da399ULL, 0xac872b7d38d1fa45ULL,
0x3ebf819ee2a0217cULL, 0x485a1236a67e6c90ULL, 0x36b58398f4ae2d6cULL,
0x6c771b2df5415ad8ULL, 0x38360e12622a2470ULL, 0x8caf236560e9ca05ULL,
0xf306f502f9f104fbULL, 0x094c45cfddc68312ULL, 0x84a5216376e7c615ULL,
0x1fd1ce4f71509e3eULL, 0x397049dba9e2ab72ULL, 0xb09c2c7409c4e87dULL,
0xc33af9168dd52c9bULL, 0xbf59e63754886e63ULL, 0xe254b6c71e2593d9ULL,
0xa088287825d8f05dULL, 0x5c4b1739816572b8ULL, 0x32b0829bffa92b64ULL,
0x68721a2efe465cd0ULL, 0x169d8b80ac961d2cULL, 0xdf21fe1fbcc03ea3ULL,
0x12988a83a7911b24ULL, 0x242d091b533f3648ULL, 0x03cac94640458c06ULL,
0x26a18794d8b2354cULL, 0x256b4ed298f7b94aULL, 0xa342e13e659d7c5bULL,
0xb8962e721fcae46dULL, 0xb753e43142866273ULL, 0xa747e03d6e9a7a53ULL,
0x8b60eb202bab400bULL, 0x7aea90ad59d747f4ULL, 0xaa0ea4f1b85bff49ULL,
0x78661e22d25a44f0ULL, 0x2eab8592cebc395cULL, 0x9dfd60a0873d5d27ULL,
0x0000000000000000ULL, 0x94b1256f5afbde35ULL, 0xf703f401f2f602f3ULL,
0xe312f10ed5ed1cdbULL, 0x6afe94a175cb5fd4ULL, 0x2c270b1d45313a58ULL,
0xbb5ce7345f8f686bULL, 0xc9bc759f1056238fULL, 0x9b74ef2c07b7582bULL,
0xd0e4345ce18cb8bdULL, 0xc4f53153c697a695ULL, 0x77a3d4618f16c2eeULL,
0x67b7d06da30adaceULL, 0x22a48697d3b53344ULL, 0xe59b7e82556719d7ULL,
0x8e23adeaeb64c901ULL, 0xd32efd1aa1c934bbULL, 0xa48d297b2edff655ULL,
0xc0f03050cd90a09dULL, 0xecd73b4d88a19ac5ULL, 0x46d99fbc30fa658cULL,
0xc73ff81586d22a93ULL, 0x3ff9c6572968ae7eULL, 0x4c5f1335ad796a98ULL,
0x181e060a3a121430ULL, 0x1411050f271b1e28ULL, 0x33f6c5523461a466ULL,
0x44551133bb776688ULL, 0xc1b6779906582f9fULL, 0xed917c84436915c7ULL,
0xf58f7a8e797b01f7ULL, 0xfd8578886f750de7ULL, 0xd8ee365af782b4adULL,
0x706c1c24c45448e0ULL, 0xe4dd394b9eaf96d5ULL, 0x792059eb1992cbf2ULL,
0x60781828e84850c0ULL, 0x451356fa70bfe98aULL, 0xf645b3c8393e8df1ULL,
0xfa4ab0cd243787e9ULL, 0x90b4246c51fcd83dULL, 0x80a020607de0c01dULL,
0xf240b2cb32398bf9ULL, 0x72e092ab4fd94be4ULL, 0xb615a3f8894eed71ULL,
0x27e7c05d137aba4eULL, 0x0d4944ccd6c1851aULL, 0x95f762a691335137ULL,
0x40501030b0706080ULL, 0xea5eb4c1082b9fc9ULL, 0x2aae8491c5bb3f54ULL,
0x115243c5e7d49722ULL, 0x76e593a844de4decULL, 0x2fedc25b0574b65eULL,
0x357f4adeb4eba16aULL, 0xce73bdda5b14a981ULL, 0x06898f8c808a050cULL,
0xb4992d7702c3ee75ULL, 0xca76bcd95013af89ULL, 0x4ad69cb92df36f94ULL,
0xb5df6abec90b6177ULL, 0x1d5d40c0fadd9d3aULL, 0x1bd4cf4c7a579836ULL,
0xb210a2fb8249eb79ULL, 0x3aba809de9a72774ULL, 0x216e4fd193f0bf42ULL,
0x7c631f21d95d42f8ULL, 0x0fc5ca435d4c861eULL, 0x9238aae3da71db39ULL,
0x155742c6ecd3912aULL
};
static const u64 T3[256] = {
0x68d2d3ba016ab9bbULL, 0x194dfc54b1669ae5ULL, 0x93bc712fcd1465e2ULL,
0xb9cd9c74511b8725ULL, 0x0251f553a457a2f7ULL, 0xb86b68d303bed6d0ULL,
0xbd6f6bd204b5ded6ULL, 0x6429d74dfe8552b3ULL, 0x0d5df050ad4abafdULL,
0x268ae9ac63e009cfULL, 0x830e8a8d84961c09ULL, 0x79c6dcbf1a4d91a5ULL,
0xaddd90704d37a73dULL, 0x0755f652a35caaf1ULL, 0xc852b39ae117a47bULL,
0x612dd44cf98e5ab5ULL, 0x658f23eaac200346ULL, 0xa67362d51184e6c4ULL,
0xf166a497c268cc55ULL, 0xb2636ed10da8c6dcULL, 0xffcc553399d085aaULL,
0x0859f351aa41b2fbULL, 0x2a71ed5b9c0fe2c7ULL, 0x04a2f7a655ae59f3ULL,
0x815f7fde20c1befeULL, 0x753dd848e5a27aadULL, 0x329ae5a87fcc29d7ULL,
0xc75eb699e80abc71ULL, 0x904b70db3be696e0ULL, 0xfac856329edb8dacULL,
0x51e6c4b72215d195ULL, 0x2bd719fcceaab332ULL, 0x48ab38e393734b70ULL,
0xdc42bf9efd3b8463ULL, 0xef7eae91d052fc41ULL, 0xcd56b09be61cac7dULL,
0x4daf3be294784376ULL, 0x6dd6d0bb0661b1bdULL, 0x5819c341daf1329bULL,
0xcba5b26e17e55779ULL, 0x0baef2a55cb341f9ULL, 0xc00b40cb4b561680ULL,
0xdab1bd6b0cc27f67ULL, 0xfb6ea295cc7edc59ULL, 0x1fbefea1409f61e1ULL,
0x18eb08f3e3c3cb10ULL, 0x4ffeceb1302fe181ULL, 0x0a0806020e16100cULL,
0xdb1749cc5e672e92ULL, 0xf33751c4663f6ea2ULL, 0x6974271d53cfe84eULL,
0x44503c146c9ca078ULL, 0xe82b58c3730e56b0ULL, 0xf291a563349a3f57ULL,
0x954f73da3ced9ee6ULL, 0x3469e75d8e35d2d3ULL, 0x3e61e15f8023c2dfULL,
0x8b5779dc2ed7aef2ULL, 0x94e9877d6e48cf13ULL, 0xde134acd596c2694ULL,
0x9ee1817f605edf1fULL, 0x2f75ee5a9b04eac1ULL, 0xc1adb46c19f34775ULL,
0x316de45c893edad5ULL, 0x0cfb04f7ffefeb08ULL, 0xbe986a26f2472dd4ULL,
0x24db1cffc7b7ab38ULL, 0x7e932aedb9113b54ULL, 0x6f8725e8a236134aULL,
0xd34eba9df4269c69ULL, 0xcea1b16f10ee5f7fULL, 0x8c028f8e8d8b0403ULL,
0x7d642b194fe3c856ULL, 0x1abafda0479469e7ULL, 0x17e70df0eaded31aULL,
0x971e868998ba3c11ULL, 0x333c110f2d697822ULL, 0x1b1c090715313812ULL,
0x2986ecaf6afd11c5ULL, 0x30cb10fbdb9b8b20ULL, 0x2820180838584030ULL,
0x41543f156b97a87eULL, 0x3934170d237f682eULL, 0x14100c041c2c2018ULL,
0x05040301070b0806ULL, 0xe98dac6421ab0745ULL, 0x845b7cdf27cab6f8ULL,
0xb3c59a765f0d9729ULL, 0x80f98b797264ef0bULL, 0x8e537add29dca6f4ULL,
0xc9f4473db3b2f58eULL, 0x4e583a16628ab074ULL, 0xc3fc413fbda4e582ULL,
0xebdc593785fca5b2ULL, 0xc4a9b76d1ef84f73ULL, 0xd8e04838a895dd90ULL,
0x67ded6b90877a1b1ULL, 0xa2d19573442abf37ULL, 0x6a8326e9a53d1b4cULL,
0xe1d45f358beab5beULL, 0x1c49ff55b66d92e3ULL, 0xa8d993714a3caf3bULL,
0x8af18d7b7c72ff07ULL, 0x860a898c839d140fULL, 0xa7d596724321b731ULL,
0x921a85889fb13417ULL, 0x09ff07f6f8e4e30eULL, 0x82a87e2ad6334dfcULL,
0xc6f8423ebaafed84ULL, 0x3b65e25e8728cad9ULL, 0xbb9c6927f54c25d2ULL,
0x4305ca46cfc00a89ULL, 0x3c30140c24746028ULL, 0xec89af6526a00f43ULL,
0xd5bdb86805df676dULL, 0xf899a3613a8c2f5bULL, 0x0f0c0503091d180aULL,
0xe2235ec17d1846bcULL, 0x1641f957b87b82efULL, 0xa97f67d61899feceULL,
0x9a4376d935f086ecULL, 0x257de8589512facdULL, 0x9f4775d832fb8eeaULL,
0xe385aa662fbd1749ULL, 0xac7b64d71f92f6c8ULL, 0xd2e84e3aa683cd9cULL,
0xcf0745c8424b0e8aULL, 0xccf0443cb4b9fd88ULL, 0x35cf13fadc908326ULL,
0xf462a796c563c453ULL, 0x01a6f4a752a551f5ULL, 0xc25ab598ef01b477ULL,
0x7b9729ecbe1a3352ULL, 0x62dad5b80f7ca9b7ULL, 0xfc3b54c76f2276a8ULL,
0x2c82efae6df619c3ULL, 0xd0b9bb6902d46f6bULL, 0x7a31dd4becbf62a7ULL,
0x3d96e0ab76d131ddULL, 0x379ee6a978c721d1ULL, 0xe681a96728b61f4fULL,
0x22281e0a364e503cULL, 0x4601c947c8cb028fULL, 0x1def0bf2e4c8c316ULL,
0x5beec2b52c03c199ULL, 0xaa886622ee6b0dccULL, 0x56b332e581497b64ULL,
0x719f2feeb00c235eULL, 0x7cc2dfbe1d4699a3ULL, 0x87ac7d2bd13845faULL,
0xbf3e9e81a0e27c21ULL, 0x5a4836127ea6906cULL, 0xb5369883aef46c2dULL,
0x776c2d1b41f5d85aULL, 0x3638120e2a627024ULL, 0xaf8c6523e96005caULL,
0x06f302f5f1f9fb04ULL, 0x4c09cf45c6dd1283ULL, 0xa5846321e77615c6ULL,
0xd11f4fce50713e9eULL, 0x7039db49e2a972abULL, 0x9cb0742cc4097de8ULL,
0x3ac316f9d58d9b2cULL, 0x59bf37e68854636eULL, 0x54e2c7b6251ed993ULL,
0x88a07828d8255df0ULL, 0x4b5c39176581b872ULL, 0xb0329b82a9ff642bULL,
0x72682e1a46fed05cULL, 0x9d16808b96ac2c1dULL, 0x21df1ffec0bca33eULL,
0x9812838a91a7241bULL, 0x2d241b093f534836ULL, 0xca0346c94540068cULL,
0xa1269487b2d84c35ULL, 0x6b25d24ef7984ab9ULL, 0x42a33ee19d655b7cULL,
0x96b8722eca1f6de4ULL, 0x53b731e486427362ULL, 0x47a73de09a6e537aULL,
0x608b20ebab2b0b40ULL, 0xea7aad90d759f447ULL, 0x0eaaf1a45bb849ffULL,
0x6678221e5ad2f044ULL, 0xab2e9285bcce5c39ULL, 0xfd9da0603d87275dULL,
0x0000000000000000ULL, 0xb1946f25fb5a35deULL, 0x03f701f4f6f2f302ULL,
0x12e30ef1edd5db1cULL, 0xfe6aa194cb75d45fULL, 0x272c1d0b3145583aULL,
0x5cbb34e78f5f6b68ULL, 0xbcc99f7556108f23ULL, 0x749b2cefb7072b58ULL,
0xe4d05c348ce1bdb8ULL, 0xf5c4533197c695a6ULL, 0xa37761d4168feec2ULL,
0xb7676dd00aa3cedaULL, 0xa4229786b5d34433ULL, 0x9be5827e6755d719ULL,
0x238eeaad64eb01c9ULL, 0x2ed31afdc9a1bb34ULL, 0x8da47b29df2e55f6ULL,
0xf0c0503090cd9da0ULL, 0xd7ec4d3ba188c59aULL, 0xd946bc9ffa308c65ULL,
0x3fc715f8d286932aULL, 0xf93f57c668297eaeULL, 0x5f4c351379ad986aULL,
0x1e180a06123a3014ULL, 0x11140f051b27281eULL, 0xf63352c5613466a4ULL,
0x5544331177bb8866ULL, 0xb6c1997758069f2fULL, 0x91ed847c6943c715ULL,
0x8ff58e7a7b79f701ULL, 0x85fd8878756fe70dULL, 0xeed85a3682f7adb4ULL,
0x6c70241c54c4e048ULL, 0xdde44b39af9ed596ULL, 0x2079eb599219f2cbULL,
0x7860281848e8c050ULL, 0x1345fa56bf708ae9ULL, 0x45f6c8b33e39f18dULL,
0x4afacdb03724e987ULL, 0xb4906c24fc513dd8ULL, 0xa0806020e07d1dc0ULL,
0x40f2cbb23932f98bULL, 0xe072ab92d94fe44bULL, 0x15b6f8a34e8971edULL,
0xe7275dc07a134ebaULL, 0x490dcc44c1d61a85ULL, 0xf795a66233913751ULL,
0x5040301070b08060ULL, 0x5eeac1b42b08c99fULL, 0xae2a9184bbc5543fULL,
0x5211c543d4e72297ULL, 0xe576a893de44ec4dULL, 0xed2f5bc274055eb6ULL,
0x7f35de4aebb46aa1ULL, 0x73cedabd145b81a9ULL, 0x89068c8f8a800c05ULL,
0x99b4772dc30275eeULL, 0x76cad9bc135089afULL, 0xd64ab99cf32d946fULL,
0xdfb5be6a0bc97761ULL, 0x5d1dc040ddfa3a9dULL, 0xd41b4ccf577a3698ULL,
0x10b2fba2498279ebULL, 0xba3a9d80a7e97427ULL, 0x6e21d14ff09342bfULL,
0x637c211f5dd9f842ULL, 0xc50f43ca4c5d1e86ULL, 0x3892e3aa71da39dbULL,
0x5715c642d3ec2a91ULL
};
static const u64 T4[256] = {
0xbbb96a01bad3d268ULL, 0xe59a66b154fc4d19ULL, 0xe26514cd2f71bc93ULL,
0x25871b51749ccdb9ULL, 0xf7a257a453f55102ULL, 0xd0d6be03d3686bb8ULL,
0xd6deb504d26b6fbdULL, 0xb35285fe4dd72964ULL, 0xfdba4aad50f05d0dULL,
0xcf09e063ace98a26ULL, 0x091c96848d8a0e83ULL, 0xa5914d1abfdcc679ULL,
0x3da7374d7090ddadULL, 0xf1aa5ca352f65507ULL, 0x7ba417e19ab352c8ULL,
0xb55a8ef94cd42d61ULL, 0x460320acea238f65ULL, 0xc4e68411d56273a6ULL,
0x55cc68c297a466f1ULL, 0xdcc6a80dd16e63b2ULL, 0xaa85d0993355ccffULL,
0xfbb241aa51f35908ULL, 0xc7e20f9c5bed712aULL, 0xf359ae55a6f7a204ULL,
0xfebec120de7f5f81ULL, 0xad7aa2e548d83d75ULL, 0xd729cc7fa8e59a32ULL,
0x71bc0ae899b65ec7ULL, 0xe096e63bdb704b90ULL, 0xac8ddb9e3256c8faULL,
0x95d11522b7c4e651ULL, 0x32b3aacefc19d72bULL, 0x704b7393e338ab48ULL,
0x63843bfd9ebf42dcULL, 0x41fc52d091ae7eefULL, 0x7dac1ce69bb056cdULL,
0x76437894e23baf4dULL, 0xbdb16106bbd0d66dULL, 0x9b32f1da41c31958ULL,
0x7957e5176eb2a5cbULL, 0xf941b35ca5f2ae0bULL, 0x8016564bcb400bc0ULL,
0x677fc20c6bbdb1daULL, 0x59dc7ecc95a26efbULL, 0xe1619f40a1febe1fULL,
0x10cbc3e3f308eb18ULL, 0x81e12f30b1cefe4fULL, 0x0c10160e0206080aULL,
0x922e675ecc4917dbULL, 0xa26e3f66c45137f3ULL, 0x4ee8cf531d277469ULL,
0x78a09c6c143c5044ULL, 0xb0560e73c3582be8ULL, 0x573f9a3463a591f2ULL,
0xe69eed3cda734f95ULL, 0xd3d2358e5de76934ULL, 0xdfc223805fe1613eULL,
0xf2aed72edc79578bULL, 0x13cf486e7d87e994ULL, 0x94266c59cd4a13deULL,
0x1fdf5e607f81e19eULL, 0xc1ea049b5aee752fULL, 0x7547f3196cb4adc1ULL,
0xd5da3e895ce46d31ULL, 0x08ebeffff704fb0cULL, 0xd42d47f2266a98beULL,
0x38abb7c7ff1cdb24ULL, 0x543b11b9ed2a937eULL, 0x4a1336a2e825876fULL,
0x699c26f49dba4ed3ULL, 0x7f5fee106fb1a1ceULL, 0x03048b8d8e8f028cULL,
0x56c8e34f192b647dULL, 0xe7699447a0fdba1aULL, 0x1ad3deeaf00de717ULL,
0x113cba9889861e97ULL, 0x2278692d0f113c33ULL, 0x1238311507091c1bULL,
0xc511fd6aafec8629ULL, 0x208b9bdbfb10cb30ULL, 0x3040583808182028ULL,
0x7ea8976b153f5441ULL, 0x2e687f230d173439ULL, 0x18202c1c040c1014ULL,
0x06080b0701030405ULL, 0x4507ab2164ac8de9ULL, 0xf8b6ca27df7c5b84ULL,
0x29970d5f769ac5b3ULL, 0x0bef6472798bf980ULL, 0xf4a6dc29dd7a538eULL,
0x8ef5b2b33d47f4c9ULL, 0x74b08a62163a584eULL, 0x82e5a4bd3f41fcc3ULL,
0xb2a5fc853759dcebULL, 0x734ff81e6db7a9c4ULL, 0x90dd95a83848e0d8ULL,
0xb1a17708b9d6de67ULL, 0x37bf2a447395d1a2ULL, 0x4c1b3da5e926836aULL,
0xbeb5ea8b355fd4e1ULL, 0xe3926db655ff491cULL, 0x3baf3c4a7193d9a8ULL,
0x07ff727c7b8df18aULL, 0x0f149d838c890a86ULL, 0x31b721437296d5a7ULL,
0x1734b19f88851a92ULL, 0x0ee3e4f8f607ff09ULL, 0xfc4d33d62a7ea882ULL,
0x84edafba3e42f8c6ULL, 0xd9ca28875ee2653bULL, 0xd2254cf527699cbbULL,
0x890ac0cf46ca0543ULL, 0x286074240c14303cULL, 0x430fa02665af89ecULL,
0x6d67df0568b8bdd5ULL, 0x5b2f8c3a61a399f8ULL, 0x0a181d0903050c0fULL,
0xbc46187dc15e23e2ULL, 0xef827bb857f94116ULL, 0xcefe9918d6677fa9ULL,
0xec86f035d976439aULL, 0xcdfa129558e87d25ULL, 0xea8efb32d875479fULL,
0x4917bd2f66aa85e3ULL, 0xc8f6921fd7647bacULL, 0x9ccd83a63a4ee8d2ULL,
0x8a0e4b42c84507cfULL, 0x88fdb9b43c44f0ccULL, 0x268390dcfa13cf35ULL,
0x53c463c596a762f4ULL, 0xf551a552a7f4a601ULL, 0x77b401ef98b55ac2ULL,
0x52331abeec29977bULL, 0xb7a97c0fb8d5da62ULL, 0xa876226fc7543bfcULL,
0xc319f66daeef822cULL, 0x6b6fd40269bbb9d0ULL, 0xa762bfec4bdd317aULL,
0xdd31d176abe0963dULL, 0xd121c778a9e69e37ULL, 0x4f1fb62867a981e6ULL,
0x3c504e360a1e2822ULL, 0x8f02cbc847c90146ULL, 0x16c3c8e4f20bef1dULL,
0x99c1032cb5c2ee5bULL, 0xcc0d6bee226688aaULL, 0x647b4981e532b356ULL,
0x5e230cb0ee2f9f71ULL, 0xa399461dbedfc27cULL, 0xfa4538d12b7dac87ULL,
0x217ce2a0819e3ebfULL, 0x6c90a67e1236485aULL, 0x2d6cf4ae839836b5ULL,
0x5ad8f5411b2d6c77ULL, 0x2470622a0e123836ULL, 0xca0560e923658cafULL,
0x04fbf9f1f502f306ULL, 0x8312ddc645cf094cULL, 0xc61576e7216384a5ULL,
0x9e3e7150ce4f1fd1ULL, 0xab72a9e249db3970ULL, 0xe87d09c42c74b09cULL,
0x2c9b8dd5f916c33aULL, 0x6e635488e637bf59ULL, 0x93d91e25b6c7e254ULL,
0xf05d25d82878a088ULL, 0x72b8816517395c4bULL, 0x2b64ffa9829b32b0ULL,
0x5cd0fe461a2e6872ULL, 0x1d2cac968b80169dULL, 0x3ea3bcc0fe1fdf21ULL,
0x1b24a7918a831298ULL, 0x3648533f091b242dULL, 0x8c064045c94603caULL,
0x354cd8b2879426a1ULL, 0xb94a98f74ed2256bULL, 0x7c5b659de13ea342ULL,
0xe46d1fca2e72b896ULL, 0x62734286e431b753ULL, 0x7a536e9ae03da747ULL,
0x400b2babeb208b60ULL, 0x47f459d790ad7aeaULL, 0xff49b85ba4f1aa0eULL,
0x44f0d25a1e227866ULL, 0x395ccebc85922eabULL, 0x5d27873d60a09dfdULL,
0x0000000000000000ULL, 0xde355afb256f94b1ULL, 0x02f3f2f6f401f703ULL,
0x1cdbd5edf10ee312ULL, 0x5fd475cb94a16afeULL, 0x3a5845310b1d2c27ULL,
0x686b5f8fe734bb5cULL, 0x238f1056759fc9bcULL, 0x582b07b7ef2c9b74ULL,
0xb8bde18c345cd0e4ULL, 0xa695c6973153c4f5ULL, 0xc2ee8f16d46177a3ULL,
0xdacea30ad06d67b7ULL, 0x3344d3b5869722a4ULL, 0x19d755677e82e59bULL,
0xc901eb64adea8e23ULL, 0x34bba1c9fd1ad32eULL, 0xf6552edf297ba48dULL,
0xa09dcd903050c0f0ULL, 0x9ac588a13b4decd7ULL, 0x658c30fa9fbc46d9ULL,
0x2a9386d2f815c73fULL, 0xae7e2968c6573ff9ULL, 0x6a98ad7913354c5fULL,
0x14303a12060a181eULL, 0x1e28271b050f1411ULL, 0xa4663461c55233f6ULL,
0x6688bb7711334455ULL, 0x2f9f06587799c1b6ULL, 0x15c743697c84ed91ULL,
0x01f7797b7a8ef58fULL, 0x0de76f757888fd85ULL, 0xb4adf782365ad8eeULL,
0x48e0c4541c24706cULL, 0x96d59eaf394be4ddULL, 0xcbf2199259eb7920ULL,
0x50c0e84818286078ULL, 0xe98a70bf56fa4513ULL, 0x8df1393eb3c8f645ULL,
0x87e92437b0cdfa4aULL, 0xd83d51fc246c90b4ULL, 0xc01d7de0206080a0ULL,
0x8bf93239b2cbf240ULL, 0x4be44fd992ab72e0ULL, 0xed71894ea3f8b615ULL,
0xba4e137ac05d27e7ULL, 0x851ad6c144cc0d49ULL, 0x5137913362a695f7ULL,
0x6080b07010304050ULL, 0x9fc9082bb4c1ea5eULL, 0x3f54c5bb84912aaeULL,
0x9722e7d443c51152ULL, 0x4dec44de93a876e5ULL, 0xb65e0574c25b2fedULL,
0xa16ab4eb4ade357fULL, 0xa9815b14bddace73ULL, 0x050c808a8f8c0689ULL,
0xee7502c32d77b499ULL, 0xaf895013bcd9ca76ULL, 0x6f942df39cb94ad6ULL,
0x6177c90b6abeb5dfULL, 0x9d3afadd40c01d5dULL, 0x98367a57cf4c1bd4ULL,
0xeb798249a2fbb210ULL, 0x2774e9a7809d3abaULL, 0xbf4293f04fd1216eULL,
0x42f8d95d1f217c63ULL, 0x861e5d4cca430fc5ULL, 0xdb39da71aae39238ULL,
0x912aecd342c61557ULL
};
static const u64 T5[256] = {
0xb9bb016ad3ba68d2ULL, 0x9ae5b166fc54194dULL, 0x65e2cd14712f93bcULL,
0x8725511b9c74b9cdULL, 0xa2f7a457f5530251ULL, 0xd6d003be68d3b86bULL,
0xded604b56bd2bd6fULL, 0x52b3fe85d74d6429ULL, 0xbafdad4af0500d5dULL,
0x09cf63e0e9ac268aULL, 0x1c0984968a8d830eULL, 0x91a51a4ddcbf79c6ULL,
0xa73d4d379070adddULL, 0xaaf1a35cf6520755ULL, 0xa47be117b39ac852ULL,
0x5ab5f98ed44c612dULL, 0x0346ac2023ea658fULL, 0xe6c4118462d5a673ULL,
0xcc55c268a497f166ULL, 0xc6dc0da86ed1b263ULL, 0x85aa99d05533ffccULL,
0xb2fbaa41f3510859ULL, 0xe2c79c0fed5b2a71ULL, 0x59f355aef7a604a2ULL,
0xbefe20c17fde815fULL, 0x7aade5a2d848753dULL, 0x29d77fcce5a8329aULL,
0xbc71e80ab699c75eULL, 0x96e03be670db904bULL, 0x8dac9edb5632fac8ULL,
0xd1952215c4b751e6ULL, 0xb332ceaa19fc2bd7ULL, 0x4b70937338e348abULL,
0x8463fd3bbf9edc42ULL, 0xfc41d052ae91ef7eULL, 0xac7de61cb09bcd56ULL,
0x437694783be24dafULL, 0xb1bd0661d0bb6dd6ULL, 0x329bdaf1c3415819ULL,
0x577917e5b26ecba5ULL, 0x41f95cb3f2a50baeULL, 0x16804b5640cbc00bULL,
0x7f670cc2bd6bdab1ULL, 0xdc59cc7ea295fb6eULL, 0x61e1409ffea11fbeULL,
0xcb10e3c308f318ebULL, 0xe181302fceb14ffeULL, 0x100c0e1606020a08ULL,
0x2e925e6749ccdb17ULL, 0x6ea2663f51c4f337ULL, 0xe84e53cf271d6974ULL,
0xa0786c9c3c144450ULL, 0x56b0730e58c3e82bULL, 0x3f57349aa563f291ULL,
0x9ee63ced73da954fULL, 0xd2d38e35e75d3469ULL, 0xc2df8023e15f3e61ULL,
0xaef22ed779dc8b57ULL, 0xcf136e48877d94e9ULL, 0x2694596c4acdde13ULL,
0xdf1f605e817f9ee1ULL, 0xeac19b04ee5a2f75ULL, 0x477519f3b46cc1adULL,
0xdad5893ee45c316dULL, 0xeb08ffef04f70cfbULL, 0x2dd4f2476a26be98ULL,
0xab38c7b71cff24dbULL, 0x3b54b9112aed7e93ULL, 0x134aa23625e86f87ULL,
0x9c69f426ba9dd34eULL, 0x5f7f10eeb16fcea1ULL, 0x04038d8b8f8e8c02ULL,
0xc8564fe32b197d64ULL, 0x69e74794fda01abaULL, 0xd31aeade0df017e7ULL,
0x3c1198ba8689971eULL, 0x78222d69110f333cULL, 0x3812153109071b1cULL,
0x11c56afdecaf2986ULL, 0x8b20db9b10fb30cbULL, 0x4030385818082820ULL,
0xa87e6b973f154154ULL, 0x682e237f170d3934ULL, 0x20181c2c0c041410ULL,
0x0806070b03010504ULL, 0x074521abac64e98dULL, 0xb6f827ca7cdf845bULL,
0x97295f0d9a76b3c5ULL, 0xef0b72648b7980f9ULL, 0xa6f429dc7add8e53ULL,
0xf58eb3b2473dc9f4ULL, 0xb074628a3a164e58ULL, 0xe582bda4413fc3fcULL,
0xa5b285fc5937ebdcULL, 0x4f731ef8b76dc4a9ULL, 0xdd90a8954838d8e0ULL,
0xa1b10877d6b967deULL, 0xbf37442a9573a2d1ULL, 0x1b4ca53d26e96a83ULL,
0xb5be8bea5f35e1d4ULL, 0x92e3b66dff551c49ULL, 0xaf3b4a3c9371a8d9ULL,
0xff077c728d7b8af1ULL, 0x140f839d898c860aULL, 0xb73143219672a7d5ULL,
0x34179fb18588921aULL, 0xe30ef8e407f609ffULL, 0x4dfcd6337e2a82a8ULL,
0xed84baaf423ec6f8ULL, 0xcad98728e25e3b65ULL, 0x25d2f54c6927bb9cULL,
0x0a89cfc0ca464305ULL, 0x60282474140c3c30ULL, 0x0f4326a0af65ec89ULL,
0x676d05dfb868d5bdULL, 0x2f5b3a8ca361f899ULL, 0x180a091d05030f0cULL,
0x46bc7d185ec1e223ULL, 0x82efb87bf9571641ULL, 0xfece189967d6a97fULL,
0x86ec35f076d99a43ULL, 0xfacd9512e858257dULL, 0x8eea32fb75d89f47ULL,
0x17492fbdaa66e385ULL, 0xf6c81f9264d7ac7bULL, 0xcd9ca6834e3ad2e8ULL,
0x0e8a424b45c8cf07ULL, 0xfd88b4b9443cccf0ULL, 0x8326dc9013fa35cfULL,
0xc453c563a796f462ULL, 0x51f552a5f4a701a6ULL, 0xb477ef01b598c25aULL,
0x3352be1a29ec7b97ULL, 0xa9b70f7cd5b862daULL, 0x76a86f2254c7fc3bULL,
0x19c36df6efae2c82ULL, 0x6f6b02d4bb69d0b9ULL, 0x62a7ecbfdd4b7a31ULL,
0x31dd76d1e0ab3d96ULL, 0x21d178c7e6a9379eULL, 0x1f4f28b6a967e681ULL,
0x503c364e1e0a2228ULL, 0x028fc8cbc9474601ULL, 0xc316e4c80bf21defULL,
0xc1992c03c2b55beeULL, 0x0dccee6b6622aa88ULL, 0x7b64814932e556b3ULL,
0x235eb00c2fee719fULL, 0x99a31d46dfbe7cc2ULL, 0x45fad1387d2b87acULL,
0x7c21a0e29e81bf3eULL, 0x906c7ea636125a48ULL, 0x6c2daef49883b536ULL,
0xd85a41f52d1b776cULL, 0x70242a62120e3638ULL, 0x05cae9606523af8cULL,
0xfb04f1f902f506f3ULL, 0x1283c6ddcf454c09ULL, 0x15c6e7766321a584ULL,
0x3e9e50714fced11fULL, 0x72abe2a9db497039ULL, 0x7de8c409742c9cb0ULL,
0x9b2cd58d16f93ac3ULL, 0x636e885437e659bfULL, 0xd993251ec7b654e2ULL,
0x5df0d825782888a0ULL, 0xb872658139174b5cULL, 0x642ba9ff9b82b032ULL,
0xd05c46fe2e1a7268ULL, 0x2c1d96ac808b9d16ULL, 0xa33ec0bc1ffe21dfULL,
0x241b91a7838a9812ULL, 0x48363f531b092d24ULL, 0x068c454046c9ca03ULL,
0x4c35b2d89487a126ULL, 0x4ab9f798d24e6b25ULL, 0x5b7c9d653ee142a3ULL,
0x6de4ca1f722e96b8ULL, 0x7362864231e453b7ULL, 0x537a9a6e3de047a7ULL,
0x0b40ab2b20eb608bULL, 0xf447d759ad90ea7aULL, 0x49ff5bb8f1a40eaaULL,
0xf0445ad2221e6678ULL, 0x5c39bcce9285ab2eULL, 0x275d3d87a060fd9dULL,
0x0000000000000000ULL, 0x35defb5a6f25b194ULL, 0xf302f6f201f403f7ULL,
0xdb1cedd50ef112e3ULL, 0xd45fcb75a194fe6aULL, 0x583a31451d0b272cULL,
0x6b688f5f34e75cbbULL, 0x8f2356109f75bcc9ULL, 0x2b58b7072cef749bULL,
0xbdb88ce15c34e4d0ULL, 0x95a697c65331f5c4ULL, 0xeec2168f61d4a377ULL,
0xceda0aa36dd0b767ULL, 0x4433b5d39786a422ULL, 0xd7196755827e9be5ULL,
0x01c964ebeaad238eULL, 0xbb34c9a11afd2ed3ULL, 0x55f6df2e7b298da4ULL,
0x9da090cd5030f0c0ULL, 0xc59aa1884d3bd7ecULL, 0x8c65fa30bc9fd946ULL,
0x932ad28615f83fc7ULL, 0x7eae682957c6f93fULL, 0x986a79ad35135f4cULL,
0x3014123a0a061e18ULL, 0x281e1b270f051114ULL, 0x66a4613452c5f633ULL,
0x886677bb33115544ULL, 0x9f2f58069977b6c1ULL, 0xc7156943847c91edULL,
0xf7017b798e7a8ff5ULL, 0xe70d756f887885fdULL, 0xadb482f75a36eed8ULL,
0xe04854c4241c6c70ULL, 0xd596af9e4b39dde4ULL, 0xf2cb9219eb592079ULL,
0xc05048e828187860ULL, 0x8ae9bf70fa561345ULL, 0xf18d3e39c8b345f6ULL,
0xe9873724cdb04afaULL, 0x3dd8fc516c24b490ULL, 0x1dc0e07d6020a080ULL,
0xf98b3932cbb240f2ULL, 0xe44bd94fab92e072ULL, 0x71ed4e89f8a315b6ULL,
0x4eba7a135dc0e727ULL, 0x1a85c1d6cc44490dULL, 0x37513391a662f795ULL,
0x806070b030105040ULL, 0xc99f2b08c1b45eeaULL, 0x543fbbc59184ae2aULL,
0x2297d4e7c5435211ULL, 0xec4dde44a893e576ULL, 0x5eb674055bc2ed2fULL,
0x6aa1ebb4de4a7f35ULL, 0x81a9145bdabd73ceULL, 0x0c058a808c8f8906ULL,
0x75eec302772d99b4ULL, 0x89af1350d9bc76caULL, 0x946ff32db99cd64aULL,
0x77610bc9be6adfb5ULL, 0x3a9dddfac0405d1dULL, 0x3698577a4ccfd41bULL,
0x79eb4982fba210b2ULL, 0x7427a7e99d80ba3aULL, 0x42bff093d14f6e21ULL,
0xf8425dd9211f637cULL, 0x1e864c5d43cac50fULL, 0x39db71dae3aa3892ULL,
0x2a91d3ecc6425715ULL
};
static const u64 T6[256] = {
0x6a01bbb9d268bad3ULL, 0x66b1e59a4d1954fcULL, 0x14cde265bc932f71ULL,
0x1b512587cdb9749cULL, 0x57a4f7a2510253f5ULL, 0xbe03d0d66bb8d368ULL,
0xb504d6de6fbdd26bULL, 0x85feb35229644dd7ULL, 0x4aadfdba5d0d50f0ULL,
0xe063cf098a26ace9ULL, 0x9684091c0e838d8aULL, 0x4d1aa591c679bfdcULL,
0x374d3da7ddad7090ULL, 0x5ca3f1aa550752f6ULL, 0x17e17ba452c89ab3ULL,
0x8ef9b55a2d614cd4ULL, 0x20ac46038f65ea23ULL, 0x8411c4e673a6d562ULL,
0x68c255cc66f197a4ULL, 0xa80ddcc663b2d16eULL, 0xd099aa85ccff3355ULL,
0x41aafbb2590851f3ULL, 0x0f9cc7e2712a5bedULL, 0xae55f359a204a6f7ULL,
0xc120febe5f81de7fULL, 0xa2e5ad7a3d7548d8ULL, 0xcc7fd7299a32a8e5ULL,
0x0ae871bc5ec799b6ULL, 0xe63be0964b90db70ULL, 0xdb9eac8dc8fa3256ULL,
0x152295d1e651b7c4ULL, 0xaace32b3d72bfc19ULL, 0x7393704bab48e338ULL,
0x3bfd638442dc9ebfULL, 0x52d041fc7eef91aeULL, 0x1ce67dac56cd9bb0ULL,
0x78947643af4de23bULL, 0x6106bdb1d66dbbd0ULL, 0xf1da9b32195841c3ULL,
0xe5177957a5cb6eb2ULL, 0xb35cf941ae0ba5f2ULL, 0x564b80160bc0cb40ULL,
0xc20c677fb1da6bbdULL, 0x7ecc59dc6efb95a2ULL, 0x9f40e161be1fa1feULL,
0xc3e310cbeb18f308ULL, 0x2f3081e1fe4fb1ceULL, 0x160e0c10080a0206ULL,
0x675e922e17dbcc49ULL, 0x3f66a26e37f3c451ULL, 0xcf534ee874691d27ULL,
0x9c6c78a05044143cULL, 0x0e73b0562be8c358ULL, 0x9a34573f91f263a5ULL,
0xed3ce69e4f95da73ULL, 0x358ed3d269345de7ULL, 0x2380dfc2613e5fe1ULL,
0xd72ef2ae578bdc79ULL, 0x486e13cfe9947d87ULL, 0x6c59942613decd4aULL,
0x5e601fdfe19e7f81ULL, 0x049bc1ea752f5aeeULL, 0xf3197547adc16cb4ULL,
0x3e89d5da6d315ce4ULL, 0xefff08ebfb0cf704ULL, 0x47f2d42d98be266aULL,
0xb7c738abdb24ff1cULL, 0x11b9543b937eed2aULL, 0x36a24a13876fe825ULL,
0x26f4699c4ed39dbaULL, 0xee107f5fa1ce6fb1ULL, 0x8b8d0304028c8e8fULL,
0xe34f56c8647d192bULL, 0x9447e769ba1aa0fdULL, 0xdeea1ad3e717f00dULL,
0xba98113c1e978986ULL, 0x692d22783c330f11ULL, 0x311512381c1b0709ULL,
0xfd6ac5118629afecULL, 0x9bdb208bcb30fb10ULL, 0x5838304020280818ULL,
0x976b7ea85441153fULL, 0x7f232e6834390d17ULL, 0x2c1c18201014040cULL,
0x0b07060804050103ULL, 0xab2145078de964acULL, 0xca27f8b65b84df7cULL,
0x0d5f2997c5b3769aULL, 0x64720beff980798bULL, 0xdc29f4a6538edd7aULL,
0xb2b38ef5f4c93d47ULL, 0x8a6274b0584e163aULL, 0xa4bd82e5fcc33f41ULL,
0xfc85b2a5dceb3759ULL, 0xf81e734fa9c46db7ULL, 0x95a890dde0d83848ULL,
0x7708b1a1de67b9d6ULL, 0x2a4437bfd1a27395ULL, 0x3da54c1b836ae926ULL,
0xea8bbeb5d4e1355fULL, 0x6db6e392491c55ffULL, 0x3c4a3bafd9a87193ULL,
0x727c07fff18a7b8dULL, 0x9d830f140a868c89ULL, 0x214331b7d5a77296ULL,
0xb19f17341a928885ULL, 0xe4f80ee3ff09f607ULL, 0x33d6fc4da8822a7eULL,
0xafba84edf8c63e42ULL, 0x2887d9ca653b5ee2ULL, 0x4cf5d2259cbb2769ULL,
0xc0cf890a054346caULL, 0x74242860303c0c14ULL, 0xa026430f89ec65afULL,
0xdf056d67bdd568b8ULL, 0x8c3a5b2f99f861a3ULL, 0x1d090a180c0f0305ULL,
0x187dbc4623e2c15eULL, 0x7bb8ef82411657f9ULL, 0x9918cefe7fa9d667ULL,
0xf035ec86439ad976ULL, 0x1295cdfa7d2558e8ULL, 0xfb32ea8e479fd875ULL,
0xbd2f491785e366aaULL, 0x921fc8f67bacd764ULL, 0x83a69ccde8d23a4eULL,
0x4b428a0e07cfc845ULL, 0xb9b488fdf0cc3c44ULL, 0x90dc2683cf35fa13ULL,
0x63c553c462f496a7ULL, 0xa552f551a601a7f4ULL, 0x01ef77b45ac298b5ULL,
0x1abe5233977bec29ULL, 0x7c0fb7a9da62b8d5ULL, 0x226fa8763bfcc754ULL,
0xf66dc319822caeefULL, 0xd4026b6fb9d069bbULL, 0xbfeca762317a4bddULL,
0xd176dd31963dabe0ULL, 0xc778d1219e37a9e6ULL, 0xb6284f1f81e667a9ULL,
0x4e363c5028220a1eULL, 0xcbc88f02014647c9ULL, 0xc8e416c3ef1df20bULL,
0x032c99c1ee5bb5c2ULL, 0x6beecc0d88aa2266ULL, 0x4981647bb356e532ULL,
0x0cb05e239f71ee2fULL, 0x461da399c27cbedfULL, 0x38d1fa45ac872b7dULL,
0xe2a0217c3ebf819eULL, 0xa67e6c90485a1236ULL, 0xf4ae2d6c36b58398ULL,
0xf5415ad86c771b2dULL, 0x622a247038360e12ULL, 0x60e9ca058caf2365ULL,
0xf9f104fbf306f502ULL, 0xddc68312094c45cfULL, 0x76e7c61584a52163ULL,
0x71509e3e1fd1ce4fULL, 0xa9e2ab72397049dbULL, 0x09c4e87db09c2c74ULL,
0x8dd52c9bc33af916ULL, 0x54886e63bf59e637ULL, 0x1e2593d9e254b6c7ULL,
0x25d8f05da0882878ULL, 0x816572b85c4b1739ULL, 0xffa92b6432b0829bULL,
0xfe465cd068721a2eULL, 0xac961d2c169d8b80ULL, 0xbcc03ea3df21fe1fULL,
0xa7911b2412988a83ULL, 0x533f3648242d091bULL, 0x40458c0603cac946ULL,
0xd8b2354c26a18794ULL, 0x98f7b94a256b4ed2ULL, 0x659d7c5ba342e13eULL,
0x1fcae46db8962e72ULL, 0x42866273b753e431ULL, 0x6e9a7a53a747e03dULL,
0x2bab400b8b60eb20ULL, 0x59d747f47aea90adULL, 0xb85bff49aa0ea4f1ULL,
0xd25a44f078661e22ULL, 0xcebc395c2eab8592ULL, 0x873d5d279dfd60a0ULL,
0x0000000000000000ULL, 0x5afbde3594b1256fULL, 0xf2f602f3f703f401ULL,
0xd5ed1cdbe312f10eULL, 0x75cb5fd46afe94a1ULL, 0x45313a582c270b1dULL,
0x5f8f686bbb5ce734ULL, 0x1056238fc9bc759fULL, 0x07b7582b9b74ef2cULL,
0xe18cb8bdd0e4345cULL, 0xc697a695c4f53153ULL, 0x8f16c2ee77a3d461ULL,
0xa30adace67b7d06dULL, 0xd3b5334422a48697ULL, 0x556719d7e59b7e82ULL,
0xeb64c9018e23adeaULL, 0xa1c934bbd32efd1aULL, 0x2edff655a48d297bULL,
0xcd90a09dc0f03050ULL, 0x88a19ac5ecd73b4dULL, 0x30fa658c46d99fbcULL,
0x86d22a93c73ff815ULL, 0x2968ae7e3ff9c657ULL, 0xad796a984c5f1335ULL,
0x3a121430181e060aULL, 0x271b1e281411050fULL, 0x3461a46633f6c552ULL,
0xbb77668844551133ULL, 0x06582f9fc1b67799ULL, 0x436915c7ed917c84ULL,
0x797b01f7f58f7a8eULL, 0x6f750de7fd857888ULL, 0xf782b4add8ee365aULL,
0xc45448e0706c1c24ULL, 0x9eaf96d5e4dd394bULL, 0x1992cbf2792059ebULL,
0xe84850c060781828ULL, 0x70bfe98a451356faULL, 0x393e8df1f645b3c8ULL,
0x243787e9fa4ab0cdULL, 0x51fcd83d90b4246cULL, 0x7de0c01d80a02060ULL,
0x32398bf9f240b2cbULL, 0x4fd94be472e092abULL, 0x894eed71b615a3f8ULL,
0x137aba4e27e7c05dULL, 0xd6c1851a0d4944ccULL, 0x9133513795f762a6ULL,
0xb070608040501030ULL, 0x082b9fc9ea5eb4c1ULL, 0xc5bb3f542aae8491ULL,
0xe7d49722115243c5ULL, 0x44de4dec76e593a8ULL, 0x0574b65e2fedc25bULL,
0xb4eba16a357f4adeULL, 0x5b14a981ce73bddaULL, 0x808a050c06898f8cULL,
0x02c3ee75b4992d77ULL, 0x5013af89ca76bcd9ULL, 0x2df36f944ad69cb9ULL,
0xc90b6177b5df6abeULL, 0xfadd9d3a1d5d40c0ULL, 0x7a5798361bd4cf4cULL,
0x8249eb79b210a2fbULL, 0xe9a727743aba809dULL, 0x93f0bf42216e4fd1ULL,
0xd95d42f87c631f21ULL, 0x5d4c861e0fc5ca43ULL, 0xda71db399238aae3ULL,
0xecd3912a155742c6ULL
};
static const u64 T7[256] = {
0x016ab9bb68d2d3baULL, 0xb1669ae5194dfc54ULL, 0xcd1465e293bc712fULL,
0x511b8725b9cd9c74ULL, 0xa457a2f70251f553ULL, 0x03bed6d0b86b68d3ULL,
0x04b5ded6bd6f6bd2ULL, 0xfe8552b36429d74dULL, 0xad4abafd0d5df050ULL,
0x63e009cf268ae9acULL, 0x84961c09830e8a8dULL, 0x1a4d91a579c6dcbfULL,
0x4d37a73daddd9070ULL, 0xa35caaf10755f652ULL, 0xe117a47bc852b39aULL,
0xf98e5ab5612dd44cULL, 0xac200346658f23eaULL, 0x1184e6c4a67362d5ULL,
0xc268cc55f166a497ULL, 0x0da8c6dcb2636ed1ULL, 0x99d085aaffcc5533ULL,
0xaa41b2fb0859f351ULL, 0x9c0fe2c72a71ed5bULL, 0x55ae59f304a2f7a6ULL,
0x20c1befe815f7fdeULL, 0xe5a27aad753dd848ULL, 0x7fcc29d7329ae5a8ULL,
0xe80abc71c75eb699ULL, 0x3be696e0904b70dbULL, 0x9edb8dacfac85632ULL,
0x2215d19551e6c4b7ULL, 0xceaab3322bd719fcULL, 0x93734b7048ab38e3ULL,
0xfd3b8463dc42bf9eULL, 0xd052fc41ef7eae91ULL, 0xe61cac7dcd56b09bULL,
0x947843764daf3be2ULL, 0x0661b1bd6dd6d0bbULL, 0xdaf1329b5819c341ULL,
0x17e55779cba5b26eULL, 0x5cb341f90baef2a5ULL, 0x4b561680c00b40cbULL,
0x0cc27f67dab1bd6bULL, 0xcc7edc59fb6ea295ULL, 0x409f61e11fbefea1ULL,
0xe3c3cb1018eb08f3ULL, 0x302fe1814ffeceb1ULL, 0x0e16100c0a080602ULL,
0x5e672e92db1749ccULL, 0x663f6ea2f33751c4ULL, 0x53cfe84e6974271dULL,
0x6c9ca07844503c14ULL, 0x730e56b0e82b58c3ULL, 0x349a3f57f291a563ULL,
0x3ced9ee6954f73daULL, 0x8e35d2d33469e75dULL, 0x8023c2df3e61e15fULL,
0x2ed7aef28b5779dcULL, 0x6e48cf1394e9877dULL, 0x596c2694de134acdULL,
0x605edf1f9ee1817fULL, 0x9b04eac12f75ee5aULL, 0x19f34775c1adb46cULL,
0x893edad5316de45cULL, 0xffefeb080cfb04f7ULL, 0xf2472dd4be986a26ULL,
0xc7b7ab3824db1cffULL, 0xb9113b547e932aedULL, 0xa236134a6f8725e8ULL,
0xf4269c69d34eba9dULL, 0x10ee5f7fcea1b16fULL, 0x8d8b04038c028f8eULL,
0x4fe3c8567d642b19ULL, 0x479469e71abafda0ULL, 0xeaded31a17e70df0ULL,
0x98ba3c11971e8689ULL, 0x2d697822333c110fULL, 0x153138121b1c0907ULL,
0x6afd11c52986ecafULL, 0xdb9b8b2030cb10fbULL, 0x3858403028201808ULL,
0x6b97a87e41543f15ULL, 0x237f682e3934170dULL, 0x1c2c201814100c04ULL,
0x070b080605040301ULL, 0x21ab0745e98dac64ULL, 0x27cab6f8845b7cdfULL,
0x5f0d9729b3c59a76ULL, 0x7264ef0b80f98b79ULL, 0x29dca6f48e537addULL,
0xb3b2f58ec9f4473dULL, 0x628ab0744e583a16ULL, 0xbda4e582c3fc413fULL,
0x85fca5b2ebdc5937ULL, 0x1ef84f73c4a9b76dULL, 0xa895dd90d8e04838ULL,
0x0877a1b167ded6b9ULL, 0x442abf37a2d19573ULL, 0xa53d1b4c6a8326e9ULL,
0x8beab5bee1d45f35ULL, 0xb66d92e31c49ff55ULL, 0x4a3caf3ba8d99371ULL,
0x7c72ff078af18d7bULL, 0x839d140f860a898cULL, 0x4321b731a7d59672ULL,
0x9fb13417921a8588ULL, 0xf8e4e30e09ff07f6ULL, 0xd6334dfc82a87e2aULL,
0xbaafed84c6f8423eULL, 0x8728cad93b65e25eULL, 0xf54c25d2bb9c6927ULL,
0xcfc00a894305ca46ULL, 0x247460283c30140cULL, 0x26a00f43ec89af65ULL,
0x05df676dd5bdb868ULL, 0x3a8c2f5bf899a361ULL, 0x091d180a0f0c0503ULL,
0x7d1846bce2235ec1ULL, 0xb87b82ef1641f957ULL, 0x1899fecea97f67d6ULL,
0x35f086ec9a4376d9ULL, 0x9512facd257de858ULL, 0x32fb8eea9f4775d8ULL,
0x2fbd1749e385aa66ULL, 0x1f92f6c8ac7b64d7ULL, 0xa683cd9cd2e84e3aULL,
0x424b0e8acf0745c8ULL, 0xb4b9fd88ccf0443cULL, 0xdc90832635cf13faULL,
0xc563c453f462a796ULL, 0x52a551f501a6f4a7ULL, 0xef01b477c25ab598ULL,
0xbe1a33527b9729ecULL, 0x0f7ca9b762dad5b8ULL, 0x6f2276a8fc3b54c7ULL,
0x6df619c32c82efaeULL, 0x02d46f6bd0b9bb69ULL, 0xecbf62a77a31dd4bULL,
0x76d131dd3d96e0abULL, 0x78c721d1379ee6a9ULL, 0x28b61f4fe681a967ULL,
0x364e503c22281e0aULL, 0xc8cb028f4601c947ULL, 0xe4c8c3161def0bf2ULL,
0x2c03c1995beec2b5ULL, 0xee6b0dccaa886622ULL, 0x81497b6456b332e5ULL,
0xb00c235e719f2feeULL, 0x1d4699a37cc2dfbeULL, 0xd13845fa87ac7d2bULL,
0xa0e27c21bf3e9e81ULL, 0x7ea6906c5a483612ULL, 0xaef46c2db5369883ULL,
0x41f5d85a776c2d1bULL, 0x2a6270243638120eULL, 0xe96005caaf8c6523ULL,
0xf1f9fb0406f302f5ULL, 0xc6dd12834c09cf45ULL, 0xe77615c6a5846321ULL,
0x50713e9ed11f4fceULL, 0xe2a972ab7039db49ULL, 0xc4097de89cb0742cULL,
0xd58d9b2c3ac316f9ULL, 0x8854636e59bf37e6ULL, 0x251ed99354e2c7b6ULL,
0xd8255df088a07828ULL, 0x6581b8724b5c3917ULL, 0xa9ff642bb0329b82ULL,
0x46fed05c72682e1aULL, 0x96ac2c1d9d16808bULL, 0xc0bca33e21df1ffeULL,
0x91a7241b9812838aULL, 0x3f5348362d241b09ULL, 0x4540068cca0346c9ULL,
0xb2d84c35a1269487ULL, 0xf7984ab96b25d24eULL, 0x9d655b7c42a33ee1ULL,
0xca1f6de496b8722eULL, 0x8642736253b731e4ULL, 0x9a6e537a47a73de0ULL,
0xab2b0b40608b20ebULL, 0xd759f447ea7aad90ULL, 0x5bb849ff0eaaf1a4ULL,
0x5ad2f0446678221eULL, 0xbcce5c39ab2e9285ULL, 0x3d87275dfd9da060ULL,
0x0000000000000000ULL, 0xfb5a35deb1946f25ULL, 0xf6f2f30203f701f4ULL,
0xedd5db1c12e30ef1ULL, 0xcb75d45ffe6aa194ULL, 0x3145583a272c1d0bULL,
0x8f5f6b685cbb34e7ULL, 0x56108f23bcc99f75ULL, 0xb7072b58749b2cefULL,
0x8ce1bdb8e4d05c34ULL, 0x97c695a6f5c45331ULL, 0x168feec2a37761d4ULL,
0x0aa3cedab7676dd0ULL, 0xb5d34433a4229786ULL, 0x6755d7199be5827eULL,
0x64eb01c9238eeaadULL, 0xc9a1bb342ed31afdULL, 0xdf2e55f68da47b29ULL,
0x90cd9da0f0c05030ULL, 0xa188c59ad7ec4d3bULL, 0xfa308c65d946bc9fULL,
0xd286932a3fc715f8ULL, 0x68297eaef93f57c6ULL, 0x79ad986a5f4c3513ULL,
0x123a30141e180a06ULL, 0x1b27281e11140f05ULL, 0x613466a4f63352c5ULL,
0x77bb886655443311ULL, 0x58069f2fb6c19977ULL, 0x6943c71591ed847cULL,
0x7b79f7018ff58e7aULL, 0x756fe70d85fd8878ULL, 0x82f7adb4eed85a36ULL,
0x54c4e0486c70241cULL, 0xaf9ed596dde44b39ULL, 0x9219f2cb2079eb59ULL,
0x48e8c05078602818ULL, 0xbf708ae91345fa56ULL, 0x3e39f18d45f6c8b3ULL,
0x3724e9874afacdb0ULL, 0xfc513dd8b4906c24ULL, 0xe07d1dc0a0806020ULL,
0x3932f98b40f2cbb2ULL, 0xd94fe44be072ab92ULL, 0x4e8971ed15b6f8a3ULL,
0x7a134ebae7275dc0ULL, 0xc1d61a85490dcc44ULL, 0x33913751f795a662ULL,
0x70b0806050403010ULL, 0x2b08c99f5eeac1b4ULL, 0xbbc5543fae2a9184ULL,
0xd4e722975211c543ULL, 0xde44ec4de576a893ULL, 0x74055eb6ed2f5bc2ULL,
0xebb46aa17f35de4aULL, 0x145b81a973cedabdULL, 0x8a800c0589068c8fULL,
0xc30275ee99b4772dULL, 0x135089af76cad9bcULL, 0xf32d946fd64ab99cULL,
0x0bc97761dfb5be6aULL, 0xddfa3a9d5d1dc040ULL, 0x577a3698d41b4ccfULL,
0x498279eb10b2fba2ULL, 0xa7e97427ba3a9d80ULL, 0xf09342bf6e21d14fULL,
0x5dd9f842637c211fULL, 0x4c5d1e86c50f43caULL, 0x71da39db3892e3aaULL,
0xd3ec2a915715c642ULL
};
static const u64 c[KHAZAD_ROUNDS + 1] = {
0xba542f7453d3d24dULL, 0x50ac8dbf70529a4cULL, 0xead597d133515ba6ULL,
0xde48a899db32b7fcULL, 0xe39e919be2bb416eULL, 0xa5cb6b95a1f3b102ULL,
0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL
};
static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
/* key is supposed to be 32-bit aligned */
K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
/* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
ctx->E[r] = T0[(int)(K1 >> 56) ] ^
T1[(int)(K1 >> 48) & 0xff] ^
T2[(int)(K1 >> 40) & 0xff] ^
T3[(int)(K1 >> 32) & 0xff] ^
T4[(int)(K1 >> 24) & 0xff] ^
T5[(int)(K1 >> 16) & 0xff] ^
T6[(int)(K1 >> 8) & 0xff] ^
T7[(int)(K1 ) & 0xff] ^
c[r] ^ K2;
K2 = K1;
K1 = ctx->E[r];
}
/* Setup the decrypt key */
ctx->D[0] = ctx->E[KHAZAD_ROUNDS];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
K1 = ctx->E[KHAZAD_ROUNDS - r];
ctx->D[r] = T0[(int)S[(int)(K1 >> 56) ] & 0xff] ^
T1[(int)S[(int)(K1 >> 48) & 0xff] & 0xff] ^
T2[(int)S[(int)(K1 >> 40) & 0xff] & 0xff] ^
T3[(int)S[(int)(K1 >> 32) & 0xff] & 0xff] ^
T4[(int)S[(int)(K1 >> 24) & 0xff] & 0xff] ^
T5[(int)S[(int)(K1 >> 16) & 0xff] & 0xff] ^
T6[(int)S[(int)(K1 >> 8) & 0xff] & 0xff] ^
T7[(int)S[(int)(K1 ) & 0xff] & 0xff];
}
ctx->D[KHAZAD_ROUNDS] = ctx->E[0];
return 0;
}
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
u8 *ciphertext, const u8 *plaintext)
{
const __be64 *src = (const __be64 *)plaintext;
__be64 *dst = (__be64 *)ciphertext;
int r;
u64 state;
state = be64_to_cpu(*src) ^ roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^
T1[(int)(state >> 48) & 0xff] ^
T2[(int)(state >> 40) & 0xff] ^
T3[(int)(state >> 32) & 0xff] ^
T4[(int)(state >> 24) & 0xff] ^
T5[(int)(state >> 16) & 0xff] ^
T6[(int)(state >> 8) & 0xff] ^
T7[(int)(state ) & 0xff] ^
roundKey[r];
}
state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^
(T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^
(T2[(int)(state >> 40) & 0xff] & 0x0000ff0000000000ULL) ^
(T3[(int)(state >> 32) & 0xff] & 0x000000ff00000000ULL) ^
(T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^
(T5[(int)(state >> 16) & 0xff] & 0x0000000000ff0000ULL) ^
(T6[(int)(state >> 8) & 0xff] & 0x000000000000ff00ULL) ^
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS];
*dst = cpu_to_be64(state);
}
static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
khazad_crypt(ctx->E, dst, src);
}
static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
khazad_crypt(ctx->D, dst, src);
}
static struct crypto_alg khazad_alg = {
.cra_name = "khazad",
.cra_driver_name = "khazad-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct khazad_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = KHAZAD_KEY_SIZE,
.cia_max_keysize = KHAZAD_KEY_SIZE,
.cia_setkey = khazad_setkey,
.cia_encrypt = khazad_encrypt,
.cia_decrypt = khazad_decrypt } }
};
static int __init khazad_mod_init(void)
{
return crypto_register_alg(&khazad_alg);
}
static void __exit khazad_mod_fini(void)
{
crypto_unregister_alg(&khazad_alg);
}
subsys_initcall(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
MODULE_ALIAS_CRYPTO("khazad");
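/*
 * Illustrative use via the kernel crypto API (a sketch, not part of the
 * original file): Khazad is a 64-bit block cipher with a fixed 128-bit key,
 * so single blocks can be processed through the cipher interface.
 *
 *     struct crypto_cipher *tfm = crypto_alloc_cipher("khazad", 0, 0);
 *
 *     if (!IS_ERR(tfm)) {
 *             crypto_cipher_setkey(tfm, key, KHAZAD_KEY_SIZE);
 *             crypto_cipher_encrypt_one(tfm, dst, src);
 *             crypto_free_cipher(tfm);
 *     }
 */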
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEM_TYPES_H
#define _LINUX_SEM_TYPES_H
struct sem_undo_list;
struct sysv_sem {
#ifdef CONFIG_SYSVIPC
struct sem_undo_list *undo_list;
#endif
};
#endif /* _LINUX_SEM_TYPES_H */
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CEPH_STRIPER_H
#define _LINUX_CEPH_STRIPER_H
#include <linux/list.h>
#include <linux/types.h>
struct ceph_file_layout;
void ceph_calc_file_object_mapping(struct ceph_file_layout *l,
u64 off, u64 len,
u64 *objno, u64 *objoff, u32 *xlen);
struct ceph_object_extent {
struct list_head oe_item;
u64 oe_objno;
u64 oe_off;
u64 oe_len;
};
static inline void ceph_object_extent_init(struct ceph_object_extent *ex)
{
INIT_LIST_HEAD(&ex->oe_item);
}
/*
* Called for each mapped stripe unit.
*
* @bytes: number of bytes mapped, i.e. the minimum of the full length
* requested (file extent length) or the remainder of the stripe
* unit within an object
*/
typedef void (*ceph_object_extent_fn_t)(struct ceph_object_extent *ex,
u32 bytes, void *arg);
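/*
 * A minimal action_fn sketch (illustrative, not part of the original
 * header): accumulate the total number of mapped bytes, assuming @arg
 * points to a u64 running total.
 *
 *     static void count_bytes_fn(struct ceph_object_extent *ex,
 *                                u32 bytes, void *arg)
 *     {
 *             *(u64 *)arg += bytes;
 *     }
 */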
int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len,
struct list_head *object_extents,
struct ceph_object_extent *alloc_fn(void *arg),
void *alloc_arg,
ceph_object_extent_fn_t action_fn,
void *action_arg);
int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len,
struct list_head *object_extents,
ceph_object_extent_fn_t action_fn,
void *action_arg);
struct ceph_file_extent {
u64 fe_off;
u64 fe_len;
};
static inline u64 ceph_file_extents_bytes(struct ceph_file_extent *file_extents,
u32 num_file_extents)
{
u64 bytes = 0;
u32 i;
for (i = 0; i < num_file_extents; i++)
bytes += file_extents[i].fe_len;
return bytes;
}
int ceph_extent_to_file(struct ceph_file_layout *l,
u64 objno, u64 objoff, u64 objlen,
struct ceph_file_extent **file_extents,
u32 *num_file_extents);
u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size);
#endif
|
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* RDA8810PL SoC
*
* Copyright (c) 2017 Andreas Färber
* Copyright (c) 2018 Manivannan Sadhasivam
*/
#include <dt-bindings/interrupt-controller/irq.h>
/ {
compatible = "rda,8810pl";
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a5";
reg = <0x0>;
};
};
sram@100000 {
compatible = "mmio-sram";
reg = <0x100000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
};
modem@10000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x10000000 0xfffffff>;
gpioc@1a08000 {
compatible = "rda,8810pl-gpio";
reg = <0x1a08000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
ngpios = <32>;
};
};
apb@20800000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x20800000 0x100000>;
intc: interrupt-controller@0 {
compatible = "rda,8810pl-intc";
reg = <0x0 0x1000>;
interrupt-controller;
#interrupt-cells = <2>;
};
};
apb@20900000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x20900000 0x100000>;
timer@10000 {
compatible = "rda,8810pl-timer";
reg = <0x10000 0x1000>;
interrupts = <16 IRQ_TYPE_LEVEL_HIGH>,
<17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hwtimer", "ostimer";
};
gpioa@30000 {
compatible = "rda,8810pl-gpio";
reg = <0x30000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
ngpios = <32>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
};
gpiob@31000 {
compatible = "rda,8810pl-gpio";
reg = <0x31000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
ngpios = <32>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
};
gpiod@32000 {
compatible = "rda,8810pl-gpio";
reg = <0x32000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
ngpios = <32>;
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
};
};
apb@20a00000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x20a00000 0x100000>;
uart1: serial@0 {
compatible = "rda,8810pl-uart";
reg = <0x0 0x1000>;
interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
uart2: serial@10000 {
compatible = "rda,8810pl-uart";
reg = <0x10000 0x1000>;
interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
uart3: serial@90000 {
compatible = "rda,8810pl-uart";
reg = <0x90000 0x1000>;
interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
};
l2: cache-controller@21100000 {
compatible = "arm,pl310-cache";
reg = <0x21100000 0x1000>;
cache-unified;
cache-level = <2>;
};
};
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SPI access for Dialog DA9052 PMICs.
*
* Copyright(c) 2011 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/mfd/core.h>
#include <linux/spi/spi.h>
#include <linux/err.h>
#include <linux/mfd/da9052/da9052.h>
static int da9052_spi_probe(struct spi_device *spi)
{
struct regmap_config config;
int ret;
const struct spi_device_id *id = spi_get_device_id(spi);
struct da9052 *da9052;
da9052 = devm_kzalloc(&spi->dev, sizeof(struct da9052), GFP_KERNEL);
if (!da9052)
return -ENOMEM;
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret)
return ret;
da9052->dev = &spi->dev;
da9052->chip_irq = spi->irq;
spi_set_drvdata(spi, da9052);
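/*
 * DA9052 SPI frames carry a 7-bit register address, one R/W flag bit
 * (set for writes via write_flag_mask below), then 8 data bits, one
 * register per transfer.
 */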
config = da9052_regmap_config;
config.write_flag_mask = 1;
config.reg_bits = 7;
config.pad_bits = 1;
config.val_bits = 8;
config.use_single_read = true;
config.use_single_write = true;
da9052->regmap = devm_regmap_init_spi(spi, &config);
if (IS_ERR(da9052->regmap)) {
ret = PTR_ERR(da9052->regmap);
dev_err(&spi->dev, "Failed to allocate register map: %d\n",
ret);
return ret;
}
return da9052_device_init(da9052, id->driver_data);
}
static void da9052_spi_remove(struct spi_device *spi)
{
struct da9052 *da9052 = spi_get_drvdata(spi);
da9052_device_exit(da9052);
}
static const struct spi_device_id da9052_spi_id[] = {
{"da9052", DA9052},
{"da9053-aa", DA9053_AA},
{"da9053-ba", DA9053_BA},
{"da9053-bb", DA9053_BB},
{"da9053-bc", DA9053_BC},
{}
};
static struct spi_driver da9052_spi_driver = {
.probe = da9052_spi_probe,
.remove = da9052_spi_remove,
.id_table = da9052_spi_id,
.driver = {
.name = "da9052",
},
};
static int __init da9052_spi_init(void)
{
int ret;
ret = spi_register_driver(&da9052_spi_driver);
if (ret != 0) {
pr_err("Failed to register DA9052 SPI driver, %d\n", ret);
return ret;
}
return 0;
}
subsys_initcall(da9052_spi_init);
static void __exit da9052_spi_exit(void)
{
spi_unregister_driver(&da9052_spi_driver);
}
module_exit(da9052_spi_exit);
MODULE_AUTHOR("David Dajun Chen <[email protected]>");
MODULE_DESCRIPTION("SPI driver for Dialog DA9052 PMIC");
|
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/fault-inject.h>
/*
* setup_fault_attr() is a helper function for various __setup handlers, so it
* returns 0 on error, because that is what __setup handlers do.
*/
int setup_fault_attr(struct fault_attr *attr, char *str)
{
unsigned long probability;
unsigned long interval;
int times;
int space;
/* "<interval>,<probability>,<space>,<times>" */
if (sscanf(str, "%lu,%lu,%d,%d",
&interval, &probability, &space, &times) < 4) {
printk(KERN_WARNING
"FAULT_INJECTION: failed to parse arguments\n");
return 0;
}
attr->probability = probability;
attr->interval = interval;
atomic_set(&attr->times, times);
atomic_set(&attr->space, space);
return 1;
}
EXPORT_SYMBOL_GPL(setup_fault_attr);
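/*
 * Example (illustrative; the option name is one known user of this helper):
 * boot parameters such as fail_make_request= hand their argument string to
 * setup_fault_attr(), so
 *
 *     fail_make_request=1,100,0,-1
 *
 * parses as interval=1, probability=100, space=0 and times=-1 (unlimited).
 */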
static void fail_dump(struct fault_attr *attr)
{
if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
"name %pd, interval %lu, probability %lu, "
"space %d, times %d\n", attr->dname,
attr->interval, attr->probability,
atomic_read(&attr->space),
atomic_read(&attr->times));
if (attr->verbose > 1)
dump_stack();
}
}
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
static bool fail_task(struct fault_attr *attr, struct task_struct *task)
{
return in_task() && task->make_it_fail;
}
#define MAX_STACK_TRACE_DEPTH 32
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
static bool fail_stacktrace(struct fault_attr *attr)
{
int depth = attr->stacktrace_depth;
unsigned long entries[MAX_STACK_TRACE_DEPTH];
int n, nr_entries;
bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
if (depth == 0 || (found && !attr->reject_start && !attr->reject_end))
return found;
nr_entries = stack_trace_save(entries, depth, 1);
for (n = 0; n < nr_entries; n++) {
if (attr->reject_start <= entries[n] &&
entries[n] < attr->reject_end)
return false;
if (attr->require_start <= entries[n] &&
entries[n] < attr->require_end)
found = true;
}
return found;
}
#else
static inline bool fail_stacktrace(struct fault_attr *attr)
{
return true;
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
/*
* This code is stolen from failmalloc-1.0
* http://www.nongnu.org/failmalloc/
*/
bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
{
bool stack_checked = false;
if (in_task()) {
unsigned int fail_nth = READ_ONCE(current->fail_nth);
if (fail_nth) {
if (!fail_stacktrace(attr))
return false;
stack_checked = true;
fail_nth--;
WRITE_ONCE(current->fail_nth, fail_nth);
if (!fail_nth)
goto fail;
return false;
}
}
/* No need to check any other properties if the probability is 0 */
if (attr->probability == 0)
return false;
if (attr->task_filter && !fail_task(attr, current))
return false;
if (atomic_read(&attr->times) == 0)
return false;
if (!stack_checked && !fail_stacktrace(attr))
return false;
if (atomic_read(&attr->space) > size) {
atomic_sub(size, &attr->space);
return false;
}
if (attr->interval > 1) {
attr->count++;
if (attr->count % attr->interval)
return false;
}
if (attr->probability <= get_random_u32_below(100))
return false;
fail:
if (!(flags & FAULT_NOWARN))
fail_dump(attr);
if (atomic_read(&attr->times) != -1)
atomic_dec_not_zero(&attr->times);
return true;
}
bool should_fail(struct fault_attr *attr, ssize_t size)
{
return should_fail_ex(attr, size, 0);
}
EXPORT_SYMBOL_GPL(should_fail);
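/*
 * Typical caller sketch (assumed, not from this file): a subsystem declares
 * its own fault_attr and consults should_fail() on each attempt, turning a
 * hit into an artificial failure.
 *
 *     static DECLARE_FAULT_ATTR(fail_foo_alloc);
 *
 *     void *foo_alloc(size_t size)
 *     {
 *             if (should_fail(&fail_foo_alloc, size))
 *                     return NULL;
 *             return kmalloc(size, GFP_KERNEL);
 *     }
 */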
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int debugfs_ul_set(void *data, u64 val)
{
*(unsigned long *)data = val;
return 0;
}
static int debugfs_ul_get(void *data, u64 *val)
{
*val = *(unsigned long *)data;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
static void debugfs_create_ul(const char *name, umode_t mode,
struct dentry *parent, unsigned long *value)
{
debugfs_create_file(name, mode, parent, value, &fops_ul);
}
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
static int debugfs_stacktrace_depth_set(void *data, u64 val)
{
*(unsigned long *)data =
min_t(unsigned long, val, MAX_STACK_TRACE_DEPTH);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
debugfs_stacktrace_depth_set, "%llu\n");
static void debugfs_create_stacktrace_depth(const char *name, umode_t mode,
struct dentry *parent,
unsigned long *value)
{
debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth);
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
struct dentry *fault_create_debugfs_attr(const char *name,
struct dentry *parent, struct fault_attr *attr)
{
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
dir = debugfs_create_dir(name, parent);
if (IS_ERR(dir))
return dir;
debugfs_create_ul("probability", mode, dir, &attr->probability);
debugfs_create_ul("interval", mode, dir, &attr->interval);
debugfs_create_atomic_t("times", mode, dir, &attr->times);
debugfs_create_atomic_t("space", mode, dir, &attr->space);
debugfs_create_ul("verbose", mode, dir, &attr->verbose);
debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
&attr->ratelimit_state.interval);
debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
&attr->ratelimit_state.burst);
debugfs_create_bool("task-filter", mode, dir, &attr->task_filter);
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
&attr->stacktrace_depth);
debugfs_create_xul("require-start", mode, dir, &attr->require_start);
debugfs_create_xul("require-end", mode, dir, &attr->require_end);
debugfs_create_xul("reject-start", mode, dir, &attr->reject_start);
debugfs_create_xul("reject-end", mode, dir, &attr->reject_end);
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
attr->dname = dget(dir);
return dir;
}
EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
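/*
 * The directory created above is driven from userspace (illustrative shell
 * session; "fail_foo" stands in for whatever name was passed):
 *
 *     echo 10 > /sys/kernel/debug/fail_foo/probability
 *     echo 100 > /sys/kernel/debug/fail_foo/interval
 *     echo -1 > /sys/kernel/debug/fail_foo/times
 */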
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
#ifdef CONFIG_FAULT_INJECTION_CONFIGFS
/* These configfs attribute utilities are copied from drivers/block/null_blk/main.c */
static ssize_t fault_uint_attr_show(unsigned int val, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", val);
}
static ssize_t fault_ulong_attr_show(unsigned long val, char *page)
{
return snprintf(page, PAGE_SIZE, "%lu\n", val);
}
static ssize_t fault_bool_attr_show(bool val, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", val);
}
static ssize_t fault_atomic_t_attr_show(atomic_t val, char *page)
{
return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val));
}
static ssize_t fault_uint_attr_store(unsigned int *val, const char *page, size_t count)
{
unsigned int tmp;
int result;
result = kstrtouint(page, 0, &tmp);
if (result < 0)
return result;
*val = tmp;
return count;
}
static ssize_t fault_ulong_attr_store(unsigned long *val, const char *page, size_t count)
{
int result;
unsigned long tmp;
result = kstrtoul(page, 0, &tmp);
if (result < 0)
return result;
*val = tmp;
return count;
}
static ssize_t fault_bool_attr_store(bool *val, const char *page, size_t count)
{
bool tmp;
int result;
result = kstrtobool(page, &tmp);
if (result < 0)
return result;
*val = tmp;
return count;
}
static ssize_t fault_atomic_t_attr_store(atomic_t *val, const char *page, size_t count)
{
int tmp;
int result;
result = kstrtoint(page, 0, &tmp);
if (result < 0)
return result;
atomic_set(val, tmp);
return count;
}
#define CONFIGFS_ATTR_NAMED(_pfx, _name, _attr_name) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = _attr_name, \
.ca_mode = 0644, \
.ca_owner = THIS_MODULE, \
.show = _pfx##_name##_show, \
.store = _pfx##_name##_store, \
}
static struct fault_config *to_fault_config(struct config_item *item)
{
return container_of(to_config_group(item), struct fault_config, group);
}
#define FAULT_CONFIGFS_ATTR_NAMED(NAME, ATTR_NAME, MEMBER, TYPE) \
static ssize_t fault_##NAME##_show(struct config_item *item, char *page) \
{ \
return fault_##TYPE##_attr_show(to_fault_config(item)->attr.MEMBER, page); \
} \
static ssize_t fault_##NAME##_store(struct config_item *item, const char *page, size_t count) \
{ \
struct fault_config *config = to_fault_config(item); \
return fault_##TYPE##_attr_store(&config->attr.MEMBER, page, count); \
} \
CONFIGFS_ATTR_NAMED(fault_, NAME, ATTR_NAME)
#define FAULT_CONFIGFS_ATTR(NAME, TYPE) \
FAULT_CONFIGFS_ATTR_NAMED(NAME, __stringify(NAME), NAME, TYPE)
FAULT_CONFIGFS_ATTR(probability, ulong);
FAULT_CONFIGFS_ATTR(interval, ulong);
FAULT_CONFIGFS_ATTR(times, atomic_t);
FAULT_CONFIGFS_ATTR(space, atomic_t);
FAULT_CONFIGFS_ATTR(verbose, ulong);
FAULT_CONFIGFS_ATTR_NAMED(ratelimit_interval, "verbose_ratelimit_interval_ms",
ratelimit_state.interval, uint);
FAULT_CONFIGFS_ATTR_NAMED(ratelimit_burst, "verbose_ratelimit_burst",
ratelimit_state.burst, uint);
FAULT_CONFIGFS_ATTR_NAMED(task_filter, "task-filter", task_filter, bool);
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
static ssize_t fault_stacktrace_depth_show(struct config_item *item, char *page)
{
return fault_ulong_attr_show(to_fault_config(item)->attr.stacktrace_depth, page);
}
static ssize_t fault_stacktrace_depth_store(struct config_item *item, const char *page,
size_t count)
{
int result;
unsigned long tmp;
result = kstrtoul(page, 0, &tmp);
if (result < 0)
return result;
to_fault_config(item)->attr.stacktrace_depth =
min_t(unsigned long, tmp, MAX_STACK_TRACE_DEPTH);
return count;
}
CONFIGFS_ATTR_NAMED(fault_, stacktrace_depth, "stacktrace-depth");
static ssize_t fault_xul_attr_show(unsigned long val, char *page)
{
return snprintf(page, PAGE_SIZE,
sizeof(val) == sizeof(u32) ? "0x%08lx\n" : "0x%016lx\n", val);
}
static ssize_t fault_xul_attr_store(unsigned long *val, const char *page, size_t count)
{
return fault_ulong_attr_store(val, page, count);
}
FAULT_CONFIGFS_ATTR_NAMED(require_start, "require-start", require_start, xul);
FAULT_CONFIGFS_ATTR_NAMED(require_end, "require-end", require_end, xul);
FAULT_CONFIGFS_ATTR_NAMED(reject_start, "reject-start", reject_start, xul);
FAULT_CONFIGFS_ATTR_NAMED(reject_end, "reject-end", reject_end, xul);
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
static struct configfs_attribute *fault_config_attrs[] = {
&fault_attr_probability,
&fault_attr_interval,
&fault_attr_times,
&fault_attr_space,
&fault_attr_verbose,
&fault_attr_ratelimit_interval,
&fault_attr_ratelimit_burst,
&fault_attr_task_filter,
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
&fault_attr_stacktrace_depth,
&fault_attr_require_start,
&fault_attr_require_end,
&fault_attr_reject_start,
&fault_attr_reject_end,
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
NULL,
};
static const struct config_item_type fault_config_type = {
.ct_attrs = fault_config_attrs,
.ct_owner = THIS_MODULE,
};
void fault_config_init(struct fault_config *config, const char *name)
{
config_group_init_type_name(&config->group, name, &fault_config_type);
}
EXPORT_SYMBOL_GPL(fault_config_init);
#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
|
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010-2015, Intel Corporation.
*/
#include <type_support.h> /*uint32_t */
#include "gp_timer.h" /*system_local.h,
gp_timer_public.h*/
#ifndef __INLINE_GP_TIMER__
#include "gp_timer_private.h" /*device_access.h*/
#endif /* __INLINE_GP_TIMER__ */
#include "system_local.h"
/* FIXME: not sure if reg_load(), reg_store() should be API.
*/
static uint32_t
gp_timer_reg_load(uint32_t reg);
static void
gp_timer_reg_store(uint32_t reg, uint32_t value);
static uint32_t
gp_timer_reg_load(uint32_t reg)
{
return ia_css_device_load_uint32(
GP_TIMER_BASE +
(reg * sizeof(uint32_t)));
}
static void
gp_timer_reg_store(uint32_t reg, uint32_t value)
{
ia_css_device_store_uint32((GP_TIMER_BASE +
(reg * sizeof(uint32_t))),
value);
}
void gp_timer_init(gp_timer_ID_t ID)
{
/* set_overall_enable*/
gp_timer_reg_store(_REG_GP_TIMER_OVERALL_ENABLE, 1);
/*set enable*/
gp_timer_reg_store(_REG_GP_TIMER_ENABLE_ID(ID), 1);
/* set signal select */
gp_timer_reg_store(_REG_GP_TIMER_SIGNAL_SELECT_ID(ID), GP_TIMER_SIGNAL_SELECT);
/*set count type */
gp_timer_reg_store(_REG_GP_TIMER_COUNT_TYPE_ID(ID), GP_TIMER_COUNT_TYPE_LOW);
/*reset gp timer */
gp_timer_reg_store(_REG_GP_TIMER_RESET_REG, 0xFF);
}
uint32_t
gp_timer_read(gp_timer_ID_t ID)
{
return gp_timer_reg_load(_REG_GP_TIMER_VALUE_ID(ID));
}
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2023 Red Hat
*/
#include "io-submitter.h"
#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include "memory-alloc.h"
#include "permassert.h"
#include "data-vio.h"
#include "logger.h"
#include "types.h"
#include "vdo.h"
#include "vio.h"
/*
* Submission of bio operations to the underlying storage device will go through a separate work
* queue thread (or more than one) to prevent blocking in other threads if the storage device has a
* full queue. The plug structure allows that thread to do better batching of requests to make the
* I/O more efficient.
*
* When multiple worker threads are used, a thread is chosen for an I/O operation submission based
* on the PBN, so a given PBN will consistently wind up on the same thread. Flush operations are
* assigned round-robin.
*
* The map (protected by the mutex) collects pending I/O operations so that the worker thread can
* reorder them to try to encourage I/O request merging in the request queue underneath.
*/
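/*
 * A plausible zone-selection scheme for the PBN-based choice described
 * above (illustrative only; the real mapping is done by the caller that
 * sets vio->bio_zone):
 *
 *     zone = (pbn / bio_queue_rotation_interval) % num_bio_queues_used;
 *
 * so runs of consecutive PBNs stay on one thread before rotating.
 */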
struct bio_queue_data {
struct vdo_work_queue *queue;
struct blk_plug plug;
struct int_map *map;
struct mutex lock;
unsigned int queue_number;
};
struct io_submitter {
unsigned int num_bio_queues_used;
unsigned int bio_queue_rotation_interval;
struct bio_queue_data bio_queue_data[];
};
static void start_bio_queue(void *ptr)
{
struct bio_queue_data *bio_queue_data = ptr;
blk_start_plug(&bio_queue_data->plug);
}
static void finish_bio_queue(void *ptr)
{
struct bio_queue_data *bio_queue_data = ptr;
blk_finish_plug(&bio_queue_data->plug);
}
static const struct vdo_work_queue_type bio_queue_type = {
.start = start_bio_queue,
.finish = finish_bio_queue,
.max_priority = BIO_Q_MAX_PRIORITY,
.default_priority = BIO_Q_DATA_PRIORITY,
};
/**
* count_all_bios() - Determine which bio counter to use.
* @vio: The vio associated with the bio.
* @bio: The bio to count.
*/
static void count_all_bios(struct vio *vio, struct bio *bio)
{
struct atomic_statistics *stats = &vio->completion.vdo->stats;
if (is_data_vio(vio)) {
vdo_count_bios(&stats->bios_out, bio);
return;
}
vdo_count_bios(&stats->bios_meta, bio);
if (vio->type == VIO_TYPE_RECOVERY_JOURNAL)
vdo_count_bios(&stats->bios_journal, bio);
else if (vio->type == VIO_TYPE_BLOCK_MAP)
vdo_count_bios(&stats->bios_page_cache, bio);
}
/**
* assert_in_bio_zone() - Assert that a vio is in the correct bio zone and not in interrupt
* context.
* @vio: The vio to check.
*/
static void assert_in_bio_zone(struct vio *vio)
{
VDO_ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
assert_vio_in_bio_zone(vio);
}
/**
* send_bio_to_device() - Update stats and tracing info, then submit the supplied bio to the OS for
* processing.
* @vio: The vio associated with the bio.
* @bio: The bio to submit to the OS.
*/
static void send_bio_to_device(struct vio *vio, struct bio *bio)
{
struct vdo *vdo = vio->completion.vdo;
assert_in_bio_zone(vio);
atomic64_inc(&vdo->stats.bios_submitted);
count_all_bios(vio, bio);
bio_set_dev(bio, vdo_get_backing_device(vdo));
submit_bio_noacct(bio);
}
/**
* vdo_submit_vio() - Submits a vio's bio to the underlying block device. May block if the device
* is busy. This callback should be used by vios which did not attempt to merge.
*/
void vdo_submit_vio(struct vdo_completion *completion)
{
struct vio *vio = as_vio(completion);
send_bio_to_device(vio, vio->bio);
}
/**
* get_bio_list() - Extract the list of bios to submit from a vio.
* @vio: The vio submitting I/O.
*
* The list will always contain at least one entry (the bio for the vio on which it is called), but
* other bios may have been merged with it as well.
*
* Return: The head of the bio list to submit.
*/
static struct bio *get_bio_list(struct vio *vio)
{
struct bio *bio;
struct io_submitter *submitter = vio->completion.vdo->io_submitter;
struct bio_queue_data *bio_queue_data = &(submitter->bio_queue_data[vio->bio_zone]);
assert_in_bio_zone(vio);
mutex_lock(&bio_queue_data->lock);
vdo_int_map_remove(bio_queue_data->map,
vio->bios_merged.head->bi_iter.bi_sector);
vdo_int_map_remove(bio_queue_data->map,
vio->bios_merged.tail->bi_iter.bi_sector);
bio = vio->bios_merged.head;
bio_list_init(&vio->bios_merged);
mutex_unlock(&bio_queue_data->lock);
return bio;
}
/**
* submit_data_vio() - Submit a data_vio's bio to the storage below along with
* any bios that have been merged with it.
*
* Context: This call may block and so should only be called from a bio thread.
*/
static void submit_data_vio(struct vdo_completion *completion)
{
struct bio *bio, *next;
struct vio *vio = as_vio(completion);
assert_in_bio_zone(vio);
for (bio = get_bio_list(vio); bio != NULL; bio = next) {
next = bio->bi_next;
bio->bi_next = NULL;
send_bio_to_device((struct vio *) bio->bi_private, bio);
}
}
/**
* get_mergeable_locked() - Attempt to find an already queued bio that the current bio can be
* merged with.
* @map: The bio map to use for merging.
* @vio: The vio we want to merge.
* @back_merge: Set to true for a back merge, false for a front merge.
*
* There are two types of merging possible, forward and backward, which are distinguished by a flag
* that uses kernel elevator terminology.
*
* Return: the vio to merge to, NULL if no merging is possible.
*/
static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio,
bool back_merge)
{
struct bio *bio = vio->bio;
sector_t merge_sector = bio->bi_iter.bi_sector;
struct vio *vio_merge;
if (back_merge)
merge_sector -= VDO_SECTORS_PER_BLOCK;
else
merge_sector += VDO_SECTORS_PER_BLOCK;
vio_merge = vdo_int_map_get(map, merge_sector);
if (vio_merge == NULL)
return NULL;
if (vio->completion.priority != vio_merge->completion.priority)
return NULL;
if (bio_data_dir(bio) != bio_data_dir(vio_merge->bio))
return NULL;
if (bio_list_empty(&vio_merge->bios_merged))
return NULL;
if (back_merge) {
return (vio_merge->bios_merged.tail->bi_iter.bi_sector == merge_sector ?
vio_merge : NULL);
}
return (vio_merge->bios_merged.head->bi_iter.bi_sector == merge_sector ?
vio_merge : NULL);
}
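/*
 * Worked example (illustrative): with 4 KB blocks, VDO_SECTORS_PER_BLOCK
 * is 8, so a vio whose bio starts at sector 64 back-merges onto a queued
 * vio whose merged list ends at sector 56, and front-merges onto one whose
 * merged list starts at sector 72.
 */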
static int map_merged_vio(struct int_map *bio_map, struct vio *vio)
{
int result;
sector_t bio_sector;
bio_sector = vio->bios_merged.head->bi_iter.bi_sector;
result = vdo_int_map_put(bio_map, bio_sector, vio, true, NULL);
if (result != VDO_SUCCESS)
return result;
bio_sector = vio->bios_merged.tail->bi_iter.bi_sector;
return vdo_int_map_put(bio_map, bio_sector, vio, true, NULL);
}
static int merge_to_prev_tail(struct int_map *bio_map, struct vio *vio,
struct vio *prev_vio)
{
vdo_int_map_remove(bio_map, prev_vio->bios_merged.tail->bi_iter.bi_sector);
bio_list_merge(&prev_vio->bios_merged, &vio->bios_merged);
return map_merged_vio(bio_map, prev_vio);
}
static int merge_to_next_head(struct int_map *bio_map, struct vio *vio,
struct vio *next_vio)
{
/*
* Handle "next merge" and "gap fill" cases the same way so as to reorder bios in a way
* that's compatible with using funnel queues in work queues. This avoids removing an
* existing completion.
*/
vdo_int_map_remove(bio_map, next_vio->bios_merged.head->bi_iter.bi_sector);
bio_list_merge_head(&next_vio->bios_merged, &vio->bios_merged);
return map_merged_vio(bio_map, next_vio);
}
/**
* try_bio_map_merge() - Attempt to merge a vio's bio with other pending I/Os.
* @vio: The vio to merge.
*
* Currently this is only used for data_vios, but is broken out for future use with metadata vios.
*
* Return: whether or not the vio was merged.
*/
static bool try_bio_map_merge(struct vio *vio)
{
int result;
bool merged = true;
struct bio *bio = vio->bio;
struct vio *prev_vio, *next_vio;
struct vdo *vdo = vio->completion.vdo;
struct bio_queue_data *bio_queue_data =
&vdo->io_submitter->bio_queue_data[vio->bio_zone];
bio->bi_next = NULL;
bio_list_init(&vio->bios_merged);
bio_list_add(&vio->bios_merged, bio);
mutex_lock(&bio_queue_data->lock);
prev_vio = get_mergeable_locked(bio_queue_data->map, vio, true);
next_vio = get_mergeable_locked(bio_queue_data->map, vio, false);
if (prev_vio == next_vio)
next_vio = NULL;
if ((prev_vio == NULL) && (next_vio == NULL)) {
/* no merge. just add to bio_queue */
merged = false;
result = vdo_int_map_put(bio_queue_data->map,
bio->bi_iter.bi_sector,
vio, true, NULL);
} else if (next_vio == NULL) {
/* Only prev. merge to prev's tail */
result = merge_to_prev_tail(bio_queue_data->map, vio, prev_vio);
} else {
/* Only next. merge to next's head */
result = merge_to_next_head(bio_queue_data->map, vio, next_vio);
}
mutex_unlock(&bio_queue_data->lock);
/* We don't care about failure of int_map_put in this case. */
VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
return merged;
}
/**
* vdo_submit_data_vio() - Submit I/O for a data_vio.
* @data_vio: the data_vio for which to issue I/O.
*
* If possible, this I/O will be merged with other pending I/Os. Otherwise, the data_vio will be sent to
* the appropriate bio zone directly.
*/
void vdo_submit_data_vio(struct data_vio *data_vio)
{
if (try_bio_map_merge(&data_vio->vio))
return;
launch_data_vio_bio_zone_callback(data_vio, submit_data_vio);
}
/**
* __submit_metadata_vio() - Submit I/O for a metadata vio.
* @vio: the vio for which to issue I/O
* @physical: the physical block number to read or write
* @callback: the bio endio function which will be called after the I/O completes
* @error_handler: the handler for submission or I/O errors (may be NULL)
* @operation: the type of I/O to perform
* @data: the buffer to read or write (may be NULL)
*
* The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block
* other vdo threads.
*
* The error handler is guaranteed to run on the correct thread only as long as the thread calling
* this function and the thread set in the endio callback are the same, and no error can occur on
* the bio queue. Currently this holds for all callers, but additional care will be needed if this
* ever changes.
*/
void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
bio_end_io_t callback, vdo_action_fn error_handler,
blk_opf_t operation, char *data)
{
int result;
struct vdo_completion *completion = &vio->completion;
const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);
VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
vdo_reset_completion(completion);
completion->error_handler = error_handler;
result = vio_reset_bio(vio, data, callback, operation | REQ_META, physical);
if (result != VDO_SUCCESS) {
continue_vio(vio, result);
return;
}
vdo_set_completion_callback(completion, vdo_submit_vio,
get_vio_bio_zone_thread_id(vio));
vdo_launch_completion_with_priority(completion, get_metadata_priority(vio));
}
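/*
 * Illustrative call (a sketch; vdo_submit_metadata_vio() is assumed to be
 * the wrapper that passes the vio's own buffer as @data): read the block
 * at pbn, completing in on_read_done, with errors routed to handle_error.
 *
 *     vdo_submit_metadata_vio(vio, pbn, on_read_done, handle_error,
 *                             REQ_OP_READ);
 */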
/**
* vdo_make_io_submitter() - Create an io_submitter structure.
* @thread_count: Number of bio-submission threads to set up.
* @rotation_interval: Interval to use when rotating between bio-submission threads when enqueuing
* completions.
* @max_requests_active: Number of bios for merge tracking.
* @vdo: The vdo which will use this submitter.
* @io_submitter_ptr: pointer to the new data structure.
*
* Return: VDO_SUCCESS or an error.
*/
int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_interval,
unsigned int max_requests_active, struct vdo *vdo,
struct io_submitter **io_submitter_ptr)
{
unsigned int i;
struct io_submitter *io_submitter;
int result;
result = vdo_allocate_extended(struct io_submitter, thread_count,
struct bio_queue_data, "bio submission data",
&io_submitter);
if (result != VDO_SUCCESS)
return result;
io_submitter->bio_queue_rotation_interval = rotation_interval;
/* Setup for each bio-submission work queue */
for (i = 0; i < thread_count; i++) {
struct bio_queue_data *bio_queue_data = &io_submitter->bio_queue_data[i];
mutex_init(&bio_queue_data->lock);
/*
* One I/O operation per request, but both first & last sector numbers.
*
* If requests are assigned to threads round-robin, they should be distributed
* quite evenly. But if they're assigned based on PBN, things can sometimes be very
* uneven. So for now, we'll assume that all requests *may* wind up on one thread,
* and thus all in the same map.
*/
result = vdo_int_map_create(max_requests_active * 2,
&bio_queue_data->map);
if (result != VDO_SUCCESS) {
/*
* Clean up the partially initialized bio-queue entirely and indicate that
* initialization failed.
*/
vdo_log_error("bio map initialization failed %d", result);
vdo_cleanup_io_submitter(io_submitter);
vdo_free_io_submitter(io_submitter);
return result;
}
bio_queue_data->queue_number = i;
result = vdo_make_thread(vdo, vdo->thread_config.bio_threads[i],
&bio_queue_type, 1, (void **) &bio_queue_data);
if (result != VDO_SUCCESS) {
/*
* Clean up the partially initialized bio-queue entirely and indicate that
* initialization failed.
*/
vdo_int_map_free(vdo_forget(bio_queue_data->map));
vdo_log_error("bio queue initialization failed %d", result);
vdo_cleanup_io_submitter(io_submitter);
vdo_free_io_submitter(io_submitter);
return result;
}
bio_queue_data->queue = vdo->threads[vdo->thread_config.bio_threads[i]].queue;
io_submitter->num_bio_queues_used++;
}
*io_submitter_ptr = io_submitter;
return VDO_SUCCESS;
}
/**
* vdo_cleanup_io_submitter() - Tear down the io_submitter fields as needed for a physical layer.
* @io_submitter: The I/O submitter data to tear down (may be NULL).
*/
void vdo_cleanup_io_submitter(struct io_submitter *io_submitter)
{
int i;
if (io_submitter == NULL)
return;
for (i = io_submitter->num_bio_queues_used - 1; i >= 0; i--)
vdo_finish_work_queue(io_submitter->bio_queue_data[i].queue);
}
/**
* vdo_free_io_submitter() - Free the io_submitter fields and structure as needed.
* @io_submitter: The I/O submitter data to destroy.
*
* This must be called after vdo_cleanup_io_submitter(). It is used to release resources late in
* the shutdown process to avoid or reduce the chance of race conditions.
*/
void vdo_free_io_submitter(struct io_submitter *io_submitter)
{
int i;
if (io_submitter == NULL)
return;
for (i = io_submitter->num_bio_queues_used - 1; i >= 0; i--) {
io_submitter->num_bio_queues_used--;
/* vdo_destroy() will free the work queue, so just give up our reference to it. */
vdo_forget(io_submitter->bio_queue_data[i].queue);
vdo_int_map_free(vdo_forget(io_submitter->bio_queue_data[i].map));
}
vdo_free(io_submitter);
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
* Author: Peter Ujfalusi <[email protected]>
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <video/mipi_display.h>
struct osd101t2587_panel {
struct drm_panel base;
struct mipi_dsi_device *dsi;
struct regulator *supply;
const struct drm_display_mode *default_mode;
};
static inline struct osd101t2587_panel *ti_osd_panel(struct drm_panel *panel)
{
return container_of(panel, struct osd101t2587_panel, base);
}
static int osd101t2587_panel_disable(struct drm_panel *panel)
{
struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
return mipi_dsi_shutdown_peripheral(osd101t2587->dsi);
}
static int osd101t2587_panel_unprepare(struct drm_panel *panel)
{
struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
regulator_disable(osd101t2587->supply);
return 0;
}
static int osd101t2587_panel_prepare(struct drm_panel *panel)
{
struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
return regulator_enable(osd101t2587->supply);
}
static int osd101t2587_panel_enable(struct drm_panel *panel)
{
struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
return mipi_dsi_turn_on_peripheral(osd101t2587->dsi);
}
static const struct drm_display_mode default_mode_osd101t2587 = {
.clock = 164400,
.hdisplay = 1920,
.hsync_start = 1920 + 152,
.hsync_end = 1920 + 152 + 52,
.htotal = 1920 + 152 + 52 + 20,
.vdisplay = 1200,
.vsync_start = 1200 + 24,
.vsync_end = 1200 + 24 + 6,
.vtotal = 1200 + 24 + 6 + 48,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static int osd101t2587_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, osd101t2587->default_mode);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
osd101t2587->default_mode->hdisplay,
osd101t2587->default_mode->vdisplay,
drm_mode_vrefresh(osd101t2587->default_mode));
return -ENOMEM;
}
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
connector->display_info.width_mm = 217;
connector->display_info.height_mm = 136;
return 1;
}
static const struct drm_panel_funcs osd101t2587_panel_funcs = {
.disable = osd101t2587_panel_disable,
.unprepare = osd101t2587_panel_unprepare,
.prepare = osd101t2587_panel_prepare,
.enable = osd101t2587_panel_enable,
.get_modes = osd101t2587_panel_get_modes,
};
static const struct of_device_id osd101t2587_of_match[] = {
{
.compatible = "osddisplays,osd101t2587-53ts",
.data = &default_mode_osd101t2587,
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, osd101t2587_of_match);
static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
{
struct device *dev = &osd101t2587->dsi->dev;
int ret;
osd101t2587->supply = devm_regulator_get(dev, "power");
if (IS_ERR(osd101t2587->supply))
return PTR_ERR(osd101t2587->supply);
drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
&osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_of_backlight(&osd101t2587->base);
if (ret)
return ret;
drm_panel_add(&osd101t2587->base);
return 0;
}
static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
{
struct osd101t2587_panel *osd101t2587;
const struct of_device_id *id;
int ret;
id = of_match_node(osd101t2587_of_match, dsi->dev.of_node);
if (!id)
return -ENODEV;
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_NO_EOT_PACKET;
osd101t2587 = devm_kzalloc(&dsi->dev, sizeof(*osd101t2587), GFP_KERNEL);
if (!osd101t2587)
return -ENOMEM;
mipi_dsi_set_drvdata(dsi, osd101t2587);
osd101t2587->dsi = dsi;
osd101t2587->default_mode = id->data;
ret = osd101t2587_panel_add(osd101t2587);
if (ret < 0)
return ret;
ret = mipi_dsi_attach(dsi);
if (ret)
drm_panel_remove(&osd101t2587->base);
return ret;
}
static void osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
{
struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
int ret;
drm_panel_remove(&osd101t2587->base);
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
}
static struct mipi_dsi_driver osd101t2587_panel_driver = {
.driver = {
.name = "panel-osd-osd101t2587-53ts",
.of_match_table = osd101t2587_of_match,
},
.probe = osd101t2587_panel_probe,
.remove = osd101t2587_panel_remove,
};
module_mipi_dsi_driver(osd101t2587_panel_driver);
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("OSD101T2587-53TS DSI panel");
MODULE_LICENSE("GPL v2");
|
// SPDX-License-Identifier: GPL-2.0
/*
* UIO Hilscher CIF card driver
*
* (C) 2007 Hans J. Koch <[email protected]>
* Original code (C) 2005 Benedikt Spranger <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>
#include <asm/io.h>
#define PLX9030_INTCSR 0x4C
#define INTSCR_INT1_ENABLE 0x01
#define INTSCR_INT1_STATUS 0x04
#define INT1_ENABLED_AND_ACTIVE (INTSCR_INT1_ENABLE | INTSCR_INT1_STATUS)
#define PCI_SUBVENDOR_ID_PEP 0x1518
#define CIF_SUBDEVICE_PROFIBUS 0x430
#define CIF_SUBDEVICE_DEVICENET 0x432
static irqreturn_t hilscher_handler(int irq, struct uio_info *dev_info)
{
void __iomem *plx_intscr = dev_info->mem[0].internal_addr
+ PLX9030_INTCSR;
if ((ioread8(plx_intscr) & INT1_ENABLED_AND_ACTIVE)
!= INT1_ENABLED_AND_ACTIVE)
return IRQ_NONE;
/* Disable interrupt */
iowrite8(ioread8(plx_intscr) & ~INTSCR_INT1_ENABLE, plx_intscr);
return IRQ_HANDLED;
}
static int hilscher_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct uio_info *info;
info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if (pci_enable_device(dev))
return -ENODEV;
if (pci_request_regions(dev, "hilscher"))
goto out_disable;
info->mem[0].addr = pci_resource_start(dev, 0);
if (!info->mem[0].addr)
goto out_release;
info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
if (!info->mem[0].internal_addr)
goto out_release;
info->mem[0].size = pci_resource_len(dev, 0);
info->mem[0].memtype = UIO_MEM_PHYS;
info->mem[1].addr = pci_resource_start(dev, 2);
info->mem[1].size = pci_resource_len(dev, 2);
info->mem[1].memtype = UIO_MEM_PHYS;
switch (id->subdevice) {
case CIF_SUBDEVICE_PROFIBUS:
info->name = "CIF_Profibus";
break;
case CIF_SUBDEVICE_DEVICENET:
info->name = "CIF_Devicenet";
break;
default:
info->name = "CIF_???";
}
info->version = "0.0.1";
info->irq = dev->irq;
info->irq_flags = IRQF_SHARED;
info->handler = hilscher_handler;
if (uio_register_device(&dev->dev, info))
goto out_unmap;
pci_set_drvdata(dev, info);
return 0;
out_unmap:
iounmap(info->mem[0].internal_addr);
out_release:
pci_release_regions(dev);
out_disable:
pci_disable_device(dev);
return -ENODEV;
}
static void hilscher_pci_remove(struct pci_dev *dev)
{
struct uio_info *info = pci_get_drvdata(dev);
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
iounmap(info->mem[0].internal_addr);
}
static struct pci_device_id hilscher_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = PCI_SUBVENDOR_ID_PEP,
.subdevice = CIF_SUBDEVICE_PROFIBUS,
},
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = PCI_SUBVENDOR_ID_PEP,
.subdevice = CIF_SUBDEVICE_DEVICENET,
},
{ 0, }
};
static struct pci_driver hilscher_pci_driver = {
.name = "hilscher",
.id_table = hilscher_pci_ids,
.probe = hilscher_pci_probe,
.remove = hilscher_pci_remove,
};
module_pci_driver(hilscher_pci_driver);
MODULE_DEVICE_TABLE(pci, hilscher_pci_ids);
MODULE_DESCRIPTION("UIO Hilscher CIF card driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hans J. Koch, Benedikt Spranger");
|
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2022 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <[email protected]>
/*
* Hardware interface for Audio DSP on Rembrandt platform
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include "../ops.h"
#include "../sof-audio.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#define I2S_HS_INSTANCE 0
#define I2S_BT_INSTANCE 1
#define I2S_SP_INSTANCE 2
#define PDM_DMIC_INSTANCE 3
#define I2S_HS_VIRTUAL_INSTANCE 4
static struct snd_soc_dai_driver rembrandt_sof_dai[] = {
[I2S_HS_INSTANCE] = {
.id = I2S_HS_INSTANCE,
.name = "acp-sof-hs",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S HS controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_BT_INSTANCE] = {
.id = I2S_BT_INSTANCE,
.name = "acp-sof-bt",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S BT controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_SP_INSTANCE] = {
.id = I2S_SP_INSTANCE,
.name = "acp-sof-sp",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S SP controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[PDM_DMIC_INSTANCE] = {
.id = PDM_DMIC_INSTANCE,
.name = "acp-sof-dmic",
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 4,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_HS_VIRTUAL_INSTANCE] = {
.id = I2S_HS_VIRTUAL_INSTANCE,
.name = "acp-sof-hs-virtual",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
},
};
/* Rembrandt ops */
struct snd_sof_dsp_ops sof_rembrandt_ops;
EXPORT_SYMBOL_NS(sof_rembrandt_ops, "SND_SOC_SOF_AMD_COMMON");
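/*
 * Each ACP platform starts from a byte-for-byte copy of the common ops and
 * overrides only its DAI table, as done below. A hypothetical additional
 * platform would follow the same three lines (sketch, names invented):
 *
 *	memcpy(&sof_newchip_ops, &sof_acp_common_ops, sizeof(sof_newchip_ops));
 *	sof_newchip_ops.drv = newchip_sof_dai;
 *	sof_newchip_ops.num_drv = ARRAY_SIZE(newchip_sof_dai);
 */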
int sof_rembrandt_ops_init(struct snd_sof_dev *sdev)
{
/* common defaults */
memcpy(&sof_rembrandt_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
sof_rembrandt_ops.drv = rembrandt_sof_dai;
sof_rembrandt_ops.num_drv = ARRAY_SIZE(rembrandt_sof_dai);
return 0;
}
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Maxime Ripard
* Maxime Ripard <[email protected]>
*/
#include <linux/clk-provider.h>
#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mult.h"
struct _ccu_mult {
unsigned long mult, min, max;
};
static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
struct _ccu_mult *mult)
{
int _mult;
_mult = rate / parent;
if (_mult < mult->min)
_mult = mult->min;
if (_mult > mult->max)
_mult = mult->max;
mult->mult = _mult;
}
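/*
 * Example (illustrative): with a 24 MHz parent and a 1200 MHz target,
 * _mult = 1200000000 / 24000000 = 50, then clamped into [min, max].
 * The integer division rounds down, so before clamping the selected
 * rate never exceeds the requested one.
 */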
static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
struct clk_hw *parent,
unsigned long *parent_rate,
unsigned long rate,
void *data)
{
struct ccu_mult *cm = data;
struct _ccu_mult _cm;
_cm.min = cm->mult.min;
if (cm->mult.max)
_cm.max = cm->mult.max;
else
_cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;
ccu_mult_find_best(*parent_rate, rate, &_cm);
return *parent_rate * _cm.mult;
}
static void ccu_mult_disable(struct clk_hw *hw)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
return ccu_gate_helper_disable(&cm->common, cm->enable);
}
static int ccu_mult_enable(struct clk_hw *hw)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
return ccu_gate_helper_enable(&cm->common, cm->enable);
}
static int ccu_mult_is_enabled(struct clk_hw *hw)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
return ccu_gate_helper_is_enabled(&cm->common, cm->enable);
}
static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
unsigned long val;
u32 reg;
if (ccu_frac_helper_is_enabled(&cm->common, &cm->frac))
return ccu_frac_helper_read_rate(&cm->common, &cm->frac);
reg = readl(cm->common.base + cm->common.reg);
val = reg >> cm->mult.shift;
val &= (1 << cm->mult.width) - 1;
parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
parent_rate);
return parent_rate * (val + cm->mult.offset);
}
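/*
 * The register field stores (multiplier - offset): e.g. with width = 5 and
 * offset = 1 (values illustrative), a raw field of 24 decodes to x25 here,
 * and ccu_mult_set_rate() below writes back (_cm.mult - offset)
 * symmetrically.
 */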
static int ccu_mult_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
return ccu_mux_helper_determine_rate(&cm->common, &cm->mux,
req, ccu_mult_round_rate, cm);
}
static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
struct _ccu_mult _cm;
unsigned long flags;
u32 reg;
if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) {
ccu_frac_helper_enable(&cm->common, &cm->frac);
return ccu_frac_helper_set_rate(&cm->common, &cm->frac,
rate, cm->lock);
} else {
ccu_frac_helper_disable(&cm->common, &cm->frac);
}
parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
parent_rate);
_cm.min = cm->mult.min;
if (cm->mult.max)
_cm.max = cm->mult.max;
else
_cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;
ccu_mult_find_best(parent_rate, rate, &_cm);
spin_lock_irqsave(cm->common.lock, flags);
reg = readl(cm->common.base + cm->common.reg);
reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
reg |= ((_cm.mult - cm->mult.offset) << cm->mult.shift);
writel(reg, cm->common.base + cm->common.reg);
spin_unlock_irqrestore(cm->common.lock, flags);
ccu_helper_wait_for_lock(&cm->common, cm->lock);
return 0;
}
static u8 ccu_mult_get_parent(struct clk_hw *hw)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
return ccu_mux_helper_get_parent(&cm->common, &cm->mux);
}
static int ccu_mult_set_parent(struct clk_hw *hw, u8 index)
{
struct ccu_mult *cm = hw_to_ccu_mult(hw);
return ccu_mux_helper_set_parent(&cm->common, &cm->mux, index);
}
const struct clk_ops ccu_mult_ops = {
.disable = ccu_mult_disable,
.enable = ccu_mult_enable,
.is_enabled = ccu_mult_is_enabled,
.get_parent = ccu_mult_get_parent,
.set_parent = ccu_mult_set_parent,
.determine_rate = ccu_mult_determine_rate,
.recalc_rate = ccu_mult_recalc_rate,
.set_rate = ccu_mult_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, "SUNXI_CCU");
|
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef SMU14_DRIVER_IF_V14_0_H
#define SMU14_DRIVER_IF_V14_0_H
//Increment this version if SkuTable_t or BoardTable_t change
#define PPTABLE_VERSION 0x1B
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
#define NUM_MP0CLK_DPM_LEVELS 2
#define NUM_DCLK_DPM_LEVELS 8
#define NUM_VCLK_DPM_LEVELS 8
#define NUM_DISPCLK_DPM_LEVELS 8
#define NUM_DPPCLK_DPM_LEVELS 8
#define NUM_DPREFCLK_DPM_LEVELS 8
#define NUM_DCFCLK_DPM_LEVELS 8
#define NUM_DTBCLK_DPM_LEVELS 8
#define NUM_UCLK_DPM_LEVELS 6
#define NUM_LINK_LEVELS 3
#define NUM_FCLK_DPM_LEVELS 8
#define NUM_OD_FAN_MAX_POINTS 6
// Feature Control Defines
#define FEATURE_FW_DATA_READ_BIT 0
#define FEATURE_DPM_GFXCLK_BIT 1
#define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2
#define FEATURE_DPM_UCLK_BIT 3
#define FEATURE_DPM_FCLK_BIT 4
#define FEATURE_DPM_SOCCLK_BIT 5
#define FEATURE_DPM_LINK_BIT 6
#define FEATURE_DPM_DCN_BIT 7
#define FEATURE_VMEMP_SCALING_BIT 8
#define FEATURE_VDDIO_MEM_SCALING_BIT 9
#define FEATURE_DS_GFXCLK_BIT 10
#define FEATURE_DS_SOCCLK_BIT 11
#define FEATURE_DS_FCLK_BIT 12
#define FEATURE_DS_LCLK_BIT 13
#define FEATURE_DS_DCFCLK_BIT 14
#define FEATURE_DS_UCLK_BIT 15
#define FEATURE_GFX_ULV_BIT 16
#define FEATURE_FW_DSTATE_BIT 17
#define FEATURE_GFXOFF_BIT 18
#define FEATURE_BACO_BIT 19
#define FEATURE_MM_DPM_BIT 20
#define FEATURE_SOC_MPCLK_DS_BIT 21
#define FEATURE_BACO_MPCLK_DS_BIT 22
#define FEATURE_THROTTLERS_BIT 23
#define FEATURE_SMARTSHIFT_BIT 24
#define FEATURE_GTHR_BIT 25
#define FEATURE_ACDC_BIT 26
#define FEATURE_VR0HOT_BIT 27
#define FEATURE_FW_CTF_BIT 28
#define FEATURE_FAN_CONTROL_BIT 29
#define FEATURE_GFX_DCS_BIT 30
#define FEATURE_GFX_READ_MARGIN_BIT 31
#define FEATURE_LED_DISPLAY_BIT 32
#define FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 33
#define FEATURE_OUT_OF_BAND_MONITOR_BIT 34
#define FEATURE_OPTIMIZED_VMIN_BIT 35
#define FEATURE_GFX_IMU_BIT 36
#define FEATURE_BOOT_TIME_CAL_BIT 37
#define FEATURE_GFX_PCC_DFLL_BIT 38
#define FEATURE_SOC_CG_BIT 39
#define FEATURE_DF_CSTATE_BIT 40
#define FEATURE_GFX_EDC_BIT 41
#define FEATURE_BOOT_POWER_OPT_BIT 42
#define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 43
#define FEATURE_DS_VCN_BIT 44
#define FEATURE_BACO_CG_BIT 45
#define FEATURE_MEM_TEMP_READ_BIT 46
#define FEATURE_ATHUB_MMHUB_PG_BIT 47
#define FEATURE_SOC_PCC_BIT 48
#define FEATURE_EDC_PWRBRK_BIT 49
#define FEATURE_SOC_EDC_XVMIN_BIT 50
#define FEATURE_GFX_PSM_DIDT_BIT 51
#define FEATURE_APT_ALL_ENABLE_BIT 52
#define FEATURE_APT_SQ_THROTTLE_BIT 53
#define FEATURE_APT_PF_DCS_BIT 54
#define FEATURE_GFX_EDC_XVMIN_BIT 55
#define FEATURE_GFX_DIDT_XVMIN_BIT 56
#define FEATURE_FAN_ABNORMAL_BIT 57
#define FEATURE_CLOCK_STRETCH_COMPENSATOR 58
#define FEATURE_SPARE_59_BIT 59
#define FEATURE_SPARE_60_BIT 60
#define FEATURE_SPARE_61_BIT 61
#define FEATURE_SPARE_62_BIT 62
#define FEATURE_SPARE_63_BIT 63
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
#define ALLOWED_FEATURE_CTRL_SCPM	((1 << FEATURE_DPM_GFXCLK_BIT) | \
	(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
	(1 << FEATURE_DPM_UCLK_BIT) | \
	(1 << FEATURE_DPM_FCLK_BIT) | \
	(1 << FEATURE_DPM_SOCCLK_BIT) | \
	(1 << FEATURE_DPM_LINK_BIT) | \
	(1 << FEATURE_DPM_DCN_BIT) | \
	(1 << FEATURE_DS_GFXCLK_BIT) | \
	(1 << FEATURE_DS_SOCCLK_BIT) | \
	(1 << FEATURE_DS_FCLK_BIT) | \
	(1 << FEATURE_DS_LCLK_BIT) | \
	(1 << FEATURE_DS_DCFCLK_BIT) | \
	(1 << FEATURE_DS_UCLK_BIT) | \
	(1ULL << FEATURE_DS_VCN_BIT))
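// Usage sketch (assumption): a feature survives SCPM filtering when its bit
// remains set in the mask, e.g.
//	allowed = !!(ALLOWED_FEATURE_CTRL_SCPM & (1ULL << FEATURE_DPM_UCLK_BIT));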
//For use with feature control messages
typedef enum {
FEATURE_PWR_ALL,
FEATURE_PWR_S5,
FEATURE_PWR_BACO,
FEATURE_PWR_SOC,
FEATURE_PWR_GFX,
FEATURE_PWR_DOMAIN_COUNT,
} FEATURE_PWR_DOMAIN_e;
//For use with feature control + BTC save restore
typedef enum {
FEATURE_BTC_NOP,
FEATURE_BTC_SAVE,
FEATURE_BTC_RESTORE,
FEATURE_BTC_COUNT,
} FEATURE_BTC_e;
// Debug Overrides Bitmask
#define DEBUG_OVERRIDE_NOT_USE 0x00000001
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
#define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010
#define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020
#define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040
#define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080
#define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100
#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200
#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400
#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800
#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000
#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
#define VR_MAPPING_VR_SELECT_SHIFT 0x00
#define VR_MAPPING_PLANE_SELECT_MASK 0x02
#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01
// PSI Bit Defines
#define PSI_SEL_VR0_PLANE0_PSI0 0x01
#define PSI_SEL_VR0_PLANE0_PSI1 0x02
#define PSI_SEL_VR0_PLANE1_PSI0 0x04
#define PSI_SEL_VR0_PLANE1_PSI1 0x08
#define PSI_SEL_VR1_PLANE0_PSI0 0x10
#define PSI_SEL_VR1_PLANE0_PSI1 0x20
#define PSI_SEL_VR1_PLANE1_PSI0 0x40
#define PSI_SEL_VR1_PLANE1_PSI1 0x80
typedef enum {
SVI_PSI_0, // Full phase count (default)
SVI_PSI_1, // Phase count 1st level
SVI_PSI_2, // Phase count 2nd level
SVI_PSI_3, // Single phase operation + active diode emulation
SVI_PSI_4, // Single phase operation + passive diode emulation *optional*
SVI_PSI_5, // Reserved
SVI_PSI_6, // Power down to 0V (voltage regulation disabled)
SVI_PSI_7, // Automated phase shedding and diode emulation
} SVI_PSI_e;
// Throttler Control/Status Bits
#define THROTTLER_TEMP_EDGE_BIT 0
#define THROTTLER_TEMP_HOTSPOT_BIT 1
#define THROTTLER_TEMP_HOTSPOT_GFX_BIT 2
#define THROTTLER_TEMP_HOTSPOT_SOC_BIT 3
#define THROTTLER_TEMP_MEM_BIT 4
#define THROTTLER_TEMP_VR_GFX_BIT 5
#define THROTTLER_TEMP_VR_SOC_BIT 6
#define THROTTLER_TEMP_VR_MEM0_BIT 7
#define THROTTLER_TEMP_VR_MEM1_BIT 8
#define THROTTLER_TEMP_LIQUID0_BIT 9
#define THROTTLER_TEMP_LIQUID1_BIT 10
#define THROTTLER_TEMP_PLX_BIT 11
#define THROTTLER_TDC_GFX_BIT 12
#define THROTTLER_TDC_SOC_BIT 13
#define THROTTLER_PPT0_BIT 14
#define THROTTLER_PPT1_BIT 15
#define THROTTLER_PPT2_BIT 16
#define THROTTLER_PPT3_BIT 17
#define THROTTLER_FIT_BIT 18
#define THROTTLER_GFX_APCC_PLUS_BIT 19
#define THROTTLER_GFX_DVO_BIT 20
#define THROTTLER_COUNT 21
// FW DState Features Control Bits
#define FW_DSTATE_SOC_ULV_BIT 0
#define FW_DSTATE_G6_HSR_BIT 1
#define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2
#define FW_DSTATE_SMN_DS_BIT 3
#define FW_DSTATE_MP1_WHISPER_MODE_BIT 4
#define FW_DSTATE_SOC_LIV_MIN_BIT 5
#define FW_DSTATE_SOC_PLL_PWRDN_BIT 6
#define FW_DSTATE_MEM_PLL_PWRDN_BIT 7
#define FW_DSTATE_MALL_ALLOC_BIT 8
#define FW_DSTATE_MEM_PSI_BIT 9
#define FW_DSTATE_HSR_NON_STROBE_BIT 10
#define FW_DSTATE_MP0_ENTER_WFI_BIT 11
#define FW_DSTATE_MALL_FLUSH_BIT 12
#define FW_DSTATE_SOC_PSI_BIT 13
#define FW_DSTATE_MMHUB_INTERLOCK_BIT 14
#define FW_DSTATE_D0i3_2_QUIET_FW_BIT 15
#define FW_DSTATE_CLDO_PRG_BIT 16
#define FW_DSTATE_DF_PLL_PWRDN_BIT 17
//LED Display Mask & Control Bits
#define LED_DISPLAY_GFX_DPM_BIT 0
#define LED_DISPLAY_PCIE_BIT 1
#define LED_DISPLAY_ERROR_BIT 2
#define MEM_TEMP_READ_OUT_OF_BAND_BIT 0
#define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1
#define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2
typedef enum {
SMARTSHIFT_VERSION_1,
SMARTSHIFT_VERSION_2,
SMARTSHIFT_VERSION_3,
} SMARTSHIFT_VERSION_e;
typedef enum {
FOPT_CALC_AC_CALC_DC,
FOPT_PPTABLE_AC_CALC_DC,
FOPT_CALC_AC_PPTABLE_DC,
FOPT_PPTABLE_AC_PPTABLE_DC,
} FOPT_CALC_e;
typedef enum {
DRAM_BIT_WIDTH_DISABLED = 0,
DRAM_BIT_WIDTH_X_8 = 8,
DRAM_BIT_WIDTH_X_16 = 16,
DRAM_BIT_WIDTH_X_32 = 32,
DRAM_BIT_WIDTH_X_64 = 64,
DRAM_BIT_WIDTH_X_128 = 128,
DRAM_BIT_WIDTH_COUNT,
} DRAM_BIT_WIDTH_TYPE_e;
//I2C Interface
#define NUM_I2C_CONTROLLERS 8
#define I2C_CONTROLLER_ENABLED 1
#define I2C_CONTROLLER_DISABLED 0
#define MAX_SW_I2C_COMMANDS 24
typedef enum {
I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0
I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1
I2C_CONTROLLER_PORT_COUNT,
} I2cControllerPort_e;
typedef enum {
I2C_CONTROLLER_NAME_VR_GFX = 0,
I2C_CONTROLLER_NAME_VR_SOC,
I2C_CONTROLLER_NAME_VR_VMEMP,
I2C_CONTROLLER_NAME_VR_VDDIO,
I2C_CONTROLLER_NAME_LIQUID0,
I2C_CONTROLLER_NAME_LIQUID1,
I2C_CONTROLLER_NAME_PLX,
I2C_CONTROLLER_NAME_FAN_INTAKE,
I2C_CONTROLLER_NAME_COUNT,
} I2cControllerName_e;
typedef enum {
I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0,
I2C_CONTROLLER_THROTTLER_VR_GFX,
I2C_CONTROLLER_THROTTLER_VR_SOC,
I2C_CONTROLLER_THROTTLER_VR_VMEMP,
I2C_CONTROLLER_THROTTLER_VR_VDDIO,
I2C_CONTROLLER_THROTTLER_LIQUID0,
I2C_CONTROLLER_THROTTLER_LIQUID1,
I2C_CONTROLLER_THROTTLER_PLX,
I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
I2C_CONTROLLER_THROTTLER_INA3221,
I2C_CONTROLLER_THROTTLER_COUNT,
} I2cControllerThrottler_e;
typedef enum {
I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
I2C_CONTROLLER_PROTOCOL_VR_IR35217,
I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
I2C_CONTROLLER_PROTOCOL_INA3221,
I2C_CONTROLLER_PROTOCOL_TMP_MAX6604,
I2C_CONTROLLER_PROTOCOL_COUNT,
} I2cControllerProtocol_e;
typedef struct {
uint8_t Enabled;
uint8_t Speed;
uint8_t SlaveAddress;
uint8_t ControllerPort;
uint8_t ControllerName;
uint8_t ThermalThrotter;
uint8_t I2cProtocol;
uint8_t PaddingConfig;
} I2cControllerConfig_t;
typedef enum {
I2C_PORT_SVD_SCL = 0,
I2C_PORT_GPIO,
} I2cPort_e;
typedef enum {
I2C_SPEED_FAST_50K = 0, //50 Kbits/s
I2C_SPEED_FAST_100K, //100 Kbits/s
I2C_SPEED_FAST_400K, //400 Kbits/s
I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode)
I2C_SPEED_HIGH_2M, //2.3 Mbits/s
I2C_SPEED_COUNT,
} I2cSpeed_e;
typedef enum {
I2C_CMD_READ = 0,
I2C_CMD_WRITE,
I2C_CMD_COUNT,
} I2cCmdType_e;
#define CMDCONFIG_STOP_BIT 0
#define CMDCONFIG_RESTART_BIT 1
#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write
#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
typedef struct {
uint8_t ReadWriteData; //Return data for read. Data to send for write
	uint8_t CmdConfig; //Encodes whether the associated command carries a stop or restart condition, and whether it is a read or a write
} SwI2cCmd_t; //SW I2C Command Table
typedef struct {
uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1)
uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select
uint8_t SlaveAddress; //Slave address of device
uint8_t NumCmds; //Number of commands
SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
} SwI2cRequest_t; // SW I2C Request Table
typedef struct {
SwI2cRequest_t SwI2cRequest;
uint32_t Spare[8];
uint32_t MmHubPadding[8]; // SMU internal use
} SwI2cRequestExternal_t;
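/*
 * Example request (illustrative; slave address and register index are
 * invented): read one byte from register 0x05 of a slave at 0x5A by
 * writing the register index, then issuing a restarted read that ends
 * with a stop condition:
 *
 *	SwI2cRequest_t req = {0};
 *	req.I2CcontrollerPort = I2C_CONTROLLER_PORT_0;
 *	req.I2CSpeed          = I2C_SPEED_FAST_100K;
 *	req.SlaveAddress      = 0x5A;
 *	req.NumCmds           = 2;
 *	req.SwI2cCmds[0].ReadWriteData = 0x05;			// register index
 *	req.SwI2cCmds[0].CmdConfig = CMDCONFIG_READWRITE_MASK;	// write
 *	req.SwI2cCmds[1].CmdConfig = CMDCONFIG_RESTART_MASK |
 *				     CMDCONFIG_STOP_MASK;	// read, restart, stop
 */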
typedef struct {
uint64_t mca_umc_status;
uint64_t mca_umc_addr;
uint16_t ce_count_lo_chip;
uint16_t ce_count_hi_chip;
uint32_t eccPadding;
} EccInfo_t;
typedef struct {
EccInfo_t EccInfo[24];
} EccInfoTable_t;
#define EPCS_HIGH_POWER 600
#define EPCS_NORMAL_POWER 450
#define EPCS_LOW_POWER 300
#define EPCS_SHORTED_POWER 150
#define EPCS_NO_BOOTUP 0
typedef enum{
EPCS_SHORTED_LIMIT,
EPCS_LOW_POWER_LIMIT,
EPCS_NORMAL_POWER_LIMIT,
EPCS_HIGH_POWER_LIMIT,
EPCS_NOT_CONFIGURED,
EPCS_STATUS_COUNT,
} EPCS_STATUS_e;
//D3HOT sequences
typedef enum {
BACO_SEQUENCE,
MSR_SEQUENCE,
BAMACO_SEQUENCE,
ULPS_SEQUENCE,
D3HOT_SEQUENCE_COUNT,
} D3HOTSequence_e;
//This is aligned with RSMU PGFSM Register Mapping
typedef enum {
PG_DYNAMIC_MODE = 0,
PG_STATIC_MODE,
} PowerGatingMode_e;
//This is aligned with RSMU PGFSM Register Mapping
typedef enum {
PG_POWER_DOWN = 0,
PG_POWER_UP,
} PowerGatingSettings_e;
typedef struct {
uint32_t a; // store in IEEE float format in this variable
uint32_t b; // store in IEEE float format in this variable
uint32_t c; // store in IEEE float format in this variable
} QuadraticInt_t;
typedef struct {
uint32_t m; // store in IEEE float format in this variable
uint32_t b; // store in IEEE float format in this variable
} LinearInt_t;
typedef struct {
uint32_t a; // store in IEEE float format in this variable
uint32_t b; // store in IEEE float format in this variable
uint32_t c; // store in IEEE float format in this variable
} DroopInt_t;
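/*
 * The a/b/c and m/b members above carry raw IEEE-754 single-precision bit
 * patterns inside uint32_t. A consumer reinterprets the bits rather than
 * converting the integer value (sketch; "lin" is a hypothetical
 * LinearInt_t pointer):
 *
 *	float m, b;
 *	memcpy(&m, &lin->m, sizeof(m));
 *	memcpy(&b, &lin->b, sizeof(b));
 *	y = m * x + b;
 */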
typedef enum {
DCS_ARCH_DISABLED,
DCS_ARCH_FADCS,
DCS_ARCH_ASYNC,
} DCS_ARCH_e;
//Only Clks that have DPM descriptors are listed here
typedef enum {
PPCLK_GFXCLK = 0,
PPCLK_SOCCLK,
PPCLK_UCLK,
PPCLK_FCLK,
PPCLK_DCLK_0,
PPCLK_VCLK_0,
PPCLK_DISPCLK,
PPCLK_DPPCLK,
PPCLK_DPREFCLK,
PPCLK_DCFCLK,
PPCLK_DTBCLK,
PPCLK_COUNT,
} PPCLK_e;
typedef enum {
VOLTAGE_MODE_PPTABLE = 0,
VOLTAGE_MODE_FUSES,
VOLTAGE_MODE_COUNT,
} VOLTAGE_MODE_e;
typedef enum {
AVFS_VOLTAGE_GFX = 0,
AVFS_VOLTAGE_SOC,
AVFS_VOLTAGE_COUNT,
} AVFS_VOLTAGE_TYPE_e;
typedef enum {
AVFS_TEMP_COLD = 0,
AVFS_TEMP_HOT,
AVFS_TEMP_COUNT,
} AVFS_TEMP_e;
typedef enum {
AVFS_D_G,
AVFS_D_COUNT,
} AVFS_D_e;
typedef enum {
UCLK_DIV_BY_1 = 0,
UCLK_DIV_BY_2,
UCLK_DIV_BY_4,
UCLK_DIV_BY_8,
} UCLK_DIV_e;
typedef enum {
GPIO_INT_POLARITY_ACTIVE_LOW = 0,
GPIO_INT_POLARITY_ACTIVE_HIGH,
} GpioIntPolarity_e;
typedef enum {
PWR_CONFIG_TDP = 0,
PWR_CONFIG_TGP,
PWR_CONFIG_TCP_ESTIMATED,
PWR_CONFIG_TCP_MEASURED,
PWR_CONFIG_TBP_DESKTOP,
PWR_CONFIG_TBP_MOBILE,
} PwrConfig_e;
typedef struct {
uint8_t Padding;
uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM
uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used
uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. Reference FOPT_CALC_e
LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz)
uint32_t Padding3[3];
uint16_t Padding4;
uint16_t FoptimalDc; //Foptimal frequency in DC power mode.
uint16_t FoptimalAc; //Foptimal frequency in AC power mode.
uint16_t Padding2;
} DpmDescriptor_t;
typedef enum {
PPT_THROTTLER_PPT0,
PPT_THROTTLER_PPT1,
PPT_THROTTLER_PPT2,
PPT_THROTTLER_PPT3,
PPT_THROTTLER_COUNT
} PPT_THROTTLER_e;
typedef enum {
TEMP_EDGE,
TEMP_HOTSPOT,
TEMP_HOTSPOT_GFX,
TEMP_HOTSPOT_SOC,
TEMP_MEM,
TEMP_VR_GFX,
TEMP_VR_SOC,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
TEMP_LIQUID0,
TEMP_LIQUID1,
TEMP_PLX,
TEMP_COUNT,
} TEMP_e;
typedef enum {
TDC_THROTTLER_GFX,
TDC_THROTTLER_SOC,
TDC_THROTTLER_COUNT
} TDC_THROTTLER_e;
typedef enum {
SVI_PLANE_VDD_GFX,
SVI_PLANE_VDD_SOC,
SVI_PLANE_VDDCI_MEM,
SVI_PLANE_VDDIO_MEM,
SVI_PLANE_COUNT,
} SVI_PLANE_e;
typedef enum {
PMFW_VOLT_PLANE_GFX,
PMFW_VOLT_PLANE_SOC,
PMFW_VOLT_PLANE_COUNT
} PMFW_VOLT_PLANE_e;
typedef enum {
CUSTOMER_VARIANT_ROW,
CUSTOMER_VARIANT_FALCON,
CUSTOMER_VARIANT_COUNT,
} CUSTOMER_VARIANT_e;
typedef enum {
POWER_SOURCE_AC,
POWER_SOURCE_DC,
POWER_SOURCE_COUNT,
} POWER_SOURCE_e;
typedef enum {
MEM_VENDOR_PLACEHOLDER0, // 0
MEM_VENDOR_SAMSUNG, // 1
MEM_VENDOR_INFINEON, // 2
MEM_VENDOR_ELPIDA, // 3
MEM_VENDOR_ETRON, // 4
MEM_VENDOR_NANYA, // 5
MEM_VENDOR_HYNIX, // 6
MEM_VENDOR_MOSEL, // 7
MEM_VENDOR_WINBOND, // 8
MEM_VENDOR_ESMT, // 9
MEM_VENDOR_PLACEHOLDER1, // 10
MEM_VENDOR_PLACEHOLDER2, // 11
MEM_VENDOR_PLACEHOLDER3, // 12
MEM_VENDOR_PLACEHOLDER4, // 13
MEM_VENDOR_PLACEHOLDER5, // 14
MEM_VENDOR_MICRON, // 15
MEM_VENDOR_COUNT,
} MEM_VENDOR_e;
typedef enum {
PP_GRTAVFS_HW_CPO_CTL_ZONE0,
PP_GRTAVFS_HW_CPO_CTL_ZONE1,
PP_GRTAVFS_HW_CPO_CTL_ZONE2,
PP_GRTAVFS_HW_CPO_CTL_ZONE3,
PP_GRTAVFS_HW_CPO_CTL_ZONE4,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3,
PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4,
PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4,
PP_GRTAVFS_HW_ZONE0_VF,
PP_GRTAVFS_HW_ZONE1_VF1,
PP_GRTAVFS_HW_ZONE2_VF2,
PP_GRTAVFS_HW_ZONE3_VF3,
PP_GRTAVFS_HW_VOLTAGE_GB,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3,
PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4,
PP_GRTAVFS_HW_RESERVED_0,
PP_GRTAVFS_HW_RESERVED_1,
PP_GRTAVFS_HW_RESERVED_2,
PP_GRTAVFS_HW_RESERVED_3,
PP_GRTAVFS_HW_RESERVED_4,
PP_GRTAVFS_HW_RESERVED_5,
PP_GRTAVFS_HW_RESERVED_6,
PP_GRTAVFS_HW_FUSE_COUNT,
} PP_GRTAVFS_HW_FUSE_e;
typedef enum {
PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0,
PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3,
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4,
PP_GRTAVFS_FW_COMMON_FUSE_COUNT,
} PP_GRTAVFS_FW_COMMON_FUSE_e;
typedef enum {
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3,
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3,
PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4,
PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY,
PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3,
PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4,
PP_GRTAVFS_FW_SEP_FUSE_COUNT,
} PP_GRTAVFS_FW_SEP_FUSE_e;
#define PP_NUM_RTAVFS_PWL_ZONES 5
#define PP_NUM_PSM_DIDT_PWL_ZONES 3
// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
typedef struct {
int8_t Offset; // in Amps
uint8_t Padding;
uint16_t MaxCurrent; // in Amps
} SviTelemetryScale_t;
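/*
 * Worked example of the Q formats above (illustrative, reading Qm.n as n
 * fractional bits): a Q1.7 slope fuse of 0x90 decodes to 144 / 128 = 1.125,
 * and a Q1.2 offset of 0x5 decodes to 5 / 4 = 1.25 A.
 */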
#define PP_NUM_OD_VF_CURVE_POINTS (PP_NUM_RTAVFS_PWL_ZONES + 1)
#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0
#define PP_OD_FEATURE_GFX_VMAX_BIT 1
#define PP_OD_FEATURE_SOC_VMAX_BIT 2
#define PP_OD_FEATURE_PPT_BIT 3
#define PP_OD_FEATURE_FAN_CURVE_BIT 4
#define PP_OD_FEATURE_FAN_LEGACY_BIT 5
#define PP_OD_FEATURE_FULL_CTRL_BIT 6
#define PP_OD_FEATURE_TDC_BIT 7
#define PP_OD_FEATURE_GFXCLK_BIT 8
#define PP_OD_FEATURE_UCLK_BIT 9
#define PP_OD_FEATURE_FCLK_BIT 10
#define PP_OD_FEATURE_ZERO_FAN_BIT 11
#define PP_OD_FEATURE_TEMPERATURE_BIT 12
#define PP_OD_FEATURE_EDC_BIT 13
#define PP_OD_FEATURE_COUNT 14
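/*
 * Sketch (assumption): a driver requesting overdrive changes sets the
 * matching bits in OverDriveTable_t.FeatureCtrlMask before sending the
 * table, e.g.
 *
 *	od->FeatureCtrlMask = (1 << PP_OD_FEATURE_GFXCLK_BIT) |
 *			      (1 << PP_OD_FEATURE_PPT_BIT);
 */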
typedef enum {
PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING,
PP_OD_POWER_FEATURE_ALWAYS_DISABLED,
} PP_OD_POWER_FEATURE_e;
typedef enum {
FAN_MODE_AUTO = 0,
FAN_MODE_MANUAL_LINEAR,
} FanMode_e;
typedef enum {
OD_NO_ERROR,
OD_REQUEST_ADVANCED_NOT_SUPPORTED,
OD_UNSUPPORTED_FEATURE,
OD_INVALID_FEATURE_COMBO_ERROR,
OD_GFXCLK_VF_CURVE_OFFSET_ERROR,
OD_VDD_GFX_VMAX_ERROR,
OD_VDD_SOC_VMAX_ERROR,
OD_PPT_ERROR,
OD_FAN_MIN_PWM_ERROR,
OD_FAN_ACOUSTIC_TARGET_ERROR,
OD_FAN_ACOUSTIC_LIMIT_ERROR,
OD_FAN_TARGET_TEMP_ERROR,
OD_FAN_ZERO_RPM_STOP_TEMP_ERROR,
OD_FAN_CURVE_PWM_ERROR,
OD_FAN_CURVE_TEMP_ERROR,
OD_FULL_CTRL_GFXCLK_ERROR,
OD_FULL_CTRL_UCLK_ERROR,
OD_FULL_CTRL_FCLK_ERROR,
OD_FULL_CTRL_VDD_GFX_ERROR,
OD_FULL_CTRL_VDD_SOC_ERROR,
OD_TDC_ERROR,
OD_GFXCLK_ERROR,
OD_UCLK_ERROR,
OD_FCLK_ERROR,
OD_OP_TEMP_ERROR,
OD_OP_GFX_EDC_ERROR,
OD_OP_GFX_PCC_ERROR,
OD_POWER_FEATURE_CTRL_ERROR,
} OD_FAIL_e;
typedef struct {
uint32_t FeatureCtrlMask;
//Voltage control
int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
uint16_t VddGfxVmax; // in mV
uint16_t VddSocVmax;
uint8_t IdlePwrSavingFeaturesCtrl;
uint8_t RuntimePwrSavingFeaturesCtrl;
uint16_t Padding;
//Frequency changes
int16_t GfxclkFoffset;
uint16_t Padding1;
uint16_t UclkFmin;
uint16_t UclkFmax;
uint16_t FclkFmin;
uint16_t FclkFmax;
//PPT
int16_t Ppt; // %
int16_t Tdc;
//Fan control
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
uint16_t FanMinimumPwm;
uint16_t AcousticTargetRpmThreshold;
uint16_t AcousticLimitRpmThreshold;
	uint16_t FanTargetTemperature; // Degrees Celsius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
uint8_t MaxOpTemp;
uint8_t AdvancedOdModeEnabled;
uint8_t Padding2[3];
uint16_t GfxVoltageFullCtrlMode;
uint16_t SocVoltageFullCtrlMode;
uint16_t GfxclkFullCtrlMode;
uint16_t UclkFullCtrlMode;
uint16_t FclkFullCtrlMode;
uint16_t Padding3;
int16_t GfxEdc;
int16_t GfxPccLimitControl;
uint16_t GfxclkFmaxVmax;
uint8_t GfxclkFmaxVmaxTemperature;
uint8_t Padding4[1];
uint32_t Spare[9];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
typedef struct {
OverDriveTable_t OverDriveTable;
} OverDriveTableExternal_t;
typedef struct {
uint32_t FeatureCtrlMask;
//Gfx Vf Curve
int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
//gfx Vmax
uint16_t VddGfxVmax; // in mV
//soc Vmax
uint16_t VddSocVmax;
//gfxclk
int16_t GfxclkFoffset;
uint16_t Padding;
//uclk
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
//fclk
uint16_t FclkFmin;
uint16_t FclkFmax;
//PPT
int16_t Ppt; // %
//TDC
int16_t Tdc;
//Fan Curve
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
//Fan Legacy
uint16_t FanMinimumPwm;
uint16_t AcousticTargetRpmThreshold;
uint16_t AcousticLimitRpmThreshold;
	uint16_t FanTargetTemperature; // Degrees Celsius
//zero fan
uint8_t FanZeroRpmEnable;
//temperature
uint8_t MaxOpTemp;
uint8_t Padding1[2];
//Full Ctrl
uint16_t GfxVoltageFullCtrlMode;
uint16_t SocVoltageFullCtrlMode;
uint16_t GfxclkFullCtrlMode;
uint16_t UclkFullCtrlMode;
uint16_t FclkFullCtrlMode;
//EDC
int16_t GfxEdc;
int16_t GfxPccLimitControl;
int16_t Padding2;
uint32_t Spare[5];
} OverDriveLimits_t;
typedef enum {
BOARD_GPIO_SMUIO_0,
BOARD_GPIO_SMUIO_1,
BOARD_GPIO_SMUIO_2,
BOARD_GPIO_SMUIO_3,
BOARD_GPIO_SMUIO_4,
BOARD_GPIO_SMUIO_5,
BOARD_GPIO_SMUIO_6,
BOARD_GPIO_SMUIO_7,
BOARD_GPIO_SMUIO_8,
BOARD_GPIO_SMUIO_9,
BOARD_GPIO_SMUIO_10,
BOARD_GPIO_SMUIO_11,
BOARD_GPIO_SMUIO_12,
BOARD_GPIO_SMUIO_13,
BOARD_GPIO_SMUIO_14,
BOARD_GPIO_SMUIO_15,
BOARD_GPIO_SMUIO_16,
BOARD_GPIO_SMUIO_17,
BOARD_GPIO_SMUIO_18,
BOARD_GPIO_SMUIO_19,
BOARD_GPIO_SMUIO_20,
BOARD_GPIO_SMUIO_21,
BOARD_GPIO_SMUIO_22,
BOARD_GPIO_SMUIO_23,
BOARD_GPIO_SMUIO_24,
BOARD_GPIO_SMUIO_25,
BOARD_GPIO_SMUIO_26,
BOARD_GPIO_SMUIO_27,
BOARD_GPIO_SMUIO_28,
BOARD_GPIO_SMUIO_29,
BOARD_GPIO_SMUIO_30,
BOARD_GPIO_SMUIO_31,
MAX_BOARD_GPIO_SMUIO_NUM,
BOARD_GPIO_DC_GEN_A,
BOARD_GPIO_DC_GEN_B,
BOARD_GPIO_DC_GEN_C,
BOARD_GPIO_DC_GEN_D,
BOARD_GPIO_DC_GEN_E,
BOARD_GPIO_DC_GEN_F,
BOARD_GPIO_DC_GEN_G,
BOARD_GPIO_DC_GENLK_CLK,
BOARD_GPIO_DC_GENLK_VSYNC,
BOARD_GPIO_DC_SWAPLOCK_A,
BOARD_GPIO_DC_SWAPLOCK_B,
MAX_BOARD_DC_GPIO_NUM,
BOARD_GPIO_LV_EN,
} BOARD_GPIO_TYPE_e;
#define INVALID_BOARD_GPIO 0xFF
typedef struct {
//PLL 0
uint16_t InitImuClk;
uint16_t InitSocclk;
uint16_t InitMpioclk;
uint16_t InitSmnclk;
//PLL 1
uint16_t InitDispClk;
uint16_t InitDppClk;
uint16_t InitDprefclk;
uint16_t InitDcfclk;
uint16_t InitDtbclk;
uint16_t InitDbguSocClk;
//PLL 2
uint16_t InitGfxclk_bypass;
uint16_t InitMp1clk;
uint16_t InitLclk;
uint16_t InitDbguBacoClk;
uint16_t InitBaco400clk;
uint16_t InitBaco1200clk_bypass;
uint16_t InitBaco700clk_bypass;
uint16_t InitBaco500clk;
// PLL 3
uint16_t InitDclk0;
uint16_t InitVclk0;
// PLL 4
uint16_t InitFclk;
uint16_t Padding1;
// PLL 5
//UCLK clocks, assumed all UCLK instances will be the same.
uint8_t InitUclkLevel; // =0,1,2,3,4,5 frequency from FreqTableUclk
uint8_t Padding[3];
uint32_t InitVcoFreqPll0; //smu_socclk_t
uint32_t InitVcoFreqPll1; //smu_displayclk_t
uint32_t InitVcoFreqPll2; //smu_nbioclk_t
uint32_t InitVcoFreqPll3; //smu_vcnclk_t
uint32_t InitVcoFreqPll4; //smu_fclk_t
uint32_t InitVcoFreqPll5; //smu_uclk_01_t
uint32_t InitVcoFreqPll6; //smu_uclk_23_t
uint32_t InitVcoFreqPll7; //smu_uclk_45_t
uint32_t InitVcoFreqPll8; //smu_uclk_67_t
//encoding will be SVI3
uint16_t InitGfx; // In mV(Q2) , should be 0?
uint16_t InitSoc; // In mV(Q2)
uint16_t InitVddIoMem; // In mV(Q2) MemVdd
uint16_t InitVddCiMem; // In mV(Q2) VMemP
//uint16_t Padding2;
uint32_t Spare[8];
} BootValues_t;
typedef struct {
uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts
uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps
uint16_t Temperature[TEMP_COUNT]; // Celsius
uint8_t PwmLimitMin;
uint8_t PwmLimitMax;
uint8_t FanTargetTemperature;
uint8_t Spare1[1];
uint16_t AcousticTargetRpmThresholdMin;
uint16_t AcousticTargetRpmThresholdMax;
uint16_t AcousticLimitRpmThresholdMin;
uint16_t AcousticLimitRpmThresholdMax;
uint16_t PccLimitMin;
uint16_t PccLimitMax;
uint16_t FanStopTempMin;
uint16_t FanStopTempMax;
uint16_t FanStartTempMin;
uint16_t FanStartTempMax;
uint16_t PowerMinPpt0[POWER_SOURCE_COUNT];
uint32_t Spare[11];
} MsgLimits_t;
typedef struct {
uint16_t BaseClockAc;
uint16_t GameClockAc;
uint16_t BoostClockAc;
uint16_t BaseClockDc;
uint16_t GameClockDc;
uint16_t BoostClockDc;
uint16_t MaxReportedClock;
uint16_t Padding;
uint32_t Reserved[3];
} DriverReportedClocks_t;
typedef struct {
uint8_t DcBtcEnabled;
uint8_t Padding[3];
uint16_t DcTol; // mV Q2
uint16_t DcBtcGb; // mV Q2
uint16_t DcBtcMin; // mV Q2
uint16_t DcBtcMax; // mV Q2
LinearInt_t DcBtcGbScalar;
} AvfsDcBtcParams_t;
typedef struct {
uint16_t AvfsTemp[AVFS_TEMP_COUNT]; //in degrees C
uint16_t VftFMin; // in MHz
uint16_t VInversion; // in mV Q2
QuadraticInt_t qVft[AVFS_TEMP_COUNT];
QuadraticInt_t qAvfsGb;
QuadraticInt_t qAvfsGb2;
} AvfsFuseOverride_t;
//all settings maintained by PFE team
typedef struct {
uint8_t Version;
uint8_t Spare8[3];
// SECTION: Feature Control
uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping
// SECTION: FW DSTATE Settings
uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping
// SECTION: Advanced Options
uint32_t DebugOverrides;
uint32_t Spare[2];
} PFE_Settings_t;
typedef struct {
// SECTION: Version
	uint32_t Version; // should be unique to each SKU (i.e. if any value in the structure below changes, this value must differ)
// SECTION: Miscellaneous Configuration
uint8_t TotalPowerConfig; // Determines how PMFW calculates the power. Use defines from PwrConfig_e
uint8_t CustomerVariant; //To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e
	uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. Use defines from MEM_TEMP_READ_*_BIT
	uint8_t SmartShiftVersion; // Determines which SmartShift feature version is supported. Use defines from SMARTSHIFT_VERSION_e
// SECTION: Infrastructure Limits
uint8_t SocketPowerLimitSpare[10];
	//if set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programs (i.e. absolute power). If 0, all except index 0 will be scalars
//relative index 0
uint8_t EnableLegacyPptLimit;
uint8_t UseInputTelemetry; //applicable to SVI3 only and only to be set if VRs support
uint8_t SmartShiftMinReportedPptinDcs; //minimum possible active power consumption for this SKU. Used for SmartShift power reporting
uint8_t PaddingPpt[7];
uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only
uint16_t PaddingInfra;
	// Per year normalized Vmax state failure rates (sum of the two domains divided by lifetime in years)
uint32_t FitControllerFailureRateLimit; //in IEEE float
//Expected GFX Duty Cycle at Vmax.
uint32_t FitControllerGfxDutyCycle; // in IEEE float
//Expected SOC Duty Cycle at Vmax.
uint32_t FitControllerSocDutyCycle; // in IEEE float
	//This offset will be deducted from the controller output before it goes through the SOC Vset limiter block.
uint32_t FitControllerSocOffset; //in IEEE float
uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value
// SECTION: Throttler settings
uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping
// SECTION: Voltage Control Parameters
uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV(part of FW_DSTATE)
uint8_t Padding[2];
uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE
// Voltage Limits
uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage without FIT controller enabled
uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage with FIT controller enabled
//Vmin Optimizations
int16_t VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature hysteresis for switching between low/high temperature values for Vmin
int16_t VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature threshold for switching between low/high temperature values for Vmin
uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at hot.
uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at cold.
uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot.
uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold.
uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin
uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot
uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold
//This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for.
uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT];
//Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts.
uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT];
//Scalar coefficient of the PSM aging degradation function
uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM
//Exponential coefficient of the PSM aging degradation function
uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM
//Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN
//Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN
uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT];
uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT];
uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
QuadraticInt_t Gfx_Vmin_droop;
QuadraticInt_t Soc_Vmin_droop;
uint32_t SpareVmin[6];
//SECTION: DPM Configuration 1
DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];
uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableShadowUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
uint16_t GfxclkAibFmax;
uint16_t GfxDpmPadding;
//GFX Idle Power Settings
	uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in MHz
	uint16_t GfxclkFgfxoffExitImu; // Exit/Entry in IMU stage (BYPASS), in MHz
	uint16_t GfxclkFgfxoffExitRlc; // Exit in RLC stage (PLL), in MHz
uint16_t GfxclkThrottleClock; //Used primarily in DCS
uint8_t EnableGfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages
uint8_t GfxIdlePadding;
uint8_t SmsRepairWRCKClkDivEn;
uint8_t SmsRepairWRCKClkDivVal;
uint8_t GfxOffEntryEarlyMGCGEn;
uint8_t GfxOffEntryForceCGCGEn;
uint8_t GfxOffEntryForceCGCGDelayEn;
uint8_t GfxOffEntryForceCGCGDelayVal; // in microseconds
uint16_t GfxclkFreqGfxUlv; // in MHz
uint8_t GfxIdlePadding2[2];
uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before triggers GFXOFF entry
uint32_t GfxoffSpare[15];
// DFLL
uint16_t DfllMstrOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias
uint16_t DfllSlvOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias
uint32_t DfllBtcMasterScalerM;
int32_t DfllBtcMasterScalerB;
uint32_t DfllBtcSlaveScalerM;
int32_t DfllBtcSlaveScalerB;
uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
uint32_t GfxDfllSpare[9];
// DVO
uint32_t DvoPsmDownThresholdVoltage; //Voltage float
uint32_t DvoPsmUpThresholdVoltage; //Voltage float
uint32_t DvoFmaxLowScaler; //Unitless float
// GFX DCS
uint32_t PaddingDcs;
uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS.
uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase.
uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.
uint32_t DcsPfGfxFopt; //Default to GFX FMIN
uint32_t DcsPfUclkFopt; //Default to UCLK FMIN
uint8_t FoptEnabled;
uint8_t DcsSpare2[3];
	uint32_t DcsFoptM; //Tuning parameters to shift Fopt calculation, IEEE754 float
	uint32_t DcsFoptB; //Tuning parameters to shift Fopt calculation, IEEE754 float
uint32_t DcsSpare[9];
// UCLK section
uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
uint8_t PaddingMem[3];
uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 6 Primary SW DPM states (6 + 6 Shadow)
uint8_t UclkDpmShadowPstates [NUM_UCLK_DPM_LEVELS]; // 6 Shadow SW DPM states (6 + 6 Shadow)
uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
uint8_t FreqTableShadowUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
uint16_t MemVmempVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
uint16_t MemVddioVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
uint16_t DalDcModeMaxUclkFreq;
uint8_t PaddingsMem[2];
//FCLK Section
uint32_t PaddingFclk;
// Link DPM Settings
uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
uint16_t LclkFreq[NUM_LINK_LEVELS];
// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
uint8_t GfxAvfsPadding[1];
uint16_t DroopGBStDev;
uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
//uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1];
uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
uint32_t spare_HwRtAvfsFuses[19];
uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t SocFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
uint32_t GfxL2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
//uint32_t GfxSeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
uint32_t spare_FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
uint32_t Soc_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Soc_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Soc_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Soc_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Gfx_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Gfx_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Gfx_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Gfx_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Gfx_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t Soc_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;
uint32_t PaddingV2F[4];
AvfsDcBtcParams_t DcBtcGfxParams;
QuadraticInt_t SSCurve_GFX;
uint32_t GfxAvfsSpare[29];
//SECTION: VDD_SOC AVFS
uint8_t OverrideSocAvfsFuses;
uint8_t MinSocAvfsRevision;
uint8_t SocAvfsPadding[2];
AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT];
DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb
LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V
QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V
AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT];
QuadraticInt_t SSCurve_SOC;
uint32_t SocAvfsSpare[29];
//SECTION: Boot clock and voltage values
BootValues_t BootValues;
//SECTION: Driver Reported Clocks
DriverReportedClocks_t DriverReportedClocks;
//SECTION: Message Limits
MsgLimits_t MsgLimits;
//SECTION: OverDrive Limits
OverDriveLimits_t OverDriveLimitsBasicMin;
OverDriveLimits_t OverDriveLimitsBasicMax;
OverDriveLimits_t OverDriveLimitsAdvancedMin;
OverDriveLimits_t OverDriveLimitsAdvancedMax;
// Section: Total Board Power idle vs active coefficients
uint8_t TotalBoardPowerSupport;
uint8_t TotalBoardPowerPadding[1];
uint16_t TotalBoardPowerRoc;
//PMFW-11158
QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
// APT GFX to UCLK mapping
int32_t AptUclkGfxclkLookup[POWER_SOURCE_COUNT][6];
uint32_t AptUclkGfxclkLookupHyst[POWER_SOURCE_COUNT][6];
uint32_t AptPadding;
// Xvmin didt
QuadraticInt_t GfxXvminDidtDroopThresh;
uint32_t GfxXvminDidtResetDDWait;
uint32_t GfxXvminDidtClkStopWait;
uint32_t GfxXvminDidtFcsStepCtrl;
uint32_t GfxXvminDidtFcsWaitCtrl;
// PSM based didt controller
uint32_t PsmModeEnabled; //0: all disabled 1: static mode only 2: dynamic mode only 3:static + dynamic mode
uint32_t P2v_a; // floating point in U32 format
uint32_t P2v_b;
uint32_t P2v_c;
uint32_t T2p_a;
uint32_t T2p_b;
uint32_t T2p_c;
uint32_t P2vTemp;
QuadraticInt_t PsmDidtStaticSettings;
QuadraticInt_t PsmDidtDynamicSettings;
uint8_t PsmDidtAvgDiv;
uint8_t PsmDidtForceStall;
uint16_t PsmDidtReleaseTimer;
uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
// CAC EDC
uint32_t CacEdcCacLeakageC0;
uint32_t CacEdcCacLeakageC1;
uint32_t CacEdcCacLeakageC2;
uint32_t CacEdcCacLeakageC3;
uint32_t CacEdcCacLeakageC4;
uint32_t CacEdcCacLeakageC5;
uint32_t CacEdcGfxClkScalar;
uint32_t CacEdcGfxClkIntercept;
uint32_t CacEdcCac_m;
uint32_t CacEdcCac_b;
uint32_t CacEdcCurrLimitGuardband;
uint32_t CacEdcDynToTotalCacRatio;
// GFX EDC XVMIN
uint32_t XVmin_Gfx_EdcThreshScalar;
uint32_t XVmin_Gfx_EdcEnableFreq;
uint32_t XVmin_Gfx_EdcPccAsStepCtrl;
uint32_t XVmin_Gfx_EdcPccAsWaitCtrl;
uint16_t XVmin_Gfx_EdcThreshold;
uint16_t XVmin_Gfx_EdcFiltHysWaitCtrl;
// SOC EDC XVMIN
uint32_t XVmin_Soc_EdcThreshScalar;
uint32_t XVmin_Soc_EdcEnableFreq;
uint32_t XVmin_Soc_EdcThreshold; // LPF: number of cycles Xvmin_trig_filt will react.
uint16_t XVmin_Soc_EdcStepUpTime; // 10 bit, refclk count to step up throttle when PCC remains asserted.
uint16_t XVmin_Soc_EdcStepDownTime;// 10 bit, refclk count to step down throttle when PCC remains asserted.
	uint8_t XVmin_Soc_EdcInitPccStep; // 3 bit, first PCC step number that will be applied when PCC asserts.
uint8_t PaddingSocEdc[3];
// Fuse Override for SOC and GFX XVMIN
uint8_t GfxXvminFuseOverride;
uint8_t SocXvminFuseOverride;
uint8_t PaddingXvminFuseOverride[2];
uint8_t GfxXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value
uint8_t GfxXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value
uint8_t SocXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value
uint8_t SocXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value
uint16_t GfxXvminFddVolt0; // low voltage, in VID
uint16_t GfxXvminFddVolt1; // mid voltage, in VID
uint16_t GfxXvminFddVolt2; // high voltage, in VID
uint16_t SocXvminFddVolt0; // low voltage, in VID
uint16_t SocXvminFddVolt1; // mid voltage, in VID
uint16_t SocXvminFddVolt2; // high voltage, in VID
uint16_t GfxXvminDsFddDsm[6]; // XVMIN DS, same organization with fuse
uint16_t GfxXvminEdcFddDsm[6];// XVMIN GFX EDC, same organization with fuse
uint16_t SocXvminEdcFddDsm[6];// XVMIN SOC EDC, same organization with fuse
// SECTION: Sku Reserved
uint32_t Spare;
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
} SkuTable_t;
typedef struct {
uint8_t SlewRateConditions;
uint8_t LoadLineAdjust;
uint8_t VoutOffset;
uint8_t VidMax;
uint8_t VidMin;
uint8_t TenBitTelEn;
uint8_t SixteenBitTelEn;
uint8_t OcpThresh;
uint8_t OcpWarnThresh;
uint8_t OcpSettings;
uint8_t VrhotThresh;
uint8_t OtpThresh;
uint8_t UvpOvpDeltaRef;
uint8_t PhaseShed;
uint8_t Padding[10];
uint32_t SettingOverrideMask;
} Svi3RegulatorSettings_t;
typedef struct {
// SECTION: Version
uint32_t Version; //should be unique to each board type
// SECTION: I2C Control
I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS];
//SECTION SVI3 Board Parameters
uint8_t SlaveAddrMapping[SVI_PLANE_COUNT];
uint8_t VrPsiSupport[SVI_PLANE_COUNT];
uint32_t Svi3SvcSpeed;
uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3
// SECTION: Voltage Regulator Settings
Svi3RegulatorSettings_t Svi3RegSettings[SVI_PLANE_COUNT];
// SECTION: GPIO Settings
uint8_t LedOffGpio;
uint8_t FanOffGpio;
uint8_t GfxVrPowerStageOffGpio;
uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching
uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching
uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
uint8_t GthrGpio; // GPIO pin configured for GTHR Event
uint8_t GthrPolarity; // replace GPIO polarity for GTHR
// LED Display Settings
uint8_t LedPin0; // GPIO number for LedPin[0]
uint8_t LedPin1; // GPIO number for LedPin[1]
uint8_t LedPin2; // GPIO number for LedPin[2]
uint8_t LedEnableMask;
uint8_t LedPcie; // GPIO number for PCIE results
uint8_t LedError; // GPIO number for Error Cases
uint8_t PaddingLed;
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
uint8_t UclkTrainingModeSpreadPercent; // Q4.4
uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
	// UCLK Spread Spectrum (per memory vendor)
uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT];
// DFLL Spread Spectrum
uint8_t GfxclkSpreadEnable;
// FCLK Spread Spectrum
uint8_t FclkSpreadPercent; // Q4.4
uint16_t FclkSpreadFreq; // kHz
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
uint8_t VddqOffEnabled;
uint8_t PaddingUmcFlags[2];
uint32_t Paddign1;
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
uint8_t FuseWritePowerMuxPresent;
uint8_t FuseWritePadding[3];
// SECTION: EDC Params
uint32_t LoadlineGfx;
uint32_t LoadlineSoc;
uint32_t GfxEdcLimit;
uint32_t SocEdcLimit;
uint32_t RestBoardPower; //power consumed by board that is not captured by the SVI3 input telemetry
uint32_t ConnectorsImpedance; // impedance of the input ATX power connectors
uint8_t EpcsSens0; //GPIO number for External Power Connector Support Sense0
uint8_t EpcsSens1; //GPIO Number for External Power Connector Support Sense1
uint8_t PaddingEpcs[2];
// SECTION: Board Reserved
uint32_t BoardSpare[52];
// SECTION: Structure Padding
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
} BoardTable_t;
typedef struct {
// SECTION: Infrastructure Limits
uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported
uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature
int16_t TotalIdleBoardPowerM;
int16_t TotalIdleBoardPowerB;
int16_t TotalBoardPowerM;
int16_t TotalBoardPowerB;
uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input
// SECTION: Fan Control
uint16_t FanStopTemp[TEMP_COUNT]; //Celsius
uint16_t FanStartTemp[TEMP_COUNT]; //Celsius
uint16_t FanGain[TEMP_COUNT];
uint16_t FanPwmMin;
uint16_t AcousticTargetRpmThreshold;
uint16_t AcousticLimitRpmThreshold;
uint16_t FanMaximumRpm;
uint16_t MGpuAcousticLimitRpmThreshold;
uint16_t FanTargetGfxclk;
uint32_t TempInputSelectMask;
uint8_t FanZeroRpmEnable;
uint8_t FanTachEdgePerRev;
uint16_t FanPadding;
uint16_t FanTargetTemperature[TEMP_COUNT];
// The following are AFC override parameters. Leave at 0 to use FW defaults.
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
uint16_t FanPadding2;
uint16_t FwCtfLimit[TEMP_COUNT];
uint16_t IntakeTempEnableRPM;
int16_t IntakeTempOffsetTemp;
uint16_t IntakeTempReleaseTemp;
uint16_t IntakeTempHighIntakeAcousticLimit;
uint16_t IntakeTempAcouticLimitReleaseRate;
int16_t FanAbnormalTempLimitOffset; // FanStalledTempLimitOffset
uint16_t FanStalledTriggerRpm; //
uint16_t FanAbnormalTriggerRpmCoeff; // FanAbnormalTriggerRpm
uint16_t FanSpare[1];
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding;
uint32_t FanSpare2[12];
uint32_t ODFeatureCtrlMask;
uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
uint16_t TemperatureFwCtfLimit_Hynix;
uint16_t TemperatureFwCtfLimit_Micron;
// SECTION: Board Reserved
uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with platform maximum temperature per VR current rail
uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. Multiple limits supported
uint16_t SocketPowerLimitSmartShift2; // In Watts. Power limit used SmartShift
uint16_t CustomSkuSpare16b;
uint32_t CustomSkuSpare32b[10];
// SECTION: Structure Padding
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
} CustomSkuTable_t;
typedef struct {
PFE_Settings_t PFE_Settings;
SkuTable_t SkuTable;
CustomSkuTable_t CustomSkuTable;
BoardTable_t BoardTable;
} PPTable_t;
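/*
 * PPTable_t is the aggregate table, presumably exchanged with PMFW as a
 * single blob; per the note at the top of this header, PPTABLE_VERSION
 * (0x1B) must be incremented whenever SkuTable_t or BoardTable_t change
 * shape.
 */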
typedef struct {
// Time constant parameters for clock averages in ms
uint16_t GfxclkAverageLpfTau;
uint16_t FclkAverageLpfTau;
uint16_t UclkAverageLpfTau;
uint16_t GfxActivityLpfTau;
uint16_t UclkActivityLpfTau;
uint16_t UclkMaxActivityLpfTau;
uint16_t SocketPowerLpfTau;
uint16_t VcnClkAverageLpfTau;
uint16_t VcnUsageAverageLpfTau;
uint16_t PcieActivityLpTau;
} DriverSmuConfig_t;
typedef struct {
DriverSmuConfig_t DriverSmuConfig;
uint32_t Spare[8];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
} DriverSmuConfigExternal_t;
typedef struct {
uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz
uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz
uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
uint16_t Padding;
uint32_t Spare[32];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
} DriverInfoTable_t;
typedef struct {
uint32_t CurrClock[PPCLK_COUNT];
uint16_t AverageGfxclkFrequencyTarget;
uint16_t AverageGfxclkFrequencyPreDs;
uint16_t AverageGfxclkFrequencyPostDs;
uint16_t AverageFclkFrequencyPreDs;
uint16_t AverageFclkFrequencyPostDs;
uint16_t AverageMemclkFrequencyPreDs ; // this is scaled to actual memory clock
uint16_t AverageMemclkFrequencyPostDs ; // this is scaled to actual memory clock
uint16_t AverageVclk0Frequency ;
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
uint16_t AveragePCIeBusy ;
uint16_t dGPU_W_MAX ;
uint16_t padding ;
uint16_t MovingAverageGfxclkFrequencyTarget;
uint16_t MovingAverageGfxclkFrequencyPreDs;
uint16_t MovingAverageGfxclkFrequencyPostDs;
uint16_t MovingAverageFclkFrequencyPreDs;
uint16_t MovingAverageFclkFrequencyPostDs;
uint16_t MovingAverageMemclkFrequencyPreDs;
uint16_t MovingAverageMemclkFrequencyPostDs;
uint16_t MovingAverageVclk0Frequency;
uint16_t MovingAverageDclk0Frequency;
uint16_t MovingAverageGfxActivity;
uint16_t MovingAverageUclkActivity;
uint16_t MovingAverageVcn0ActivityPercentage;
uint16_t MovingAveragePCIeBusy;
uint16_t MovingAverageUclkActivity_MAX;
uint16_t MovingAverageSocketPower;
uint16_t MovingAveragePadding;
uint32_t MetricsCounter ;
uint16_t AvgVoltage[SVI_PLANE_COUNT];
uint16_t AvgCurrent[SVI_PLANE_COUNT];
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
uint16_t AverageVcn0ActivityPercentage;
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
uint16_t AverageSocketPower;
uint16_t AverageTotalBoardPower;
uint16_t AvgTemperature[TEMP_COUNT];
uint16_t AvgTemperatureFanIntake;
uint8_t PcieRate ;
uint8_t PcieWidth ;
uint8_t AvgFanPwm;
uint8_t Padding[1];
uint16_t AvgFanRpm;
uint8_t ThrottlingPercentage[THROTTLER_COUNT];
uint8_t VmaxThrottlingPercentage;
uint8_t padding1[2];
//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
uint16_t ApuSTAPMSmartShiftLimit;
uint16_t ApuSTAPMLimit;
uint16_t AvgApuSocketPower;
uint16_t AverageUclkActivity_MAX;
uint32_t PublicSerialNumberLower;
uint32_t PublicSerialNumberUpper;
} SmuMetrics_t;
typedef struct {
SmuMetrics_t SmuMetrics;
uint32_t Spare[30];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
} SmuMetricsExternal_t;
typedef struct {
uint8_t WmSetting;
uint8_t Flags;
uint8_t Padding[2];
} WatermarkRowGeneric_t;
#define NUM_WM_RANGES 4
typedef enum {
WATERMARKS_CLOCK_RANGE = 0,
WATERMARKS_DUMMY_PSTATE,
WATERMARKS_MALL,
WATERMARKS_COUNT,
} WATERMARKS_FLAGS_e;
typedef struct {
// Watermarks
WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES];
} Watermarks_t;
typedef struct {
Watermarks_t Watermarks;
uint32_t Spare[16];
uint32_t MmHubPadding[8]; // SMU internal use
} WatermarksExternal_t;
typedef struct {
uint16_t avgPsmCount[76];
uint16_t minPsmCount[76];
uint16_t maxPsmCount[76];
float avgPsmVoltage[76];
float minPsmVoltage[76];
float maxPsmVoltage[76];
} AvfsDebugTable_t;
typedef struct {
AvfsDebugTable_t AvfsDebugTable;
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsDebugTableExternal_t;
typedef struct {
uint8_t Gfx_ActiveHystLimit;
uint8_t Gfx_IdleHystLimit;
uint8_t Gfx_FPS;
uint8_t Gfx_MinActiveFreqType;
uint8_t Gfx_BoosterFreqType;
uint8_t PaddingGfx;
uint16_t Gfx_MinActiveFreq; // MHz
uint16_t Gfx_BoosterFreq; // MHz
uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms
uint32_t Gfx_PD_Data_limit_a; // Q16
uint32_t Gfx_PD_Data_limit_b; // Q16
uint32_t Gfx_PD_Data_limit_c; // Q16
uint32_t Gfx_PD_Data_error_coeff; // Q16
uint32_t Gfx_PD_Data_error_rate_coeff; // Q16
uint8_t Fclk_ActiveHystLimit;
uint8_t Fclk_IdleHystLimit;
uint8_t Fclk_FPS;
uint8_t Fclk_MinActiveFreqType;
uint8_t Fclk_BoosterFreqType;
uint8_t PaddingFclk;
uint16_t Fclk_MinActiveFreq; // MHz
uint16_t Fclk_BoosterFreq; // MHz
uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms
uint32_t Fclk_PD_Data_limit_a; // Q16
uint32_t Fclk_PD_Data_limit_b; // Q16
uint32_t Fclk_PD_Data_limit_c; // Q16
uint32_t Fclk_PD_Data_error_coeff; // Q16
uint32_t Fclk_PD_Data_error_rate_coeff; // Q16
uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16
uint8_t Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS];
uint16_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS];
uint16_t Mem_Fps;
} DpmActivityMonitorCoeffInt_t;
typedef struct {
DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt;
uint32_t MmHubPadding[8]; // SMU internal use
} DpmActivityMonitorCoeffIntExternal_t;
// Workload bits
#define WORKLOAD_PPLIB_DEFAULT_BIT 0
#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
#define WORKLOAD_PPLIB_VIDEO_BIT 3
#define WORKLOAD_PPLIB_VR_BIT 4
#define WORKLOAD_PPLIB_COMPUTE_BIT 5
#define WORKLOAD_PPLIB_CUSTOM_BIT 6
#define WORKLOAD_PPLIB_WINDOW_3D_BIT 7
#define WORKLOAD_PPLIB_DIRECT_ML_BIT 8
#define WORKLOAD_PPLIB_CGVDI_BIT 9
#define WORKLOAD_PPLIB_COUNT 10
// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
// SMC_MSG_TransferTableSmu2Dram
// Table transfer status
#define TABLE_TRANSFER_OK 0x0
#define TABLE_TRANSFER_FAILED 0xFF
#define TABLE_TRANSFER_PENDING 0xAB
#define TABLE_PPT_FAILED 0x100
#define TABLE_TDC_FAILED 0x200
#define TABLE_TEMP_FAILED 0x400
#define TABLE_FAN_TARGET_TEMP_FAILED 0x800
#define TABLE_FAN_STOP_TEMP_FAILED 0x1000
#define TABLE_FAN_START_TEMP_FAILED 0x2000
#define TABLE_FAN_PWM_MIN_FAILED 0x4000
#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000
#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000
#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000
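// Illustrative only (hypothetical helper, not part of the PMFW
// interface): assuming the low byte carries the overall transfer status
// and the per-field *_FAILED bits are OR'ed in above it, a driver-side
// check might look like:
//
//	static inline bool table_transfer_ok(uint32_t status)
//	{
//		return (status & 0xFFu) == TABLE_TRANSFER_OK &&
//		       (status & ~0xFFu) == 0;
//	}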
// Table types
#define TABLE_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
#define TABLE_WATERMARKS 2
#define TABLE_AVFS_PSM_DEBUG 3
#define TABLE_PMSTATUSLOG 4
#define TABLE_SMU_METRICS 5
#define TABLE_DRIVER_SMU_CONFIG 6
#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 8
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
#define TABLE_CUSTOM_SKUTABLE 12
#define TABLE_COUNT 13
// IH interrupt ID
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
#define IH_INTERRUPT_CONTEXT_ID_BACO 0x2
#define IH_INTERRUPT_CONTEXT_ID_AC 0x3
#define IH_INTERRUPT_CONTEXT_ID_DC 0x4
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA
#endif
|
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include <linux/module.h>
#include <linux/usb.h>
#include "main.h"
#include "rtw8822c.h"
#include "usb.h"
static const struct usb_device_id rtw_8822cu_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc812, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82e, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xd820, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xd82b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x0043, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822c_hw_spec) }, /* Alpha - Alpha */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822cu_id_table);
static int rtw8822cu_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return rtw_usb_probe(intf, id);
}
static struct usb_driver rtw_8822cu_driver = {
.name = "rtw_8822cu",
.id_table = rtw_8822cu_id_table,
.probe = rtw8822cu_probe,
.disconnect = rtw_usb_disconnect,
};
module_usb_driver(rtw_8822cu_driver);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless 8822cu driver");
MODULE_LICENSE("Dual BSD/GPL");
|
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: [email protected]
*/
#ifndef AMDGPU_CSA_MANAGER_H
#define AMDGPU_CSA_MANAGER_H
#define AMDGPU_CSA_SIZE (128 * 1024)
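/*
 * Illustrative call order (an assumption drawn from the declarations
 * below, not a documented contract): allocate the static CSA buffer
 * object once per device, map it into each VM at the address returned
 * by amdgpu_csa_vaddr(), and unmap/free it again on teardown.
 */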
uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
u32 domain, uint32_t size);
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
uint64_t csa_addr, uint32_t size);
int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
uint64_t csa_addr);
void amdgpu_free_static_csa(struct amdgpu_bo **bo);
#endif
|
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "pp_psm.h"
int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
{
int result;
unsigned int i;
struct pp_power_state *state;
int size, table_entries;
if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
return 0;
if (hwmgr->hwmgr_func->get_power_state_size == NULL)
return 0;
table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
sizeof(struct pp_power_state);
if (table_entries <= 0 || size == 0) {
pr_warn("Please check whether power state management is supported on this asic\n");
hwmgr->num_ps = 0;
hwmgr->ps_size = 0;
return 0;
}
hwmgr->num_ps = table_entries;
hwmgr->ps_size = size;
hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
if (hwmgr->ps == NULL)
return -ENOMEM;
hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
if (hwmgr->request_ps == NULL) {
kfree(hwmgr->ps);
hwmgr->ps = NULL;
return -ENOMEM;
}
hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
if (hwmgr->current_ps == NULL) {
kfree(hwmgr->request_ps);
kfree(hwmgr->ps);
hwmgr->request_ps = NULL;
hwmgr->ps = NULL;
return -ENOMEM;
}
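/*
 * Each table entry is 'size' bytes: the generic pp_power_state header
 * followed by the asic-specific hardware state, so the walk below
 * advances through the table with byte-granular strides rather than
 * normal array indexing.
 */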
state = hwmgr->ps;
for (i = 0; i < table_entries; i++) {
result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
if (result) {
kfree(hwmgr->current_ps);
kfree(hwmgr->request_ps);
kfree(hwmgr->ps);
hwmgr->current_ps = NULL;
hwmgr->request_ps = NULL;
hwmgr->ps = NULL;
return -EINVAL;
}
if (state->classification.flags & PP_StateClassificationFlag_Boot) {
hwmgr->boot_ps = state;
memcpy(hwmgr->current_ps, state, size);
memcpy(hwmgr->request_ps, state, size);
}
state->id = i + 1; /* assign a unique number to each power state */
if (state->classification.flags & PP_StateClassificationFlag_Uvd)
hwmgr->uvd_ps = state;
state = (struct pp_power_state *)((unsigned long)state + size);
}
return 0;
}
int psm_fini_power_state_table(struct pp_hwmgr *hwmgr)
{
if (hwmgr == NULL)
return -EINVAL;
if (!hwmgr->ps)
return 0;
kfree(hwmgr->current_ps);
kfree(hwmgr->request_ps);
kfree(hwmgr->ps);
hwmgr->request_ps = NULL;
hwmgr->ps = NULL;
hwmgr->current_ps = NULL;
return 0;
}
static int psm_get_ui_state(struct pp_hwmgr *hwmgr,
enum PP_StateUILabel ui_label,
unsigned long *state_id)
{
struct pp_power_state *state;
int table_entries;
int i;
table_entries = hwmgr->num_ps;
state = hwmgr->ps;
for (i = 0; i < table_entries; i++) {
if (state->classification.ui_label & ui_label) {
*state_id = state->id;
return 0;
}
state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
}
return -EINVAL;
}
static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr,
enum PP_StateClassificationFlag flag,
unsigned long *state_id)
{
struct pp_power_state *state;
int table_entries;
int i;
table_entries = hwmgr->num_ps;
state = hwmgr->ps;
for (i = 0; i < table_entries; i++) {
if (state->classification.flags & flag) {
*state_id = state->id;
return 0;
}
state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
}
return -EINVAL;
}
static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id)
{
struct pp_power_state *state;
int table_entries;
int i;
table_entries = hwmgr->num_ps;
state = hwmgr->ps;
for (i = 0; i < table_entries; i++) {
if (state->id == state_id) {
memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
return 0;
}
state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
}
return -EINVAL;
}
int psm_set_boot_states(struct pp_hwmgr *hwmgr)
{
unsigned long state_id;
int ret = -EINVAL;
if (!hwmgr->ps)
return 0;
if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot,
&state_id))
ret = psm_set_states(hwmgr, state_id);
return ret;
}
int psm_set_performance_states(struct pp_hwmgr *hwmgr)
{
unsigned long state_id;
int ret = -EINVAL;
if (!hwmgr->ps)
return 0;
if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance,
&state_id))
ret = psm_set_states(hwmgr, state_id);
return ret;
}
int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
enum PP_StateUILabel label_id,
struct pp_power_state **state)
{
int table_entries;
int i;
if (!hwmgr->ps)
return 0;
table_entries = hwmgr->num_ps;
*state = hwmgr->ps;
restart_search:
for (i = 0; i < table_entries; i++) {
if ((*state)->classification.ui_label & label_id)
return 0;
*state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size);
}
switch (label_id) {
case PP_StateUILabel_Battery:
case PP_StateUILabel_Balanced:
label_id = PP_StateUILabel_Performance;
goto restart_search;
default:
break;
}
return -EINVAL;
}
static void power_state_management(struct pp_hwmgr *hwmgr,
struct pp_power_state *new_ps)
{
struct pp_power_state *pcurrent;
struct pp_power_state *requested;
bool equal;
if (new_ps != NULL)
requested = new_ps;
else
requested = hwmgr->request_ps;
pcurrent = hwmgr->current_ps;
phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr,
&pcurrent->hardware, &requested->hardware, &equal)))
equal = false;
if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
}
}
int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_settings,
struct pp_power_state *new_ps)
{
uint32_t index;
long workload[1];
if (hwmgr->not_vf) {
if (!skip_display_settings)
phm_display_configuration_changed(hwmgr);
if (hwmgr->ps)
power_state_management(hwmgr, new_ps);
else
/*
 * for vega12/vega20, which do not support the power state
 * manager, DAL clock limits should also be honoured
 */
phm_apply_clock_adjust_rules(hwmgr);
if (!skip_display_settings)
phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
}
if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
hwmgr->dpm_level = hwmgr->request_dpm_level;
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
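/*
 * fls() returns the 1-based position of the highest set bit, so the
 * highest bit set in workload_mask selects the active workload policy.
 */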
index = fls(hwmgr->workload_mask);
index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
workload[0] = hwmgr->workload_setting[index];
if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
}
return 0;
}
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Generator for IPA pass related boilerplate code/data
*
* Supports gcc 4.5-6
*
* Usage:
*
* 1. before inclusion define PASS_NAME
* 2. before inclusion define NO_* for unimplemented callbacks
* NO_GENERATE_SUMMARY
* NO_READ_SUMMARY
* NO_WRITE_SUMMARY
* NO_READ_OPTIMIZATION_SUMMARY
* NO_WRITE_OPTIMIZATION_SUMMARY
* NO_STMT_FIXUP
* NO_FUNCTION_TRANSFORM
* NO_VARIABLE_TRANSFORM
* NO_GATE
* NO_EXECUTE
* 3. before inclusion define PROPERTIES_* and *TODO_FLAGS_* to override
* the default 0 values
* 4. for convenience, all the above will be undefined after inclusion!
* 5. the only exported name is make_PASS_NAME_pass() to register with gcc
*/
#ifndef PASS_NAME
#error at least PASS_NAME must be defined
#else
#define __GCC_PLUGIN_STRINGIFY(n) #n
#define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n)
#define _GCC_PLUGIN_CONCAT2(x, y) x ## y
#define _GCC_PLUGIN_CONCAT3(x, y, z) x ## y ## z
#define __PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data)
#define _PASS_NAME_PASS_DATA __PASS_NAME_PASS_DATA(PASS_NAME)
#define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass)
#define _PASS_NAME_PASS __PASS_NAME_PASS(PASS_NAME)
#define _PASS_NAME_NAME _GCC_PLUGIN_STRINGIFY(PASS_NAME)
#define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass)
#define _MAKE_PASS_NAME_PASS __MAKE_PASS_NAME_PASS(PASS_NAME)
#ifdef NO_GENERATE_SUMMARY
#define _GENERATE_SUMMARY NULL
#else
#define __GENERATE_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _generate_summary)
#define _GENERATE_SUMMARY __GENERATE_SUMMARY(PASS_NAME)
#endif
#ifdef NO_READ_SUMMARY
#define _READ_SUMMARY NULL
#else
#define __READ_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _read_summary)
#define _READ_SUMMARY __READ_SUMMARY(PASS_NAME)
#endif
#ifdef NO_WRITE_SUMMARY
#define _WRITE_SUMMARY NULL
#else
#define __WRITE_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _write_summary)
#define _WRITE_SUMMARY __WRITE_SUMMARY(PASS_NAME)
#endif
#ifdef NO_READ_OPTIMIZATION_SUMMARY
#define _READ_OPTIMIZATION_SUMMARY NULL
#else
#define __READ_OPTIMIZATION_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _read_optimization_summary)
#define _READ_OPTIMIZATION_SUMMARY __READ_OPTIMIZATION_SUMMARY(PASS_NAME)
#endif
#ifdef NO_WRITE_OPTIMIZATION_SUMMARY
#define _WRITE_OPTIMIZATION_SUMMARY NULL
#else
#define __WRITE_OPTIMIZATION_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _write_optimization_summary)
#define _WRITE_OPTIMIZATION_SUMMARY __WRITE_OPTIMIZATION_SUMMARY(PASS_NAME)
#endif
#ifdef NO_STMT_FIXUP
#define _STMT_FIXUP NULL
#else
#define __STMT_FIXUP(n) _GCC_PLUGIN_CONCAT2(n, _stmt_fixup)
#define _STMT_FIXUP __STMT_FIXUP(PASS_NAME)
#endif
#ifdef NO_FUNCTION_TRANSFORM
#define _FUNCTION_TRANSFORM NULL
#else
#define __FUNCTION_TRANSFORM(n) _GCC_PLUGIN_CONCAT2(n, _function_transform)
#define _FUNCTION_TRANSFORM __FUNCTION_TRANSFORM(PASS_NAME)
#endif
#ifdef NO_VARIABLE_TRANSFORM
#define _VARIABLE_TRANSFORM NULL
#else
#define __VARIABLE_TRANSFORM(n) _GCC_PLUGIN_CONCAT2(n, _variable_transform)
#define _VARIABLE_TRANSFORM __VARIABLE_TRANSFORM(PASS_NAME)
#endif
#ifdef NO_GATE
#define _GATE NULL
#define _HAS_GATE false
#else
#define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate)
#define _GATE __GATE(PASS_NAME)
#define _HAS_GATE true
#endif
#ifdef NO_EXECUTE
#define _EXECUTE NULL
#define _HAS_EXECUTE false
#else
#define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute)
#define _EXECUTE __EXECUTE(PASS_NAME)
#define _HAS_EXECUTE true
#endif
#ifndef PROPERTIES_REQUIRED
#define PROPERTIES_REQUIRED 0
#endif
#ifndef PROPERTIES_PROVIDED
#define PROPERTIES_PROVIDED 0
#endif
#ifndef PROPERTIES_DESTROYED
#define PROPERTIES_DESTROYED 0
#endif
#ifndef TODO_FLAGS_START
#define TODO_FLAGS_START 0
#endif
#ifndef TODO_FLAGS_FINISH
#define TODO_FLAGS_FINISH 0
#endif
#ifndef FUNCTION_TRANSFORM_TODO_FLAGS_START
#define FUNCTION_TRANSFORM_TODO_FLAGS_START 0
#endif
#if BUILDING_GCC_VERSION >= 4009
namespace {
static const pass_data _PASS_NAME_PASS_DATA = {
.type = IPA_PASS,
.name = _PASS_NAME_NAME,
.optinfo_flags = OPTGROUP_NONE,
.tv_id = TV_NONE,
.properties_required = PROPERTIES_REQUIRED,
.properties_provided = PROPERTIES_PROVIDED,
.properties_destroyed = PROPERTIES_DESTROYED,
.todo_flags_start = TODO_FLAGS_START,
.todo_flags_finish = TODO_FLAGS_FINISH,
};
class _PASS_NAME_PASS : public ipa_opt_pass_d {
public:
_PASS_NAME_PASS() : ipa_opt_pass_d(_PASS_NAME_PASS_DATA,
g,
_GENERATE_SUMMARY,
_WRITE_SUMMARY,
_READ_SUMMARY,
_WRITE_OPTIMIZATION_SUMMARY,
_READ_OPTIMIZATION_SUMMARY,
_STMT_FIXUP,
FUNCTION_TRANSFORM_TODO_FLAGS_START,
_FUNCTION_TRANSFORM,
_VARIABLE_TRANSFORM) {}
#ifndef NO_GATE
virtual bool gate(function *) { return _GATE(); }
#endif
virtual opt_pass *clone() { return new _PASS_NAME_PASS(); }
#ifndef NO_EXECUTE
virtual unsigned int execute(function *) { return _EXECUTE(); }
#endif
};
}
opt_pass *_MAKE_PASS_NAME_PASS(void)
{
return new _PASS_NAME_PASS();
}
#else
struct opt_pass *_MAKE_PASS_NAME_PASS(void)
{
return &_PASS_NAME_PASS.pass;
}
#endif
/* clean up user provided defines */
#undef PASS_NAME
#undef NO_GENERATE_SUMMARY
#undef NO_WRITE_SUMMARY
#undef NO_READ_SUMMARY
#undef NO_WRITE_OPTIMIZATION_SUMMARY
#undef NO_READ_OPTIMIZATION_SUMMARY
#undef NO_STMT_FIXUP
#undef NO_FUNCTION_TRANSFORM
#undef NO_VARIABLE_TRANSFORM
#undef NO_GATE
#undef NO_EXECUTE
#undef FUNCTION_TRANSFORM_TODO_FLAGS_START
#undef PROPERTIES_DESTROYED
#undef PROPERTIES_PROVIDED
#undef PROPERTIES_REQUIRED
#undef TODO_FLAGS_FINISH
#undef TODO_FLAGS_START
/* clean up generated defines */
#undef _EXECUTE
#undef __EXECUTE
#undef _FUNCTION_TRANSFORM
#undef __FUNCTION_TRANSFORM
#undef _GATE
#undef __GATE
#undef _GCC_PLUGIN_CONCAT2
#undef _GCC_PLUGIN_CONCAT3
#undef _GCC_PLUGIN_STRINGIFY
#undef __GCC_PLUGIN_STRINGIFY
#undef _GENERATE_SUMMARY
#undef __GENERATE_SUMMARY
#undef _HAS_EXECUTE
#undef _HAS_GATE
#undef _MAKE_PASS_NAME_PASS
#undef __MAKE_PASS_NAME_PASS
#undef _PASS_NAME_NAME
#undef _PASS_NAME_PASS
#undef __PASS_NAME_PASS
#undef _PASS_NAME_PASS_DATA
#undef __PASS_NAME_PASS_DATA
#undef _READ_OPTIMIZATION_SUMMARY
#undef __READ_OPTIMIZATION_SUMMARY
#undef _READ_SUMMARY
#undef __READ_SUMMARY
#undef _STMT_FIXUP
#undef __STMT_FIXUP
#undef _VARIABLE_TRANSFORM
#undef __VARIABLE_TRANSFORM
#undef _WRITE_OPTIMIZATION_SUMMARY
#undef __WRITE_OPTIMIZATION_SUMMARY
#undef _WRITE_SUMMARY
#undef __WRITE_SUMMARY
#endif /* PASS_NAME */
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/bits.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>
#include <linux/mnt_idmapping.h>
#include "super.h"
#include "mds_client.h"
#include "crypto.h"
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage. Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid. If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
struct ceph_reconnect_state {
struct ceph_mds_session *session;
int nr_caps, nr_realms;
struct ceph_pagelist *pagelist;
unsigned msg_version;
bool allow_multi;
};
static void __wake_requests(struct ceph_mds_client *mdsc,
struct list_head *head);
static void ceph_cap_release_work(struct work_struct *work);
static void ceph_cap_reclaim_work(struct work_struct *work);
static const struct ceph_connection_operations mds_con_ops;
/*
* mds reply parsing
*/
static int parse_reply_info_quota(void **p, void *end,
struct ceph_mds_reply_info_in *info)
{
u8 struct_v, struct_compat;
u32 struct_len;
ceph_decode_8_safe(p, end, struct_v, bad);
ceph_decode_8_safe(p, end, struct_compat, bad);
/* struct_v is expected to be >= 1. we only
* understand encoding with struct_compat == 1. */
if (!struct_v || struct_compat != 1)
goto bad;
ceph_decode_32_safe(p, end, struct_len, bad);
ceph_decode_need(p, end, struct_len, bad);
end = *p + struct_len;
ceph_decode_64_safe(p, end, info->max_bytes, bad);
ceph_decode_64_safe(p, end, info->max_files, bad);
*p = end;
return 0;
bad:
return -EIO;
}
/*
* parse individual inode info
*/
static int parse_reply_info_in(void **p, void *end,
struct ceph_mds_reply_info_in *info,
u64 features)
{
int err = 0;
u8 struct_v = 0;
if (features == (u64)-1) {
u32 struct_len;
u8 struct_compat;
ceph_decode_8_safe(p, end, struct_v, bad);
ceph_decode_8_safe(p, end, struct_compat, bad);
/* struct_v is expected to be >= 1. we only understand
* encoding with struct_compat == 1. */
if (!struct_v || struct_compat != 1)
goto bad;
ceph_decode_32_safe(p, end, struct_len, bad);
ceph_decode_need(p, end, struct_len, bad);
end = *p + struct_len;
}
ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
info->in = *p;
*p += sizeof(struct ceph_mds_reply_inode) +
sizeof(*info->in->fragtree.splits) *
le32_to_cpu(info->in->fragtree.nsplits);
ceph_decode_32_safe(p, end, info->symlink_len, bad);
ceph_decode_need(p, end, info->symlink_len, bad);
info->symlink = *p;
*p += info->symlink_len;
ceph_decode_copy_safe(p, end, &info->dir_layout,
sizeof(info->dir_layout), bad);
ceph_decode_32_safe(p, end, info->xattr_len, bad);
ceph_decode_need(p, end, info->xattr_len, bad);
info->xattr_data = *p;
*p += info->xattr_len;
if (features == (u64)-1) {
/* inline data */
ceph_decode_64_safe(p, end, info->inline_version, bad);
ceph_decode_32_safe(p, end, info->inline_len, bad);
ceph_decode_need(p, end, info->inline_len, bad);
info->inline_data = *p;
*p += info->inline_len;
/* quota */
err = parse_reply_info_quota(p, end, info);
if (err < 0)
goto out_bad;
/* pool namespace */
ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
if (info->pool_ns_len > 0) {
ceph_decode_need(p, end, info->pool_ns_len, bad);
info->pool_ns_data = *p;
*p += info->pool_ns_len;
}
/* btime */
ceph_decode_need(p, end, sizeof(info->btime), bad);
ceph_decode_copy(p, &info->btime, sizeof(info->btime));
/* change attribute */
ceph_decode_64_safe(p, end, info->change_attr, bad);
/* dir pin */
if (struct_v >= 2) {
ceph_decode_32_safe(p, end, info->dir_pin, bad);
} else {
info->dir_pin = -ENODATA;
}
/* snapshot birth time, remains zero for v<=2 */
if (struct_v >= 3) {
ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
ceph_decode_copy(p, &info->snap_btime,
sizeof(info->snap_btime));
} else {
memset(&info->snap_btime, 0, sizeof(info->snap_btime));
}
/* snapshot count, remains zero for v<=3 */
if (struct_v >= 4) {
ceph_decode_64_safe(p, end, info->rsnaps, bad);
} else {
info->rsnaps = 0;
}
if (struct_v >= 5) {
u32 alen;
ceph_decode_32_safe(p, end, alen, bad);
while (alen--) {
u32 len;
/* key */
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_skip_n(p, end, len, bad);
/* value */
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_skip_n(p, end, len, bad);
}
}
/* fscrypt flag -- ignore */
if (struct_v >= 6)
ceph_decode_skip_8(p, end, bad);
info->fscrypt_auth = NULL;
info->fscrypt_auth_len = 0;
info->fscrypt_file = NULL;
info->fscrypt_file_len = 0;
if (struct_v >= 7) {
ceph_decode_32_safe(p, end, info->fscrypt_auth_len, bad);
if (info->fscrypt_auth_len) {
info->fscrypt_auth = kmalloc(info->fscrypt_auth_len,
GFP_KERNEL);
if (!info->fscrypt_auth)
return -ENOMEM;
ceph_decode_copy_safe(p, end, info->fscrypt_auth,
info->fscrypt_auth_len, bad);
}
ceph_decode_32_safe(p, end, info->fscrypt_file_len, bad);
if (info->fscrypt_file_len) {
info->fscrypt_file = kmalloc(info->fscrypt_file_len,
GFP_KERNEL);
if (!info->fscrypt_file)
return -ENOMEM;
ceph_decode_copy_safe(p, end, info->fscrypt_file,
info->fscrypt_file_len, bad);
}
}
*p = end;
} else {
/* legacy (unversioned) struct */
if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
ceph_decode_64_safe(p, end, info->inline_version, bad);
ceph_decode_32_safe(p, end, info->inline_len, bad);
ceph_decode_need(p, end, info->inline_len, bad);
info->inline_data = *p;
*p += info->inline_len;
} else
info->inline_version = CEPH_INLINE_NONE;
if (features & CEPH_FEATURE_MDS_QUOTA) {
err = parse_reply_info_quota(p, end, info);
if (err < 0)
goto out_bad;
} else {
info->max_bytes = 0;
info->max_files = 0;
}
info->pool_ns_len = 0;
info->pool_ns_data = NULL;
if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
if (info->pool_ns_len > 0) {
ceph_decode_need(p, end, info->pool_ns_len, bad);
info->pool_ns_data = *p;
*p += info->pool_ns_len;
}
}
if (features & CEPH_FEATURE_FS_BTIME) {
ceph_decode_need(p, end, sizeof(info->btime), bad);
ceph_decode_copy(p, &info->btime, sizeof(info->btime));
ceph_decode_64_safe(p, end, info->change_attr, bad);
}
info->dir_pin = -ENODATA;
/* info->snap_btime and info->rsnaps remain zero */
}
return 0;
bad:
err = -EIO;
out_bad:
return err;
}
static int parse_reply_info_dir(void **p, void *end,
struct ceph_mds_reply_dirfrag **dirfrag,
u64 features)
{
if (features == (u64)-1) {
u8 struct_v, struct_compat;
u32 struct_len;
ceph_decode_8_safe(p, end, struct_v, bad);
ceph_decode_8_safe(p, end, struct_compat, bad);
/* struct_v is expected to be >= 1. we only understand
* encoding whose struct_compat == 1. */
if (!struct_v || struct_compat != 1)
goto bad;
ceph_decode_32_safe(p, end, struct_len, bad);
ceph_decode_need(p, end, struct_len, bad);
end = *p + struct_len;
}
ceph_decode_need(p, end, sizeof(**dirfrag), bad);
*dirfrag = *p;
*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
if (unlikely(*p > end))
goto bad;
if (features == (u64)-1)
*p = end;
return 0;
bad:
return -EIO;
}
static int parse_reply_info_lease(void **p, void *end,
struct ceph_mds_reply_lease **lease,
u64 features, u32 *altname_len, u8 **altname)
{
u8 struct_v;
u32 struct_len;
void *lend;
if (features == (u64)-1) {
u8 struct_compat;
ceph_decode_8_safe(p, end, struct_v, bad);
ceph_decode_8_safe(p, end, struct_compat, bad);
/* struct_v is expected to be >= 1. we only understand
* encoding whose struct_compat == 1. */
if (!struct_v || struct_compat != 1)
goto bad;
ceph_decode_32_safe(p, end, struct_len, bad);
} else {
struct_len = sizeof(**lease);
*altname_len = 0;
*altname = NULL;
}
lend = *p + struct_len;
ceph_decode_need(p, end, struct_len, bad);
*lease = *p;
*p += sizeof(**lease);
if (features == (u64)-1) {
if (struct_v >= 2) {
ceph_decode_32_safe(p, end, *altname_len, bad);
ceph_decode_need(p, end, *altname_len, bad);
*altname = *p;
*p += *altname_len;
} else {
*altname = NULL;
*altname_len = 0;
}
}
*p = lend;
return 0;
bad:
return -EIO;
}
/*
* parse a normal reply, which may contain a (dir+)dentry and/or a
* target inode.
*/
static int parse_reply_info_trace(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features)
{
int err;
if (info->head->is_dentry) {
err = parse_reply_info_in(p, end, &info->diri, features);
if (err < 0)
goto out_bad;
err = parse_reply_info_dir(p, end, &info->dirfrag, features);
if (err < 0)
goto out_bad;
ceph_decode_32_safe(p, end, info->dname_len, bad);
ceph_decode_need(p, end, info->dname_len, bad);
info->dname = *p;
*p += info->dname_len;
err = parse_reply_info_lease(p, end, &info->dlease, features,
&info->altname_len, &info->altname);
if (err < 0)
goto out_bad;
}
if (info->head->is_target) {
err = parse_reply_info_in(p, end, &info->targeti, features);
if (err < 0)
goto out_bad;
}
if (unlikely(*p != end))
goto bad;
return 0;
bad:
err = -EIO;
out_bad:
pr_err("problem parsing mds trace %d\n", err);
return err;
}
/*
* parse readdir results
*/
static int parse_reply_info_readdir(void **p, void *end,
struct ceph_mds_request *req,
u64 features)
{
struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
struct ceph_client *cl = req->r_mdsc->fsc->client;
u32 num, i = 0;
int err;
err = parse_reply_info_dir(p, end, &info->dir_dir, features);
if (err < 0)
goto out_bad;
ceph_decode_need(p, end, sizeof(num) + 2, bad);
num = ceph_decode_32(p);
{
u16 flags = ceph_decode_16(p);
info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
}
if (num == 0)
goto done;
BUG_ON(!info->dir_entries);
if ((unsigned long)(info->dir_entries + num) >
(unsigned long)info->dir_entries + info->dir_buf_size) {
pr_err_client(cl, "dir contents are larger than expected\n");
WARN_ON(1);
goto bad;
}
info->dir_nr = num;
while (num) {
struct inode *inode = d_inode(req->r_dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
struct fscrypt_str tname = FSTR_INIT(NULL, 0);
struct fscrypt_str oname = FSTR_INIT(NULL, 0);
struct ceph_fname fname;
u32 altname_len, _name_len;
u8 *altname, *_name;
/* dentry */
ceph_decode_32_safe(p, end, _name_len, bad);
ceph_decode_need(p, end, _name_len, bad);
_name = *p;
*p += _name_len;
doutc(cl, "parsed dir dname '%.*s'\n", _name_len, _name);
if (info->hash_order)
rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
_name, _name_len);
/* dentry lease */
err = parse_reply_info_lease(p, end, &rde->lease, features,
&altname_len, &altname);
if (err)
goto out_bad;
/*
 * Try to decrypt the dentry names and update them
 * in the ceph_mds_reply_dir_entry struct.
 */
fname.dir = inode;
fname.name = _name;
fname.name_len = _name_len;
fname.ctext = altname;
fname.ctext_len = altname_len;
/*
 * _name_len may be larger than altname_len, e.g. when the
 * human-readable name length is in the range of
 * (CEPH_NOHASH_NAME_MAX, CEPH_NOHASH_NAME_MAX + SHA256_DIGEST_SIZE);
 * the copy in ceph_fname_to_usr would then corrupt the data
 * if there is no encryption key.
 *
 * Just set the no_copy flag; then, if there is no encryption
 * key, oname.name will always be assigned to _name.
 */
fname.no_copy = true;
if (altname_len == 0) {
/*
 * Set tname to _name; this will be used to do
 * the base64_decode in-place. It's safe because
 * the decoded string is always shorter, at 3/4
 * of the original string's length.
 */
tname.name = _name;
/*
 * Set oname to _name too; this will be used to
 * do the decryption in-place.
 */
oname.name = _name;
oname.len = _name_len;
} else {
/*
 * Decrypt in-place directly from the altname
 * ciphertext.
 */
oname.name = altname;
oname.len = altname_len;
}
rde->is_nokey = false;
err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey);
if (err) {
pr_err_client(cl, "unable to decode %.*s, got %d\n",
_name_len, _name, err);
goto out_bad;
}
rde->name = oname.name;
rde->name_len = oname.len;
/* inode */
err = parse_reply_info_in(p, end, &rde->inode, features);
if (err < 0)
goto out_bad;
/* ceph_readdir_prepopulate() will update it */
rde->offset = 0;
i++;
num--;
}
done:
/* Skip over any unrecognized fields */
*p = end;
return 0;
bad:
err = -EIO;
out_bad:
pr_err_client(cl, "problem parsing dir contents %d\n", err);
return err;
}
/*
* parse fcntl F_GETLK results
*/
static int parse_reply_info_filelock(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features)
{
if (*p + sizeof(*info->filelock_reply) > end)
goto bad;
info->filelock_reply = *p;
/* Skip over any unrecognized fields */
*p = end;
return 0;
bad:
return -EIO;
}
#if BITS_PER_LONG == 64
#define DELEGATED_INO_AVAILABLE xa_mk_value(1)
static int ceph_parse_deleg_inos(void **p, void *end,
struct ceph_mds_session *s)
{
struct ceph_client *cl = s->s_mdsc->fsc->client;
u32 sets;
ceph_decode_32_safe(p, end, sets, bad);
doutc(cl, "got %u sets of delegated inodes\n", sets);
while (sets--) {
u64 start, len;
ceph_decode_64_safe(p, end, start, bad);
ceph_decode_64_safe(p, end, len, bad);
/* Don't accept a delegation of system inodes */
if (start < CEPH_INO_SYSTEM_BASE) {
pr_warn_ratelimited_client(cl,
"ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
start, len);
continue;
}
while (len--) {
int err = xa_insert(&s->s_delegated_inos, start++,
DELEGATED_INO_AVAILABLE,
GFP_KERNEL);
if (!err) {
doutc(cl, "added delegated inode 0x%llx\n", start - 1);
} else if (err == -EBUSY) {
pr_warn_client(cl,
"MDS delegated inode 0x%llx more than once.\n",
start - 1);
} else {
return err;
}
}
}
return 0;
bad:
return -EIO;
}
u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
unsigned long ino;
void *val;
xa_for_each(&s->s_delegated_inos, ino, val) {
val = xa_erase(&s->s_delegated_inos, ino);
if (val == DELEGATED_INO_AVAILABLE)
return ino;
}
return 0;
}
int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
GFP_KERNEL);
}
#else /* BITS_PER_LONG == 64 */
/*
* FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
* ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
* and bottom words?
*/
static int ceph_parse_deleg_inos(void **p, void *end,
struct ceph_mds_session *s)
{
u32 sets;
ceph_decode_32_safe(p, end, sets, bad);
if (sets)
ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
return 0;
bad:
return -EIO;
}
u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
{
return 0;
}
int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
{
return 0;
}
#endif /* BITS_PER_LONG == 64 */
/*
* parse create results
*/
static int parse_reply_info_create(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features, struct ceph_mds_session *s)
{
int ret;
if (features == (u64)-1 ||
(features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
if (*p == end) {
/* Malformed reply? */
info->has_create_ino = false;
} else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
info->has_create_ino = true;
/* struct_v, struct_compat, and len */
ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
ceph_decode_64_safe(p, end, info->ino, bad);
ret = ceph_parse_deleg_inos(p, end, s);
if (ret)
return ret;
} else {
/* legacy */
ceph_decode_64_safe(p, end, info->ino, bad);
info->has_create_ino = true;
}
} else {
if (*p != end)
goto bad;
}
/* Skip over any unrecognized fields */
*p = end;
return 0;
bad:
return -EIO;
}
static int parse_reply_info_getvxattr(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features)
{
u32 value_len;
ceph_decode_skip_8(p, end, bad); /* skip current version: 1 */
ceph_decode_skip_8(p, end, bad); /* skip first version: 1 */
ceph_decode_skip_32(p, end, bad); /* skip payload length */
ceph_decode_32_safe(p, end, value_len, bad);
if (value_len == end - *p) {
info->xattr_info.xattr_value = *p;
info->xattr_info.xattr_value_len = value_len;
*p = end;
return value_len;
}
bad:
return -EIO;
}
/*
* parse extra results
*/
static int parse_reply_info_extra(void **p, void *end,
struct ceph_mds_request *req,
u64 features, struct ceph_mds_session *s)
{
struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
u32 op = le32_to_cpu(info->head->op);
if (op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
return parse_reply_info_readdir(p, end, req, features);
else if (op == CEPH_MDS_OP_CREATE)
return parse_reply_info_create(p, end, info, features, s);
else if (op == CEPH_MDS_OP_GETVXATTR)
return parse_reply_info_getvxattr(p, end, info, features);
else
return -EIO;
}
/*
* parse entire mds reply
*/
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
struct ceph_mds_request *req, u64 features)
{
struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
struct ceph_client *cl = s->s_mdsc->fsc->client;
void *p, *end;
u32 len;
int err;
info->head = msg->front.iov_base;
p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
/* trace */
ceph_decode_32_safe(&p, end, len, bad);
if (len > 0) {
ceph_decode_need(&p, end, len, bad);
err = parse_reply_info_trace(&p, p+len, info, features);
if (err < 0)
goto out_bad;
}
/* extra */
ceph_decode_32_safe(&p, end, len, bad);
if (len > 0) {
ceph_decode_need(&p, end, len, bad);
err = parse_reply_info_extra(&p, p+len, req, features, s);
if (err < 0)
goto out_bad;
}
/* snap blob */
ceph_decode_32_safe(&p, end, len, bad);
info->snapblob_len = len;
info->snapblob = p;
p += len;
if (p != end)
goto bad;
return 0;
bad:
err = -EIO;
out_bad:
pr_err_client(cl, "mds parse_reply err %d\n", err);
ceph_msg_dump(msg);
return err;
}
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
int i;
kfree(info->diri.fscrypt_auth);
kfree(info->diri.fscrypt_file);
kfree(info->targeti.fscrypt_auth);
kfree(info->targeti.fscrypt_file);
if (!info->dir_entries)
return;
for (i = 0; i < info->dir_nr; i++) {
struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
kfree(rde->inode.fscrypt_auth);
kfree(rde->inode.fscrypt_file);
}
free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}
/*
 * In the async unlink case the kclient won't wait for the first reply
 * from the MDS; it just drops all the links, unhashes the dentry and
 * then succeeds immediately.
 *
 * For any new create/link/rename/etc. request that follows and reuses
 * the same file name, we must wait for the first reply of the inflight
 * unlink request, or the MDS may fail these following requests with
 * -EEXIST if the inflight async unlink request was delayed for some
 * reason.
 *
 * And the worst case is that for a non-async openc request it will
 * successfully open the file if the CDentry hasn't been unlinked yet,
 * but later the previously delayed async unlink request will remove
 * the CDentry. That means the just-created file may be deleted later
 * by accident.
 *
 * We need to wait for the inflight async unlink requests to finish
 * when creating new files/directories by using the same file names.
 */
int ceph_wait_on_conflict_unlink(struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
struct ceph_client *cl = fsc->client;
struct dentry *pdentry = dentry->d_parent;
struct dentry *udentry, *found = NULL;
struct ceph_dentry_info *di;
struct qstr dname;
u32 hash = dentry->d_name.hash;
int err;
dname.name = dentry->d_name.name;
dname.len = dentry->d_name.len;
rcu_read_lock();
hash_for_each_possible_rcu(fsc->async_unlink_conflict, di,
hnode, hash) {
udentry = di->dentry;
spin_lock(&udentry->d_lock);
if (udentry->d_name.hash != hash)
goto next;
if (unlikely(udentry->d_parent != pdentry))
goto next;
if (!hash_hashed(&di->hnode))
goto next;
if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
pr_warn_client(cl, "dentry %p:%pd async unlink bit is not set\n",
dentry, dentry);
if (!d_same_name(udentry, pdentry, &dname))
goto next;
found = dget_dlock(udentry);
spin_unlock(&udentry->d_lock);
break;
next:
spin_unlock(&udentry->d_lock);
}
rcu_read_unlock();
if (likely(!found))
return 0;
doutc(cl, "dentry %p:%pd conflict with old %p:%pd\n", dentry, dentry,
found, found);
err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
TASK_KILLABLE);
dput(found);
return err;
}
/*
* sessions
*/
const char *ceph_session_state_name(int s)
{
switch (s) {
case CEPH_MDS_SESSION_NEW: return "new";
case CEPH_MDS_SESSION_OPENING: return "opening";
case CEPH_MDS_SESSION_OPEN: return "open";
case CEPH_MDS_SESSION_HUNG: return "hung";
case CEPH_MDS_SESSION_CLOSING: return "closing";
case CEPH_MDS_SESSION_CLOSED: return "closed";
case CEPH_MDS_SESSION_RESTARTING: return "restarting";
case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
case CEPH_MDS_SESSION_REJECTED: return "rejected";
default: return "???";
}
}
struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{
if (refcount_inc_not_zero(&s->s_ref))
return s;
return NULL;
}
void ceph_put_mds_session(struct ceph_mds_session *s)
{
if (IS_ERR_OR_NULL(s))
return;
if (refcount_dec_and_test(&s->s_ref)) {
if (s->s_auth.authorizer)
ceph_auth_destroy_authorizer(s->s_auth.authorizer);
WARN_ON(mutex_is_locked(&s->s_mutex));
xa_destroy(&s->s_delegated_inos);
kfree(s);
}
}
/*
* called under mdsc->mutex
*/
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
int mds)
{
if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
return NULL;
return ceph_get_mds_session(mdsc->sessions[mds]);
}
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
return false;
else
return true;
}
static int __verify_registered_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s)
{
if (s->s_mds >= mdsc->max_sessions ||
mdsc->sessions[s->s_mds] != s)
return -ENOENT;
return 0;
}
/*
* create+register a new session for given mds.
* called under mdsc->mutex.
*/
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
int mds)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *s;
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
return ERR_PTR(-EIO);
if (mds >= mdsc->mdsmap->possible_max_rank)
return ERR_PTR(-EINVAL);
s = kzalloc(sizeof(*s), GFP_NOFS);
if (!s)
return ERR_PTR(-ENOMEM);
if (mds >= mdsc->max_sessions) {
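/* grow the session array to the next power of two that holds mds */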
int newmax = 1 << get_count_order(mds + 1);
struct ceph_mds_session **sa;
doutc(cl, "realloc to %d\n", newmax);
sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
if (!sa)
goto fail_realloc;
if (mdsc->sessions) {
memcpy(sa, mdsc->sessions,
mdsc->max_sessions * sizeof(void *));
kfree(mdsc->sessions);
}
mdsc->sessions = sa;
mdsc->max_sessions = newmax;
}
doutc(cl, "mds%d\n", mds);
s->s_mdsc = mdsc;
s->s_mds = mds;
s->s_state = CEPH_MDS_SESSION_NEW;
mutex_init(&s->s_mutex);
ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
atomic_set(&s->s_cap_gen, 1);
s->s_cap_ttl = jiffies - 1;
spin_lock_init(&s->s_cap_lock);
INIT_LIST_HEAD(&s->s_caps);
refcount_set(&s->s_ref, 1);
INIT_LIST_HEAD(&s->s_waiting);
INIT_LIST_HEAD(&s->s_unsafe);
xa_init(&s->s_delegated_inos);
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
INIT_LIST_HEAD(&s->s_cap_dirty);
INIT_LIST_HEAD(&s->s_cap_flushing);
mdsc->sessions[mds] = s;
atomic_inc(&mdsc->num_sessions);
refcount_inc(&s->s_ref); /* one ref to sessions[], one to caller */
ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
return s;
fail_realloc:
kfree(s);
return ERR_PTR(-ENOMEM);
}
/*
* called under mdsc->mutex
*/
static void __unregister_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s)
{
doutc(mdsc->fsc->client, "mds%d %p\n", s->s_mds, s);
BUG_ON(mdsc->sessions[s->s_mds] != s);
mdsc->sessions[s->s_mds] = NULL;
ceph_con_close(&s->s_con);
ceph_put_mds_session(s);
atomic_dec(&mdsc->num_sessions);
}
/*
* drop session refs in request.
*
* should be last request ref, or hold mdsc->mutex
*/
static void put_request_session(struct ceph_mds_request *req)
{
if (req->r_session) {
ceph_put_mds_session(req->r_session);
req->r_session = NULL;
}
}
void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
void (*cb)(struct ceph_mds_session *),
bool check_state)
{
int mds;
mutex_lock(&mdsc->mutex);
for (mds = 0; mds < mdsc->max_sessions; ++mds) {
struct ceph_mds_session *s;
s = __ceph_lookup_mds_session(mdsc, mds);
if (!s)
continue;
if (check_state && !check_session_state(s)) {
ceph_put_mds_session(s);
continue;
}
mutex_unlock(&mdsc->mutex);
cb(s);
ceph_put_mds_session(s);
mutex_lock(&mdsc->mutex);
}
mutex_unlock(&mdsc->mutex);
}
void ceph_mdsc_release_request(struct kref *kref)
{
struct ceph_mds_request *req = container_of(kref,
struct ceph_mds_request,
r_kref);
ceph_mdsc_release_dir_caps_async(req);
destroy_reply_info(&req->r_reply_info);
if (req->r_request)
ceph_msg_put(req->r_request);
if (req->r_reply)
ceph_msg_put(req->r_reply);
if (req->r_inode) {
ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
iput(req->r_inode);
}
if (req->r_parent) {
ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
iput(req->r_parent);
}
iput(req->r_target_inode);
iput(req->r_new_inode);
if (req->r_dentry)
dput(req->r_dentry);
if (req->r_old_dentry)
dput(req->r_old_dentry);
if (req->r_old_dentry_dir) {
/*
* track (and drop pins for) r_old_dentry_dir
* separately, since r_old_dentry's d_parent may have
* changed between the dir mutex being dropped and
* this request being freed.
*/
ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
iput(req->r_old_dentry_dir);
}
kfree(req->r_path1);
kfree(req->r_path2);
put_cred(req->r_cred);
if (req->r_mnt_idmap)
mnt_idmap_put(req->r_mnt_idmap);
if (req->r_pagelist)
ceph_pagelist_release(req->r_pagelist);
kfree(req->r_fscrypt_auth);
kfree(req->r_altname);
put_request_session(req);
ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
WARN_ON_ONCE(!list_empty(&req->r_wait));
kmem_cache_free(ceph_mds_request_cachep, req);
}
DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
/*
* lookup request, bump ref if found.
*
* called under mdsc->mutex.
*/
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
struct ceph_mds_request *req;
req = lookup_request(&mdsc->request_tree, tid);
if (req)
ceph_mdsc_get_request(req);
return req;
}
/*
* Register an in-flight request, and assign a tid. Link to the
* directory we are modifying (if any).
*
* Called under mdsc->mutex.
*/
static void __register_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
struct inode *dir)
{
struct ceph_client *cl = mdsc->fsc->client;
int ret = 0;
req->r_tid = ++mdsc->last_tid;
if (req->r_num_caps) {
ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
req->r_num_caps);
if (ret < 0) {
pr_err_client(cl, "%p failed to reserve caps: %d\n",
req, ret);
/* set req->r_err to fail early from __do_request */
req->r_err = ret;
return;
}
}
doutc(cl, "%p tid %lld\n", req, req->r_tid);
ceph_mdsc_get_request(req);
insert_request(&mdsc->request_tree, req);
req->r_cred = get_current_cred();
if (!req->r_mnt_idmap)
req->r_mnt_idmap = &nop_mnt_idmap;
if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
mdsc->oldest_tid = req->r_tid;
if (dir) {
struct ceph_inode_info *ci = ceph_inode(dir);
ihold(dir);
req->r_unsafe_dir = dir;
spin_lock(&ci->i_unsafe_lock);
list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
spin_unlock(&ci->i_unsafe_lock);
}
}
static void __unregister_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
doutc(mdsc->fsc->client, "%p tid %lld\n", req, req->r_tid);
/* Never leave an unregistered request on an unsafe list! */
list_del_init(&req->r_unsafe_item);
if (req->r_tid == mdsc->oldest_tid) {
struct rb_node *p = rb_next(&req->r_node);
mdsc->oldest_tid = 0;
while (p) {
struct ceph_mds_request *next_req =
rb_entry(p, struct ceph_mds_request, r_node);
if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
mdsc->oldest_tid = next_req->r_tid;
break;
}
p = rb_next(p);
}
}
erase_request(&mdsc->request_tree, req);
if (req->r_unsafe_dir) {
struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
spin_lock(&ci->i_unsafe_lock);
list_del_init(&req->r_unsafe_dir_item);
spin_unlock(&ci->i_unsafe_lock);
}
if (req->r_target_inode &&
test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
spin_lock(&ci->i_unsafe_lock);
list_del_init(&req->r_unsafe_target_item);
spin_unlock(&ci->i_unsafe_lock);
}
if (req->r_unsafe_dir) {
iput(req->r_unsafe_dir);
req->r_unsafe_dir = NULL;
}
complete_all(&req->r_safe_completion);
ceph_mdsc_put_request(req);
}
/*
* Walk back up the dentry tree until we hit a dentry representing a
* non-snapshot inode. We do this using the rcu_read_lock (which must be held
* when calling this) to ensure that the objects won't disappear while we're
* working with them. Once we hit a candidate dentry, we attempt to take a
* reference to it, and return that as the result.
*/
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
struct inode *inode = NULL;
while (dentry && !IS_ROOT(dentry)) {
inode = d_inode_rcu(dentry);
if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
break;
dentry = dentry->d_parent;
}
if (inode)
inode = igrab(inode);
return inode;
}
/*
* Choose mds to send request to next. If there is a hint set in the
* request (e.g., due to a prior forward hint from the mds), use that.
* Otherwise, consult frag tree and/or caps to identify the
* appropriate mds. If all else fails, choose randomly.
*
* Called under mdsc->mutex.
*/
static int __choose_mds(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
bool *random)
{
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_cap *cap;
int mode = req->r_direct_mode;
int mds = -1;
u32 hash = req->r_direct_hash;
bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
struct ceph_client *cl = mdsc->fsc->client;
if (random)
*random = false;
/*
* is there a specific mds we should try? ignore hint if we have
* no session and the mds is not up (active or recovering).
*/
if (req->r_resend_mds >= 0 &&
(__have_session(mdsc, req->r_resend_mds) ||
ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
doutc(cl, "using resend_mds mds%d\n", req->r_resend_mds);
return req->r_resend_mds;
}
if (mode == USE_RANDOM_MDS)
goto random;
inode = NULL;
if (req->r_inode) {
if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
inode = req->r_inode;
ihold(inode);
} else {
/* req->r_dentry is non-null for LSSNAP request */
rcu_read_lock();
inode = get_nonsnap_parent(req->r_dentry);
rcu_read_unlock();
doutc(cl, "using snapdir's parent %p %llx.%llx\n",
inode, ceph_vinop(inode));
}
} else if (req->r_dentry) {
/* ignore race with rename; old or new d_parent is okay */
struct dentry *parent;
struct inode *dir;
rcu_read_lock();
parent = READ_ONCE(req->r_dentry->d_parent);
dir = req->r_parent ? : d_inode_rcu(parent);
if (!dir || dir->i_sb != mdsc->fsc->sb) {
/* not this fs or parent went negative */
inode = d_inode(req->r_dentry);
if (inode)
ihold(inode);
} else if (ceph_snap(dir) != CEPH_NOSNAP) {
/* direct snapped/virtual snapdir requests
* based on parent dir inode */
inode = get_nonsnap_parent(parent);
doutc(cl, "using nonsnap parent %p %llx.%llx\n",
inode, ceph_vinop(inode));
} else {
/* dentry target */
inode = d_inode(req->r_dentry);
if (!inode || mode == USE_AUTH_MDS) {
/* dir + name */
inode = igrab(dir);
hash = ceph_dentry_hash(dir, req->r_dentry);
is_hash = true;
} else {
ihold(inode);
}
}
rcu_read_unlock();
}
if (!inode)
goto random;
doutc(cl, "%p %llx.%llx is_hash=%d (0x%x) mode %d\n", inode,
ceph_vinop(inode), (int)is_hash, hash, mode);
ci = ceph_inode(inode);
if (is_hash && S_ISDIR(inode->i_mode)) {
struct ceph_inode_frag frag;
int found;
ceph_choose_frag(ci, hash, &frag, &found);
if (found) {
if (mode == USE_ANY_MDS && frag.ndist > 0) {
u8 r;
/* choose a random replica */
get_random_bytes(&r, 1);
r %= frag.ndist;
mds = frag.dist[r];
doutc(cl, "%p %llx.%llx frag %u mds%d (%d/%d)\n",
inode, ceph_vinop(inode), frag.frag,
mds, (int)r, frag.ndist);
if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
CEPH_MDS_STATE_ACTIVE &&
!ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
goto out;
}
			/* since this file/dir wasn't known to be
			 * replicated, look for the authoritative mds. */
if (frag.mds >= 0) {
/* choose auth mds */
mds = frag.mds;
doutc(cl, "%p %llx.%llx frag %u mds%d (auth)\n",
inode, ceph_vinop(inode), frag.frag, mds);
if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
CEPH_MDS_STATE_ACTIVE) {
if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
mds))
goto out;
}
}
mode = USE_AUTH_MDS;
}
}
spin_lock(&ci->i_ceph_lock);
cap = NULL;
if (mode == USE_AUTH_MDS)
cap = ci->i_auth_cap;
if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
if (!cap) {
spin_unlock(&ci->i_ceph_lock);
iput(inode);
goto random;
}
mds = cap->session->s_mds;
doutc(cl, "%p %llx.%llx mds%d (%scap %p)\n", inode,
ceph_vinop(inode), mds,
cap == ci->i_auth_cap ? "auth " : "", cap);
spin_unlock(&ci->i_ceph_lock);
out:
iput(inode);
return mds;
random:
if (random)
*random = true;
mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
doutc(cl, "chose random mds%d\n", mds);
return mds;
}
/*
* session messages
*/
struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
{
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
false);
if (!msg) {
pr_err("ENOMEM creating session %s msg\n",
ceph_session_op_name(op));
return NULL;
}
h = msg->front.iov_base;
h->op = cpu_to_le32(op);
h->seq = cpu_to_le64(seq);
return msg;
}
static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
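/*
 * Encode the client's supported-feature bitmap: a 32-bit byte count
 * followed by the bitmap itself. FEATURE_BYTES() rounds the highest
 * feature bit up to a 64-bit boundary, so the bitmap size is always a
 * multiple of 8 bytes.
 */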
static int encode_supported_features(void **p, void *end)
{
static const size_t count = ARRAY_SIZE(feature_bits);
if (count > 0) {
size_t i;
size_t size = FEATURE_BYTES(count);
unsigned long bit;
if (WARN_ON_ONCE(*p + 4 + size > end))
return -ERANGE;
ceph_encode_32(p, size);
memset(*p, 0, size);
for (i = 0; i < count; i++) {
bit = feature_bits[i];
((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
}
*p += size;
} else {
if (WARN_ON_ONCE(*p + 4 > end))
return -ERANGE;
ceph_encode_32(p, 0);
}
return 0;
}
static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
#define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
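/*
 * Encode the metric spec: a one-byte version and compat header, a
 * 32-bit payload length, then a length-prefixed bitmap of the metrics
 * this client supports. An empty spec still encodes the header and a
 * zero-length bitmap.
 */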
static int encode_metric_spec(void **p, void *end)
{
static const size_t count = ARRAY_SIZE(metric_bits);
/* header */
if (WARN_ON_ONCE(*p + 2 > end))
return -ERANGE;
ceph_encode_8(p, 1); /* version */
ceph_encode_8(p, 1); /* compat */
if (count > 0) {
size_t i;
size_t size = METRIC_BYTES(count);
if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
return -ERANGE;
/* metric spec info length */
ceph_encode_32(p, 4 + size);
/* metric spec */
ceph_encode_32(p, size);
memset(*p, 0, size);
for (i = 0; i < count; i++)
((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
*p += size;
} else {
if (WARN_ON_ONCE(*p + 4 + 4 > end))
return -ERANGE;
/* metric spec info length */
ceph_encode_32(p, 4);
/* metric spec */
ceph_encode_32(p, 0);
}
return 0;
}
/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN and
 * CEPH_SESSION_REQUEST_RENEWCAPS to include additional client
 * metadata fields.
*/
static struct ceph_msg *
create_session_full_msg(struct ceph_mds_client *mdsc, int op, u64 seq)
{
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
int i;
int extra_bytes = 0;
int metadata_key_count = 0;
struct ceph_options *opt = mdsc->fsc->client->options;
struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
struct ceph_client *cl = mdsc->fsc->client;
size_t size, count;
void *p, *end;
int ret;
const char* metadata[][2] = {
{"hostname", mdsc->nodename},
{"kernel_version", init_utsname()->release},
{"entity_id", opt->name ? : ""},
{"root", fsopt->server_path ? : "/"},
{NULL, NULL}
};
/* Calculate serialized length of metadata */
extra_bytes = 4; /* map length */
for (i = 0; metadata[i][0]; ++i) {
extra_bytes += 8 + strlen(metadata[i][0]) +
strlen(metadata[i][1]);
metadata_key_count++;
}
/* supported feature */
size = 0;
count = ARRAY_SIZE(feature_bits);
if (count > 0)
size = FEATURE_BYTES(count);
extra_bytes += 4 + size;
/* metric spec */
size = 0;
count = ARRAY_SIZE(metric_bits);
if (count > 0)
size = METRIC_BYTES(count);
extra_bytes += 2 + 4 + 4 + size;
/* flags, mds auth caps and oldest_client_tid */
extra_bytes += 4 + 4 + 8;
/* Allocate the message */
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
GFP_NOFS, false);
if (!msg) {
pr_err_client(cl, "ENOMEM creating session open msg\n");
return ERR_PTR(-ENOMEM);
}
p = msg->front.iov_base;
end = p + msg->front.iov_len;
h = p;
h->op = cpu_to_le32(op);
h->seq = cpu_to_le64(seq);
/*
* Serialize client metadata into waiting buffer space, using
* the format that userspace expects for map<string, string>
*
* ClientSession messages with metadata are v7
*/
msg->hdr.version = cpu_to_le16(7);
msg->hdr.compat_version = cpu_to_le16(1);
/* The write pointer, following the session_head structure */
p += sizeof(*h);
/* Number of entries in the map */
ceph_encode_32(&p, metadata_key_count);
/* Two length-prefixed strings for each entry in the map */
for (i = 0; metadata[i][0]; ++i) {
size_t const key_len = strlen(metadata[i][0]);
size_t const val_len = strlen(metadata[i][1]);
ceph_encode_32(&p, key_len);
memcpy(p, metadata[i][0], key_len);
p += key_len;
ceph_encode_32(&p, val_len);
memcpy(p, metadata[i][1], val_len);
p += val_len;
}
ret = encode_supported_features(&p, end);
if (ret) {
pr_err_client(cl, "encode_supported_features failed!\n");
ceph_msg_put(msg);
return ERR_PTR(ret);
}
ret = encode_metric_spec(&p, end);
if (ret) {
pr_err_client(cl, "encode_metric_spec failed!\n");
ceph_msg_put(msg);
return ERR_PTR(ret);
}
/* version == 5, flags */
ceph_encode_32(&p, 0);
/* version == 6, mds auth caps */
ceph_encode_32(&p, 0);
/* version == 7, oldest_client_tid */
ceph_encode_64(&p, mdsc->oldest_tid);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
return msg;
}
/*
* send session open request.
*
* called under mdsc->mutex
*/
static int __open_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_msg *msg;
int mstate;
int mds = session->s_mds;
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
return -EIO;
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
doutc(mdsc->fsc->client, "open_session to mds%d (%s)\n", mds,
ceph_mds_state_name(mstate));
session->s_state = CEPH_MDS_SESSION_OPENING;
session->s_renew_requested = jiffies;
/* send connect message */
msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_OPEN,
session->s_seq);
if (IS_ERR(msg))
return PTR_ERR(msg);
ceph_con_send(&session->s_con, msg);
return 0;
}
/*
* open sessions for any export targets for the given mds
*
* called under mdsc->mutex
*/
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
struct ceph_mds_session *session;
int ret;
session = __ceph_lookup_mds_session(mdsc, target);
if (!session) {
session = register_session(mdsc, target);
if (IS_ERR(session))
return session;
}
if (session->s_state == CEPH_MDS_SESSION_NEW ||
session->s_state == CEPH_MDS_SESSION_CLOSING) {
ret = __open_session(mdsc, session);
if (ret)
return ERR_PTR(ret);
}
return session;
}
struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
struct ceph_mds_session *session;
struct ceph_client *cl = mdsc->fsc->client;
doutc(cl, "to mds%d\n", target);
mutex_lock(&mdsc->mutex);
session = __open_export_target_session(mdsc, target);
mutex_unlock(&mdsc->mutex);
return session;
}
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_mds_info *mi;
struct ceph_mds_session *ts;
int i, mds = session->s_mds;
struct ceph_client *cl = mdsc->fsc->client;
if (mds >= mdsc->mdsmap->possible_max_rank)
return;
mi = &mdsc->mdsmap->m_info[mds];
doutc(cl, "for mds%d (%d targets)\n", session->s_mds,
mi->num_export_targets);
for (i = 0; i < mi->num_export_targets; i++) {
ts = __open_export_target_session(mdsc, mi->export_targets[i]);
ceph_put_mds_session(ts);
}
}
/*
* session caps
*/
static void detach_cap_releases(struct ceph_mds_session *session,
struct list_head *target)
{
struct ceph_client *cl = session->s_mdsc->fsc->client;
lockdep_assert_held(&session->s_cap_lock);
list_splice_init(&session->s_cap_releases, target);
session->s_num_cap_releases = 0;
doutc(cl, "mds%d\n", session->s_mds);
}
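/*
 * Put the caps on a list previously detached from a session with
 * detach_cap_releases(); called after s_cap_lock has been dropped.
 */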
static void dispose_cap_releases(struct ceph_mds_client *mdsc,
struct list_head *dispose)
{
while (!list_empty(dispose)) {
struct ceph_cap *cap;
/* zero out the in-progress message */
cap = list_first_entry(dispose, struct ceph_cap, session_caps);
list_del(&cap->session_caps);
ceph_put_cap(mdsc, cap);
}
}
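/*
 * Drop all unsafe requests attached to the session (marking the
 * affected mappings with -EIO) and zero r_attempts on any remaining
 * requests that targeted it, so kick_requests() will re-send them.
 */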
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct rb_node *p;
doutc(cl, "mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
while (!list_empty(&session->s_unsafe)) {
req = list_first_entry(&session->s_unsafe,
struct ceph_mds_request, r_unsafe_item);
pr_warn_ratelimited_client(cl, " dropping unsafe request %llu\n",
req->r_tid);
if (req->r_target_inode)
mapping_set_error(req->r_target_inode->i_mapping, -EIO);
if (req->r_unsafe_dir)
mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
__unregister_request(mdsc, req);
}
/* zero r_attempts, so kick_requests() will re-send requests */
p = rb_first(&mdsc->request_tree);
while (p) {
req = rb_entry(p, struct ceph_mds_request, r_node);
p = rb_next(p);
if (req->r_session &&
req->r_session->s_mds == session->s_mds)
req->r_attempts = 0;
}
mutex_unlock(&mdsc->mutex);
}
/*
* Helper to safely iterate over all caps associated with a session, with
* special care taken to handle a racing __ceph_remove_cap().
*
* Caller must hold session s_mutex.
*/
int ceph_iterate_session_caps(struct ceph_mds_session *session,
int (*cb)(struct inode *, int mds, void *),
void *arg)
{
struct ceph_client *cl = session->s_mdsc->fsc->client;
struct list_head *p;
struct ceph_cap *cap;
struct inode *inode, *last_inode = NULL;
struct ceph_cap *old_cap = NULL;
int ret;
doutc(cl, "%p mds%d\n", session, session->s_mds);
spin_lock(&session->s_cap_lock);
p = session->s_caps.next;
while (p != &session->s_caps) {
int mds;
cap = list_entry(p, struct ceph_cap, session_caps);
inode = igrab(&cap->ci->netfs.inode);
if (!inode) {
p = p->next;
continue;
}
session->s_cap_iterator = cap;
mds = cap->mds;
spin_unlock(&session->s_cap_lock);
if (last_inode) {
iput(last_inode);
last_inode = NULL;
}
if (old_cap) {
ceph_put_cap(session->s_mdsc, old_cap);
old_cap = NULL;
}
ret = cb(inode, mds, arg);
last_inode = inode;
spin_lock(&session->s_cap_lock);
p = p->next;
if (!cap->ci) {
doutc(cl, "finishing cap %p removal\n", cap);
BUG_ON(cap->session != session);
cap->session = NULL;
list_del_init(&cap->session_caps);
session->s_nr_caps--;
atomic64_dec(&session->s_mdsc->metric.total_caps);
if (cap->queue_release)
__ceph_queue_cap_release(session, cap);
else
old_cap = cap; /* put_cap it w/o locks held */
}
if (ret < 0)
goto out;
}
ret = 0;
out:
session->s_cap_iterator = NULL;
spin_unlock(&session->s_cap_lock);
iput(last_inode);
if (old_cap)
ceph_put_cap(session->s_mdsc, old_cap);
return ret;
}
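/*
 * ceph_iterate_session_caps() callback: purge the cap that this
 * session holds on the inode and wake up any waiters.
 */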
static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_client *cl = ceph_inode_to_client(inode);
bool invalidate = false;
struct ceph_cap *cap;
int iputs = 0;
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (cap) {
doutc(cl, " removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->netfs.inode);
iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
}
spin_unlock(&ci->i_ceph_lock);
if (cap)
wake_up_all(&ci->i_cap_wq);
if (invalidate)
ceph_queue_invalidate(inode);
while (iputs--)
iput(inode);
return 0;
}
/*
* caller must hold session s_mutex
*/
static void remove_session_caps(struct ceph_mds_session *session)
{
struct ceph_fs_client *fsc = session->s_mdsc->fsc;
struct super_block *sb = fsc->sb;
LIST_HEAD(dispose);
doutc(fsc->client, "on %p\n", session);
ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
wake_up_all(&fsc->mdsc->cap_flushing_wq);
spin_lock(&session->s_cap_lock);
if (session->s_nr_caps > 0) {
struct inode *inode;
struct ceph_cap *cap, *prev = NULL;
struct ceph_vino vino;
/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, so we need to wait until those deletions are
		 * complete. __wait_on_freeing_inode() is designed for the
		 * job, but it is not exported, so use the inode lookup
		 * function to achieve the same effect.
*/
while (!list_empty(&session->s_caps)) {
cap = list_entry(session->s_caps.next,
struct ceph_cap, session_caps);
if (cap == prev)
break;
prev = cap;
vino = cap->ci->i_vino;
spin_unlock(&session->s_cap_lock);
inode = ceph_find_inode(sb, vino);
iput(inode);
spin_lock(&session->s_cap_lock);
}
}
	// detach any queued cap releases; s_cap_lock is dropped below
detach_cap_releases(session, &dispose);
BUG_ON(session->s_nr_caps > 0);
BUG_ON(!list_empty(&session->s_cap_flushing));
spin_unlock(&session->s_cap_lock);
dispose_cap_releases(session->s_mdsc, &dispose);
}
enum {
RECONNECT,
RENEWCAPS,
FORCE_RO,
};
/*
* wake up any threads waiting on this session's caps. if the cap is
* old (didn't get renewed on the client reconnect), remove it now.
*
* caller must hold s_mutex.
*/
static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
{
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned long ev = (unsigned long)arg;
if (ev == RECONNECT) {
spin_lock(&ci->i_ceph_lock);
ci->i_wanted_max_size = 0;
ci->i_requested_max_size = 0;
spin_unlock(&ci->i_ceph_lock);
} else if (ev == RENEWCAPS) {
struct ceph_cap *cap;
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
/* mds did not re-issue stale cap */
if (cap && cap->cap_gen < atomic_read(&cap->session->s_cap_gen))
cap->issued = cap->implemented = CEPH_CAP_PIN;
spin_unlock(&ci->i_ceph_lock);
} else if (ev == FORCE_RO) {
}
wake_up_all(&ci->i_cap_wq);
return 0;
}
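/*
 * Wake cap waiters on every inode with a cap in this session, applying
 * the per-event fixups in wake_up_session_cb() first.
 */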
static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
struct ceph_client *cl = session->s_mdsc->fsc->client;
doutc(cl, "session %p mds%d\n", session, session->s_mds);
ceph_iterate_session_caps(session, wake_up_session_cb,
(void *)(unsigned long)ev);
}
/*
* Send periodic message to MDS renewing all currently held caps. The
* ack will reset the expiration for all caps from this session.
*
* caller holds s_mutex
*/
static int send_renew_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
int state;
if (time_after_eq(jiffies, session->s_cap_ttl) &&
time_after_eq(session->s_cap_ttl, session->s_renew_requested))
pr_info_client(cl, "mds%d caps stale\n", session->s_mds);
session->s_renew_requested = jiffies;
/* do not try to renew caps until a recovering mds has reconnected
* with its clients. */
state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
if (state < CEPH_MDS_STATE_RECONNECT) {
doutc(cl, "ignoring mds%d (%s)\n", session->s_mds,
ceph_mds_state_name(state));
return 0;
}
doutc(cl, "to mds%d (%s)\n", session->s_mds,
ceph_mds_state_name(state));
msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_RENEWCAPS,
++session->s_renew_seq);
if (IS_ERR(msg))
return PTR_ERR(msg);
ceph_con_send(&session->s_con, msg);
return 0;
}
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session, u64 seq)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
doutc(cl, "to mds%d (%s)s seq %lld\n", session->s_mds,
ceph_session_state_name(session->s_state), seq);
msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
if (!msg)
return -ENOMEM;
ceph_con_send(&session->s_con, msg);
return 0;
}
/*
 * Note new cap ttl, and any transition from stale -> fresh.
*
* Called under session->s_mutex
*/
static void renewed_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session, int is_renew)
{
struct ceph_client *cl = mdsc->fsc->client;
int was_stale;
int wake = 0;
spin_lock(&session->s_cap_lock);
was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
session->s_cap_ttl = session->s_renew_requested +
mdsc->mdsmap->m_session_timeout*HZ;
if (was_stale) {
if (time_before(jiffies, session->s_cap_ttl)) {
pr_info_client(cl, "mds%d caps renewed\n",
session->s_mds);
wake = 1;
} else {
pr_info_client(cl, "mds%d caps still stale\n",
session->s_mds);
}
}
doutc(cl, "mds%d ttl now %lu, was %s, now %s\n", session->s_mds,
session->s_cap_ttl, was_stale ? "stale" : "fresh",
	      time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
spin_unlock(&session->s_cap_lock);
if (wake)
wake_up_session_caps(session, RENEWCAPS);
}
/*
* send a session close request
*/
static int request_close_session(struct ceph_mds_session *session)
{
struct ceph_client *cl = session->s_mdsc->fsc->client;
struct ceph_msg *msg;
doutc(cl, "mds%d state %s seq %lld\n", session->s_mds,
ceph_session_state_name(session->s_state), session->s_seq);
msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
session->s_seq);
if (!msg)
return -ENOMEM;
ceph_con_send(&session->s_con, msg);
return 1;
}
/*
* Called with s_mutex held.
*/
static int __close_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
return 0;
session->s_state = CEPH_MDS_SESSION_CLOSING;
return request_close_session(session);
}
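/*
 * Return true if all of @dentry's children are negative, pruning them
 * from the dcache in that case. Non-directories trivially count as
 * "all negative".
 */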
static bool drop_negative_children(struct dentry *dentry)
{
struct dentry *child;
bool all_negative = true;
if (!d_is_dir(dentry))
goto out;
spin_lock(&dentry->d_lock);
hlist_for_each_entry(child, &dentry->d_children, d_sib) {
if (d_really_is_positive(child)) {
all_negative = false;
break;
}
}
spin_unlock(&dentry->d_lock);
if (all_negative)
shrink_dcache_parent(dentry);
out:
return all_negative;
}
/*
* Trim old(er) caps.
*
* Because we can't cache an inode without one or more caps, we do
* this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
*
* Yes, this is a bit sloppy. Our only real goal here is to respond to
* memory pressure from the MDS, though, so it needn't be perfect.
*/
static int trim_caps_cb(struct inode *inode, int mds, void *arg)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
int *remaining = arg;
struct ceph_inode_info *ci = ceph_inode(inode);
int used, wanted, oissued, mine;
struct ceph_cap *cap;
if (*remaining <= 0)
return -1;
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
spin_unlock(&ci->i_ceph_lock);
return 0;
}
mine = cap->issued | cap->implemented;
used = __ceph_caps_used(ci);
wanted = __ceph_caps_file_wanted(ci);
oissued = __ceph_caps_issued_other(ci, cap);
doutc(cl, "%p %llx.%llx cap %p mine %s oissued %s used %s wanted %s\n",
inode, ceph_vinop(inode), cap, ceph_cap_string(mine),
ceph_cap_string(oissued), ceph_cap_string(used),
ceph_cap_string(wanted));
if (cap == ci->i_auth_cap) {
if (ci->i_dirty_caps || ci->i_flushing_caps ||
!list_empty(&ci->i_cap_snaps))
goto out;
if ((used | wanted) & CEPH_CAP_ANY_WR)
goto out;
		/* Note: it's possible that i_filelock_ref becomes non-zero
		 * after dropping the auth caps. It doesn't hurt because the
		 * reply to the lock mds request will re-add the auth caps. */
if (atomic_read(&ci->i_filelock_ref) > 0)
goto out;
}
	/* The inode has cached pages, but it's no longer used.
	 * We can safely drop it. */
if (S_ISREG(inode->i_mode) &&
wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
!(oissued & CEPH_CAP_FILE_CACHE)) {
used = 0;
oissued = 0;
}
if ((used | wanted) & ~oissued & mine)
goto out; /* we need these caps */
if (oissued) {
/* we aren't the only cap.. just remove us */
ceph_remove_cap(mdsc, cap, true);
(*remaining)--;
} else {
struct dentry *dentry;
/* try dropping referring dentries */
spin_unlock(&ci->i_ceph_lock);
dentry = d_find_any_alias(inode);
if (dentry && drop_negative_children(dentry)) {
int count;
dput(dentry);
d_prune_aliases(inode);
count = atomic_read(&inode->i_count);
if (count == 1)
(*remaining)--;
doutc(cl, "%p %llx.%llx cap %p pruned, count now %d\n",
inode, ceph_vinop(inode), cap, count);
} else {
dput(dentry);
}
return 0;
}
out:
spin_unlock(&ci->i_ceph_lock);
return 0;
}
/*
* Trim session cap count down to some max number.
*/
int ceph_trim_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
int max_caps)
{
struct ceph_client *cl = mdsc->fsc->client;
int trim_caps = session->s_nr_caps - max_caps;
doutc(cl, "mds%d start: %d / %d, trim %d\n", session->s_mds,
session->s_nr_caps, max_caps, trim_caps);
if (trim_caps > 0) {
int remaining = trim_caps;
ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
doutc(cl, "mds%d done: %d / %d, trimmed %d\n",
session->s_mds, session->s_nr_caps, max_caps,
trim_caps - remaining);
}
ceph_flush_session_cap_releases(mdsc, session);
return 0;
}
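/*
 * Return 1 if all cap flushes up to @want_flush_tid have completed,
 * 0 if one is still in flight.
 */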
static int check_caps_flush(struct ceph_mds_client *mdsc,
u64 want_flush_tid)
{
struct ceph_client *cl = mdsc->fsc->client;
int ret = 1;
spin_lock(&mdsc->cap_dirty_lock);
if (!list_empty(&mdsc->cap_flush_list)) {
struct ceph_cap_flush *cf =
list_first_entry(&mdsc->cap_flush_list,
struct ceph_cap_flush, g_list);
if (cf->tid <= want_flush_tid) {
doutc(cl, "still flushing tid %llu <= %llu\n",
cf->tid, want_flush_tid);
ret = 0;
}
}
spin_unlock(&mdsc->cap_dirty_lock);
return ret;
}
/*
 * Wait until all dirty inode data has been flushed to disk, i.e.
 * until cap flushes up through want_flush_tid have completed.
*/
static void wait_caps_flush(struct ceph_mds_client *mdsc,
u64 want_flush_tid)
{
struct ceph_client *cl = mdsc->fsc->client;
doutc(cl, "want %llu\n", want_flush_tid);
wait_event(mdsc->cap_flushing_wq,
check_caps_flush(mdsc, want_flush_tid));
doutc(cl, "ok, flushed thru %llu\n", want_flush_tid);
}
/*
* called under s_mutex
*/
static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg = NULL;
struct ceph_mds_cap_release *head;
struct ceph_mds_cap_item *item;
struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
struct ceph_cap *cap;
LIST_HEAD(tmp_list);
int num_cap_releases;
__le32 barrier, *cap_barrier;
down_read(&osdc->lock);
barrier = cpu_to_le32(osdc->epoch_barrier);
up_read(&osdc->lock);
spin_lock(&session->s_cap_lock);
again:
list_splice_init(&session->s_cap_releases, &tmp_list);
num_cap_releases = session->s_num_cap_releases;
session->s_num_cap_releases = 0;
spin_unlock(&session->s_cap_lock);
while (!list_empty(&tmp_list)) {
if (!msg) {
msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
PAGE_SIZE, GFP_NOFS, false);
if (!msg)
goto out_err;
head = msg->front.iov_base;
head->num = cpu_to_le32(0);
msg->front.iov_len = sizeof(*head);
msg->hdr.version = cpu_to_le16(2);
msg->hdr.compat_version = cpu_to_le16(1);
}
cap = list_first_entry(&tmp_list, struct ceph_cap,
session_caps);
list_del(&cap->session_caps);
num_cap_releases--;
head = msg->front.iov_base;
put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
&head->num);
item = msg->front.iov_base + msg->front.iov_len;
item->ino = cpu_to_le64(cap->cap_ino);
item->cap_id = cpu_to_le64(cap->cap_id);
item->migrate_seq = cpu_to_le32(cap->mseq);
item->issue_seq = cpu_to_le32(cap->issue_seq);
msg->front.iov_len += sizeof(*item);
ceph_put_cap(mdsc, cap);
if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
// Append cap_barrier field
cap_barrier = msg->front.iov_base + msg->front.iov_len;
*cap_barrier = barrier;
msg->front.iov_len += sizeof(*cap_barrier);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
doutc(cl, "mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
msg = NULL;
}
}
BUG_ON(num_cap_releases != 0);
spin_lock(&session->s_cap_lock);
if (!list_empty(&session->s_cap_releases))
goto again;
spin_unlock(&session->s_cap_lock);
if (msg) {
// Append cap_barrier field
cap_barrier = msg->front.iov_base + msg->front.iov_len;
*cap_barrier = barrier;
msg->front.iov_len += sizeof(*cap_barrier);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
doutc(cl, "mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
}
return;
out_err:
pr_err_client(cl, "mds%d, failed to allocate message\n",
session->s_mds);
spin_lock(&session->s_cap_lock);
list_splice(&tmp_list, &session->s_cap_releases);
session->s_num_cap_releases += num_cap_releases;
spin_unlock(&session->s_cap_lock);
}
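/*
 * Work function: send any queued cap releases, but only while the
 * session is still usable (OPEN or HUNG), then drop the session
 * reference taken when the work was queued.
 */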
static void ceph_cap_release_work(struct work_struct *work)
{
struct ceph_mds_session *session =
container_of(work, struct ceph_mds_session, s_cap_release_work);
mutex_lock(&session->s_mutex);
if (session->s_state == CEPH_MDS_SESSION_OPEN ||
session->s_state == CEPH_MDS_SESSION_HUNG)
ceph_send_cap_releases(session->s_mdsc, session);
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
}
void ceph_flush_session_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_client *cl = mdsc->fsc->client;
if (mdsc->stopping)
return;
ceph_get_mds_session(session);
if (queue_work(mdsc->fsc->cap_wq,
&session->s_cap_release_work)) {
doutc(cl, "cap release work queued\n");
} else {
ceph_put_mds_session(session);
doutc(cl, "failed to queue cap release work\n");
}
}
/*
* caller holds session->s_cap_lock
*/
void __ceph_queue_cap_release(struct ceph_mds_session *session,
struct ceph_cap *cap)
{
list_add_tail(&cap->session_caps, &session->s_cap_releases);
session->s_num_cap_releases++;
if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
ceph_flush_session_cap_releases(session->s_mdsc, session);
}
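/*
 * Work function: trim unused dentries, requeueing ourselves if
 * ceph_trim_dentries() could not finish in one pass.
 */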
static void ceph_cap_reclaim_work(struct work_struct *work)
{
struct ceph_mds_client *mdsc =
container_of(work, struct ceph_mds_client, cap_reclaim_work);
int ret = ceph_trim_dentries(mdsc);
if (ret == -EAGAIN)
ceph_queue_cap_reclaim_work(mdsc);
}
void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
{
struct ceph_client *cl = mdsc->fsc->client;
if (mdsc->stopping)
return;
if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
doutc(cl, "caps reclaim work queued\n");
} else {
doutc(cl, "failed to queue caps release work\n");
}
}
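/*
 * Account @nr newly reclaimable caps and kick the reclaim worker about
 * once every CEPH_CAPS_PER_RELEASE caps, so reclaim runs in batches
 * rather than on every release.
 */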
void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
{
int val;
if (!nr)
return;
val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
atomic_set(&mdsc->cap_reclaim_pending, 0);
ceph_queue_cap_reclaim_work(mdsc);
}
}
void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc)
{
struct ceph_client *cl = mdsc->fsc->client;
if (mdsc->stopping)
return;
if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_unlink_work)) {
doutc(cl, "caps unlink work queued\n");
} else {
doutc(cl, "failed to queue caps unlink work\n");
}
}
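/*
 * Work function: flush caps for inodes queued on cap_unlink_delay_list,
 * dropping cap_delay_lock around each ceph_check_caps() call.
 */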
static void ceph_cap_unlink_work(struct work_struct *work)
{
struct ceph_mds_client *mdsc =
container_of(work, struct ceph_mds_client, cap_unlink_work);
struct ceph_client *cl = mdsc->fsc->client;
doutc(cl, "begin\n");
spin_lock(&mdsc->cap_delay_lock);
while (!list_empty(&mdsc->cap_unlink_delay_list)) {
struct ceph_inode_info *ci;
struct inode *inode;
ci = list_first_entry(&mdsc->cap_unlink_delay_list,
struct ceph_inode_info,
i_cap_delay_list);
list_del_init(&ci->i_cap_delay_list);
inode = igrab(&ci->netfs.inode);
if (inode) {
spin_unlock(&mdsc->cap_delay_lock);
doutc(cl, "on %p %llx.%llx\n", inode,
ceph_vinop(inode));
ceph_check_caps(ci, CHECK_CAPS_FLUSH);
iput(inode);
spin_lock(&mdsc->cap_delay_lock);
}
}
spin_unlock(&mdsc->cap_delay_lock);
doutc(cl, "done\n");
}
/*
* requests
*/
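/*
 * Size the readdir reply buffer based on the directory's current entry
 * count (i_files + i_subdirs), clamped to the max_readdir mount option,
 * falling back to smaller page orders if the allocation fails.
 */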
int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct inode *dir)
{
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
size_t size = sizeof(struct ceph_mds_reply_dir_entry);
unsigned int num_entries;
int order;
spin_lock(&ci->i_ceph_lock);
num_entries = ci->i_files + ci->i_subdirs;
spin_unlock(&ci->i_ceph_lock);
num_entries = max(num_entries, 1U);
num_entries = min(num_entries, opt->max_readdir);
order = get_order(size * num_entries);
while (order >= 0) {
rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
__GFP_NOWARN |
__GFP_ZERO,
order);
if (rinfo->dir_entries)
break;
order--;
}
if (!rinfo->dir_entries)
return -ENOMEM;
num_entries = (PAGE_SIZE << order) / size;
num_entries = min(num_entries, opt->max_readdir);
rinfo->dir_buf_size = PAGE_SIZE << order;
req->r_num_caps = num_entries + 1;
req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
return 0;
}
/*
* Create an mds request.
*/
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
struct ceph_mds_request *req;
req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
if (!req)
return ERR_PTR(-ENOMEM);
mutex_init(&req->r_fill_mutex);
req->r_mdsc = mdsc;
req->r_started = jiffies;
req->r_start_latency = ktime_get();
req->r_resend_mds = -1;
INIT_LIST_HEAD(&req->r_unsafe_dir_item);
INIT_LIST_HEAD(&req->r_unsafe_target_item);
req->r_fmode = -1;
req->r_feature_needed = -1;
kref_init(&req->r_kref);
RB_CLEAR_NODE(&req->r_node);
INIT_LIST_HEAD(&req->r_wait);
init_completion(&req->r_completion);
init_completion(&req->r_safe_completion);
INIT_LIST_HEAD(&req->r_unsafe_item);
ktime_get_coarse_real_ts64(&req->r_stamp);
req->r_op = op;
req->r_direct_mode = mode;
return req;
}
/*
* return oldest (lowest) request, tid in request tree, 0 if none.
*
* called under mdsc->mutex.
*/
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
if (RB_EMPTY_ROOT(&mdsc->request_tree))
return NULL;
return rb_entry(rb_first(&mdsc->request_tree),
struct ceph_mds_request, r_node);
}
static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
return mdsc->oldest_tid;
}
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
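/*
 * For a dentry in an encrypted directory, return the full encrypted
 * name to be sent as the request's alternate name when the ciphertext
 * is too long (> CEPH_NOHASH_NAME_MAX) to be embedded in the path.
 * Returns NULL (with *plen == 0) when no altname is needed.
 */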
static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
{
struct inode *dir = req->r_parent;
struct dentry *dentry = req->r_dentry;
u8 *cryptbuf = NULL;
u32 len = 0;
int ret = 0;
/* only encode if we have parent and dentry */
if (!dir || !dentry)
goto success;
/* No-op unless this is encrypted */
if (!IS_ENCRYPTED(dir))
goto success;
ret = ceph_fscrypt_prepare_readdir(dir);
if (ret < 0)
return ERR_PTR(ret);
/* No key? Just ignore it. */
if (!fscrypt_has_encryption_key(dir))
goto success;
if (!fscrypt_fname_encrypted_size(dir, dentry->d_name.len, NAME_MAX,
&len)) {
WARN_ON_ONCE(1);
return ERR_PTR(-ENAMETOOLONG);
}
/* No need to append altname if name is short enough */
if (len <= CEPH_NOHASH_NAME_MAX) {
len = 0;
goto success;
}
cryptbuf = kmalloc(len, GFP_KERNEL);
if (!cryptbuf)
return ERR_PTR(-ENOMEM);
ret = fscrypt_fname_encrypt(dir, &dentry->d_name, cryptbuf, len);
if (ret) {
kfree(cryptbuf);
return ERR_PTR(ret);
}
success:
*plen = len;
return cryptbuf;
}
#else
static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
{
*plen = 0;
return NULL;
}
#endif
/**
* ceph_mdsc_build_path - build a path string to a given dentry
* @mdsc: mds client
* @dentry: dentry to which path should be built
* @plen: returned length of string
* @pbase: returned base inode number
* @for_wire: is this path going to be sent to the MDS?
*
* Build a string that represents the path to the dentry. This is mostly called
* for two different purposes:
*
* 1) we need to build a path string to send to the MDS (for_wire == true)
* 2) we need a path string for local presentation (e.g. debugfs)
* (for_wire == false)
*
* The path is built in reverse, starting with the dentry. Walk back up toward
* the root, building the path until the first non-snapped inode is reached
* (for_wire) or the root inode is reached (!for_wire).
*
* Encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
int *plen, u64 *pbase, int for_wire)
{
struct ceph_client *cl = mdsc->fsc->client;
struct dentry *cur;
struct inode *inode;
char *path;
int pos;
unsigned seq;
u64 base;
if (!dentry)
return ERR_PTR(-EINVAL);
path = __getname();
if (!path)
return ERR_PTR(-ENOMEM);
retry:
pos = PATH_MAX - 1;
path[pos] = '\0';
seq = read_seqbegin(&rename_lock);
cur = dget(dentry);
for (;;) {
struct dentry *parent;
spin_lock(&cur->d_lock);
inode = d_inode(cur);
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
doutc(cl, "path+%d: %p SNAPDIR\n", pos, cur);
spin_unlock(&cur->d_lock);
parent = dget_parent(cur);
} else if (for_wire && inode && dentry != cur &&
ceph_snap(inode) == CEPH_NOSNAP) {
spin_unlock(&cur->d_lock);
pos++; /* get rid of any prepended '/' */
break;
} else if (!for_wire || !IS_ENCRYPTED(d_inode(cur->d_parent))) {
pos -= cur->d_name.len;
if (pos < 0) {
spin_unlock(&cur->d_lock);
break;
}
memcpy(path + pos, cur->d_name.name, cur->d_name.len);
spin_unlock(&cur->d_lock);
parent = dget_parent(cur);
} else {
int len, ret;
char buf[NAME_MAX];
/*
* Proactively copy name into buf, in case we need to
* present it as-is.
*/
memcpy(buf, cur->d_name.name, cur->d_name.len);
len = cur->d_name.len;
spin_unlock(&cur->d_lock);
parent = dget_parent(cur);
ret = ceph_fscrypt_prepare_readdir(d_inode(parent));
if (ret < 0) {
dput(parent);
dput(cur);
return ERR_PTR(ret);
}
if (fscrypt_has_encryption_key(d_inode(parent))) {
len = ceph_encode_encrypted_fname(d_inode(parent),
cur, buf);
if (len < 0) {
dput(parent);
dput(cur);
return ERR_PTR(len);
}
}
pos -= len;
if (pos < 0) {
dput(parent);
break;
}
memcpy(path + pos, buf, len);
}
dput(cur);
cur = parent;
/* Are we at the root? */
if (IS_ROOT(cur))
break;
/* Are we out of buffer? */
if (--pos < 0)
break;
path[pos] = '/';
}
inode = d_inode(cur);
base = inode ? ceph_ino(inode) : 0;
dput(cur);
if (read_seqretry(&rename_lock, seq))
goto retry;
if (pos < 0) {
/*
* A rename didn't occur, but somehow we didn't end up where
* we thought we would. Throw a warning and try again.
*/
pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
pos);
goto retry;
}
*pbase = base;
*plen = PATH_MAX - 1 - pos;
doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry),
base, *plen, path + pos);
return path + pos;
}
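/*
 * Fast path for building a request path from a dentry: if the parent
 * is locked, non-snapped and unencrypted, just send the parent ino
 * plus the dentry name; otherwise fall back to ceph_mdsc_build_path().
 */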
static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
struct inode *dir, const char **ppath, int *ppathlen,
u64 *pino, bool *pfreepath, bool parent_locked)
{
char *path;
rcu_read_lock();
if (!dir)
dir = d_inode_rcu(dentry->d_parent);
if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
!IS_ENCRYPTED(dir)) {
*pino = ceph_ino(dir);
rcu_read_unlock();
*ppath = dentry->d_name.name;
*ppathlen = dentry->d_name.len;
return 0;
}
rcu_read_unlock();
path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
*pfreepath = true;
return 0;
}
static int build_inode_path(struct inode *inode,
const char **ppath, int *ppathlen, u64 *pino,
bool *pfreepath)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct dentry *dentry;
char *path;
if (ceph_snap(inode) == CEPH_NOSNAP) {
*pino = ceph_ino(inode);
*ppathlen = 0;
return 0;
}
dentry = d_find_alias(inode);
path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
dput(dentry);
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
*pfreepath = true;
return 0;
}
/*
* request arguments may be specified via an inode *, a dentry *, or
* an explicit ino+path.
*/
static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
struct dentry *rdentry, struct inode *rdiri,
const char *rpath, u64 rino, const char **ppath,
int *pathlen, u64 *ino, bool *freepath,
bool parent_locked)
{
struct ceph_client *cl = mdsc->fsc->client;
int r = 0;
if (rinode) {
r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
ceph_snap(rinode));
} else if (rdentry) {
r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
freepath, parent_locked);
doutc(cl, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath);
} else if (rpath || rino) {
*ino = rino;
*ppath = rpath;
*pathlen = rpath ? strlen(rpath) : 0;
doutc(cl, " path %.*s\n", *pathlen, rpath);
}
return r;
}
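/*
 * Encode the MClientRequest tail that follows the filepaths and cap
 * releases: timestamp, gid list (v4), alternate name (v5), and the
 * fscrypt_auth/fscrypt_file blobs (v6).
 */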
static void encode_mclientrequest_tail(void **p,
const struct ceph_mds_request *req)
{
struct ceph_timespec ts;
int i;
ceph_encode_timespec64(&ts, &req->r_stamp);
ceph_encode_copy(p, &ts, sizeof(ts));
/* v4: gid_list */
ceph_encode_32(p, req->r_cred->group_info->ngroups);
for (i = 0; i < req->r_cred->group_info->ngroups; i++)
ceph_encode_64(p, from_kgid(&init_user_ns,
req->r_cred->group_info->gid[i]));
/* v5: altname */
ceph_encode_32(p, req->r_altname_len);
ceph_encode_copy(p, req->r_altname, req->r_altname_len);
/* v6: fscrypt_auth and fscrypt_file */
if (req->r_fscrypt_auth) {
u32 authlen = ceph_fscrypt_auth_len(req->r_fscrypt_auth);
ceph_encode_32(p, authlen);
ceph_encode_copy(p, req->r_fscrypt_auth, authlen);
} else {
ceph_encode_32(p, 0);
}
if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags)) {
ceph_encode_32(p, sizeof(__le64));
ceph_encode_64(p, req->r_fscrypt_file);
} else {
ceph_encode_32(p, 0);
}
}
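/*
 * Pick the newest request head version this session's MDS can decode,
 * based on the feature bits it advertised at session open.
 */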
static inline u16 mds_supported_head_version(struct ceph_mds_session *session)
{
if (!test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD, &session->s_features))
return 1;
if (!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features))
return 2;
return CEPH_MDS_REQUEST_HEAD_VERSION;
}
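/*
 * Newer request heads prepend a version field; the legacy-compatible
 * portion begins at oldest_client_tid. Return a pointer to that
 * portion so common fields can be filled in for either layout.
 */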
static struct ceph_mds_request_head_legacy *
find_legacy_request_head(void *p, u64 features)
{
bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
struct ceph_mds_request_head_old *ohead;
if (legacy)
return (struct ceph_mds_request_head_legacy *)p;
ohead = (struct ceph_mds_request_head_old *)p;
return (struct ceph_mds_request_head_legacy *)&ohead->oldest_client_tid;
}
/*
* called under mdsc->mutex
*/
static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
struct ceph_mds_request *req,
bool drop_cap_releases)
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
struct ceph_mds_request_head_legacy *lhead;
const char *path1 = NULL;
const char *path2 = NULL;
u64 ino1 = 0, ino2 = 0;
int pathlen1 = 0, pathlen2 = 0;
bool freepath1 = false, freepath2 = false;
struct dentry *old_dentry = NULL;
int len;
u16 releases;
void *p, *end;
int ret;
bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
u16 request_head_version = mds_supported_head_version(session);
kuid_t caller_fsuid = req->r_cred->fsuid;
kgid_t caller_fsgid = req->r_cred->fsgid;
ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
req->r_parent, req->r_path1, req->r_ino1.ino,
&path1, &pathlen1, &ino1, &freepath1,
test_bit(CEPH_MDS_R_PARENT_LOCKED,
&req->r_req_flags));
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
/* If r_old_dentry is set, then assume that its parent is locked */
if (req->r_old_dentry &&
!(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
old_dentry = req->r_old_dentry;
ret = set_request_path_attr(mdsc, NULL, old_dentry,
req->r_old_dentry_dir,
req->r_path2, req->r_ino2.ino,
&path2, &pathlen2, &ino2, &freepath2, true);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out_free1;
}
req->r_altname = get_fscrypt_altname(req, &req->r_altname_len);
if (IS_ERR(req->r_altname)) {
msg = ERR_CAST(req->r_altname);
req->r_altname = NULL;
goto out_free2;
}
/*
	 * Old cephs that lack the 32-bit retry/fwd feature copy the raw
	 * memory directly when decoding requests, while new cephs decode
	 * the head based on its version member, so we need to make sure
	 * the encoding is compatible with both.
*/
if (legacy)
len = sizeof(struct ceph_mds_request_head_legacy);
else if (request_head_version == 1)
len = sizeof(struct ceph_mds_request_head_old);
else if (request_head_version == 2)
len = offsetofend(struct ceph_mds_request_head, ext_num_fwd);
else
len = sizeof(struct ceph_mds_request_head);
/* filepaths */
len += 2 * (1 + sizeof(u32) + sizeof(u64));
len += pathlen1 + pathlen2;
/* cap releases */
len += sizeof(struct ceph_mds_request_release) *
(!!req->r_inode_drop + !!req->r_dentry_drop +
!!req->r_old_inode_drop + !!req->r_old_dentry_drop);
if (req->r_dentry_drop)
len += pathlen1;
if (req->r_old_dentry_drop)
len += pathlen2;
/* MClientRequest tail */
/* req->r_stamp */
len += sizeof(struct ceph_timespec);
/* gid list */
len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
/* alternate name */
len += sizeof(u32) + req->r_altname_len;
/* fscrypt_auth */
len += sizeof(u32); // fscrypt_auth
if (req->r_fscrypt_auth)
len += ceph_fscrypt_auth_len(req->r_fscrypt_auth);
/* fscrypt_file */
len += sizeof(u32);
if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags))
len += sizeof(__le64);
msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
if (!msg) {
msg = ERR_PTR(-ENOMEM);
goto out_free2;
}
msg->hdr.tid = cpu_to_le64(req->r_tid);
lhead = find_legacy_request_head(msg->front.iov_base,
session->s_con.peer_features);
if ((req->r_mnt_idmap != &nop_mnt_idmap) &&
!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features)) {
WARN_ON_ONCE(!IS_CEPH_MDS_OP_NEWINODE(req->r_op));
if (enable_unsafe_idmap) {
pr_warn_once_client(cl,
"idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
" is not supported by MDS. UID/GID-based restrictions may"
" not work properly.\n");
caller_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
VFSUIDT_INIT(req->r_cred->fsuid));
caller_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
VFSGIDT_INIT(req->r_cred->fsgid));
} else {
pr_err_ratelimited_client(cl,
"idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
" is not supported by MDS. Fail request with -EIO.\n");
ret = -EIO;
goto out_err;
}
}
/*
* The ceph_mds_request_head_legacy didn't contain a version field, and
* one was added when we moved the message version from 3->4.
*/
if (legacy) {
msg->hdr.version = cpu_to_le16(3);
p = msg->front.iov_base + sizeof(*lhead);
} else if (request_head_version == 1) {
struct ceph_mds_request_head_old *ohead = msg->front.iov_base;
msg->hdr.version = cpu_to_le16(4);
ohead->version = cpu_to_le16(1);
p = msg->front.iov_base + sizeof(*ohead);
} else if (request_head_version == 2) {
struct ceph_mds_request_head *nhead = msg->front.iov_base;
msg->hdr.version = cpu_to_le16(6);
nhead->version = cpu_to_le16(2);
p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, ext_num_fwd);
} else {
struct ceph_mds_request_head *nhead = msg->front.iov_base;
kuid_t owner_fsuid;
kgid_t owner_fsgid;
msg->hdr.version = cpu_to_le16(6);
nhead->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
nhead->struct_len = cpu_to_le32(sizeof(struct ceph_mds_request_head));
if (IS_CEPH_MDS_OP_NEWINODE(req->r_op)) {
owner_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
VFSUIDT_INIT(req->r_cred->fsuid));
owner_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
VFSGIDT_INIT(req->r_cred->fsgid));
nhead->owner_uid = cpu_to_le32(from_kuid(&init_user_ns, owner_fsuid));
nhead->owner_gid = cpu_to_le32(from_kgid(&init_user_ns, owner_fsgid));
} else {
nhead->owner_uid = cpu_to_le32(-1);
nhead->owner_gid = cpu_to_le32(-1);
}
p = msg->front.iov_base + sizeof(*nhead);
}
end = msg->front.iov_base + msg->front.iov_len;
lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
lhead->op = cpu_to_le32(req->r_op);
lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
caller_fsuid));
lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
caller_fsgid));
lhead->ino = cpu_to_le64(req->r_deleg_ino);
lhead->args = req->r_args;
ceph_encode_filepath(&p, end, ino1, path1);
ceph_encode_filepath(&p, end, ino2, path2);
/* make note of release offset, in case we need to replay */
req->r_request_release_offset = p - msg->front.iov_base;
/* cap releases */
releases = 0;
if (req->r_inode_drop)
releases += ceph_encode_inode_release(&p,
req->r_inode ? req->r_inode : d_inode(req->r_dentry),
mds, req->r_inode_drop, req->r_inode_unless,
req->r_op == CEPH_MDS_OP_READDIR);
if (req->r_dentry_drop) {
ret = ceph_encode_dentry_release(&p, req->r_dentry,
req->r_parent, mds, req->r_dentry_drop,
req->r_dentry_unless);
if (ret < 0)
goto out_err;
releases += ret;
}
if (req->r_old_dentry_drop) {
ret = ceph_encode_dentry_release(&p, req->r_old_dentry,
req->r_old_dentry_dir, mds,
req->r_old_dentry_drop,
req->r_old_dentry_unless);
if (ret < 0)
goto out_err;
releases += ret;
}
if (req->r_old_inode_drop)
releases += ceph_encode_inode_release(&p,
d_inode(req->r_old_dentry),
mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
if (drop_cap_releases) {
releases = 0;
p = msg->front.iov_base + req->r_request_release_offset;
}
lhead->num_releases = cpu_to_le16(releases);
encode_mclientrequest_tail(&p, req);
if (WARN_ON_ONCE(p > end)) {
ceph_msg_put(msg);
msg = ERR_PTR(-ERANGE);
goto out_free2;
}
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
if (req->r_pagelist) {
struct ceph_pagelist *pagelist = req->r_pagelist;
ceph_msg_data_add_pagelist(msg, pagelist);
msg->hdr.data_len = cpu_to_le32(pagelist->length);
} else {
msg->hdr.data_len = 0;
}
msg->hdr.data_off = cpu_to_le16(0);
out_free2:
if (freepath2)
ceph_mdsc_free_path((char *)path2, pathlen2);
out_free1:
if (freepath1)
ceph_mdsc_free_path((char *)path1, pathlen1);
out:
return msg;
out_err:
ceph_msg_put(msg);
msg = ERR_PTR(ret);
goto out_free2;
}
/*
* called under mdsc->mutex if error, under no mutex if
* success.
*/
static void complete_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
req->r_end_latency = ktime_get();
if (req->r_callback)
req->r_callback(mdsc, req);
complete_all(&req->r_completion);
}
/*
* called under mdsc->mutex
*/
static int __prepare_send_request(struct ceph_mds_session *session,
struct ceph_mds_request *req,
bool drop_cap_releases)
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request_head_legacy *lhead;
struct ceph_mds_request_head *nhead;
struct ceph_msg *msg;
int flags = 0, old_max_retry;
bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
&session->s_features);
/*
	 * Avoid infinite retrying after overflow. The client keeps
	 * increasing the retry count, and old-version MDSes store it in
	 * an 8-bit field, so limit retries to at most 256 in that case.
*/
if (req->r_attempts) {
old_max_retry = sizeof_field(struct ceph_mds_request_head_old,
num_retry);
old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
if ((old_version && req->r_attempts >= old_max_retry) ||
((uint32_t)req->r_attempts >= U32_MAX)) {
pr_warn_ratelimited_client(cl, "request tid %llu seq overflow\n",
req->r_tid);
return -EMULTIHOP;
}
}
req->r_attempts++;
if (req->r_inode) {
struct ceph_cap *cap =
ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
if (cap)
req->r_sent_on_mseq = cap->mseq;
else
req->r_sent_on_mseq = -1;
}
doutc(cl, "%p tid %lld %s (attempt %d)\n", req, req->r_tid,
ceph_mds_op_name(req->r_op), req->r_attempts);
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
void *p;
/*
* Replay. Do not regenerate message (and rebuild
* paths, etc.); just use the original message.
* Rebuilding paths will break for renames because
* d_move mangles the src name.
*/
msg = req->r_request;
lhead = find_legacy_request_head(msg->front.iov_base,
session->s_con.peer_features);
flags = le32_to_cpu(lhead->flags);
flags |= CEPH_MDS_FLAG_REPLAY;
lhead->flags = cpu_to_le32(flags);
if (req->r_target_inode)
lhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
lhead->num_retry = req->r_attempts - 1;
if (!old_version) {
nhead = (struct ceph_mds_request_head*)msg->front.iov_base;
nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
}
/* remove cap/dentry releases from message */
lhead->num_releases = 0;
p = msg->front.iov_base + req->r_request_release_offset;
encode_mclientrequest_tail(&p, req);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
return 0;
}
if (req->r_request) {
ceph_msg_put(req->r_request);
req->r_request = NULL;
}
msg = create_request_message(session, req, drop_cap_releases);
if (IS_ERR(msg)) {
req->r_err = PTR_ERR(msg);
return PTR_ERR(msg);
}
req->r_request = msg;
lhead = find_legacy_request_head(msg->front.iov_base,
session->s_con.peer_features);
lhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
flags |= CEPH_MDS_FLAG_REPLAY;
if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
flags |= CEPH_MDS_FLAG_ASYNC;
if (req->r_parent)
flags |= CEPH_MDS_FLAG_WANT_DENTRY;
lhead->flags = cpu_to_le32(flags);
lhead->num_fwd = req->r_num_fwd;
lhead->num_retry = req->r_attempts - 1;
if (!old_version) {
nhead = (struct ceph_mds_request_head*)msg->front.iov_base;
nhead->ext_num_fwd = cpu_to_le32(req->r_num_fwd);
nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
}
doutc(cl, " r_parent = %p\n", req->r_parent);
return 0;
}
/*
* called under mdsc->mutex
*/
static int __send_request(struct ceph_mds_session *session,
struct ceph_mds_request *req,
bool drop_cap_releases)
{
int err;
err = __prepare_send_request(session, req, drop_cap_releases);
if (!err) {
ceph_msg_get(req->r_request);
ceph_con_send(&session->s_con, req->r_request);
}
return err;
}
/*
* send request, or put it on the appropriate wait list.
*/
static void __do_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *session = NULL;
int mds = -1;
int err = 0;
bool random;
if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
__unregister_request(mdsc, req);
return;
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
doutc(cl, "metadata corrupted\n");
err = -EIO;
goto finish;
}
if (req->r_timeout &&
time_after_eq(jiffies, req->r_started + req->r_timeout)) {
doutc(cl, "timed out\n");
err = -ETIMEDOUT;
goto finish;
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
doutc(cl, "forced umount\n");
err = -EIO;
goto finish;
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
if (mdsc->mdsmap_err) {
err = mdsc->mdsmap_err;
doutc(cl, "mdsmap err %d\n", err);
goto finish;
}
if (mdsc->mdsmap->m_epoch == 0) {
doutc(cl, "no mdsmap, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
if (!(mdsc->fsc->mount_options->flags &
CEPH_MOUNT_OPT_MOUNTWAIT) &&
!ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
err = -EHOSTUNREACH;
goto finish;
}
}
put_request_session(req);
mds = __choose_mds(mdsc, req, &random);
if (mds < 0 ||
ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
err = -EJUKEBOX;
goto finish;
}
doutc(cl, "no mds or not active, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
/* get, open session */
session = __ceph_lookup_mds_session(mdsc, mds);
if (!session) {
session = register_session(mdsc, mds);
if (IS_ERR(session)) {
err = PTR_ERR(session);
goto finish;
}
}
req->r_session = ceph_get_mds_session(session);
doutc(cl, "mds%d session %p state %s\n", mds, session,
ceph_session_state_name(session->s_state));
/*
	 * Old ceph MDSs will crash when they see unknown OPs
*/
if (req->r_feature_needed > 0 &&
!test_bit(req->r_feature_needed, &session->s_features)) {
err = -EOPNOTSUPP;
goto out_session;
}
if (session->s_state != CEPH_MDS_SESSION_OPEN &&
session->s_state != CEPH_MDS_SESSION_HUNG) {
/*
* We cannot queue async requests since the caps and delegated
* inodes are bound to the session. Just return -EJUKEBOX and
* let the caller retry a sync request in that case.
*/
if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
err = -EJUKEBOX;
goto out_session;
}
/*
* If the session has been REJECTED, then return a hard error,
* unless it's a CLEANRECOVER mount, in which case we'll queue
* it to the mdsc queue.
*/
if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
list_add(&req->r_wait, &mdsc->waiting_for_map);
else
err = -EACCES;
goto out_session;
}
if (session->s_state == CEPH_MDS_SESSION_NEW ||
session->s_state == CEPH_MDS_SESSION_CLOSING) {
err = __open_session(mdsc, session);
if (err)
goto out_session;
/* retry the same mds later */
if (random)
req->r_resend_mds = mds;
}
list_add(&req->r_wait, &session->s_waiting);
goto out_session;
}
/* send request */
req->r_resend_mds = -1; /* forget any previous mds hint */
if (req->r_request_started == 0) /* note request start time */
req->r_request_started = jiffies;
/*
	 * For async create we choose the auth MDS of the frag in the
	 * parent directory to send the request, and usually this works
	 * fine. But if the directory is migrated to another MDS before
	 * it could handle the request, the request will be forwarded.
*
* And then the auth cap will be changed.
*/
if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) {
struct ceph_dentry_info *di = ceph_dentry(req->r_dentry);
struct ceph_inode_info *ci;
struct ceph_cap *cap;
/*
		 * The request may be handled very quickly, before the new
		 * inode has been linked to the dentry. When forwarding the
		 * request we need to wait for ceph_finish_async_create() to
		 * finish, which in theory shouldn't get stuck for long or
		 * fail.
*/
if (!d_inode(req->r_dentry)) {
err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT,
TASK_KILLABLE);
if (err) {
mutex_lock(&req->r_fill_mutex);
set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
mutex_unlock(&req->r_fill_mutex);
goto out_session;
}
}
ci = ceph_inode(d_inode(req->r_dentry));
spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
doutc(cl, "session changed for auth cap %d -> %d\n",
cap->session->s_mds, session->s_mds);
/* Remove the auth cap from old session */
spin_lock(&cap->session->s_cap_lock);
cap->session->s_nr_caps--;
list_del_init(&cap->session_caps);
spin_unlock(&cap->session->s_cap_lock);
/* Add the auth cap to the new session */
cap->mds = mds;
cap->session = session;
spin_lock(&session->s_cap_lock);
session->s_nr_caps++;
list_add_tail(&cap->session_caps, &session->s_caps);
spin_unlock(&session->s_cap_lock);
change_auth_cap_ses(ci, session);
}
spin_unlock(&ci->i_ceph_lock);
}
err = __send_request(session, req, false);
out_session:
ceph_put_mds_session(session);
finish:
if (err) {
doutc(cl, "early error %d\n", err);
req->r_err = err;
complete_request(mdsc, req);
__unregister_request(mdsc, req);
}
return;
}
/*
* called under mdsc->mutex
*/
static void __wake_requests(struct ceph_mds_client *mdsc,
struct list_head *head)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
LIST_HEAD(tmp_list);
list_splice_init(head, &tmp_list);
while (!list_empty(&tmp_list)) {
req = list_entry(tmp_list.next,
struct ceph_mds_request, r_wait);
list_del_init(&req->r_wait);
doutc(cl, " wake request %p tid %llu\n", req,
req->r_tid);
__do_request(mdsc, req);
}
}
/*
* Wake up threads with requests pending for @mds, so that they can
* resubmit their requests to a possibly different mds.
*/
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct rb_node *p = rb_first(&mdsc->request_tree);
doutc(cl, "kick_requests mds%d\n", mds);
while (p) {
req = rb_entry(p, struct ceph_mds_request, r_node);
p = rb_next(p);
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
continue;
if (req->r_attempts > 0)
continue; /* only new requests */
if (req->r_session &&
req->r_session->s_mds == mds) {
doutc(cl, " kicking tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
__do_request(mdsc, req);
}
}
}
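/*
 * Take CAP_PIN references for the inodes involved in @req, wait for
 * any pending async creates on them, then register the request and
 * issue it (or park it on a wait list) under mdsc->mutex.
 */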
int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
struct ceph_mds_request *req)
{
struct ceph_client *cl = mdsc->fsc->client;
int err = 0;
/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
if (req->r_inode)
ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
if (req->r_parent) {
struct ceph_inode_info *ci = ceph_inode(req->r_parent);
int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
spin_lock(&ci->i_ceph_lock);
ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
__ceph_touch_fmode(ci, mdsc, fmode);
spin_unlock(&ci->i_ceph_lock);
}
if (req->r_old_dentry_dir)
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
if (req->r_inode) {
err = ceph_wait_on_async_create(req->r_inode);
if (err) {
doutc(cl, "wait for async create returned: %d\n", err);
return err;
}
}
if (!err && req->r_old_inode) {
err = ceph_wait_on_async_create(req->r_old_inode);
if (err) {
doutc(cl, "wait for async create returned: %d\n", err);
return err;
}
}
doutc(cl, "submit_request on %p for inode %p\n", req, dir);
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
__do_request(mdsc, req);
err = req->r_err;
mutex_unlock(&mdsc->mutex);
return err;
}
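/*
 * Wait for a submitted request to complete. If the wait is interrupted
 * or times out before a reply arrives, mark the request aborted so the
 * reply handler won't touch state (e.g. the dir mutex) held by our
 * caller.
 */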
int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
ceph_mds_request_wait_callback_t wait_func)
{
struct ceph_client *cl = mdsc->fsc->client;
int err;
/* wait */
doutc(cl, "do_request waiting\n");
if (wait_func) {
err = wait_func(mdsc, req);
} else {
long timeleft = wait_for_completion_killable_timeout(
&req->r_completion,
ceph_timeout_jiffies(req->r_timeout));
if (timeleft > 0)
err = 0;
else if (!timeleft)
err = -ETIMEDOUT; /* timed out */
else
err = timeleft; /* killed */
}
doutc(cl, "do_request waited, got %d\n", err);
mutex_lock(&mdsc->mutex);
/* only abort if we didn't race with a real reply */
if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
err = le32_to_cpu(req->r_reply_info.head->result);
} else if (err < 0) {
doutc(cl, "aborted request %lld with %d\n", req->r_tid, err);
/*
* ensure we aren't running concurrently with
* ceph_fill_trace or ceph_readdir_prepopulate, which
* rely on locks (dir mutex) held by our caller.
*/
mutex_lock(&req->r_fill_mutex);
req->r_err = err;
set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
mutex_unlock(&req->r_fill_mutex);
if (req->r_parent &&
(req->r_op & CEPH_MDS_OP_WRITE))
ceph_invalidate_dir_request(req);
} else {
err = req->r_err;
}
mutex_unlock(&mdsc->mutex);
return err;
}
/*
* Synchronously perform an MDS request. Take care of all of the
* session setup, forwarding, and retry details.
*/
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req)
{
struct ceph_client *cl = mdsc->fsc->client;
int err;
doutc(cl, "do_request on %p\n", req);
/* issue */
err = ceph_mdsc_submit_request(mdsc, dir, req);
if (!err)
err = ceph_mdsc_wait_request(mdsc, req, NULL);
doutc(cl, "do_request %p done, result %d\n", req, err);
return err;
}
/*
* Invalidate dir's completeness and dentry lease state on an aborted MDS
* namespace request.
*/
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
struct inode *dir = req->r_parent;
struct inode *old_dir = req->r_old_dentry_dir;
struct ceph_client *cl = req->r_mdsc->fsc->client;
doutc(cl, "invalidate_dir_request %p %p (complete, lease(s))\n",
dir, old_dir);
ceph_dir_clear_complete(dir);
if (old_dir)
ceph_dir_clear_complete(old_dir);
if (req->r_dentry)
ceph_invalidate_dentry_lease(req->r_dentry);
if (req->r_old_dentry)
ceph_invalidate_dentry_lease(req->r_old_dentry);
}
/*
* Handle mds reply.
*
* We take the session mutex and parse and process the reply immediately.
* This preserves the logical ordering of replies, capabilities, etc., sent
* by the MDS as they are applied to our local cache.
*/
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_mds_reply_head *head = msg->front.iov_base;
struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
struct ceph_snap_realm *realm;
u64 tid;
int err, result;
int mds = session->s_mds;
bool close_sessions = false;
if (msg->front.iov_len < sizeof(*head)) {
pr_err_client(cl, "got corrupt (short) reply\n");
ceph_msg_dump(msg);
return;
}
/* get request, session */
tid = le64_to_cpu(msg->hdr.tid);
mutex_lock(&mdsc->mutex);
req = lookup_get_request(mdsc, tid);
if (!req) {
doutc(cl, "on unknown tid %llu\n", tid);
mutex_unlock(&mdsc->mutex);
return;
}
doutc(cl, "handle_reply %p\n", req);
/* correct session? */
if (req->r_session != session) {
pr_err_client(cl, "got %llu on session mds%d not mds%d\n",
tid, session->s_mds,
req->r_session ? req->r_session->s_mds : -1);
mutex_unlock(&mdsc->mutex);
goto out;
}
/* dup? */
if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
(test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
pr_warn_client(cl, "got a dup %s reply on %llu from mds%d\n",
head->safe ? "safe" : "unsafe", tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
}
if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
pr_warn_client(cl, "got unsafe after safe on %llu from mds%d\n",
tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
}
result = le32_to_cpu(head->result);
if (head->safe) {
set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
__unregister_request(mdsc, req);
/* last request during umount? */
if (mdsc->stopping && !__get_oldest_req(mdsc))
complete_all(&mdsc->safe_umount_waiters);
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
/*
* We already handled the unsafe response, now do the
* cleanup. No need to examine the response; the MDS
* doesn't include any result info in the safe
* response. And even if it did, there is nothing
* useful we could do with a revised return value.
*/
doutc(cl, "got safe reply %llu, mds%d\n", tid, mds);
mutex_unlock(&mdsc->mutex);
goto out;
}
} else {
set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
}
doutc(cl, "tid %lld result %d\n", tid, result);
if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
err = parse_reply_info(session, msg, req, (u64)-1);
else
err = parse_reply_info(session, msg, req,
session->s_con.peer_features);
mutex_unlock(&mdsc->mutex);
/* Must find target inode outside of mutexes to avoid deadlocks */
rinfo = &req->r_reply_info;
if ((err >= 0) && rinfo->head->is_target) {
struct inode *in = xchg(&req->r_new_inode, NULL);
struct ceph_vino tvino = {
.ino = le64_to_cpu(rinfo->targeti.in->ino),
.snap = le64_to_cpu(rinfo->targeti.in->snapid)
};
/*
* If we ended up opening an existing inode, discard
* r_new_inode
*/
if (req->r_op == CEPH_MDS_OP_CREATE &&
!req->r_reply_info.has_create_ino) {
/* This should never happen on an async create */
WARN_ON_ONCE(req->r_deleg_ino);
iput(in);
in = NULL;
}
in = ceph_get_inode(mdsc->fsc->sb, tvino, in);
if (IS_ERR(in)) {
err = PTR_ERR(in);
mutex_lock(&session->s_mutex);
goto out_err;
}
req->r_target_inode = in;
}
mutex_lock(&session->s_mutex);
if (err < 0) {
pr_err_client(cl, "got corrupt reply mds%d(tid:%lld)\n",
mds, tid);
ceph_msg_dump(msg);
goto out_err;
}
/* snap trace */
realm = NULL;
if (rinfo->snapblob_len) {
down_write(&mdsc->snap_rwsem);
err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
rinfo->snapblob + rinfo->snapblob_len,
le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
&realm);
if (err) {
up_write(&mdsc->snap_rwsem);
close_sessions = true;
if (err == -EIO)
ceph_msg_dump(msg);
goto out_err;
}
downgrade_write(&mdsc->snap_rwsem);
} else {
down_read(&mdsc->snap_rwsem);
}
/* insert trace into our cache */
mutex_lock(&req->r_fill_mutex);
current->journal_info = req;
err = ceph_fill_trace(mdsc->fsc->sb, req);
if (err == 0) {
if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
req->r_op == CEPH_MDS_OP_LSSNAP))
err = ceph_readdir_prepopulate(req, req->r_session);
}
current->journal_info = NULL;
mutex_unlock(&req->r_fill_mutex);
up_read(&mdsc->snap_rwsem);
if (realm)
ceph_put_snap_realm(mdsc, realm);
if (err == 0) {
if (req->r_target_inode &&
test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
struct ceph_inode_info *ci =
ceph_inode(req->r_target_inode);
spin_lock(&ci->i_unsafe_lock);
list_add_tail(&req->r_unsafe_target_item,
&ci->i_unsafe_iops);
spin_unlock(&ci->i_unsafe_lock);
}
ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
}
out_err:
mutex_lock(&mdsc->mutex);
if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
if (err) {
req->r_err = err;
} else {
req->r_reply = ceph_msg_get(msg);
set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
}
} else {
doutc(cl, "reply arrived after request %lld was aborted\n", tid);
}
mutex_unlock(&mdsc->mutex);
mutex_unlock(&session->s_mutex);
/* kick calling process */
complete_request(mdsc, req);
ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
req->r_end_latency, err);
out:
ceph_mdsc_put_request(req);
/* Defer closing the sessions until after the s_mutex is released */
if (close_sessions)
ceph_mdsc_close_sessions(mdsc);
return;
}
/*
* handle mds notification that our request has been forwarded.
*/
static void handle_forward(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
u64 tid = le64_to_cpu(msg->hdr.tid);
u32 next_mds;
u32 fwd_seq;
int err = -EINVAL;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
bool aborted = false;
ceph_decode_need(&p, end, 2*sizeof(u32), bad);
next_mds = ceph_decode_32(&p);
fwd_seq = ceph_decode_32(&p);
mutex_lock(&mdsc->mutex);
req = lookup_get_request(mdsc, tid);
if (!req) {
mutex_unlock(&mdsc->mutex);
doutc(cl, "forward tid %llu to mds%d - req dne\n", tid, next_mds);
return; /* dup reply? */
}
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
doutc(cl, "forward tid %llu aborted, unregistering\n", tid);
__unregister_request(mdsc, req);
} else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
/*
* Avoid infinite retrying after overflow.
*
* The MDS increases the fwd count with every forward. On the
* client side, if the received num_fwd is less than the one
* saved in the request, the MDS is an old version whose 8-bit
* counter has overflowed.
*/
mutex_lock(&req->r_fill_mutex);
req->r_err = -EMULTIHOP;
set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
mutex_unlock(&req->r_fill_mutex);
aborted = true;
pr_warn_ratelimited_client(cl, "forward tid %llu seq overflow\n",
tid);
} else {
/* resend. forward race not possible; mds would drop */
doutc(cl, "forward tid %llu to mds%d (we resend)\n", tid, next_mds);
BUG_ON(req->r_err);
BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
req->r_attempts = 0;
req->r_num_fwd = fwd_seq;
req->r_resend_mds = next_mds;
put_request_session(req);
__do_request(mdsc, req);
}
mutex_unlock(&mdsc->mutex);
/* kick calling process */
if (aborted)
complete_request(mdsc, req);
ceph_mdsc_put_request(req);
return;
bad:
pr_err_client(cl, "decode error err=%d\n", err);
ceph_msg_dump(msg);
}
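/*
 * Decode the client metadata map (map<string,string>) in a session
 * message, setting *blocklisted if the error_string indicates that we
 * were blocklisted.
 */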
static int __decode_session_metadata(void **p, void *end,
bool *blocklisted)
{
/* map<string,string> */
u32 n;
bool err_str;
ceph_decode_32_safe(p, end, n, bad);
while (n-- > 0) {
u32 len;
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_need(p, end, len, bad);
err_str = !strncmp(*p, "error_string", len);
*p += len;
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_need(p, end, len, bad);
/*
* Match "blocklisted (blacklisted)" from newer MDSes,
* or "blacklisted" from older MDSes.
*/
if (err_str && strnstr(*p, "blacklisted", len))
*blocklisted = true;
*p += len;
}
return 0;
bad:
return -1;
}
/*
* handle an MDS session control message
*/
static void handle_session(struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_client *cl = mdsc->fsc->client;
int mds = session->s_mds;
int msg_version = le16_to_cpu(msg->hdr.version);
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
struct ceph_mds_session_head *h;
struct ceph_mds_cap_auth *cap_auths = NULL;
u32 op, cap_auths_num = 0;
u64 seq, features = 0;
int wake = 0;
bool blocklisted = false;
u32 i;
/* decode */
ceph_decode_need(&p, end, sizeof(*h), bad);
h = p;
p += sizeof(*h);
op = le32_to_cpu(h->op);
seq = le64_to_cpu(h->seq);
if (msg_version >= 3) {
u32 len;
/* version >= 2 and < 5, decode metadata, skip otherwise
* as it's handled via flags.
*/
if (msg_version >= 5)
ceph_decode_skip_map(&p, end, string, string, bad);
else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
goto bad;
/* version >= 3, feature bits */
ceph_decode_32_safe(&p, end, len, bad);
if (len) {
ceph_decode_64_safe(&p, end, features, bad);
p += len - sizeof(features);
}
}
if (msg_version >= 5) {
u32 flags, len;
/* version >= 4 */
ceph_decode_skip_16(&p, end, bad); /* struct_v, struct_cv */
ceph_decode_32_safe(&p, end, len, bad); /* len */
ceph_decode_skip_n(&p, end, len, bad); /* metric_spec */
/* version >= 5, flags */
ceph_decode_32_safe(&p, end, flags, bad);
if (flags & CEPH_SESSION_BLOCKLISTED) {
pr_warn_client(cl, "mds%d session blocklisted\n",
session->s_mds);
blocklisted = true;
}
}
if (msg_version >= 6) {
ceph_decode_32_safe(&p, end, cap_auths_num, bad);
doutc(cl, "cap_auths_num %d\n", cap_auths_num);
if (cap_auths_num && op != CEPH_SESSION_OPEN) {
WARN_ON_ONCE(op != CEPH_SESSION_OPEN);
goto skip_cap_auths;
}
cap_auths = kcalloc(cap_auths_num,
sizeof(struct ceph_mds_cap_auth),
GFP_KERNEL);
if (!cap_auths) {
pr_err_client(cl, "No memory for cap_auths\n");
return;
}
for (i = 0; i < cap_auths_num; i++) {
u32 _len, j;
/* struct_v, struct_compat, and struct_len in MDSCapAuth */
ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
/* struct_v, struct_compat, and struct_len in MDSCapMatch */
ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
ceph_decode_64_safe(&p, end, cap_auths[i].match.uid, bad);
ceph_decode_32_safe(&p, end, _len, bad);
if (_len) {
cap_auths[i].match.gids = kcalloc(_len, sizeof(u32),
GFP_KERNEL);
if (!cap_auths[i].match.gids) {
pr_err_client(cl, "No memory for gids\n");
goto fail;
}
cap_auths[i].match.num_gids = _len;
for (j = 0; j < _len; j++)
ceph_decode_32_safe(&p, end,
cap_auths[i].match.gids[j],
bad);
}
ceph_decode_32_safe(&p, end, _len, bad);
if (_len) {
cap_auths[i].match.path = kcalloc(_len + 1, sizeof(char),
GFP_KERNEL);
if (!cap_auths[i].match.path) {
pr_err_client(cl, "No memory for path\n");
goto fail;
}
ceph_decode_copy(&p, cap_auths[i].match.path, _len);
/* Remove the trailing '/' */
while (_len && cap_auths[i].match.path[_len - 1] == '/') {
cap_auths[i].match.path[_len - 1] = '\0';
_len -= 1;
}
}
ceph_decode_32_safe(&p, end, _len, bad);
if (_len) {
cap_auths[i].match.fs_name = kcalloc(_len + 1, sizeof(char),
GFP_KERNEL);
if (!cap_auths[i].match.fs_name) {
pr_err_client(cl, "No memory for fs_name\n");
goto fail;
}
ceph_decode_copy(&p, cap_auths[i].match.fs_name, _len);
}
ceph_decode_8_safe(&p, end, cap_auths[i].match.root_squash, bad);
ceph_decode_8_safe(&p, end, cap_auths[i].readable, bad);
ceph_decode_8_safe(&p, end, cap_auths[i].writeable, bad);
doutc(cl, "uid %lld, num_gids %u, path %s, fs_name %s, root_squash %d, readable %d, writeable %d\n",
cap_auths[i].match.uid, cap_auths[i].match.num_gids,
cap_auths[i].match.path, cap_auths[i].match.fs_name,
cap_auths[i].match.root_squash,
cap_auths[i].readable, cap_auths[i].writeable);
}
}
skip_cap_auths:
mutex_lock(&mdsc->mutex);
if (op == CEPH_SESSION_OPEN) {
if (mdsc->s_cap_auths) {
for (i = 0; i < mdsc->s_cap_auths_num; i++) {
kfree(mdsc->s_cap_auths[i].match.gids);
kfree(mdsc->s_cap_auths[i].match.path);
kfree(mdsc->s_cap_auths[i].match.fs_name);
}
kfree(mdsc->s_cap_auths);
}
mdsc->s_cap_auths_num = cap_auths_num;
mdsc->s_cap_auths = cap_auths;
}
if (op == CEPH_SESSION_CLOSE) {
ceph_get_mds_session(session);
__unregister_session(mdsc, session);
}
/* FIXME: this ttl calculation is generous */
session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
doutc(cl, "mds%d %s %p state %s seq %llu\n", mds,
ceph_session_op_name(op), session,
ceph_session_state_name(session->s_state), seq);
if (session->s_state == CEPH_MDS_SESSION_HUNG) {
session->s_state = CEPH_MDS_SESSION_OPEN;
pr_info_client(cl, "mds%d came back\n", session->s_mds);
}
switch (op) {
case CEPH_SESSION_OPEN:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
pr_info_client(cl, "mds%d reconnect success\n",
session->s_mds);
session->s_features = features;
if (session->s_state == CEPH_MDS_SESSION_OPEN) {
pr_notice_client(cl, "mds%d is already opened\n",
session->s_mds);
} else {
session->s_state = CEPH_MDS_SESSION_OPEN;
renewed_caps(mdsc, session, 0);
if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
&session->s_features))
metric_schedule_delayed(&mdsc->metric);
}
/*
* The connection may have been broken and the client-side
* session reinitialized, so update the seq in any case.
*/
if (!session->s_seq && seq)
session->s_seq = seq;
wake = 1;
if (mdsc->stopping)
__close_session(mdsc, session);
break;
case CEPH_SESSION_RENEWCAPS:
if (session->s_renew_seq == seq)
renewed_caps(mdsc, session, 1);
break;
case CEPH_SESSION_CLOSE:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
pr_info_client(cl, "mds%d reconnect denied\n",
session->s_mds);
session->s_state = CEPH_MDS_SESSION_CLOSED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
wake = 2; /* for good measure */
wake_up_all(&mdsc->session_close_wq);
break;
case CEPH_SESSION_STALE:
pr_info_client(cl, "mds%d caps went stale, renewing\n",
session->s_mds);
atomic_inc(&session->s_cap_gen);
session->s_cap_ttl = jiffies - 1;
send_renew_caps(mdsc, session);
break;
case CEPH_SESSION_RECALL_STATE:
ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
break;
case CEPH_SESSION_FLUSHMSG:
/* flush cap releases */
spin_lock(&session->s_cap_lock);
if (session->s_num_cap_releases)
ceph_flush_session_cap_releases(mdsc, session);
spin_unlock(&session->s_cap_lock);
send_flushmsg_ack(mdsc, session, seq);
break;
case CEPH_SESSION_FORCE_RO:
doutc(cl, "force_session_readonly %p\n", session);
spin_lock(&session->s_cap_lock);
session->s_readonly = true;
spin_unlock(&session->s_cap_lock);
wake_up_session_caps(session, FORCE_RO);
break;
case CEPH_SESSION_REJECT:
WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
pr_info_client(cl, "mds%d rejected session\n",
session->s_mds);
session->s_state = CEPH_MDS_SESSION_REJECTED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
if (blocklisted)
mdsc->fsc->blocklisted = true;
wake = 2; /* for good measure */
break;
default:
pr_err_client(cl, "bad op %d mds%d\n", op, mds);
WARN_ON(1);
}
mutex_unlock(&session->s_mutex);
if (wake) {
mutex_lock(&mdsc->mutex);
__wake_requests(mdsc, &session->s_waiting);
if (wake == 2)
kick_requests(mdsc, mds);
mutex_unlock(&mdsc->mutex);
}
if (op == CEPH_SESSION_CLOSE)
ceph_put_mds_session(session);
return;
bad:
pr_err_client(cl, "corrupt message mds%d len %d\n", mds,
(int)msg->front.iov_len);
ceph_msg_dump(msg);
fail:
for (i = 0; i < cap_auths_num; i++) {
kfree(cap_auths[i].match.gids);
kfree(cap_auths[i].match.path);
kfree(cap_auths[i].match.fs_name);
}
kfree(cap_auths);
return;
}
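/*
 * Release the cap references held on the parent directory for this
 * request (r_dir_caps), if any are still outstanding.
 */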
void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
{
struct ceph_client *cl = req->r_mdsc->fsc->client;
int dcaps;
dcaps = xchg(&req->r_dir_caps, 0);
if (dcaps) {
doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
}
}
void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req)
{
struct ceph_client *cl = req->r_mdsc->fsc->client;
int dcaps;
dcaps = xchg(&req->r_dir_caps, 0);
if (dcaps) {
doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
ceph_put_cap_refs_async(ceph_inode(req->r_parent), dcaps);
}
}
/*
* called under session->s_mutex.
*/
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_mds_request *req, *nreq;
struct rb_node *p;
doutc(mdsc->fsc->client, "mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
__send_request(session, req, true);
/*
* Also re-send old requests when the MDS enters the reconnect stage,
* so that the MDS can process completed requests in its clientreplay
* stage.
*/
p = rb_first(&mdsc->request_tree);
while (p) {
req = rb_entry(p, struct ceph_mds_request, r_node);
p = rb_next(p);
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
continue;
if (req->r_attempts == 0)
continue; /* only old requests */
if (!req->r_session)
continue;
if (req->r_session->s_mds != session->s_mds)
continue;
ceph_mdsc_release_dir_caps_async(req);
__send_request(session, req, true);
}
mutex_unlock(&mdsc->mutex);
}
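/*
 * Send the partially built reconnect message and switch to a fresh
 * pagelist for whatever caps/realms remain. Only possible when the MDS
 * allows multi-message reconnects; bumps the encoding to version 5.
 */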
static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
{
struct ceph_msg *reply;
struct ceph_pagelist *_pagelist;
struct page *page;
__le32 *addr;
int err = -ENOMEM;
if (!recon_state->allow_multi)
return -ENOSPC;
/* can't handle message that contains both caps and realm */
BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
/* pre-allocate new pagelist */
_pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!_pagelist)
return -ENOMEM;
reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
if (!reply)
goto fail_msg;
/* placeholder for nr_caps */
err = ceph_pagelist_encode_32(_pagelist, 0);
if (err < 0)
goto fail;
if (recon_state->nr_caps) {
/* currently encoding caps */
err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
if (err)
goto fail;
} else {
/* placeholder for nr_realms (currently encoding realms) */
err = ceph_pagelist_encode_32(_pagelist, 0);
if (err < 0)
goto fail;
}
err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
if (err)
goto fail;
page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
addr = kmap_atomic(page);
if (recon_state->nr_caps) {
/* currently encoding caps */
*addr = cpu_to_le32(recon_state->nr_caps);
} else {
/* currently encoding realms */
*(addr + 1) = cpu_to_le32(recon_state->nr_realms);
}
kunmap_atomic(addr);
reply->hdr.version = cpu_to_le16(5);
reply->hdr.compat_version = cpu_to_le16(4);
reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
ceph_con_send(&recon_state->session->s_con, reply);
ceph_pagelist_release(recon_state->pagelist);
recon_state->pagelist = _pagelist;
recon_state->nr_caps = 0;
recon_state->nr_realms = 0;
recon_state->msg_version = 5;
return 0;
fail:
ceph_msg_put(reply);
fail_msg:
ceph_pagelist_release(_pagelist);
return err;
}
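/*
 * Return a reference to the primary dentry for an inode: for a
 * directory, its single non-root alias; otherwise the alias marked
 * CEPH_DENTRY_PRIMARY_LINK. Returns NULL if there is none.
 */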
static struct dentry *d_find_primary(struct inode *inode)
{
struct dentry *alias, *dn = NULL;
if (hlist_empty(&inode->i_dentry))
return NULL;
spin_lock(&inode->i_lock);
if (hlist_empty(&inode->i_dentry))
goto out_unlock;
if (S_ISDIR(inode->i_mode)) {
alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
if (!IS_ROOT(alias))
dn = dget(alias);
goto out_unlock;
}
hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
spin_lock(&alias->d_lock);
if (!d_unhashed(alias) &&
(ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
dn = dget_dlock(alias);
}
spin_unlock(&alias->d_lock);
if (dn)
break;
}
out_unlock:
spin_unlock(&inode->i_lock);
return dn;
}
/*
* Encode information about a cap for a reconnect with the MDS.
*/
static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_client *cl = ceph_inode_to_client(inode);
union {
struct ceph_mds_cap_reconnect v2;
struct ceph_mds_cap_reconnect_v1 v1;
} rec;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_reconnect_state *recon_state = arg;
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
struct ceph_cap *cap;
char *path;
int pathlen = 0, err;
u64 pathbase;
u64 snap_follows;
dentry = d_find_primary(inode);
if (dentry) {
/* set pathbase to parent dir when msg_version >= 2 */
path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
recon_state->msg_version >= 2);
dput(dentry);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_err;
}
} else {
path = NULL;
pathbase = 0;
}
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
spin_unlock(&ci->i_ceph_lock);
err = 0;
goto out_err;
}
doutc(cl, " adding %p ino %llx.%llx cap %p %lld %s\n", inode,
ceph_vinop(inode), cap, cap->cap_id,
ceph_cap_string(cap->issued));
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
cap->mseq = 0; /* and migrate_seq */
cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
/* These are lost when the session goes away */
if (S_ISDIR(inode->i_mode)) {
if (cap->issued & CEPH_CAP_DIR_CREATE) {
ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
}
cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
}
if (recon_state->msg_version >= 2) {
rec.v2.cap_id = cpu_to_le64(cap->cap_id);
rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v2.pathbase = cpu_to_le64(pathbase);
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
struct timespec64 ts;
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v1.issued = cpu_to_le32(cap->issued);
rec.v1.size = cpu_to_le64(i_size_read(inode));
ts = inode_get_mtime(inode);
ceph_encode_timespec64(&rec.v1.mtime, &ts);
ts = inode_get_atime(inode);
ceph_encode_timespec64(&rec.v1.atime, &ts);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v1.pathbase = cpu_to_le64(pathbase);
}
if (list_empty(&ci->i_cap_snaps)) {
snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
} else {
struct ceph_cap_snap *capsnap =
list_first_entry(&ci->i_cap_snaps,
struct ceph_cap_snap, ci_item);
snap_follows = capsnap->follows;
}
spin_unlock(&ci->i_ceph_lock);
if (recon_state->msg_version >= 2) {
int num_fcntl_locks, num_flock_locks;
struct ceph_filelock *flocks = NULL;
size_t struct_len, total_len = sizeof(u64);
u8 struct_v = 0;
encode_again:
if (rec.v2.flock_len) {
ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
} else {
num_fcntl_locks = 0;
num_flock_locks = 0;
}
if (num_fcntl_locks + num_flock_locks > 0) {
flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
sizeof(struct ceph_filelock),
GFP_NOFS);
if (!flocks) {
err = -ENOMEM;
goto out_err;
}
err = ceph_encode_locks_to_buffer(inode, flocks,
num_fcntl_locks,
num_flock_locks);
if (err) {
kfree(flocks);
flocks = NULL;
if (err == -ENOSPC)
goto encode_again;
goto out_err;
}
} else {
kfree(flocks);
flocks = NULL;
}
if (recon_state->msg_version >= 3) {
/* version, compat_version and struct_len */
total_len += 2 * sizeof(u8) + sizeof(u32);
struct_v = 2;
}
/*
* number of encoded locks is stable, so copy to pagelist
*/
struct_len = 2 * sizeof(u32) +
(num_fcntl_locks + num_flock_locks) *
sizeof(struct ceph_filelock);
rec.v2.flock_len = cpu_to_le32(struct_len);
struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
if (struct_v >= 2)
struct_len += sizeof(u64); /* snap_follows */
total_len += struct_len;
if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
err = send_reconnect_partial(recon_state);
if (err)
goto out_freeflocks;
pagelist = recon_state->pagelist;
}
err = ceph_pagelist_reserve(pagelist, total_len);
if (err)
goto out_freeflocks;
ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
if (recon_state->msg_version >= 3) {
ceph_pagelist_encode_8(pagelist, struct_v);
ceph_pagelist_encode_8(pagelist, 1);
ceph_pagelist_encode_32(pagelist, struct_len);
}
ceph_pagelist_encode_string(pagelist, path, pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
ceph_locks_to_pagelist(flocks, pagelist,
num_fcntl_locks, num_flock_locks);
if (struct_v >= 2)
ceph_pagelist_encode_64(pagelist, snap_follows);
out_freeflocks:
kfree(flocks);
} else {
err = ceph_pagelist_reserve(pagelist,
sizeof(u64) + sizeof(u32) +
pathlen + sizeof(rec.v1));
if (err)
goto out_err;
ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
ceph_pagelist_encode_string(pagelist, path, pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
}
out_err:
ceph_mdsc_free_path(path, pathlen);
if (!err)
recon_state->nr_caps++;
return err;
}
static int encode_snap_realms(struct ceph_mds_client *mdsc,
struct ceph_reconnect_state *recon_state)
{
struct rb_node *p;
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct ceph_client *cl = mdsc->fsc->client;
int err = 0;
if (recon_state->msg_version >= 4) {
err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
if (err < 0)
goto fail;
}
/*
* snaprealms. we provide mds with the ino, seq (version), and
* parent for all of our realms. If the mds has any newer info,
* it will tell us.
*/
for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
struct ceph_snap_realm *realm =
rb_entry(p, struct ceph_snap_realm, node);
struct ceph_mds_snaprealm_reconnect sr_rec;
if (recon_state->msg_version >= 4) {
size_t need = sizeof(u8) * 2 + sizeof(u32) +
sizeof(sr_rec);
if (pagelist->length + need > RECONNECT_MAX_SIZE) {
err = send_reconnect_partial(recon_state);
if (err)
goto fail;
pagelist = recon_state->pagelist;
}
err = ceph_pagelist_reserve(pagelist, need);
if (err)
goto fail;
ceph_pagelist_encode_8(pagelist, 1);
ceph_pagelist_encode_8(pagelist, 1);
ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
}
doutc(cl, " adding snap realm %llx seq %lld parent %llx\n",
realm->ino, realm->seq, realm->parent_ino);
sr_rec.ino = cpu_to_le64(realm->ino);
sr_rec.seq = cpu_to_le64(realm->seq);
sr_rec.parent = cpu_to_le64(realm->parent_ino);
err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
if (err)
goto fail;
recon_state->nr_realms++;
}
fail:
return err;
}
/*
* If an MDS fails and recovers, clients need to reconnect in order to
* reestablish shared state. This includes all caps issued through
* this session _and_ the snap_realm hierarchy. Because it's not
* clear which snap realms the mds cares about, we send everything we
* know about; that ensures we'll then get any new info the
* recovering MDS might have.
*
* This is a relatively heavyweight operation, but it's rare.
*/
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *reply;
int mds = session->s_mds;
int err = -ENOMEM;
struct ceph_reconnect_state recon_state = {
.session = session,
};
LIST_HEAD(dispose);
pr_info_client(cl, "mds%d reconnect start\n", mds);
recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!recon_state.pagelist)
goto fail_nopagelist;
reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
if (!reply)
goto fail_nomsg;
xa_destroy(&session->s_delegated_inos);
mutex_lock(&session->s_mutex);
session->s_state = CEPH_MDS_SESSION_RECONNECTING;
session->s_seq = 0;
doutc(cl, "session %p state %s\n", session,
ceph_session_state_name(session->s_state));
atomic_inc(&session->s_cap_gen);
spin_lock(&session->s_cap_lock);
/* don't know if session is readonly */
session->s_readonly = 0;
/*
* notify __ceph_remove_cap() that we are composing cap reconnect.
* If a cap gets released before being added to the cap reconnect,
* __ceph_remove_cap() should skip queuing the cap release.
*/
session->s_cap_reconnect = 1;
/* drop old cap expires; we're about to reestablish that state */
detach_cap_releases(session, &dispose);
spin_unlock(&session->s_cap_lock);
dispose_cap_releases(mdsc, &dispose);
/* trim unused caps to reduce MDS's cache rejoin time */
if (mdsc->fsc->sb->s_root)
shrink_dcache_parent(mdsc->fsc->sb->s_root);
ceph_con_close(&session->s_con);
ceph_con_open(&session->s_con,
CEPH_ENTITY_TYPE_MDS, mds,
ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
/* replay unsafe requests */
replay_unsafe_requests(mdsc, session);
ceph_early_kick_flushing_caps(mdsc, session);
down_read(&mdsc->snap_rwsem);
/* placeholder for nr_caps */
err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
if (err)
goto fail;
if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
recon_state.msg_version = 3;
recon_state.allow_multi = true;
} else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
recon_state.msg_version = 3;
} else {
recon_state.msg_version = 2;
}
/* traverse this session's caps */
err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
spin_lock(&session->s_cap_lock);
session->s_cap_reconnect = 0;
spin_unlock(&session->s_cap_lock);
if (err < 0)
goto fail;
/* check if all realms can be encoded into current message */
if (mdsc->num_snap_realms) {
size_t total_len =
recon_state.pagelist->length +
mdsc->num_snap_realms *
sizeof(struct ceph_mds_snaprealm_reconnect);
if (recon_state.msg_version >= 4) {
/* number of realms */
total_len += sizeof(u32);
/* version, compat_version and struct_len */
total_len += mdsc->num_snap_realms *
(2 * sizeof(u8) + sizeof(u32));
}
if (total_len > RECONNECT_MAX_SIZE) {
if (!recon_state.allow_multi) {
err = -ENOSPC;
goto fail;
}
if (recon_state.nr_caps) {
err = send_reconnect_partial(&recon_state);
if (err)
goto fail;
}
recon_state.msg_version = 5;
}
}
err = encode_snap_realms(mdsc, &recon_state);
if (err < 0)
goto fail;
if (recon_state.msg_version >= 5) {
err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
if (err < 0)
goto fail;
}
if (recon_state.nr_caps || recon_state.nr_realms) {
struct page *page =
list_first_entry(&recon_state.pagelist->head,
struct page, lru);
__le32 *addr = kmap_atomic(page);
if (recon_state.nr_caps) {
WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
*addr = cpu_to_le32(recon_state.nr_caps);
} else if (recon_state.msg_version >= 4) {
*(addr + 1) = cpu_to_le32(recon_state.nr_realms);
}
kunmap_atomic(addr);
}
reply->hdr.version = cpu_to_le16(recon_state.msg_version);
if (recon_state.msg_version >= 4)
reply->hdr.compat_version = cpu_to_le16(4);
reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
ceph_con_send(&session->s_con, reply);
mutex_unlock(&session->s_mutex);
mutex_lock(&mdsc->mutex);
__wake_requests(mdsc, &session->s_waiting);
mutex_unlock(&mdsc->mutex);
up_read(&mdsc->snap_rwsem);
ceph_pagelist_release(recon_state.pagelist);
return;
fail:
ceph_msg_put(reply);
up_read(&mdsc->snap_rwsem);
mutex_unlock(&session->s_mutex);
fail_nomsg:
ceph_pagelist_release(recon_state.pagelist);
fail_nopagelist:
pr_err_client(cl, "error %d preparing reconnect for mds%d\n",
err, mds);
return;
}
/*
* compare old and new mdsmaps, kicking requests
* and closing out old connections as necessary
*
* called under mdsc->mutex.
*/
static void check_new_map(struct ceph_mds_client *mdsc,
struct ceph_mdsmap *newmap,
struct ceph_mdsmap *oldmap)
{
int i, j, err;
int oldstate, newstate;
struct ceph_mds_session *s;
unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};
struct ceph_client *cl = mdsc->fsc->client;
doutc(cl, "new %u old %u\n", newmap->m_epoch, oldmap->m_epoch);
if (newmap->m_info) {
for (i = 0; i < newmap->possible_max_rank; i++) {
for (j = 0; j < newmap->m_info[i].num_export_targets; j++)
set_bit(newmap->m_info[i].export_targets[j], targets);
}
}
for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
if (!mdsc->sessions[i])
continue;
s = mdsc->sessions[i];
oldstate = ceph_mdsmap_get_state(oldmap, i);
newstate = ceph_mdsmap_get_state(newmap, i);
doutc(cl, "mds%d state %s%s -> %s%s (session %s)\n",
i, ceph_mds_state_name(oldstate),
ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
ceph_mds_state_name(newstate),
ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
ceph_session_state_name(s->s_state));
if (i >= newmap->possible_max_rank) {
/* force close session for stopped mds */
ceph_get_mds_session(s);
__unregister_session(mdsc, s);
__wake_requests(mdsc, &s->s_waiting);
mutex_unlock(&mdsc->mutex);
mutex_lock(&s->s_mutex);
cleanup_session_requests(mdsc, s);
remove_session_caps(s);
mutex_unlock(&s->s_mutex);
ceph_put_mds_session(s);
mutex_lock(&mdsc->mutex);
kick_requests(mdsc, i);
continue;
}
if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
ceph_mdsmap_get_addr(newmap, i),
sizeof(struct ceph_entity_addr))) {
/* just close it */
mutex_unlock(&mdsc->mutex);
mutex_lock(&s->s_mutex);
mutex_lock(&mdsc->mutex);
ceph_con_close(&s->s_con);
mutex_unlock(&s->s_mutex);
s->s_state = CEPH_MDS_SESSION_RESTARTING;
} else if (oldstate == newstate) {
continue; /* nothing new with this mds */
}
/*
* send reconnect?
*/
if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
newstate >= CEPH_MDS_STATE_RECONNECT) {
mutex_unlock(&mdsc->mutex);
clear_bit(i, targets);
send_mds_reconnect(mdsc, s);
mutex_lock(&mdsc->mutex);
}
/*
* kick request on any mds that has gone active.
*/
if (oldstate < CEPH_MDS_STATE_ACTIVE &&
newstate >= CEPH_MDS_STATE_ACTIVE) {
if (oldstate != CEPH_MDS_STATE_CREATING &&
oldstate != CEPH_MDS_STATE_STARTING)
pr_info_client(cl, "mds%d recovery completed\n",
s->s_mds);
kick_requests(mdsc, i);
mutex_unlock(&mdsc->mutex);
mutex_lock(&s->s_mutex);
mutex_lock(&mdsc->mutex);
ceph_kick_flushing_caps(mdsc, s);
mutex_unlock(&s->s_mutex);
wake_up_session_caps(s, RECONNECT);
}
}
/*
* Only open and reconnect sessions that don't exist yet.
*/
for (i = 0; i < newmap->possible_max_rank; i++) {
/*
* If the importing MDS crashes just after the
* EImportStart journal is flushed, a standby MDS
* takes over and, while replaying the EImportStart
* journal, waits for the client to reconnect; but
* the client may never have registered/opened the
* session yet.
*
* Try to reconnect to such an MDS daemon if its
* rank number is in the export targets array and
* it is in the up:reconnect state.
*/
newstate = ceph_mdsmap_get_state(newmap, i);
if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
continue;
/*
* In rare cases the session may already have been
* registered and opened by requests that chose
* random MDSes during the mdsc->mutex unlock/lock
* gap below. But the related MDS daemon will just
* queue those requests and keep waiting for the
* client's reconnection request in up:reconnect state.
*/
s = __ceph_lookup_mds_session(mdsc, i);
if (likely(!s)) {
s = __open_export_target_session(mdsc, i);
if (IS_ERR(s)) {
err = PTR_ERR(s);
pr_err_client(cl,
"failed to open export target session, err %d\n",
err);
continue;
}
}
doutc(cl, "send reconnect to export target mds.%d\n", i);
mutex_unlock(&mdsc->mutex);
send_mds_reconnect(mdsc, s);
ceph_put_mds_session(s);
mutex_lock(&mdsc->mutex);
}
for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
s = mdsc->sessions[i];
if (!s)
continue;
if (!ceph_mdsmap_is_laggy(newmap, i))
continue;
if (s->s_state == CEPH_MDS_SESSION_OPEN ||
s->s_state == CEPH_MDS_SESSION_HUNG ||
s->s_state == CEPH_MDS_SESSION_CLOSING) {
doutc(cl, " connecting to export targets of laggy mds%d\n", i);
__open_export_target_sessions(mdsc, s);
}
}
}
/*
* leases
*/
/*
* caller must hold session s_mutex, dentry->d_lock
*/
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
ceph_put_mds_session(di->lease_session);
di->lease_session = NULL;
}
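/*
 * Handle a dentry lease message from the MDS: drop our lease on a
 * REVOKE (and ack it), or extend the lease timeout on a RENEW.
 */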
static void handle_lease(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_client *cl = mdsc->fsc->client;
struct super_block *sb = mdsc->fsc->sb;
struct inode *inode;
struct dentry *parent, *dentry;
struct ceph_dentry_info *di;
int mds = session->s_mds;
struct ceph_mds_lease *h = msg->front.iov_base;
u32 seq;
struct ceph_vino vino;
struct qstr dname;
int release = 0;
doutc(cl, "from mds%d\n", mds);
if (!ceph_inc_mds_stopping_blocker(mdsc, session))
return;
/* decode */
if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
goto bad;
vino.ino = le64_to_cpu(h->ino);
vino.snap = CEPH_NOSNAP;
seq = le32_to_cpu(h->seq);
dname.len = get_unaligned_le32(h + 1);
if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
goto bad;
dname.name = (void *)(h + 1) + sizeof(u32);
/* lookup inode */
inode = ceph_find_inode(sb, vino);
doutc(cl, "%s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action),
vino.ino, inode, dname.len, dname.name);
mutex_lock(&session->s_mutex);
if (!inode) {
doutc(cl, "no inode %llx\n", vino.ino);
goto release;
}
/* dentry */
parent = d_find_alias(inode);
if (!parent) {
doutc(cl, "no parent dentry on inode %p\n", inode);
WARN_ON(1);
goto release; /* hrm... */
}
dname.hash = full_name_hash(parent, dname.name, dname.len);
dentry = d_lookup(parent, &dname);
dput(parent);
if (!dentry)
goto release;
spin_lock(&dentry->d_lock);
di = ceph_dentry(dentry);
switch (h->action) {
case CEPH_MDS_LEASE_REVOKE:
if (di->lease_session == session) {
if (ceph_seq_cmp(di->lease_seq, seq) > 0)
h->seq = cpu_to_le32(di->lease_seq);
__ceph_mdsc_drop_dentry_lease(dentry);
}
release = 1;
break;
case CEPH_MDS_LEASE_RENEW:
if (di->lease_session == session &&
di->lease_gen == atomic_read(&session->s_cap_gen) &&
di->lease_renew_from &&
di->lease_renew_after == 0) {
unsigned long duration =
msecs_to_jiffies(le32_to_cpu(h->duration_ms));
di->lease_seq = seq;
di->time = di->lease_renew_from + duration;
di->lease_renew_after = di->lease_renew_from +
(duration >> 1);
di->lease_renew_from = 0;
}
break;
}
spin_unlock(&dentry->d_lock);
dput(dentry);
if (!release)
goto out;
release:
/* let's just reuse the same message */
h->action = CEPH_MDS_LEASE_REVOKE_ACK;
ceph_msg_get(msg);
ceph_con_send(&session->s_con, msg);
out:
mutex_unlock(&session->s_mutex);
iput(inode);
ceph_dec_mds_stopping_blocker(mdsc);
return;
bad:
ceph_dec_mds_stopping_blocker(mdsc);
pr_err_client(cl, "corrupt lease message\n");
ceph_msg_dump(msg);
}
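/*
 * Build and send a lease message (e.g. a revoke ack) for @dentry to
 * the MDS backing @session.
 */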
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
struct dentry *dentry, char action,
u32 seq)
{
struct ceph_client *cl = session->s_mdsc->fsc->client;
struct ceph_msg *msg;
struct ceph_mds_lease *lease;
struct inode *dir;
int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
doutc(cl, "identry %p %s to mds%d\n", dentry, ceph_lease_op_name(action),
session->s_mds);
msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
if (!msg)
return;
lease = msg->front.iov_base;
lease->action = action;
lease->seq = cpu_to_le32(seq);
spin_lock(&dentry->d_lock);
dir = d_inode(dentry->d_parent);
lease->ino = cpu_to_le64(ceph_ino(dir));
lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
put_unaligned_le32(dentry->d_name.len, lease + 1);
memcpy((void *)(lease + 1) + 4,
dentry->d_name.name, dentry->d_name.len);
spin_unlock(&dentry->d_lock);
ceph_con_send(&session->s_con, msg);
}
/*
* Lock and unlock the session, to wait for ongoing session activities.
*/
static void lock_unlock_session(struct ceph_mds_session *s)
{
mutex_lock(&s->s_mutex);
mutex_unlock(&s->s_mutex);
}
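/*
 * If the client was blocklisted while mounted and the CLEANRECOVER
 * mount option is set, automatically force a reconnect.
 */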
static void maybe_recover_session(struct ceph_mds_client *mdsc)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_fs_client *fsc = mdsc->fsc;
if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
return;
if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
return;
if (!READ_ONCE(fsc->blocklisted))
return;
pr_info_client(cl, "auto reconnect after blocklisted\n");
ceph_force_reconnect(fsc->sb);
}
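/*
 * Return true if the session is still worth poking (keepalive, cap
 * renewal); an OPEN session whose ttl has expired is marked HUNG.
 */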
bool check_session_state(struct ceph_mds_session *s)
{
struct ceph_client *cl = s->s_mdsc->fsc->client;
switch (s->s_state) {
case CEPH_MDS_SESSION_OPEN:
if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
s->s_state = CEPH_MDS_SESSION_HUNG;
pr_info_client(cl, "mds%d hung\n", s->s_mds);
}
break;
case CEPH_MDS_SESSION_CLOSING:
case CEPH_MDS_SESSION_NEW:
case CEPH_MDS_SESSION_RESTARTING:
case CEPH_MDS_SESSION_CLOSED:
case CEPH_MDS_SESSION_REJECTED:
return false;
}
return true;
}
/*
* If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
* then we need to retransmit that request.
*/
void inc_session_sequence(struct ceph_mds_session *s)
{
struct ceph_client *cl = s->s_mdsc->fsc->client;
lockdep_assert_held(&s->s_mutex);
s->s_seq++;
if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
int ret;
doutc(cl, "resending session close request for mds%d\n", s->s_mds);
ret = request_close_session(s);
if (ret < 0)
pr_err_client(cl, "unable to close session to mds%d: %d\n",
s->s_mds, ret);
}
}
/*
* delayed work -- periodically trim expired leases, renew caps with mds. If
* the @delay parameter is set to 0 or if it's more than 5 secs, the default
* workqueue delay value of 5 secs will be used.
*/
static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
{
unsigned long max_delay = HZ * 5;
/* 5 secs default delay */
if (!delay || (delay > max_delay))
delay = max_delay;
schedule_delayed_work(&mdsc->delayed_work,
round_jiffies_relative(delay));
}
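/*
 * The delayed work body: periodically renew caps (or send keepalives),
 * flush cap releases, process delayed caps, and re-arm the timer.
 */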
static void delayed_work(struct work_struct *work)
{
struct ceph_mds_client *mdsc =
container_of(work, struct ceph_mds_client, delayed_work.work);
unsigned long delay;
int renew_interval;
int renew_caps;
int i;
doutc(mdsc->fsc->client, "mdsc delayed_work\n");
if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
return;
mutex_lock(&mdsc->mutex);
renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
renew_caps = time_after_eq(jiffies, HZ*renew_interval +
mdsc->last_renew_caps);
if (renew_caps)
mdsc->last_renew_caps = jiffies;
for (i = 0; i < mdsc->max_sessions; i++) {
struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
if (!s)
continue;
if (!check_session_state(s)) {
ceph_put_mds_session(s);
continue;
}
mutex_unlock(&mdsc->mutex);
ceph_flush_session_cap_releases(mdsc, s);
mutex_lock(&s->s_mutex);
if (renew_caps)
send_renew_caps(mdsc, s);
else
ceph_con_keepalive(&s->s_con);
if (s->s_state == CEPH_MDS_SESSION_OPEN ||
s->s_state == CEPH_MDS_SESSION_HUNG)
ceph_send_cap_releases(mdsc, s);
mutex_unlock(&s->s_mutex);
ceph_put_mds_session(s);
mutex_lock(&mdsc->mutex);
}
mutex_unlock(&mdsc->mutex);
delay = ceph_check_delayed_caps(mdsc);
ceph_queue_cap_reclaim_work(mdsc);
ceph_trim_snapid_map(mdsc);
maybe_recover_session(mdsc);
schedule_delayed(mdsc, delay);
}
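/*
 * Allocate and initialize the MDS client state for a ceph_fs_client.
 */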
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc;
int err;
mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
if (!mdsc)
return -ENOMEM;
mdsc->fsc = fsc;
mutex_init(&mdsc->mutex);
mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
if (!mdsc->mdsmap) {
err = -ENOMEM;
goto err_mdsc;
}
init_completion(&mdsc->safe_umount_waiters);
spin_lock_init(&mdsc->stopping_lock);
atomic_set(&mdsc->stopping_blockers, 0);
init_completion(&mdsc->stopping_waiter);
init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
mdsc->quotarealms_inodes = RB_ROOT;
mutex_init(&mdsc->quotarealms_inodes_mutex);
init_rwsem(&mdsc->snap_rwsem);
mdsc->snap_realms = RB_ROOT;
INIT_LIST_HEAD(&mdsc->snap_empty);
spin_lock_init(&mdsc->snap_empty_lock);
mdsc->request_tree = RB_ROOT;
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
mdsc->last_renew_caps = jiffies;
INIT_LIST_HEAD(&mdsc->cap_delay_list);
#ifdef CONFIG_DEBUG_FS
INIT_LIST_HEAD(&mdsc->cap_wait_list);
#endif
spin_lock_init(&mdsc->cap_delay_lock);
INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
INIT_LIST_HEAD(&mdsc->cap_flush_list);
INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
INIT_WORK(&mdsc->cap_unlink_work, ceph_cap_unlink_work);
err = ceph_metric_init(&mdsc->metric);
if (err)
goto err_mdsmap;
spin_lock_init(&mdsc->dentry_list_lock);
INIT_LIST_HEAD(&mdsc->dentry_leases);
INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
ceph_caps_init(mdsc);
ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
spin_lock_init(&mdsc->snapid_map_lock);
mdsc->snapid_map_tree = RB_ROOT;
INIT_LIST_HEAD(&mdsc->snapid_map_lru);
init_rwsem(&mdsc->pool_perm_rwsem);
mdsc->pool_perm_tree = RB_ROOT;
strscpy(mdsc->nodename, utsname()->nodename,
sizeof(mdsc->nodename));
fsc->mdsc = mdsc;
return 0;
err_mdsmap:
kfree(mdsc->mdsmap);
err_mdsc:
kfree(mdsc);
return err;
}
/*
* Wait for safe replies on open mds requests. If we time out, drop
* all requests from the tree to avoid dangling dentry refs.
*/
static void wait_requests(struct ceph_mds_client *mdsc)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_options *opts = mdsc->fsc->client->options;
struct ceph_mds_request *req;
mutex_lock(&mdsc->mutex);
if (__get_oldest_req(mdsc)) {
mutex_unlock(&mdsc->mutex);
doutc(cl, "waiting for requests\n");
wait_for_completion_timeout(&mdsc->safe_umount_waiters,
ceph_timeout_jiffies(opts->mount_timeout));
/* tear down remaining requests */
mutex_lock(&mdsc->mutex);
while ((req = __get_oldest_req(mdsc))) {
doutc(cl, "timed out on tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
__unregister_request(mdsc, req);
}
}
mutex_unlock(&mdsc->mutex);
doutc(cl, "done\n");
}
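/*
 * Ask an MDS to flush its journal (mdlog) so that pending metadata
 * updates get committed sooner.
 */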
void send_flush_mdlog(struct ceph_mds_session *s)
{
struct ceph_client *cl = s->s_mdsc->fsc->client;
struct ceph_msg *msg;
/*
* Pre-luminous MDS crashes when it sees an unknown session request
*/
if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
return;
mutex_lock(&s->s_mutex);
doutc(cl, "request mdlog flush to mds%d (%s)s seq %lld\n",
s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
s->s_seq);
if (!msg) {
pr_err_client(cl, "failed to request mdlog flush to mds%d (%s) seq %lld\n",
s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
} else {
ceph_con_send(&s->s_con, msg);
}
mutex_unlock(&s->s_mutex);
}
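/*
 * Check one MDS auth cap entry against the given credentials and
 * target path. Returns 1 on a match, 0 on a mismatch, or -ENOMEM.
 */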
static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
struct ceph_mds_cap_auth *auth,
const struct cred *cred,
char *tpath)
{
u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
struct ceph_client *cl = mdsc->fsc->client;
const char *spath = mdsc->fsc->mount_options->server_path;
bool gid_matched = false;
u32 gid, tlen, len;
int i, j;
doutc(cl, "match.uid %lld\n", auth->match.uid);
if (auth->match.uid != MDS_AUTH_UID_ANY) {
if (auth->match.uid != caller_uid)
return 0;
if (auth->match.num_gids) {
for (i = 0; i < auth->match.num_gids; i++) {
if (caller_gid == auth->match.gids[i])
gid_matched = true;
}
if (!gid_matched && cred->group_info->ngroups) {
for (i = 0; i < cred->group_info->ngroups; i++) {
gid = from_kgid(&init_user_ns,
cred->group_info->gid[i]);
for (j = 0; j < auth->match.num_gids; j++) {
if (gid == auth->match.gids[j]) {
gid_matched = true;
break;
}
}
if (gid_matched)
break;
}
}
if (!gid_matched)
return 0;
}
}
/* path match */
if (auth->match.path) {
if (!tpath)
return 0;
tlen = strlen(tpath);
len = strlen(auth->match.path);
if (len) {
char *_tpath = tpath;
bool free_tpath = false;
int m, n;
doutc(cl, "server path %s, tpath %s, match.path %s\n",
spath, tpath, auth->match.path);
if (spath && (m = strlen(spath)) != 1) {
/* mount path + '/' + tpath + an extra space */
n = m + 1 + tlen + 1;
_tpath = kmalloc(n, GFP_NOFS);
if (!_tpath)
return -ENOMEM;
/* remove the leading '/' */
snprintf(_tpath, n, "%s/%s", spath + 1, tpath);
free_tpath = true;
tlen = strlen(_tpath);
}
/*
* Please note the trailing '/' for match.path has already
* been removed when parsing.
*
* Remove the trailing '/' from the target path.
*/
while (tlen && _tpath[tlen - 1] == '/') {
_tpath[tlen - 1] = '\0';
tlen -= 1;
}
doutc(cl, "_tpath %s\n", _tpath);
/*
* In case first == _tpath && tlen == len:
* match.path=/foo --> /foo _path=/foo --> match
* match.path=/foo/ --> /foo _path=/foo --> match
*
* In case first == _tpath && tlen > len:
* match.path=/foo/ --> /foo _path=/foo/ --> match
* match.path=/foo --> /foo _path=/foo/ --> match
* match.path=/foo/ --> /foo _path=/foo/d --> match
* match.path=/foo --> /foo _path=/food --> mismatch
*
* All the other cases --> mismatch
*/
char *first = strstr(_tpath, auth->match.path);
if (first != _tpath) {
if (free_tpath)
kfree(_tpath);
return 0;
}
if (tlen > len && _tpath[len] != '/') {
if (free_tpath)
kfree(_tpath);
return 0;
}
}
}
doutc(cl, "matched\n");
return 1;
}
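/*
 * Check the MDS auth caps for the current credentials against @tpath
 * and the requested access @mask. Returns 0 if access is allowed,
 * -EACCES if denied, or another negative error.
 */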
int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
{
const struct cred *cred = get_current_cred();
u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
struct ceph_mds_cap_auth *rw_perms_s = NULL;
struct ceph_client *cl = mdsc->fsc->client;
bool root_squash_perms = true;
int i, err;
doutc(cl, "tpath '%s', mask %d, caller_uid %d, caller_gid %d\n",
tpath, mask, caller_uid, caller_gid);
for (i = 0; i < mdsc->s_cap_auths_num; i++) {
struct ceph_mds_cap_auth *s = &mdsc->s_cap_auths[i];
err = ceph_mds_auth_match(mdsc, s, cred, tpath);
if (err < 0) {
put_cred(cred);
return err;
} else if (err > 0) {
/* always follow the last auth caps' permission */
root_squash_perms = true;
rw_perms_s = NULL;
if ((mask & MAY_WRITE) && s->writeable &&
s->match.root_squash && (!caller_uid || !caller_gid))
root_squash_perms = false;
if (((mask & MAY_WRITE) && !s->writeable) ||
((mask & MAY_READ) && !s->readable))
rw_perms_s = s;
}
}
put_cred(cred);
doutc(cl, "root_squash_perms %d, rw_perms_s %p\n", root_squash_perms,
rw_perms_s);
if (root_squash_perms && rw_perms_s == NULL) {
doutc(cl, "access allowed\n");
return 0;
}
if (!root_squash_perms) {
doutc(cl, "root_squash is enabled and user(%d %d) isn't allowed to write",
caller_uid, caller_gid);
}
if (rw_perms_s) {
doutc(cl, "mds auth caps readable/writeable %d/%d while request r/w %d/%d",
rw_perms_s->readable, rw_perms_s->writeable,
!!(mask & MAY_READ), !!(mask & MAY_WRITE));
}
doutc(cl, "access denied\n");
return -EACCES;
}
/*
* called before mount is ro, and before dentries are torn down.
* (hmm, does this still race with new lookups?)
*/
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
doutc(mdsc->fsc->client, "begin\n");
mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
ceph_flush_dirty_caps(mdsc);
wait_requests(mdsc);
/*
* wait for reply handlers to drop their request refs and
* their inode/dcache refs
*/
ceph_msgr_flush();
ceph_cleanup_quotarealms_inodes(mdsc);
doutc(mdsc->fsc->client, "done\n");
}
/*
* flush the mdlog and wait for all write mds requests to flush.
*/
static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
u64 want_tid)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req = NULL, *nextreq;
struct ceph_mds_session *last_session = NULL;
struct rb_node *n;
mutex_lock(&mdsc->mutex);
doutc(cl, "want %lld\n", want_tid);
restart:
req = __get_oldest_req(mdsc);
while (req && req->r_tid <= want_tid) {
/* find next request */
n = rb_next(&req->r_node);
if (n)
nextreq = rb_entry(n, struct ceph_mds_request, r_node);
else
nextreq = NULL;
if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
(req->r_op & CEPH_MDS_OP_WRITE)) {
struct ceph_mds_session *s = req->r_session;
if (!s) {
req = nextreq;
continue;
}
/* write op */
ceph_mdsc_get_request(req);
if (nextreq)
ceph_mdsc_get_request(nextreq);
s = ceph_get_mds_session(s);
mutex_unlock(&mdsc->mutex);
/* send flush mdlog request to MDS */
if (last_session != s) {
send_flush_mdlog(s);
ceph_put_mds_session(last_session);
last_session = s;
} else {
ceph_put_mds_session(s);
}
doutc(cl, "wait on %llu (want %llu)\n",
req->r_tid, want_tid);
wait_for_completion(&req->r_safe_completion);
mutex_lock(&mdsc->mutex);
ceph_mdsc_put_request(req);
if (!nextreq)
break; /* next dne before, so we're done! */
if (RB_EMPTY_NODE(&nextreq->r_node)) {
/* next request was removed from tree */
ceph_mdsc_put_request(nextreq);
goto restart;
}
ceph_mdsc_put_request(nextreq); /* won't go away */
}
req = nextreq;
}
mutex_unlock(&mdsc->mutex);
ceph_put_mds_session(last_session);
doutc(cl, "done\n");
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
struct ceph_client *cl = mdsc->fsc->client;
u64 want_tid, want_flush;
if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
return;
doutc(cl, "sync\n");
mutex_lock(&mdsc->mutex);
want_tid = mdsc->last_tid;
mutex_unlock(&mdsc->mutex);
ceph_flush_dirty_caps(mdsc);
ceph_flush_cap_releases(mdsc);
spin_lock(&mdsc->cap_dirty_lock);
want_flush = mdsc->last_cap_flush_tid;
if (!list_empty(&mdsc->cap_flush_list)) {
struct ceph_cap_flush *cf =
list_last_entry(&mdsc->cap_flush_list,
struct ceph_cap_flush, g_list);
cf->wake = true;
}
spin_unlock(&mdsc->cap_dirty_lock);
doutc(cl, "sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
wait_caps_flush(mdsc, want_flush);
}
/*
* true if all sessions are closed, or we force unmount
*/
static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
{
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
return true;
return atomic_read(&mdsc->num_sessions) <= skipped;
}
/*
* called after sb is ro or when the metadata is corrupted.
*/
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
struct ceph_options *opts = mdsc->fsc->client->options;
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_session *session;
int i;
int skipped = 0;
doutc(cl, "begin\n");
/* close sessions */
mutex_lock(&mdsc->mutex);
for (i = 0; i < mdsc->max_sessions; i++) {
session = __ceph_lookup_mds_session(mdsc, i);
if (!session)
continue;
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
if (__close_session(mdsc, session) <= 0)
skipped++;
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
mutex_lock(&mdsc->mutex);
}
mutex_unlock(&mdsc->mutex);
doutc(cl, "waiting for sessions to close\n");
wait_event_timeout(mdsc->session_close_wq,
done_closing_sessions(mdsc, skipped),
ceph_timeout_jiffies(opts->mount_timeout));
/* tear down remaining sessions */
mutex_lock(&mdsc->mutex);
for (i = 0; i < mdsc->max_sessions; i++) {
if (mdsc->sessions[i]) {
session = ceph_get_mds_session(mdsc->sessions[i]);
__unregister_session(mdsc, session);
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
remove_session_caps(session);
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
mutex_lock(&mdsc->mutex);
}
}
WARN_ON(!list_empty(&mdsc->cap_delay_list));
mutex_unlock(&mdsc->mutex);
ceph_cleanup_snapid_map(mdsc);
ceph_cleanup_global_and_empty_realms(mdsc);
cancel_work_sync(&mdsc->cap_reclaim_work);
cancel_work_sync(&mdsc->cap_unlink_work);
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
doutc(cl, "done\n");
}
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
struct ceph_mds_session *session;
int mds;
doutc(mdsc->fsc->client, "force umount\n");
mutex_lock(&mdsc->mutex);
for (mds = 0; mds < mdsc->max_sessions; mds++) {
session = __ceph_lookup_mds_session(mdsc, mds);
if (!session)
continue;
if (session->s_state == CEPH_MDS_SESSION_REJECTED)
__unregister_session(mdsc, session);
__wake_requests(mdsc, &session->s_waiting);
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
__close_session(mdsc, session);
if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
}
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
mutex_lock(&mdsc->mutex);
kick_requests(mdsc, mds);
}
__wake_requests(mdsc, &mdsc->waiting_for_map);
mutex_unlock(&mdsc->mutex);
}
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
doutc(mdsc->fsc->client, "stop\n");
/*
* Make sure the delayed work has stopped before releasing
* the resources.
*
* cancel_delayed_work_sync() only guarantees that the work
* finishes executing, but the delayed work can re-arm itself
* again after that, so it has to be flushed here as well.
*/
flush_delayed_work(&mdsc->delayed_work);
if (mdsc->mdsmap)
ceph_mdsmap_destroy(mdsc->mdsmap);
kfree(mdsc->sessions);
ceph_caps_finalize(mdsc);
if (mdsc->s_cap_auths) {
int i;
for (i = 0; i < mdsc->s_cap_auths_num; i++) {
kfree(mdsc->s_cap_auths[i].match.gids);
kfree(mdsc->s_cap_auths[i].match.path);
kfree(mdsc->s_cap_auths[i].match.fs_name);
}
kfree(mdsc->s_cap_auths);
}
ceph_pool_perm_destroy(mdsc);
}
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc = fsc->mdsc;
doutc(fsc->client, "%p\n", mdsc);
if (!mdsc)
return;
/* flush out any connection work with references to us */
ceph_msgr_flush();
ceph_mdsc_stop(mdsc);
ceph_metric_destroy(&mdsc->metric);
fsc->mdsc = NULL;
kfree(mdsc);
doutc(fsc->client, "%p done\n", mdsc);
}
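/*
 * handle fs map update: look up the fscid of the filesystem named by
 * the mds_namespace mount option and subscribe to its mdsmap.
 */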
void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
struct ceph_fs_client *fsc = mdsc->fsc;
struct ceph_client *cl = fsc->client;
const char *mds_namespace = fsc->mount_options->mds_namespace;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
u32 epoch;
u32 num_fs;
u32 mount_fscid = (u32)-1;
int err = -EINVAL;
ceph_decode_need(&p, end, sizeof(u32), bad);
epoch = ceph_decode_32(&p);
doutc(cl, "epoch %u\n", epoch);
/* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
ceph_decode_32_safe(&p, end, num_fs, bad);
while (num_fs-- > 0) {
void *info_p, *info_end;
u32 info_len;
u32 fscid, namelen;
ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
p += 2; // info_v, info_cv
info_len = ceph_decode_32(&p);
ceph_decode_need(&p, end, info_len, bad);
info_p = p;
info_end = p + info_len;
p = info_end;
ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
fscid = ceph_decode_32(&info_p);
namelen = ceph_decode_32(&info_p);
ceph_decode_need(&info_p, info_end, namelen, bad);
if (mds_namespace &&
strlen(mds_namespace) == namelen &&
!strncmp(mds_namespace, (char *)info_p, namelen)) {
mount_fscid = fscid;
break;
}
}
ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
if (mount_fscid != (u32)-1) {
fsc->client->monc.fs_cluster_id = mount_fscid;
ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
0, true);
ceph_monc_renew_subs(&fsc->client->monc);
} else {
err = -ENOENT;
goto err_out;
}
return;
bad:
pr_err_client(cl, "error decoding fsmap %d. Shutting down mount.\n",
err);
ceph_umount_begin(mdsc->fsc->sb);
ceph_msg_dump(msg);
err_out:
mutex_lock(&mdsc->mutex);
mdsc->mdsmap_err = err;
__wake_requests(mdsc, &mdsc->waiting_for_map);
mutex_unlock(&mdsc->mutex);
}
/*
* handle mds map update.
*/
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
struct ceph_client *cl = mdsc->fsc->client;
u32 epoch;
u32 maplen;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
struct ceph_mdsmap *newmap, *oldmap;
struct ceph_fsid fsid;
int err = -EINVAL;
ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
ceph_decode_copy(&p, &fsid, sizeof(fsid));
if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
return;
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
doutc(cl, "epoch %u len %d\n", epoch, (int)maplen);
/* do we need it? */
mutex_lock(&mdsc->mutex);
if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
doutc(cl, "epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch);
mutex_unlock(&mdsc->mutex);
return;
}
newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client));
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
goto bad_unlock;
}
/* swap into place */
if (mdsc->mdsmap) {
oldmap = mdsc->mdsmap;
mdsc->mdsmap = newmap;
check_new_map(mdsc, newmap, oldmap);
ceph_mdsmap_destroy(oldmap);
} else {
mdsc->mdsmap = newmap; /* first mds map */
}
mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
MAX_LFS_FILESIZE);
__wake_requests(mdsc, &mdsc->waiting_for_map);
ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
mdsc->mdsmap->m_epoch);
mutex_unlock(&mdsc->mutex);
schedule_delayed(mdsc, 0);
return;
bad_unlock:
mutex_unlock(&mdsc->mutex);
bad:
pr_err_client(cl, "error decoding mdsmap %d. Shutting down mount.\n",
err);
ceph_umount_begin(mdsc->fsc->sb);
ceph_msg_dump(msg);
return;
}
static struct ceph_connection *mds_get_con(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
if (ceph_get_mds_session(s))
return con;
return NULL;
}
static void mds_put_con(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
ceph_put_mds_session(s);
}
/*
* if the client is unresponsive for long enough, the mds will kill
* the session entirely.
*/
static void mds_peer_reset(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n",
s->s_mds);
if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO &&
ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) >= CEPH_MDS_STATE_RECONNECT)
send_mds_reconnect(mdsc, s);
}
static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_client *cl = mdsc->fsc->client;
int type = le16_to_cpu(msg->hdr.type);
mutex_lock(&mdsc->mutex);
if (__verify_registered_session(mdsc, s) < 0) {
mutex_unlock(&mdsc->mutex);
goto out;
}
mutex_unlock(&mdsc->mutex);
switch (type) {
case CEPH_MSG_MDS_MAP:
ceph_mdsc_handle_mdsmap(mdsc, msg);
break;
case CEPH_MSG_FS_MAP_USER:
ceph_mdsc_handle_fsmap(mdsc, msg);
break;
case CEPH_MSG_CLIENT_SESSION:
handle_session(s, msg);
break;
case CEPH_MSG_CLIENT_REPLY:
handle_reply(s, msg);
break;
case CEPH_MSG_CLIENT_REQUEST_FORWARD:
handle_forward(mdsc, s, msg);
break;
case CEPH_MSG_CLIENT_CAPS:
ceph_handle_caps(s, msg);
break;
case CEPH_MSG_CLIENT_SNAP:
ceph_handle_snap(mdsc, s, msg);
break;
case CEPH_MSG_CLIENT_LEASE:
handle_lease(mdsc, s, msg);
break;
case CEPH_MSG_CLIENT_QUOTA:
ceph_handle_quota(mdsc, s, msg);
break;
default:
pr_err_client(cl, "received unknown message type %d %s\n",
type, ceph_msg_type_name(type));
}
out:
ceph_msg_put(msg);
}
/*
* authentication
*/
/*
* Note: returned pointer is the address of a structure that's
* managed separately. Caller must *not* attempt to free it.
*/
static struct ceph_auth_handshake *
mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
struct ceph_auth_handshake *auth = &s->s_auth;
int ret;
ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
force_new, proto, NULL, NULL);
if (ret)
return ERR_PTR(ret);
return auth;
}
static int mds_add_authorizer_challenge(struct ceph_connection *con,
void *challenge_buf, int challenge_buf_len)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
challenge_buf, challenge_buf_len);
}
static int mds_verify_authorizer_reply(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
struct ceph_auth_handshake *auth = &s->s_auth;
return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
NULL, NULL, NULL, NULL);
}
static int mds_invalidate_authorizer(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static int mds_get_auth_request(struct ceph_connection *con,
void *buf, int *buf_len,
void **authorizer, int *authorizer_len)
{
struct ceph_mds_session *s = con->private;
struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
struct ceph_auth_handshake *auth = &s->s_auth;
int ret;
ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
buf, buf_len);
if (ret)
return ret;
*authorizer = auth->authorizer_buf;
*authorizer_len = auth->authorizer_buf_len;
return 0;
}
static int mds_handle_auth_reply_more(struct ceph_connection *con,
void *reply, int reply_len,
void *buf, int *buf_len,
void **authorizer, int *authorizer_len)
{
struct ceph_mds_session *s = con->private;
struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
struct ceph_auth_handshake *auth = &s->s_auth;
int ret;
ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
buf, buf_len);
if (ret)
return ret;
*authorizer = auth->authorizer_buf;
*authorizer_len = auth->authorizer_buf_len;
return 0;
}
static int mds_handle_auth_done(struct ceph_connection *con,
u64 global_id, void *reply, int reply_len,
u8 *session_key, int *session_key_len,
u8 *con_secret, int *con_secret_len)
{
struct ceph_mds_session *s = con->private;
struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
struct ceph_auth_handshake *auth = &s->s_auth;
return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
session_key, session_key_len,
con_secret, con_secret_len);
}
static int mds_handle_auth_bad_method(struct ceph_connection *con,
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt)
{
struct ceph_mds_session *s = con->private;
struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc;
int ret;
if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
used_proto, result,
allowed_protos, proto_cnt,
allowed_modes, mode_cnt)) {
ret = ceph_monc_validate_auth(monc);
if (ret)
return ret;
}
return -EACCES;
}
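/*
 * Allocate the front buffer for an incoming message, reusing any
 * message the messenger has already attached to the connection.
 */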
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
struct ceph_msg_header *hdr, int *skip)
{
struct ceph_msg *msg;
int type = (int) le16_to_cpu(hdr->type);
int front_len = (int) le32_to_cpu(hdr->front_len);
if (con->in_msg)
return con->in_msg;
*skip = 0;
msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
if (!msg) {
pr_err("unable to allocate msg type %d len %d\n",
type, front_len);
return NULL;
}
return msg;
}
static int mds_sign_message(struct ceph_msg *msg)
{
struct ceph_mds_session *s = msg->con->private;
struct ceph_auth_handshake *auth = &s->s_auth;
return ceph_auth_sign_message(auth, msg);
}
static int mds_check_message_signature(struct ceph_msg *msg)
{
struct ceph_mds_session *s = msg->con->private;
struct ceph_auth_handshake *auth = &s->s_auth;
return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations mds_con_ops = {
.get = mds_get_con,
.put = mds_put_con,
.alloc_msg = mds_alloc_msg,
.dispatch = mds_dispatch,
.peer_reset = mds_peer_reset,
.get_authorizer = mds_get_authorizer,
.add_authorizer_challenge = mds_add_authorizer_challenge,
.verify_authorizer_reply = mds_verify_authorizer_reply,
.invalidate_authorizer = mds_invalidate_authorizer,
.sign_message = mds_sign_message,
.check_message_signature = mds_check_message_signature,
.get_auth_request = mds_get_auth_request,
.handle_auth_reply_more = mds_handle_auth_reply_more,
.handle_auth_done = mds_handle_auth_done,
.handle_auth_bad_method = mds_handle_auth_bad_method,
};
/* eof */
|
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
*/
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
#define _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
#include <dt-bindings/iio/qcom,spmi-vadc.h>
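/*
 * Each constant below encodes the SMB139x PMIC's SID in bits [15:8]
 * and the ADC7 channel number in bits [7:0].
 */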
#define SMB139x_1_ADC7_SMB_TEMP (SMB139x_1_SID << 8 | ADC7_SMB_TEMP)
#define SMB139x_1_ADC7_ICHG_SMB (SMB139x_1_SID << 8 | ADC7_ICHG_SMB)
#define SMB139x_1_ADC7_IIN_SMB (SMB139x_1_SID << 8 | ADC7_IIN_SMB)
#define SMB139x_2_ADC7_SMB_TEMP (SMB139x_2_SID << 8 | ADC7_SMB_TEMP)
#define SMB139x_2_ADC7_ICHG_SMB (SMB139x_2_SID << 8 | ADC7_ICHG_SMB)
#define SMB139x_2_ADC7_IIN_SMB (SMB139x_2_SID << 8 | ADC7_IIN_SMB)
#endif
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_HEADER_H
#define __PERF_HEADER_H
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <sys/types.h>
#include <stdio.h> // FILE
#include <stdbool.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include "env.h"
#include <perf/cpumap.h>
struct evlist;
union perf_event;
struct perf_header;
struct perf_session;
struct perf_tool;
enum {
HEADER_RESERVED = 0, /* always cleared */
HEADER_FIRST_FEATURE = 1,
HEADER_TRACING_DATA = 1,
HEADER_BUILD_ID,
HEADER_HOSTNAME,
HEADER_OSRELEASE,
HEADER_VERSION,
HEADER_ARCH,
HEADER_NRCPUS,
HEADER_CPUDESC,
HEADER_CPUID,
HEADER_TOTAL_MEM,
HEADER_CMDLINE,
HEADER_EVENT_DESC,
HEADER_CPU_TOPOLOGY,
HEADER_NUMA_TOPOLOGY,
HEADER_BRANCH_STACK,
HEADER_PMU_MAPPINGS,
HEADER_GROUP_DESC,
HEADER_AUXTRACE,
HEADER_STAT,
HEADER_CACHE,
HEADER_SAMPLE_TIME,
HEADER_MEM_TOPOLOGY,
HEADER_CLOCKID,
HEADER_DIR_FORMAT,
HEADER_BPF_PROG_INFO,
HEADER_BPF_BTF,
HEADER_COMPRESSED,
HEADER_CPU_PMU_CAPS,
HEADER_CLOCK_DATA,
HEADER_HYBRID_TOPOLOGY,
HEADER_PMU_CAPS,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
enum perf_header_version {
PERF_HEADER_VERSION_1,
PERF_HEADER_VERSION_2,
};
struct perf_file_section {
u64 offset;
u64 size;
};
/**
* struct perf_file_header: Header representation on disk.
*/
struct perf_file_header {
/** @magic: Holds "PERFILE2". */
u64 magic;
/** @size: Size of this header - sizeof(struct perf_file_header). */
u64 size;
/**
* @attr_size: Size of attrs entries - sizeof(struct perf_event_attr) +
* sizeof(struct perf_file_section).
*/
u64 attr_size;
/** @attrs: Offset and size of file section holding attributes. */
struct perf_file_section attrs;
/** @data: Offset and size of file section holding regular event data. */
struct perf_file_section data;
/** @event_types: Ignored. */
struct perf_file_section event_types;
/**
* @adds_features: Bitmap of features. The features are immediately after the data section.
*/
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
struct perf_pipe_file_header {
u64 magic;
u64 size;
};
int perf_file_header__read(struct perf_file_header *header,
struct perf_header *ph, int fd);
struct perf_header {
enum perf_header_version version;
bool needs_swap;
u64 data_offset;
u64 data_size;
u64 feat_offset;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
struct perf_env env;
};
struct feat_fd {
struct perf_header *ph;
int fd;
void *buf; /* Either buf != NULL or fd >= 0 */
ssize_t offset;
size_t size;
struct evsel *events;
};
struct perf_header_feature_ops {
int (*write)(struct feat_fd *ff, struct evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
int (*process)(struct feat_fd *ff, void *data);
const char *name;
bool full_only;
bool synthesize;
};
extern const char perf_version_string[];
int perf_session__read_header(struct perf_session *session);
int perf_session__write_header(struct perf_session *session,
struct evlist *evlist,
int fd, bool at_exit);
int perf_header__write_pipe(int fd);
/* feat_writer writes a feature section to output */
struct feat_writer {
int (*write)(struct feat_writer *fw, void *buf, size_t sz);
};
/* feat_copier copies a feature section using feat_writer to output */
struct feat_copier {
int (*copy)(struct feat_copier *fc, int feat, struct feat_writer *fw);
};
int perf_session__inject_header(struct perf_session *session,
struct evlist *evlist,
int fd,
struct feat_copier *fc,
bool write_attrs_after_data);
size_t perf_session__data_offset(const struct evlist *evlist);
void perf_header__set_feat(struct perf_header *header, int feat);
void perf_header__clear_feat(struct perf_header *header, int feat);
bool perf_header__has_feat(const struct perf_header *header, int feat);
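/*
 * Illustrative sketch only (the function below is hypothetical, not
 * part of the perf API): each HEADER_* identifier above indexes a
 * single bit in the adds_features bitmap, manipulated through the
 * three helpers declared above.
 */
static inline bool perf_header_example_mark_build_id(struct perf_header *header)
{
	perf_header__set_feat(header, HEADER_BUILD_ID);
	perf_header__clear_feat(header, HEADER_TRACING_DATA);
	return perf_header__has_feat(header, HEADER_BUILD_ID); /* true */
}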
int perf_header__set_cmdline(int argc, const char **argv);
int perf_header__process_sections(struct perf_header *header, int fd,
void *data,
int (*process)(struct perf_file_section *section,
struct perf_header *ph,
int feat, int fd, void *data));
int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
int perf_event__process_feature(struct perf_session *session,
union perf_event *event);
int perf_event__process_attr(const struct perf_tool *tool, union perf_event *event,
struct evlist **pevlist);
int perf_event__process_event_update(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist);
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp);
#ifdef HAVE_LIBTRACEEVENT
int perf_event__process_tracing_data(struct perf_session *session,
union perf_event *event);
#endif
int perf_event__process_build_id(struct perf_session *session,
union perf_event *event);
bool is_perf_magic(u64 magic);
#define NAME_ALIGN 64
struct feat_fd;
int do_write(struct feat_fd *fd, const void *buf, size_t size);
int write_padded(struct feat_fd *fd, const void *bf,
size_t count, size_t count_aligned);
#define MAX_CACHE_LVL 4
int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp);
/*
* arch specific callback
*/
int get_cpuid(char *buffer, size_t sz, struct perf_cpu cpu);
char *get_cpuid_str(struct perf_cpu cpu);
char *get_cpuid_allow_env_override(struct perf_cpu cpu);
int strcmp_cpuid_str(const char *s1, const char *s2);
#endif /* __PERF_HEADER_H */
|
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*
*/
#ifndef _smuio_13_0_2_OFFSET_HEADER
#define _smuio_13_0_2_OFFSET_HEADER
// addressBlock: smuio_smuio_SmuSmuioDec
// base address: 0x5a000
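// The register values below are dword offsets relative to the block's
// base address; the matching *_BASE_IDX selects which entry of the
// driver's per-IP base-address table the offset is added to.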
#define regSMUSVI0_TEL_PLANE0 0x0004
#define regSMUSVI0_TEL_PLANE0_BASE_IDX 0
#define regSMUSVI0_PLANE0_CURRENTVID 0x0014
#define regSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 0
#define regSMUIO_MCM_CONFIG 0x0024
#define regSMUIO_MCM_CONFIG_BASE_IDX 0
#define regCKSVII2C_IC_CON 0x0040
#define regCKSVII2C_IC_CON_BASE_IDX 0
#define regCKSVII2C_IC_TAR 0x0041
#define regCKSVII2C_IC_TAR_BASE_IDX 0
#define regCKSVII2C_IC_SAR 0x0042
#define regCKSVII2C_IC_SAR_BASE_IDX 0
#define regCKSVII2C_IC_HS_MADDR 0x0043
#define regCKSVII2C_IC_HS_MADDR_BASE_IDX 0
#define regCKSVII2C_IC_DATA_CMD 0x0044
#define regCKSVII2C_IC_DATA_CMD_BASE_IDX 0
#define regCKSVII2C_IC_SS_SCL_HCNT 0x0045
#define regCKSVII2C_IC_SS_SCL_HCNT_BASE_IDX 0
#define regCKSVII2C_IC_SS_SCL_LCNT 0x0046
#define regCKSVII2C_IC_SS_SCL_LCNT_BASE_IDX 0
#define regCKSVII2C_IC_FS_SCL_HCNT 0x0047
#define regCKSVII2C_IC_FS_SCL_HCNT_BASE_IDX 0
#define regCKSVII2C_IC_FS_SCL_LCNT 0x0048
#define regCKSVII2C_IC_FS_SCL_LCNT_BASE_IDX 0
#define regCKSVII2C_IC_HS_SCL_HCNT 0x0049
#define regCKSVII2C_IC_HS_SCL_HCNT_BASE_IDX 0
#define regCKSVII2C_IC_HS_SCL_LCNT 0x004a
#define regCKSVII2C_IC_HS_SCL_LCNT_BASE_IDX 0
#define regCKSVII2C_IC_INTR_STAT 0x004b
#define regCKSVII2C_IC_INTR_STAT_BASE_IDX 0
#define regCKSVII2C_IC_INTR_MASK 0x004c
#define regCKSVII2C_IC_INTR_MASK_BASE_IDX 0
#define regCKSVII2C_IC_RAW_INTR_STAT 0x004d
#define regCKSVII2C_IC_RAW_INTR_STAT_BASE_IDX 0
#define regCKSVII2C_IC_RX_TL 0x004e
#define regCKSVII2C_IC_RX_TL_BASE_IDX 0
#define regCKSVII2C_IC_TX_TL 0x004f
#define regCKSVII2C_IC_TX_TL_BASE_IDX 0
#define regCKSVII2C_IC_CLR_INTR 0x0050
#define regCKSVII2C_IC_CLR_INTR_BASE_IDX 0
#define regCKSVII2C_IC_CLR_RX_UNDER 0x0051
#define regCKSVII2C_IC_CLR_RX_UNDER_BASE_IDX 0
#define regCKSVII2C_IC_CLR_RX_OVER 0x0052
#define regCKSVII2C_IC_CLR_RX_OVER_BASE_IDX 0
#define regCKSVII2C_IC_CLR_TX_OVER 0x0053
#define regCKSVII2C_IC_CLR_TX_OVER_BASE_IDX 0
#define regCKSVII2C_IC_CLR_RD_REQ 0x0054
#define regCKSVII2C_IC_CLR_RD_REQ_BASE_IDX 0
#define regCKSVII2C_IC_CLR_TX_ABRT 0x0055
#define regCKSVII2C_IC_CLR_TX_ABRT_BASE_IDX 0
#define regCKSVII2C_IC_CLR_RX_DONE 0x0056
#define regCKSVII2C_IC_CLR_RX_DONE_BASE_IDX 0
#define regCKSVII2C_IC_CLR_ACTIVITY 0x0057
#define regCKSVII2C_IC_CLR_ACTIVITY_BASE_IDX 0
#define regCKSVII2C_IC_CLR_STOP_DET 0x0058
#define regCKSVII2C_IC_CLR_STOP_DET_BASE_IDX 0
#define regCKSVII2C_IC_CLR_START_DET 0x0059
#define regCKSVII2C_IC_CLR_START_DET_BASE_IDX 0
#define regCKSVII2C_IC_CLR_GEN_CALL 0x005a
#define regCKSVII2C_IC_CLR_GEN_CALL_BASE_IDX 0
#define regCKSVII2C_IC_ENABLE 0x005b
#define regCKSVII2C_IC_ENABLE_BASE_IDX 0
#define regCKSVII2C_IC_STATUS 0x005c
#define regCKSVII2C_IC_STATUS_BASE_IDX 0
#define regCKSVII2C_IC_TXFLR 0x005d
#define regCKSVII2C_IC_TXFLR_BASE_IDX 0
#define regCKSVII2C_IC_RXFLR 0x005e
#define regCKSVII2C_IC_RXFLR_BASE_IDX 0
#define regCKSVII2C_IC_SDA_HOLD 0x005f
#define regCKSVII2C_IC_SDA_HOLD_BASE_IDX 0
#define regCKSVII2C_IC_TX_ABRT_SOURCE 0x0060
#define regCKSVII2C_IC_TX_ABRT_SOURCE_BASE_IDX 0
#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY 0x0061
#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
#define regCKSVII2C_IC_DMA_CR 0x0062
#define regCKSVII2C_IC_DMA_CR_BASE_IDX 0
#define regCKSVII2C_IC_DMA_TDLR 0x0063
#define regCKSVII2C_IC_DMA_TDLR_BASE_IDX 0
#define regCKSVII2C_IC_DMA_RDLR 0x0064
#define regCKSVII2C_IC_DMA_RDLR_BASE_IDX 0
#define regCKSVII2C_IC_SDA_SETUP 0x0065
#define regCKSVII2C_IC_SDA_SETUP_BASE_IDX 0
#define regCKSVII2C_IC_ACK_GENERAL_CALL 0x0066
#define regCKSVII2C_IC_ACK_GENERAL_CALL_BASE_IDX 0
#define regCKSVII2C_IC_ENABLE_STATUS 0x0067
#define regCKSVII2C_IC_ENABLE_STATUS_BASE_IDX 0
#define regCKSVII2C_IC_FS_SPKLEN 0x0068
#define regCKSVII2C_IC_FS_SPKLEN_BASE_IDX 0
#define regCKSVII2C_IC_HS_SPKLEN 0x0069
#define regCKSVII2C_IC_HS_SPKLEN_BASE_IDX 0
#define regCKSVII2C_IC_CLR_RESTART_DET 0x006a
#define regCKSVII2C_IC_CLR_RESTART_DET_BASE_IDX 0
#define regCKSVII2C_IC_COMP_PARAM_1 0x006b
#define regCKSVII2C_IC_COMP_PARAM_1_BASE_IDX 0
#define regCKSVII2C_IC_COMP_VERSION 0x006c
#define regCKSVII2C_IC_COMP_VERSION_BASE_IDX 0
#define regCKSVII2C_IC_COMP_TYPE 0x006d
#define regCKSVII2C_IC_COMP_TYPE_BASE_IDX 0
#define regCKSVII2C1_IC_CON 0x0080
#define regCKSVII2C1_IC_CON_BASE_IDX 0
#define regCKSVII2C1_IC_TAR 0x0081
#define regCKSVII2C1_IC_TAR_BASE_IDX 0
#define regCKSVII2C1_IC_SAR 0x0082
#define regCKSVII2C1_IC_SAR_BASE_IDX 0
#define regCKSVII2C1_IC_HS_MADDR 0x0083
#define regCKSVII2C1_IC_HS_MADDR_BASE_IDX 0
#define regCKSVII2C1_IC_DATA_CMD 0x0084
#define regCKSVII2C1_IC_DATA_CMD_BASE_IDX 0
#define regCKSVII2C1_IC_SS_SCL_HCNT 0x0085
#define regCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0
#define regCKSVII2C1_IC_SS_SCL_LCNT 0x0086
#define regCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0
#define regCKSVII2C1_IC_FS_SCL_HCNT 0x0087
#define regCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0
#define regCKSVII2C1_IC_FS_SCL_LCNT 0x0088
#define regCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0
#define regCKSVII2C1_IC_HS_SCL_HCNT 0x0089
#define regCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0
#define regCKSVII2C1_IC_HS_SCL_LCNT 0x008a
#define regCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0
#define regCKSVII2C1_IC_INTR_STAT 0x008b
#define regCKSVII2C1_IC_INTR_STAT_BASE_IDX 0
#define regCKSVII2C1_IC_INTR_MASK 0x008c
#define regCKSVII2C1_IC_INTR_MASK_BASE_IDX 0
#define regCKSVII2C1_IC_RAW_INTR_STAT 0x008d
#define regCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0
#define regCKSVII2C1_IC_RX_TL 0x008e
#define regCKSVII2C1_IC_RX_TL_BASE_IDX 0
#define regCKSVII2C1_IC_TX_TL 0x008f
#define regCKSVII2C1_IC_TX_TL_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_INTR 0x0090
#define regCKSVII2C1_IC_CLR_INTR_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_RX_UNDER 0x0091
#define regCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_RX_OVER 0x0092
#define regCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_TX_OVER 0x0093
#define regCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_RD_REQ 0x0094
#define regCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_TX_ABRT 0x0095
#define regCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_RX_DONE 0x0096
#define regCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_ACTIVITY 0x0097
#define regCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_STOP_DET 0x0098
#define regCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_START_DET 0x0099
#define regCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_GEN_CALL 0x009a
#define regCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0
#define regCKSVII2C1_IC_ENABLE 0x009b
#define regCKSVII2C1_IC_ENABLE_BASE_IDX 0
#define regCKSVII2C1_IC_STATUS 0x009c
#define regCKSVII2C1_IC_STATUS_BASE_IDX 0
#define regCKSVII2C1_IC_TXFLR 0x009d
#define regCKSVII2C1_IC_TXFLR_BASE_IDX 0
#define regCKSVII2C1_IC_RXFLR 0x009e
#define regCKSVII2C1_IC_RXFLR_BASE_IDX 0
#define regCKSVII2C1_IC_SDA_HOLD 0x009f
#define regCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0
#define regCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0
#define regCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0
#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1
#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
#define regCKSVII2C1_IC_DMA_CR 0x00a2
#define regCKSVII2C1_IC_DMA_CR_BASE_IDX 0
#define regCKSVII2C1_IC_DMA_TDLR 0x00a3
#define regCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0
#define regCKSVII2C1_IC_DMA_RDLR 0x00a4
#define regCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0
#define regCKSVII2C1_IC_SDA_SETUP 0x00a5
#define regCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0
#define regCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6
#define regCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0
#define regCKSVII2C1_IC_ENABLE_STATUS 0x00a7
#define regCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0
#define regCKSVII2C1_IC_FS_SPKLEN 0x00a8
#define regCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0
#define regCKSVII2C1_IC_HS_SPKLEN 0x00a9
#define regCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0
#define regCKSVII2C1_IC_CLR_RESTART_DET 0x00aa
#define regCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0
#define regCKSVII2C1_IC_COMP_PARAM_1 0x00ab
#define regCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0
#define regCKSVII2C1_IC_COMP_VERSION 0x00ac
#define regCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0
#define regCKSVII2C1_IC_COMP_TYPE 0x00ad
#define regCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0
#define regSMUIO_MP_RESET_INTR 0x00c1
#define regSMUIO_MP_RESET_INTR_BASE_IDX 0
#define regSMUIO_SOC_HALT 0x00c2
#define regSMUIO_SOC_HALT_BASE_IDX 0
#define regSMUIO_PWRMGT 0x00cd
#define regSMUIO_PWRMGT_BASE_IDX 0
#define regSMUIO_GFX_MISC_CNTL 0x00d1
#define regSMUIO_GFX_MISC_CNTL_BASE_IDX 0
#define regROM_CNTL 0x00e1
#define regROM_CNTL_BASE_IDX 0
#define regPAGE_MIRROR_CNTL 0x00e2
#define regPAGE_MIRROR_CNTL_BASE_IDX 0
#define regROM_STATUS 0x00e3
#define regROM_STATUS_BASE_IDX 0
#define regCGTT_ROM_CLK_CTRL0 0x00e4
#define regCGTT_ROM_CLK_CTRL0_BASE_IDX 0
#define regROM_INDEX 0x00e5
#define regROM_INDEX_BASE_IDX 0
#define regROM_DATA 0x00e6
#define regROM_DATA_BASE_IDX 0
#define regROM_START 0x00e7
#define regROM_START_BASE_IDX 0
#define regROM_SW_CNTL 0x00e9
#define regROM_SW_CNTL_BASE_IDX 0
#define regROM_SW_STATUS 0x00ea
#define regROM_SW_STATUS_BASE_IDX 0
#define regROM_SW_COMMAND 0x00eb
#define regROM_SW_COMMAND_BASE_IDX 0
#define regROM_SW_DATA_1 0x00ed
#define regROM_SW_DATA_1_BASE_IDX 0
#define regROM_SW_DATA_2 0x00ee
#define regROM_SW_DATA_2_BASE_IDX 0
#define regROM_SW_DATA_3 0x00ef
#define regROM_SW_DATA_3_BASE_IDX 0
#define regROM_SW_DATA_4 0x00f0
#define regROM_SW_DATA_4_BASE_IDX 0
#define regROM_SW_DATA_5 0x00f1
#define regROM_SW_DATA_5_BASE_IDX 0
#define regROM_SW_DATA_6 0x00f2
#define regROM_SW_DATA_6_BASE_IDX 0
#define regROM_SW_DATA_7 0x00f3
#define regROM_SW_DATA_7_BASE_IDX 0
#define regROM_SW_DATA_8 0x00f4
#define regROM_SW_DATA_8_BASE_IDX 0
#define regROM_SW_DATA_9 0x00f5
#define regROM_SW_DATA_9_BASE_IDX 0
#define regROM_SW_DATA_10 0x00f6
#define regROM_SW_DATA_10_BASE_IDX 0
#define regROM_SW_DATA_11 0x00f7
#define regROM_SW_DATA_11_BASE_IDX 0
#define regROM_SW_DATA_12 0x00f8
#define regROM_SW_DATA_12_BASE_IDX 0
#define regROM_SW_DATA_13 0x00f9
#define regROM_SW_DATA_13_BASE_IDX 0
#define regROM_SW_DATA_14 0x00fa
#define regROM_SW_DATA_14_BASE_IDX 0
#define regROM_SW_DATA_15 0x00fb
#define regROM_SW_DATA_15_BASE_IDX 0
#define regROM_SW_DATA_16 0x00fc
#define regROM_SW_DATA_16_BASE_IDX 0
#define regROM_SW_DATA_17 0x00fd
#define regROM_SW_DATA_17_BASE_IDX 0
#define regROM_SW_DATA_18 0x00fe
#define regROM_SW_DATA_18_BASE_IDX 0
#define regROM_SW_DATA_19 0x00ff
#define regROM_SW_DATA_19_BASE_IDX 0
#define regROM_SW_DATA_20 0x0100
#define regROM_SW_DATA_20_BASE_IDX 0
#define regROM_SW_DATA_21 0x0101
#define regROM_SW_DATA_21_BASE_IDX 0
#define regROM_SW_DATA_22 0x0102
#define regROM_SW_DATA_22_BASE_IDX 0
#define regROM_SW_DATA_23 0x0103
#define regROM_SW_DATA_23_BASE_IDX 0
#define regROM_SW_DATA_24 0x0104
#define regROM_SW_DATA_24_BASE_IDX 0
#define regROM_SW_DATA_25 0x0105
#define regROM_SW_DATA_25_BASE_IDX 0
#define regROM_SW_DATA_26 0x0106
#define regROM_SW_DATA_26_BASE_IDX 0
#define regROM_SW_DATA_27 0x0107
#define regROM_SW_DATA_27_BASE_IDX 0
#define regROM_SW_DATA_28 0x0108
#define regROM_SW_DATA_28_BASE_IDX 0
#define regROM_SW_DATA_29 0x0109
#define regROM_SW_DATA_29_BASE_IDX 0
#define regROM_SW_DATA_30 0x010a
#define regROM_SW_DATA_30_BASE_IDX 0
#define regROM_SW_DATA_31 0x010b
#define regROM_SW_DATA_31_BASE_IDX 0
#define regROM_SW_DATA_32 0x010c
#define regROM_SW_DATA_32_BASE_IDX 0
#define regROM_SW_DATA_33 0x010d
#define regROM_SW_DATA_33_BASE_IDX 0
#define regROM_SW_DATA_34 0x010e
#define regROM_SW_DATA_34_BASE_IDX 0
#define regROM_SW_DATA_35 0x010f
#define regROM_SW_DATA_35_BASE_IDX 0
#define regROM_SW_DATA_36 0x0110
#define regROM_SW_DATA_36_BASE_IDX 0
#define regROM_SW_DATA_37 0x0111
#define regROM_SW_DATA_37_BASE_IDX 0
#define regROM_SW_DATA_38 0x0112
#define regROM_SW_DATA_38_BASE_IDX 0
#define regROM_SW_DATA_39 0x0113
#define regROM_SW_DATA_39_BASE_IDX 0
#define regROM_SW_DATA_40 0x0114
#define regROM_SW_DATA_40_BASE_IDX 0
#define regROM_SW_DATA_41 0x0115
#define regROM_SW_DATA_41_BASE_IDX 0
#define regROM_SW_DATA_42 0x0116
#define regROM_SW_DATA_42_BASE_IDX 0
#define regROM_SW_DATA_43 0x0117
#define regROM_SW_DATA_43_BASE_IDX 0
#define regROM_SW_DATA_44 0x0118
#define regROM_SW_DATA_44_BASE_IDX 0
#define regROM_SW_DATA_45 0x0119
#define regROM_SW_DATA_45_BASE_IDX 0
#define regROM_SW_DATA_46 0x011a
#define regROM_SW_DATA_46_BASE_IDX 0
#define regROM_SW_DATA_47 0x011b
#define regROM_SW_DATA_47_BASE_IDX 0
#define regROM_SW_DATA_48 0x011c
#define regROM_SW_DATA_48_BASE_IDX 0
#define regROM_SW_DATA_49 0x011d
#define regROM_SW_DATA_49_BASE_IDX 0
#define regROM_SW_DATA_50 0x011e
#define regROM_SW_DATA_50_BASE_IDX 0
#define regROM_SW_DATA_51 0x011f
#define regROM_SW_DATA_51_BASE_IDX 0
#define regROM_SW_DATA_52 0x0120
#define regROM_SW_DATA_52_BASE_IDX 0
#define regROM_SW_DATA_53 0x0121
#define regROM_SW_DATA_53_BASE_IDX 0
#define regROM_SW_DATA_54 0x0122
#define regROM_SW_DATA_54_BASE_IDX 0
#define regROM_SW_DATA_55 0x0123
#define regROM_SW_DATA_55_BASE_IDX 0
#define regROM_SW_DATA_56 0x0124
#define regROM_SW_DATA_56_BASE_IDX 0
#define regROM_SW_DATA_57 0x0125
#define regROM_SW_DATA_57_BASE_IDX 0
#define regROM_SW_DATA_58 0x0126
#define regROM_SW_DATA_58_BASE_IDX 0
#define regROM_SW_DATA_59 0x0127
#define regROM_SW_DATA_59_BASE_IDX 0
#define regROM_SW_DATA_60 0x0128
#define regROM_SW_DATA_60_BASE_IDX 0
#define regROM_SW_DATA_61 0x0129
#define regROM_SW_DATA_61_BASE_IDX 0
#define regROM_SW_DATA_62 0x012a
#define regROM_SW_DATA_62_BASE_IDX 0
#define regROM_SW_DATA_63 0x012b
#define regROM_SW_DATA_63_BASE_IDX 0
#define regROM_SW_DATA_64 0x012c
#define regROM_SW_DATA_64_BASE_IDX 0
#define regSMU_GPIOPAD_SW_INT_STAT 0x0140
#define regSMU_GPIOPAD_SW_INT_STAT_BASE_IDX 0
#define regSMU_GPIOPAD_MASK 0x0141
#define regSMU_GPIOPAD_MASK_BASE_IDX 0
#define regSMU_GPIOPAD_A 0x0142
#define regSMU_GPIOPAD_A_BASE_IDX 0
#define regSMU_GPIOPAD_TXIMPSEL 0x0143
#define regSMU_GPIOPAD_TXIMPSEL_BASE_IDX 0
#define regSMU_GPIOPAD_EN 0x0144
#define regSMU_GPIOPAD_EN_BASE_IDX 0
#define regSMU_GPIOPAD_Y 0x0145
#define regSMU_GPIOPAD_Y_BASE_IDX 0
#define regSMU_GPIOPAD_RXEN 0x0146
#define regSMU_GPIOPAD_RXEN_BASE_IDX 0
#define regSMU_GPIOPAD_RCVR_SEL0 0x0147
#define regSMU_GPIOPAD_RCVR_SEL0_BASE_IDX 0
#define regSMU_GPIOPAD_RCVR_SEL1 0x0148
#define regSMU_GPIOPAD_RCVR_SEL1_BASE_IDX 0
#define regSMU_GPIOPAD_PU_EN 0x0149
#define regSMU_GPIOPAD_PU_EN_BASE_IDX 0
#define regSMU_GPIOPAD_PD_EN 0x014a
#define regSMU_GPIOPAD_PD_EN_BASE_IDX 0
#define regSMU_GPIOPAD_PINSTRAPS 0x014b
#define regSMU_GPIOPAD_PINSTRAPS_BASE_IDX 0
#define regDFT_PINSTRAPS 0x014c
#define regDFT_PINSTRAPS_BASE_IDX 0
#define regSMU_GPIOPAD_INT_STAT_EN 0x014d
#define regSMU_GPIOPAD_INT_STAT_EN_BASE_IDX 0
#define regSMU_GPIOPAD_INT_STAT 0x014e
#define regSMU_GPIOPAD_INT_STAT_BASE_IDX 0
#define regSMU_GPIOPAD_INT_STAT_AK 0x014f
#define regSMU_GPIOPAD_INT_STAT_AK_BASE_IDX 0
#define regSMU_GPIOPAD_INT_EN 0x0150
#define regSMU_GPIOPAD_INT_EN_BASE_IDX 0
#define regSMU_GPIOPAD_INT_TYPE 0x0151
#define regSMU_GPIOPAD_INT_TYPE_BASE_IDX 0
#define regSMU_GPIOPAD_INT_POLARITY 0x0152
#define regSMU_GPIOPAD_INT_POLARITY_BASE_IDX 0
#define regROM_CC_BIF_PINSTRAP 0x0153
#define regROM_CC_BIF_PINSTRAP_BASE_IDX 0
#define regIO_SMUIO_PINSTRAP 0x0154
#define regIO_SMUIO_PINSTRAP_BASE_IDX 0
#define regSMUIO_PCC_CONTROL 0x0155
#define regSMUIO_PCC_CONTROL_BASE_IDX 0
#define regSMUIO_PCC_GPIO_SELECT 0x0156
#define regSMUIO_PCC_GPIO_SELECT_BASE_IDX 0
#define regSMUIO_GPIO_INT0_SELECT 0x0157
#define regSMUIO_GPIO_INT0_SELECT_BASE_IDX 0
#define regSMUIO_GPIO_INT1_SELECT 0x0158
#define regSMUIO_GPIO_INT1_SELECT_BASE_IDX 0
#define regSMUIO_GPIO_INT2_SELECT 0x0159
#define regSMUIO_GPIO_INT2_SELECT_BASE_IDX 0
#define regSMUIO_GPIO_INT3_SELECT 0x015a
#define regSMUIO_GPIO_INT3_SELECT_BASE_IDX 0
#define regSMU_GPIOPAD_MP_INT0_STAT 0x015b
#define regSMU_GPIOPAD_MP_INT0_STAT_BASE_IDX 0
#define regSMU_GPIOPAD_MP_INT1_STAT 0x015c
#define regSMU_GPIOPAD_MP_INT1_STAT_BASE_IDX 0
#define regSMU_GPIOPAD_MP_INT2_STAT 0x015d
#define regSMU_GPIOPAD_MP_INT2_STAT_BASE_IDX 0
#define regSMU_GPIOPAD_MP_INT3_STAT 0x015e
#define regSMU_GPIOPAD_MP_INT3_STAT_BASE_IDX 0
#define regSMIO_INDEX 0x015f
#define regSMIO_INDEX_BASE_IDX 0
#define regS0_VID_SMIO_CNTL 0x0160
#define regS0_VID_SMIO_CNTL_BASE_IDX 0
#define regS1_VID_SMIO_CNTL 0x0161
#define regS1_VID_SMIO_CNTL_BASE_IDX 0
#define regOPEN_DRAIN_SELECT 0x0162
#define regOPEN_DRAIN_SELECT_BASE_IDX 0
#define regSMIO_ENABLE 0x0163
#define regSMIO_ENABLE_BASE_IDX 0
#define regSMU_GPIOPAD_S0 0x0164
#define regSMU_GPIOPAD_S0_BASE_IDX 0
#define regSMU_GPIOPAD_S1 0x0165
#define regSMU_GPIOPAD_S1_BASE_IDX 0
#define regSMU_GPIOPAD_SCL_EN 0x0166
#define regSMU_GPIOPAD_SCL_EN_BASE_IDX 0
#define regSMU_GPIOPAD_SDA_EN 0x0167
#define regSMU_GPIOPAD_SDA_EN_BASE_IDX 0
#define regSMU_GPIOPAD_SCHMEN 0x0168
#define regSMU_GPIOPAD_SCHMEN_BASE_IDX 0
// addressBlock: smuio_smuio_pwr_SmuSmuioDec
// base address: 0x5a800
#define regIP_DISCOVERY_VERSION 0x0000
#define regIP_DISCOVERY_VERSION_BASE_IDX 1
#define regSOC_GAP_PWROK 0x00fc
#define regSOC_GAP_PWROK_BASE_IDX 1
#define regGFX_GAP_PWROK 0x00fd
#define regGFX_GAP_PWROK_BASE_IDX 1
#define regPWROK_REFCLK_GAP_CYCLES 0x00fe
#define regPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1
#define regGOLDEN_TSC_INCREMENT_UPPER 0x0104
#define regGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1
#define regGOLDEN_TSC_INCREMENT_LOWER 0x0105
#define regGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1
#define regGOLDEN_TSC_COUNT_UPPER 0x0106
#define regGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1
#define regGOLDEN_TSC_COUNT_LOWER 0x0107
#define regGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1
#define regSOC_GOLDEN_TSC_SHADOW_UPPER 0x0108
#define regSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1
#define regSOC_GOLDEN_TSC_SHADOW_LOWER 0x0109
#define regSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1
#define regGFX_GOLDEN_TSC_SHADOW_UPPER 0x010a
#define regGFX_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1
#define regGFX_GOLDEN_TSC_SHADOW_LOWER 0x010b
#define regGFX_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1
#define regSCRATCH_REGISTER0 0x0114
#define regSCRATCH_REGISTER0_BASE_IDX 1
#define regSCRATCH_REGISTER1 0x0115
#define regSCRATCH_REGISTER1_BASE_IDX 1
#define regSCRATCH_REGISTER2 0x0116
#define regSCRATCH_REGISTER2_BASE_IDX 1
#define regSCRATCH_REGISTER3 0x0117
#define regSCRATCH_REGISTER3_BASE_IDX 1
#define regSCRATCH_REGISTER4 0x0118
#define regSCRATCH_REGISTER4_BASE_IDX 1
#define regSCRATCH_REGISTER5 0x0119
#define regSCRATCH_REGISTER5_BASE_IDX 1
#define regSCRATCH_REGISTER6 0x011a
#define regSCRATCH_REGISTER6_BASE_IDX 1
#define regSCRATCH_REGISTER7 0x011b
#define regSCRATCH_REGISTER7_BASE_IDX 1
#define regPWR_DISP_TIMER_CONTROL 0x0134
#define regPWR_DISP_TIMER_CONTROL_BASE_IDX 1
#define regPWR_DISP_TIMER_DEBUG 0x0135
#define regPWR_DISP_TIMER_DEBUG_BASE_IDX 1
#define regPWR_DISP_TIMER2_CONTROL 0x0136
#define regPWR_DISP_TIMER2_CONTROL_BASE_IDX 1
#define regPWR_DISP_TIMER2_DEBUG 0x0137
#define regPWR_DISP_TIMER2_DEBUG_BASE_IDX 1
#define regPWR_DISP_TIMER_GLOBAL_CONTROL 0x0138
#define regPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1
#define regPWR_IH_CONTROL 0x0139
#define regPWR_IH_CONTROL_BASE_IDX 1
#endif
|
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_
struct ath10k;
struct sk_buff;
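/*
 * Per-ABI WMI operation table: the pull_*() ops parse received events
 * into the common *_ev_arg structures, while the gen_*() ops build
 * command skbs for ath10k_wmi_cmd_send().  An op left NULL means the
 * firmware ABI does not support that command or event.
 */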
struct wmi_ops {
void (*rx)(struct ath10k *ar, struct sk_buff *skb);
void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_scan_ev_arg *arg);
int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg);
int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
int (*pull_mgmt_tx_bundle_compl)(
struct ath10k *ar, struct sk_buff *skb,
struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg);
int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_vdev_start_ev_arg *arg);
int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_peer_kick_ev_arg *arg);
int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_swba_ev_arg *arg);
int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_phyerr_hdr_arg *arg);
int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
int left_len, struct wmi_phyerr_ev_arg *arg);
int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_svc_rdy_ev_arg *arg);
int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_rdy_ev_arg *arg);
int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats);
int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_echo_ev_arg *arg);
int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_dfs_status_ev_arg *arg);
int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_svc_avail_ev_arg *arg);
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
const u8 macaddr[ETH_ALEN]);
struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg);
struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
u32 value);
struct sk_buff *(*gen_init)(struct ath10k *ar);
struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
const struct wmi_start_scan_arg *arg);
struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
const struct wmi_stop_scan_arg *arg);
struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
enum wmi_vdev_type type,
enum wmi_vdev_subtype subtype,
const u8 macaddr[ETH_ALEN]);
struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg,
bool restart);
struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
const u8 *bssid);
struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
u32 param_id, u32 param_value);
struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
const struct wmi_vdev_install_key_arg *arg);
struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
const struct wmi_vdev_spectral_conf_arg *arg);
struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
u32 trigger, u32 enable);
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
enum wmi_peer_type peer_type);
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]);
struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
u32 tid_bitmap);
struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
const u8 *peer_addr,
enum wmi_peer_param param_id,
u32 param_value);
struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
const struct wmi_peer_assoc_complete_arg *arg);
struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
enum wmi_sta_ps_mode psmode);
struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
enum wmi_sta_powersave_param param_id,
u32 value);
struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
const u8 *mac,
enum wmi_ap_ps_peer_param param_id,
u32 value);
struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
u32 prob_req_oui);
struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
const void *bcn, size_t bcn_len,
u32 bcn_paddr, bool dtim_zero,
bool deliver_cab);
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
u32 vdev_id,
enum wmi_peer_stats_info_request_type type,
u8 *addr,
u32 reset);
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms);
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
struct sk_buff *skb,
dma_addr_t paddr);
int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
u32 period, u32 duration,
u32 next_offset,
u32 enabled);
struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
const u8 *mac);
struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid, u32 buf_size);
struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid,
u32 status);
struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid, u32 initiator,
u32 reason);
struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
u32 tim_ie_offset, struct sk_buff *bcn,
u32 prb_caps, u32 prb_erp,
void *prb_ies, size_t prb_ies_len);
struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
struct sk_buff *bcn);
struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
const u8 *p2p_ie);
struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
const struct wmi_sta_uapsd_auto_trig_arg *args,
u32 num_ac);
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg);
struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable);
struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
u32 pattern_id,
const u8 *pattern,
const u8 *mask,
int pattern_len,
int pattern_offset);
struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
u32 pattern_id);
struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
u32 vdev_id,
enum wmi_tdls_state state);
struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
const struct wmi_tdls_peer_update_cmd_arg *arg,
const struct wmi_tdls_peer_capab_arg *cap,
const struct wmi_channel_arg *chan);
struct sk_buff *(*gen_radar_found)
(struct ath10k *ar,
const struct ath10k_radar_found_info *arg);
struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
u32 param);
void (*fw_stats_fill)(struct ath10k *ar,
struct ath10k_fw_stats *fw_stats,
char *buf);
struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
u8 enable,
u32 detect_level,
u32 detect_margin);
struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
enum wmi_host_platform_type type,
u32 fw_feature_bitmap);
int (*get_vdev_subtype)(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
u32 vdev_id,
struct wmi_pno_scan_req *pno_scan);
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
u32 param);
struct sk_buff *(*gen_bb_timing)
(struct ath10k *ar,
const struct wmi_bb_timing_cfg_arg *arg);
struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
const struct wmi_per_peer_per_tid_cfg_arg *arg);
struct sk_buff *(*gen_gpio_config)(struct ath10k *ar, u32 gpio_num,
u32 input, u32 pull_type, u32 intr_mode);
struct sk_buff *(*gen_gpio_output)(struct ath10k *ar, u32 gpio_num, u32 set);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
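/*
 * The inline wrappers below share one pattern: return -EOPNOTSUPP if
 * the current ABI does not implement the op, otherwise generate the
 * command skb and send it with the ABI-specific command id.
 */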
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
if (WARN_ON_ONCE(!ar->wmi.ops->rx))
return -EOPNOTSUPP;
ar->wmi.ops->rx(ar, skb);
return 0;
}
static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
size_t len)
{
if (!ar->wmi.ops->map_svc)
return -EOPNOTSUPP;
ar->wmi.ops->map_svc(in, out, len);
return 0;
}
static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
size_t len)
{
if (!ar->wmi.ops->map_svc_ext)
return -EOPNOTSUPP;
ar->wmi.ops->map_svc_ext(in, out, len);
return 0;
}
static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
struct wmi_scan_ev_arg *arg)
{
if (!ar->wmi.ops->pull_scan)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_scan(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
if (!ar->wmi.ops->pull_mgmt_tx_compl)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_ev_arg *arg)
{
if (!ar->wmi.ops->pull_mgmt_rx)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
struct wmi_ch_info_ev_arg *arg)
{
if (!ar->wmi.ops->pull_ch_info)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
struct wmi_vdev_start_ev_arg *arg)
{
if (!ar->wmi.ops->pull_vdev_start)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
struct wmi_peer_kick_ev_arg *arg)
{
if (!ar->wmi.ops->pull_peer_kick)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
struct wmi_swba_ev_arg *arg)
{
if (!ar->wmi.ops->pull_swba)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_swba(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
struct wmi_phyerr_hdr_arg *arg)
{
if (!ar->wmi.ops->pull_phyerr_hdr)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
int left_len, struct wmi_phyerr_ev_arg *arg)
{
if (!ar->wmi.ops->pull_phyerr)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}
static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
struct wmi_svc_rdy_ev_arg *arg)
{
if (!ar->wmi.ops->pull_svc_rdy)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
struct wmi_rdy_ev_arg *arg)
{
if (!ar->wmi.ops->pull_rdy)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_rdy(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
struct wmi_svc_avail_ev_arg *arg)
{
if (!ar->wmi.ops->pull_svc_avail)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats)
{
if (!ar->wmi.ops->pull_fw_stats)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}
static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_roam_ev_arg *arg)
{
if (!ar->wmi.ops->pull_roam_ev)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg)
{
if (!ar->wmi.ops->pull_wow_event)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_echo_ev_arg *arg)
{
if (!ar->wmi.ops->pull_echo_ev)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
struct wmi_dfs_status_ev_arg *arg)
{
if (!ar->wmi.ops->pull_dfs_status_ev)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
if (!ar->wmi.ops->get_txbf_conf_scheme)
return WMI_TXBF_CONF_UNSUPPORTED;
return ar->wmi.ops->get_txbf_conf_scheme(ar);
}
static inline int
ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
{
if (!ar->wmi.ops->cleanup_mgmt_tx_send)
return -EOPNOTSUPP;
return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
}
static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
dma_addr_t paddr)
{
struct sk_buff *skb;
int ret;
if (!ar->wmi.ops->gen_mgmt_tx_send)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
if (IS_ERR(skb))
return PTR_ERR(skb);
ret = ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->mgmt_tx_send_cmdid);
if (ret)
return ret;
return 0;
}
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct sk_buff *skb;
int ret;
if (!ar->wmi.ops->gen_mgmt_tx)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
if (IS_ERR(skb))
return PTR_ERR(skb);
ret = ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->mgmt_tx_cmdid);
if (ret)
return ret;
/* FIXME There's no ACK event for Management Tx. This probably
* shouldn't be called here either.
*/
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(ar->hw, msdu);
return 0;
}
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
u16 ctl2g, u16 ctl5g,
enum wmi_dfs_region dfs_reg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_set_rd)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
dfs_reg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
static inline int
ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
}
static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_suspend)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_resume)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_resume(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}
static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_set_param)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_init)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_init(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}
static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
const struct wmi_start_scan_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_start_scan)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_start_scan(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}
static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_stop_scan)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_stop_scan(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
enum wmi_vdev_type type,
enum wmi_vdev_subtype subtype,
const u8 macaddr[ETH_ALEN])
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_create)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}
static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_delete)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}
static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_start)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->vdev_start_request_cmdid);
}
static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_start)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->vdev_restart_request_cmdid);
}
static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_stop)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_up)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}
static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_down)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
u32 param_value)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_set_param)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
param_value);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}
static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
const struct wmi_vdev_install_key_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_vdev_install_key)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->vdev_install_key_cmdid);
}
static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
const struct wmi_vdev_spectral_conf_arg *arg)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_vdev_spectral_conf)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
u32 enable)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_vdev_spectral_enable)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
enable);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
const struct wmi_sta_uapsd_auto_trig_arg *args,
u32 num_ac)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_vdev_sta_uapsd)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
num_ac);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg)
{
struct sk_buff *skb;
	u32 cmd_id;
	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;
	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
enum wmi_peer_type peer_type)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_create)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}
static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN])
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_delete)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}
static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_flush)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}
static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
enum wmi_peer_param param_id, u32 param_value)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_set_param)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
param_value);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
enum wmi_sta_ps_mode psmode)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_set_psmode)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->sta_powersave_mode_cmdid);
}
static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
enum wmi_sta_powersave_param param_id, u32 value)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_set_sta_ps)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->sta_powersave_param_cmdid);
}
static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_set_ap_ps)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->ap_ps_peer_param_cmdid);
}
static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_scan_chan_list)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}
static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
struct sk_buff *skb;
u32 prob_req_oui;
	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;
	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];
skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->scan_prob_req_oui_cmdid);
}
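/*
 * Note the OUI packing in ath10k_wmi_scan_prob_req_oui() above: for a
 * MAC address such as 00:03:7f:12:34:56, the three leading bytes give
 * prob_req_oui == 0x00037f.
 */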
static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
const struct wmi_peer_assoc_complete_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_assoc)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
const void *bcn, size_t bcn_len,
u32 bcn_paddr, bool dtim_zero,
bool deliver_cab)
{
struct sk_buff *skb;
int ret;
if (!ar->wmi.ops->gen_beacon_dma)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
dtim_zero, deliver_cab);
if (IS_ERR(skb))
return PTR_ERR(skb);
ret = ath10k_wmi_cmd_send_nowait(ar, skb,
ar->wmi.cmd->pdev_send_bcn_cmdid);
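	/* ath10k_wmi_cmd_send_nowait() does not consume the skb on failure */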
if (ret) {
dev_kfree_skb(skb);
return ret;
}
return 0;
}
static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_wmm_params_all_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_set_wmm)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}
static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_request_stats)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
static inline int
ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
u32 vdev_id,
enum wmi_peer_stats_info_request_type type,
u8 *addr,
u32 reset)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_request_peer_stats_info)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
vdev_id,
type,
addr,
reset);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
}
static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_force_fw_hang)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
static inline int ath10k_wmi_gpio_config(struct ath10k *ar, u32 gpio_num,
u32 input, u32 pull_type, u32 intr_mode)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_gpio_config)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_gpio_config(ar, gpio_num, input, pull_type, intr_mode);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_config_cmdid);
}
static inline int ath10k_wmi_gpio_output(struct ath10k *ar, u32 gpio_num, u32 set)
{
struct sk_buff *skb;
	if (!ar->wmi.ops->gen_gpio_output)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_gpio_output(ar, gpio_num, set);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_output_cmdid);
}
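/*
 * A minimal usage sketch for the two GPIO wrappers above (hypothetical
 * pin number; passing 0 for the input/pull/interrupt arguments is
 * assumed here to mean "plain output, no pull, no interrupt"):
 *
 *	ath10k_wmi_gpio_config(ar, 17, 0, 0, 0);
 *	ath10k_wmi_gpio_output(ar, 17, 1);
 */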
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_dbglog_cfg)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}
static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pktlog_enable)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}
static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pktlog_disable)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pktlog_disable(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}
static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
u32 next_offset, u32 enabled)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
next_offset, enabled);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}
static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_get_temperature)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_get_temperature_cmdid);
}
static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_clear_resp)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_clear_resp_cmdid);
}
static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 buf_size)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_send)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_send_cmdid);
}
static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 status)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_set_resp)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_set_resp_cmdid);
}
static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_delba_send)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
reason);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->delba_send_cmdid);
}
static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
void *prb_ies, size_t prb_ies_len)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_bcn_tmpl)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
prb_caps, prb_erp, prb_ies,
prb_ies_len);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}
static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_prb_tmpl)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}
static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}
static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_sta_keepalive)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_enable)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_enable(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_enable_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_add_wakeup_event)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
const u8 *pattern, const u8 *mask,
int pattern_len, int pattern_offset)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_add_pattern)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
pattern, mask, pattern_len,
pattern_offset);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_del_pattern)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
struct wmi_pno_scan_req *pno_scan)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_config_pno)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
enum wmi_tdls_state state)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_update_fw_tdls_state)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}
static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
const struct wmi_tdls_peer_update_cmd_arg *arg,
const struct wmi_tdls_peer_capab_arg *cap,
const struct wmi_channel_arg *chan)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_tdls_peer_update)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->tdls_peer_update_cmdid);
}
static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_adaptive_qcs)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}
static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_get_tpc_config)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}
static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
char *buf)
{
if (!ar->wmi.ops->fw_stats_fill)
return -EOPNOTSUPP;
ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
return 0;
}
static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
u32 detect_level, u32 detect_margin)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
detect_level,
detect_margin);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}
static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
enum wmi_host_platform_type type,
u32 fw_feature_bitmap)
{
struct sk_buff *skb;
if (!ar->wmi.ops->ext_resource_config)
return -EOPNOTSUPP;
skb = ar->wmi.ops->ext_resource_config(ar, type,
fw_feature_bitmap);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->ext_resource_cfg_cmdid);
}
static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
if (!ar->wmi.ops->get_vdev_subtype)
return -EOPNOTSUPP;
return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}
static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
enum wmi_bss_survey_req_type type)
{
struct ath10k_wmi *wmi = &ar->wmi;
struct sk_buff *skb;
if (!wmi->ops->gen_pdev_bss_chan_info_req)
return -EOPNOTSUPP;
skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
wmi->cmd->pdev_bss_chan_info_request_cmdid);
}
static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
struct ath10k_wmi *wmi = &ar->wmi;
struct sk_buff *skb;
if (!wmi->ops->gen_echo)
return -EOPNOTSUPP;
skb = wmi->ops->gen_echo(ar, value);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}
static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}
static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
const struct ath10k_radar_found_info *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_radar_found)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_radar_found(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->radar_found_cmdid);
}
static inline int
ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
const struct wmi_bb_timing_cfg_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_bb_timing)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_bb_timing(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->set_bb_timing_cmdid);
}
static inline int
ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
const struct wmi_per_peer_per_tid_cfg_arg *arg)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->per_peer_per_tid_config_cmdid);
}
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* user-mode-linux networking multicast transport
* Copyright (C) 2001 by Harald Welte <[email protected]>
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*
* based on the existing uml-networking code, which is
* Copyright (C) 2001 Lennert Buytenhek ([email protected]) and
* James Leu ([email protected]).
* Copyright (C) 2001 by various other people who didn't put their name here.
*
*/
#include <linux/init.h>
#include <linux/netdevice.h>
#include "umcast.h"
#include <net_kern.h>
struct umcast_init {
char *addr;
int lport;
int rport;
int ttl;
bool unicast;
};
static void umcast_init(struct net_device *dev, void *data)
{
struct uml_net_private *pri;
struct umcast_data *dpri;
struct umcast_init *init = data;
pri = netdev_priv(dev);
dpri = (struct umcast_data *) pri->user;
dpri->addr = init->addr;
dpri->lport = init->lport;
dpri->rport = init->rport;
dpri->unicast = init->unicast;
dpri->ttl = init->ttl;
dpri->dev = dev;
if (dpri->unicast) {
printk(KERN_INFO "ucast backend address: %s:%u listen port: "
"%u\n", dpri->addr, dpri->rport, dpri->lport);
} else {
printk(KERN_INFO "mcast backend multicast address: %s:%u, "
"TTL:%u\n", dpri->addr, dpri->lport, dpri->ttl);
}
}
static int umcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return net_recvfrom(fd, skb_mac_header(skb),
skb->dev->mtu + ETH_HEADER_OTHER);
}
static int umcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
{
return umcast_user_write(fd, skb->data, skb->len,
(struct umcast_data *) &lp->user);
}
static const struct net_kern_info umcast_kern_info = {
.init = umcast_init,
.protocol = eth_protocol,
.read = umcast_read,
.write = umcast_write,
};
static int mcast_setup(char *str, char **mac_out, void *data)
{
struct umcast_init *init = data;
char *port_str = NULL, *ttl_str = NULL, *remain;
char *last;
*init = ((struct umcast_init)
{ .addr = "239.192.168.1",
.lport = 1102,
.ttl = 1 });
remain = split_if_spec(str, mac_out, &init->addr, &port_str, &ttl_str,
NULL);
if (remain != NULL) {
printk(KERN_ERR "mcast_setup - Extra garbage on "
"specification : '%s'\n", remain);
return 0;
}
if (port_str != NULL) {
init->lport = simple_strtoul(port_str, &last, 10);
if ((*last != '\0') || (last == port_str)) {
printk(KERN_ERR "mcast_setup - Bad port : '%s'\n",
port_str);
return 0;
}
}
if (ttl_str != NULL) {
init->ttl = simple_strtoul(ttl_str, &last, 10);
if ((*last != '\0') || (last == ttl_str)) {
printk(KERN_ERR "mcast_setup - Bad ttl : '%s'\n",
ttl_str);
return 0;
}
}
init->unicast = false;
init->rport = init->lport;
printk(KERN_INFO "Configured mcast device: %s:%u-%u\n", init->addr,
init->lport, init->ttl);
return 1;
}
static int ucast_setup(char *str, char **mac_out, void *data)
{
struct umcast_init *init = data;
char *lport_str = NULL, *rport_str = NULL, *remain;
char *last;
*init = ((struct umcast_init)
{ .addr = "",
.lport = 1102,
.rport = 1102 });
remain = split_if_spec(str, mac_out, &init->addr,
&lport_str, &rport_str, NULL);
if (remain != NULL) {
printk(KERN_ERR "ucast_setup - Extra garbage on "
"specification : '%s'\n", remain);
return 0;
}
if (lport_str != NULL) {
init->lport = simple_strtoul(lport_str, &last, 10);
if ((*last != '\0') || (last == lport_str)) {
printk(KERN_ERR "ucast_setup - Bad listen port : "
"'%s'\n", lport_str);
return 0;
}
}
if (rport_str != NULL) {
init->rport = simple_strtoul(rport_str, &last, 10);
if ((*last != '\0') || (last == rport_str)) {
printk(KERN_ERR "ucast_setup - Bad remote port : "
"'%s'\n", rport_str);
return 0;
}
}
init->unicast = true;
printk(KERN_INFO "Configured ucast device: :%u -> %s:%u\n",
init->lport, init->addr, init->rport);
return 1;
}
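/*
 * Both setup routines parse a comma-separated interface spec through
 * split_if_spec(); empty fields fall back to the defaults initialized
 * above. Illustrative kernel command-line fragments (sketches):
 *
 *	eth0=mcast,,239.192.168.1,1102,1	(mac,addr,port,ttl)
 *	eth0=ucast,,10.0.0.2,1102,1102		(mac,addr,lport,rport)
 */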
static struct transport mcast_transport = {
.list = LIST_HEAD_INIT(mcast_transport.list),
.name = "mcast",
.setup = mcast_setup,
.user = &umcast_user_info,
.kern = &umcast_kern_info,
.private_size = sizeof(struct umcast_data),
.setup_size = sizeof(struct umcast_init),
};
static struct transport ucast_transport = {
.list = LIST_HEAD_INIT(ucast_transport.list),
.name = "ucast",
.setup = ucast_setup,
.user = &umcast_user_info,
.kern = &umcast_kern_info,
.private_size = sizeof(struct umcast_data),
.setup_size = sizeof(struct umcast_init),
};
static int register_umcast(void)
{
register_transport(&mcast_transport);
register_transport(&ucast_transport);
return 0;
}
late_initcall(register_umcast);
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/head.h>
#include <nvif/disp.h>
#include <nvif/printf.h>
#include <nvif/class.h>
#include <nvif/if0013.h>
int
nvif_head_vblank_event_ctor(struct nvif_head *head, const char *name, nvif_event_func func,
bool wait, struct nvif_event *event)
{
int ret = nvif_event_ctor(&head->object, name ?: "nvifHeadVBlank", nvif_head_id(head),
func, wait, NULL, 0, event);
NVIF_ERRON(ret, &head->object, "[NEW EVENT:VBLANK]");
return ret;
}
void
nvif_head_dtor(struct nvif_head *head)
{
nvif_object_dtor(&head->object);
}
int
nvif_head_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_head *head)
{
struct nvif_head_v0 args;
int ret;
args.version = 0;
args.id = id;
	ret = nvif_object_ctor(&disp->object, name ?: "nvifHead", id, NVIF_CLASS_HEAD,
&args, sizeof(args), &head->object);
NVIF_ERRON(ret, &disp->object, "[NEW head id:%d]", args.id);
return ret;
}
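/*
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	struct nvif_head head;
 *	struct nvif_event vblank;
 *
 *	nvif_head_ctor(disp, "myHead", 0, &head);
 *	nvif_head_vblank_event_ctor(&head, NULL, my_func, true, &vblank);
 */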
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2008 Steven Rostedt <[email protected]>
*
*/
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/security.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <asm/setup.h>
#include "trace.h"
#define STACK_TRACE_ENTRIES 500
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled;
static void print_max_stack(void)
{
long i;
int size;
pr_emerg(" Depth Size Location (%d entries)\n"
" ----- ---- --------\n",
stack_trace_nr_entries);
for (i = 0; i < stack_trace_nr_entries; i++) {
if (i + 1 == stack_trace_nr_entries)
size = stack_trace_index[i];
else
size = stack_trace_index[i] - stack_trace_index[i+1];
pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i],
size, (void *)stack_dump_trace[i]);
}
}
/*
* The stack tracer looks for a maximum stack at each call from a function. It
* registers a callback from ftrace, and in that callback it examines the stack
* size. It determines the stack size from the variable passed in, which is the
* address of a local variable in the stack_trace_call() callback function.
 * The stack size is calculated from the address of the local variable to the top
* of the current stack. If that size is smaller than the currently saved max
* stack size, nothing more is done.
*
* If the size of the stack is greater than the maximum recorded size, then the
* following algorithm takes place.
*
* For architectures (like x86) that store the function's return address before
* saving the function's local variables, the stack will look something like
* this:
*
* [ top of stack ]
* 0: sys call entry frame
* 10: return addr to entry code
* 11: start of sys_foo frame
* 20: return addr to sys_foo
* 21: start of kernel_func_bar frame
* 30: return addr to kernel_func_bar
* 31: [ do trace stack here ]
*
 * stack_trace_save() is called, returning all the functions it finds in the
 * current stack, which would be (from the bottom of the stack to the top):
*
* return addr to kernel_func_bar
* return addr to sys_foo
* return addr to entry code
*
* Now to figure out how much each of these functions' local variable size is,
* a search of the stack is made to find these values. When a match is made, it
* is added to the stack_dump_trace[] array. The offset into the stack is saved
* in the stack_trace_index[] array. The above example would show:
*
* stack_dump_trace[] | stack_trace_index[]
* ------------------ + -------------------
* return addr to kernel_func_bar | 30
* return addr to sys_foo | 20
* return addr to entry | 10
*
* The print_max_stack() function above, uses these values to print the size of
* each function's portion of the stack.
*
* for (i = 0; i < nr_entries; i++) {
* size = i == nr_entries - 1 ? stack_trace_index[i] :
* stack_trace_index[i] - stack_trace_index[i+1]
* print "%d %d %d %s\n", i, stack_trace_index[i], size, stack_dump_trace[i]);
* }
*
* The above shows
*
* depth size location
* ----- ---- --------
* 0 30 10 kernel_func_bar
* 1 20 10 sys_foo
* 2 10 10 entry code
*
* Now for architectures that might save the return address after the functions
* local variables (saving the link register before calling nested functions),
* this will cause the stack to look a little different:
*
* [ top of stack ]
* 0: sys call entry frame
* 10: start of sys_foo_frame
* 19: return addr to entry code << lr saved before calling kernel_func_bar
* 20: start of kernel_func_bar frame
* 29: return addr to sys_foo_frame << lr saved before calling next function
* 30: [ do trace stack here ]
*
 * Although the functions returned by stack_trace_save() may be the same, the
* placement in the stack will be different. Using the same algorithm as above
* would yield:
*
* stack_dump_trace[] | stack_trace_index[]
* ------------------ + -------------------
* return addr to kernel_func_bar | 30
* return addr to sys_foo | 29
* return addr to entry | 19
*
* Where the mapping is off by one:
*
* kernel_func_bar stack frame size is 29 - 19 not 30 - 29!
*
 * To fix this, if the architecture defines ARCH_FTRACE_SHIFT_STACK_TRACER
 * (see the #ifdef in check_stack() below), the values in stack_trace_index[]
 * are shifted by one and the number of stack trace entries is decremented by
 * one.
*
* stack_dump_trace[] | stack_trace_index[]
* ------------------ + -------------------
* return addr to kernel_func_bar | 29
* return addr to sys_foo | 19
*
 * Although the entry function is not displayed, the first function (sys_foo)
 * will still include its stack size.
*/
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
static int tracer_frame;
int frame_size = READ_ONCE(tracer_frame);
int i, x;
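	/*
	 * 'stack' is the address of a local variable in the tracer
	 * callback: its offset within the THREAD_SIZE-aligned stack
	 * area, subtracted from THREAD_SIZE, is the depth in use.
	 */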
this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
this_size = THREAD_SIZE - this_size;
/* Remove the frame of the tracer */
this_size -= frame_size;
if (this_size <= stack_trace_max_size)
return;
/* we do not handle interrupt stacks yet */
if (!object_is_on_stack(stack))
return;
/* Can't do this from NMI context (can cause deadlocks) */
if (in_nmi())
return;
local_irq_save(flags);
arch_spin_lock(&stack_trace_max_lock);
/* In case another CPU set the tracer_frame on us */
if (unlikely(!frame_size))
this_size -= tracer_frame;
/* a race could have already updated it */
if (this_size <= stack_trace_max_size)
goto out;
stack_trace_max_size = this_size;
stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
ARRAY_SIZE(stack_dump_trace) - 1,
0);
/* Skip over the overhead of the stack tracer itself */
for (i = 0; i < stack_trace_nr_entries; i++) {
if (stack_dump_trace[i] == ip)
break;
}
/*
* Some archs may not have the passed in ip in the dump.
* If that happens, we need to show everything.
*/
if (i == stack_trace_nr_entries)
i = 0;
/*
* Now find where in the stack these are.
*/
x = 0;
start = stack;
top = (unsigned long *)
(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
/*
	 * Loop through all the entries. Some entries may be missing
	 * from the stack for some reason, so we may have to account
	 * for that. If they are all there, this loop will only happen
	 * once. This code only runs on a new max, so it is far from
	 * a fast path.
*/
while (i < stack_trace_nr_entries) {
int found = 0;
stack_trace_index[x] = this_size;
p = start;
for (; p < top && i < stack_trace_nr_entries; p++) {
/*
* The READ_ONCE_NOCHECK is used to let KASAN know that
* this is not a stack-out-of-bounds error.
*/
if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
stack_dump_trace[x] = stack_dump_trace[i++];
this_size = stack_trace_index[x++] =
(top - p) * sizeof(unsigned long);
found = 1;
/* Start the search from here */
start = p + 1;
/*
* We do not want to show the overhead
* of the stack tracer stack in the
* max stack. If we haven't figured
* out what that is, then figure it out
* now.
*/
if (unlikely(!tracer_frame)) {
tracer_frame = (p - stack) *
sizeof(unsigned long);
stack_trace_max_size -= tracer_frame;
}
}
}
if (!found)
i++;
}
#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
/*
* Some archs will store the link register before calling
* nested functions. This means the saved return address
* comes after the local storage, and we need to shift
* for that.
*/
if (x > 1) {
memmove(&stack_trace_index[0], &stack_trace_index[1],
sizeof(stack_trace_index[0]) * (x - 1));
x--;
}
#endif
stack_trace_nr_entries = x;
if (task_stack_end_corrupted(current)) {
print_max_stack();
BUG();
}
out:
arch_spin_unlock(&stack_trace_max_lock);
local_irq_restore(flags);
}
/* Some archs may not define MCOUNT_INSN_SIZE */
#ifndef MCOUNT_INSN_SIZE
# define MCOUNT_INSN_SIZE 0
#endif
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
unsigned long stack;
preempt_disable_notrace();
	/* no atomic needed; this variable is only modified on this CPU */
__this_cpu_inc(disable_stack_tracer);
if (__this_cpu_read(disable_stack_tracer) != 1)
goto out;
/* If rcu is not watching, then save stack trace can fail */
if (!rcu_is_watching())
goto out;
ip += MCOUNT_INSN_SIZE;
check_stack(ip, &stack);
out:
__this_cpu_dec(disable_stack_tracer);
/* prevent recursion in schedule */
preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
};
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
unsigned long *ptr = filp->private_data;
char buf[64];
int r;
r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
if (r > sizeof(buf))
r = sizeof(buf);
return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
long *ptr = filp->private_data;
unsigned long val, flags;
int ret;
ret = kstrtoul_from_user(ubuf, count, 10, &val);
if (ret)
return ret;
local_irq_save(flags);
/*
* In case we trace inside arch_spin_lock() or after (NMI),
* we will cause circular lock, so we also need to increase
* the percpu disable_stack_tracer here.
*/
__this_cpu_inc(disable_stack_tracer);
arch_spin_lock(&stack_trace_max_lock);
*ptr = val;
arch_spin_unlock(&stack_trace_max_lock);
__this_cpu_dec(disable_stack_tracer);
local_irq_restore(flags);
return count;
}
static const struct file_operations stack_max_size_fops = {
.open = tracing_open_generic,
.read = stack_max_size_read,
.write = stack_max_size_write,
.llseek = default_llseek,
};
static void *
__next(struct seq_file *m, loff_t *pos)
{
long n = *pos - 1;
if (n >= stack_trace_nr_entries)
return NULL;
m->private = (void *)n;
return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return __next(m, pos);
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
local_irq_disable();
__this_cpu_inc(disable_stack_tracer);
arch_spin_lock(&stack_trace_max_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
arch_spin_unlock(&stack_trace_max_lock);
__this_cpu_dec(disable_stack_tracer);
local_irq_enable();
}
static void trace_lookup_stack(struct seq_file *m, long i)
{
unsigned long addr = stack_dump_trace[i];
seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
seq_puts(m, "#\n"
"# Stack tracer disabled\n"
"#\n"
"# To enable the stack tracer, either add 'stacktrace' to the\n"
"# kernel command line\n"
"# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
"#\n");
}
static int t_show(struct seq_file *m, void *v)
{
long i;
int size;
if (v == SEQ_START_TOKEN) {
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",
stack_trace_nr_entries);
if (!stack_tracer_enabled && !stack_trace_max_size)
print_disabled(m);
return 0;
}
i = *(long *)v;
if (i >= stack_trace_nr_entries)
return 0;
if (i + 1 == stack_trace_nr_entries)
size = stack_trace_index[i];
else
size = stack_trace_index[i] - stack_trace_index[i+1];
seq_printf(m, "%3ld) %8d %5d ", i, stack_trace_index[i], size);
trace_lookup_stack(m, i);
return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
.show = t_show,
};
static int stack_trace_open(struct inode *inode, struct file *file)
{
int ret;
ret = security_locked_down(LOCKDOWN_TRACEFS);
if (ret)
return ret;
return seq_open(file, &stack_trace_seq_ops);
}
static const struct file_operations stack_trace_fops = {
.open = stack_trace_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#ifdef CONFIG_DYNAMIC_FTRACE
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
struct ftrace_ops *ops = inode->i_private;
/* Checks for tracefs lockdown */
return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
inode, file);
}
static const struct file_operations stack_trace_filter_fops = {
.open = stack_trace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
.llseek = tracing_lseek,
.release = ftrace_regex_release,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
int
stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int was_enabled;
int ret;
mutex_lock(&stack_sysctl_mutex);
was_enabled = !!stack_tracer_enabled;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write || (was_enabled == !!stack_tracer_enabled))
goto out;
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
else
unregister_ftrace_function(&trace_ops);
out:
mutex_unlock(&stack_sysctl_mutex);
return ret;
}
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
static __init int enable_stacktrace(char *str)
{
int len;
if ((len = str_has_prefix(str, "_filter=")))
strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);
stack_tracer_enabled = 1;
return 1;
}
__setup("stacktrace", enable_stacktrace);
static __init int stack_trace_init(void)
{
int ret;
ret = tracing_init_dentry();
if (ret)
return 0;
trace_create_file("stack_max_size", TRACE_MODE_WRITE, NULL,
&stack_trace_max_size, &stack_max_size_fops);
trace_create_file("stack_trace", TRACE_MODE_READ, NULL,
NULL, &stack_trace_fops);
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("stack_trace_filter", TRACE_MODE_WRITE, NULL,
&trace_ops, &stack_trace_filter_fops);
#endif
if (stack_trace_filter_buf[0])
ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
return 0;
}
device_initcall(stack_trace_init);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Ethernet-type device handling.
*
* Version: @(#)eth.c 1.0.7 05/25/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <[email protected]>
* Mark Evans, <[email protected]>
* Florian La Roche, <[email protected]>
* Alan Cox, <[email protected]>
*
* Fixes:
* Mr Linux : Arp problems
* Alan Cox : Generic queue tidyup (very tiny here)
* Alan Cox : eth_header ntohs should be htons
* Alan Cox : eth_rebuild_header missing an htons and
* minor other things.
* Tegge : Arp bug fixes.
* Florian : Removed many unnecessary functions, code cleanup
* and changes for new arp and skbuff.
* Alan Cox : Redid header building to reflect new format.
* Alan Cox : ARP only when compiled with CONFIG_INET
* Greg Page : 802.2 and SNAP stuff.
* Alan Cox : MAC layer pointers/new format.
* Paul Gortmaker : eth_copy_and_sum shouldn't csum padding.
* Alan Cox : Protect against forwarding explosions with
* older network drivers and IFF_ALLMULTI.
* Christer Weinigel : Better rebuild header message.
* Andrew Morton : 26Feb01: kill ether_setup() - use netdev_boot_setup().
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/property.h>
#include <net/dst.h>
#include <net/arp.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/dsa.h>
#include <net/flow_dissector.h>
#include <net/gro.h>
#include <linux/uaccess.h>
#include <net/pkt_sched.h>
/**
* eth_header - create the Ethernet header
* @skb: buffer to alter
* @dev: source device
* @type: Ethernet type field
 * @daddr: destination address (NULL means leave the destination unset)
 * @saddr: source address (NULL means use the device source address)
 * @len: packet length (<= skb->len)
 *
* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
* in here instead.
*/
int eth_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
struct ethhdr *eth = skb_push(skb, ETH_HLEN);
if (type != ETH_P_802_3 && type != ETH_P_802_2)
eth->h_proto = htons(type);
else
eth->h_proto = htons(len);
/*
* Set the source hardware address.
*/
if (!saddr)
saddr = dev->dev_addr;
memcpy(eth->h_source, saddr, ETH_ALEN);
if (daddr) {
memcpy(eth->h_dest, daddr, ETH_ALEN);
return ETH_HLEN;
}
/*
* Anyway, the loopback-device should never use this function...
*/
if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
eth_zero_addr(eth->h_dest);
return ETH_HLEN;
}
return -ETH_HLEN;
}
EXPORT_SYMBOL(eth_header);
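/*
 * eth_header() is normally reached through the header_ops hook, e.g.
 * (sketch, with a caller-provided dest_mac):
 *
 *	dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL, skb->len);
 */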
/**
* eth_get_headlen - determine the length of header for an ethernet frame
* @dev: pointer to network device
* @data: pointer to start of frame
* @len: total length of frame
*
* Make a best effort attempt to pull the length for all of the headers for
* a given frame in a linear buffer.
*/
u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len)
{
const unsigned int flags = FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
const struct ethhdr *eth = (const struct ethhdr *)data;
struct flow_keys_basic keys;
/* this should never happen, but better safe than sorry */
if (unlikely(len < sizeof(*eth)))
return len;
/* parse any remaining L2/L3 headers, check for L4 */
if (!skb_flow_dissect_flow_keys_basic(dev_net(dev), NULL, &keys, data,
eth->h_proto, sizeof(*eth),
len, flags))
return max_t(u32, keys.control.thoff, sizeof(*eth));
/* parse for any L4 headers */
return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
}
EXPORT_SYMBOL(eth_get_headlen);
/**
* eth_type_trans - determine the packet's protocol ID.
* @skb: received socket data
* @dev: receiving network device
*
* The rule here is that we
* assume 802.3 if the type field is short enough to be a length.
* This is normal practice and works for any 'now in use' protocol.
*/
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
unsigned short _service_access_point;
const unsigned short *sap;
const struct ethhdr *eth;
skb->dev = dev;
skb_reset_mac_header(skb);
eth = eth_skb_pull_mac(skb);
eth_skb_pkt_type(skb, dev);
/*
* Some variants of DSA tagging don't have an ethertype field
* at all, so we check here whether one of those tagging
* variants has been configured on the receiving interface,
* and if so, set skb->protocol without looking at the packet.
*/
if (unlikely(netdev_uses_dsa(dev)))
return htons(ETH_P_XDSA);
if (likely(eth_proto_is_802_3(eth->h_proto)))
return eth->h_proto;
/*
* This is a magic hack to spot IPX packets. Older Novell breaks
* the protocol design and runs IPX over 802.3 without an 802.2 LLC
* layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
* won't work for fault tolerant netware but does for the rest.
*/
sap = skb_header_pointer(skb, 0, sizeof(*sap), &_service_access_point);
if (sap && *sap == 0xFFFF)
return htons(ETH_P_802_3);
/*
* Real 802.2 LLC
*/
return htons(ETH_P_802_2);
}
EXPORT_SYMBOL(eth_type_trans);
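/*
 * Worked examples for the dispatch above: h_proto 0x0800 (ETH_P_IP) is
 * >= 0x0600, so it is returned as-is; a value such as 0x0064 is a
 * length, so the payload is inspected: 0xFFFF in the first two bytes
 * means raw IPX over 802.3 (ETH_P_802_3), anything else is 802.2 LLC
 * (ETH_P_802_2).
 */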
/**
* eth_header_parse - extract hardware address from packet
* @skb: packet to extract header from
* @haddr: destination buffer
*/
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
const struct ethhdr *eth = eth_hdr(skb);
memcpy(haddr, eth->h_source, ETH_ALEN);
return ETH_ALEN;
}
EXPORT_SYMBOL(eth_header_parse);
/**
* eth_header_cache - fill cache entry from neighbour
* @neigh: source neighbour
* @hh: destination cache entry
* @type: Ethernet type field
*
* Create an Ethernet header template from the neighbour.
*/
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
{
struct ethhdr *eth;
const struct net_device *dev = neigh->dev;
eth = (struct ethhdr *)
(((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));
if (type == htons(ETH_P_802_3))
return -1;
eth->h_proto = type;
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
/* Pairs with READ_ONCE() in neigh_resolve_output(),
* neigh_hh_output() and neigh_update_hhs().
*/
smp_store_release(&hh->hh_len, ETH_HLEN);
return 0;
}
EXPORT_SYMBOL(eth_header_cache);
/**
* eth_header_cache_update - update cache entry
* @hh: destination cache entry
* @dev: network device
* @haddr: new hardware address
*
* Called by Address Resolution module to notify changes in address.
*/
void eth_header_cache_update(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr)
{
memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
haddr, ETH_ALEN);
}
EXPORT_SYMBOL(eth_header_cache_update);
/**
* eth_header_parse_protocol - extract protocol from L2 header
* @skb: packet to extract protocol from
*/
__be16 eth_header_parse_protocol(const struct sk_buff *skb)
{
const struct ethhdr *eth = eth_hdr(skb);
return eth->h_proto;
}
EXPORT_SYMBOL(eth_header_parse_protocol);
/**
* eth_prepare_mac_addr_change - prepare for mac change
* @dev: network device
* @p: socket address
*/
int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
return -EBUSY;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
return 0;
}
EXPORT_SYMBOL(eth_prepare_mac_addr_change);
/**
* eth_commit_mac_addr_change - commit mac change
* @dev: network device
* @p: socket address
*/
void eth_commit_mac_addr_change(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
eth_hw_addr_set(dev, addr->sa_data);
}
EXPORT_SYMBOL(eth_commit_mac_addr_change);
/**
* eth_mac_addr - set new Ethernet hardware address
* @dev: network device
* @p: socket address
*
* Change hardware address of device.
*
* This doesn't change hardware matching, so needs to be overridden
* for most real devices.
*/
int eth_mac_addr(struct net_device *dev, void *p)
{
int ret;
ret = eth_prepare_mac_addr_change(dev, p);
if (ret < 0)
return ret;
eth_commit_mac_addr_change(dev, p);
return 0;
}
EXPORT_SYMBOL(eth_mac_addr);
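/*
 * Typical use is as a net_device op (sketch; 'my_netdev_ops' is
 * hypothetical):
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_set_mac_address	= eth_mac_addr,
 *	};
 */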
int eth_validate_addr(struct net_device *dev)
{
if (!is_valid_ether_addr(dev->dev_addr))
return -EADDRNOTAVAIL;
return 0;
}
EXPORT_SYMBOL(eth_validate_addr);
const struct header_ops eth_header_ops ____cacheline_aligned = {
.create = eth_header,
.parse = eth_header_parse,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
.parse_protocol = eth_header_parse_protocol,
};
/**
* ether_setup - setup Ethernet network device
* @dev: network device
*
* Fill in the fields of the device structure with Ethernet-generic values.
*/
void ether_setup(struct net_device *dev)
{
dev->header_ops = ð_header_ops;
dev->type = ARPHRD_ETHER;
dev->hard_header_len = ETH_HLEN;
dev->min_header_len = ETH_HLEN;
dev->mtu = ETH_DATA_LEN;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_DATA_LEN;
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
dev->flags = IFF_BROADCAST|IFF_MULTICAST;
dev->priv_flags |= IFF_TX_SKB_SHARING;
eth_broadcast_addr(dev->broadcast);
}
EXPORT_SYMBOL(ether_setup);
/**
* alloc_etherdev_mqs - Allocates and sets up an Ethernet device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this Ethernet device
* @txqs: The number of TX queues this device has.
* @rxqs: The number of RX queues this device has.
*
* Fill in the fields of the device structure with Ethernet-generic
* values. Basically does everything except registering the device.
*
* Constructs a new net device, complete with a private data area of
* size (sizeof_priv). A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
unsigned int rxqs)
{
return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_ENUM,
ether_setup, txqs, rxqs);
}
EXPORT_SYMBOL(alloc_etherdev_mqs);
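/*
 * Most drivers use the single-queue wrapper from <linux/etherdevice.h>
 * (sketch; 'struct my_priv' is hypothetical):
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 */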
ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
{
return sysfs_emit(buf, "%*phC\n", len, addr);
}
EXPORT_SYMBOL(sysfs_format_mac);
struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
{
const struct packet_offload *ptype;
unsigned int hlen, off_eth;
struct sk_buff *pp = NULL;
struct ethhdr *eh, *eh2;
struct sk_buff *p;
__be16 type;
int flush = 1;
off_eth = skb_gro_offset(skb);
hlen = off_eth + sizeof(*eh);
eh = skb_gro_header(skb, hlen, off_eth);
if (unlikely(!eh))
goto out;
flush = 0;
list_for_each_entry(p, head, list) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
eh2 = (struct ethhdr *)(p->data + off_eth);
if (compare_ether_header(eh, eh2)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
}
type = eh->h_proto;
ptype = gro_find_receive_by_type(type);
if (ptype == NULL) {
flush = 1;
goto out;
}
skb_gro_pull(skb, sizeof(*eh));
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
ipv6_gro_receive, inet_gro_receive,
head, skb);
out:
skb_gro_flush_final(skb, pp, flush);
return pp;
}
EXPORT_SYMBOL(eth_gro_receive);
int eth_gro_complete(struct sk_buff *skb, int nhoff)
{
struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
__be16 type = eh->h_proto;
struct packet_offload *ptype;
int err = -ENOSYS;
if (skb->encapsulation)
skb_set_inner_mac_header(skb, nhoff);
ptype = gro_find_complete_by_type(type);
if (ptype != NULL)
err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
ipv6_gro_complete, inet_gro_complete,
skb, nhoff + sizeof(*eh));
return err;
}
EXPORT_SYMBOL(eth_gro_complete);
static struct packet_offload eth_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_TEB),
.priority = 10,
.callbacks = {
.gro_receive = eth_gro_receive,
.gro_complete = eth_gro_complete,
},
};
static int __init eth_offload_init(void)
{
dev_add_offload(ð_packet_offload);
return 0;
}
fs_initcall(eth_offload_init);
unsigned char * __weak arch_get_platform_mac_address(void)
{
return NULL;
}
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
{
unsigned char *addr;
int ret;
ret = of_get_mac_address(dev->of_node, mac_addr);
if (!ret)
return 0;
addr = arch_get_platform_mac_address();
if (!addr)
return -ENODEV;
ether_addr_copy(mac_addr, addr);
return 0;
}
EXPORT_SYMBOL(eth_platform_get_mac_address);
/**
* platform_get_ethdev_address - Set netdev's MAC address from a given device
* @dev: Pointer to the device
* @netdev: Pointer to netdev to write the address to
*
* Wrapper around eth_platform_get_mac_address() which writes the address
* directly to netdev->dev_addr.
*/
int platform_get_ethdev_address(struct device *dev, struct net_device *netdev)
{
u8 addr[ETH_ALEN] __aligned(2);
int ret;
ret = eth_platform_get_mac_address(dev, addr);
if (!ret)
eth_hw_addr_set(netdev, addr);
return ret;
}
EXPORT_SYMBOL(platform_get_ethdev_address);
/**
* nvmem_get_mac_address - Obtain the MAC address from an nvmem cell named
 * 'mac-address' associated with the given device.
*
* @dev: Device with which the mac-address cell is associated.
* @addrbuf: Buffer to which the MAC address will be copied on success.
*
* Returns 0 on success or a negative error number on failure.
*/
int nvmem_get_mac_address(struct device *dev, void *addrbuf)
{
struct nvmem_cell *cell;
const void *mac;
size_t len;
cell = nvmem_cell_get(dev, "mac-address");
if (IS_ERR(cell))
return PTR_ERR(cell);
mac = nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
if (IS_ERR(mac))
return PTR_ERR(mac);
if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
kfree(mac);
return -EINVAL;
}
ether_addr_copy(addrbuf, mac);
kfree(mac);
return 0;
}
static int fwnode_get_mac_addr(struct fwnode_handle *fwnode,
const char *name, char *addr)
{
int ret;
ret = fwnode_property_read_u8_array(fwnode, name, addr, ETH_ALEN);
if (ret)
return ret;
if (!is_valid_ether_addr(addr))
return -EINVAL;
return 0;
}
/**
* fwnode_get_mac_address - Get the MAC from the firmware node
* @fwnode: Pointer to the firmware node
* @addr: Address of buffer to store the MAC in
*
* Search the firmware node for the best MAC address to use. 'mac-address' is
 * checked first, because that is supposed to contain the "most recent" MAC
* address. If that isn't set, then 'local-mac-address' is checked next,
* because that is the default address. If that isn't set, then the obsolete
* 'address' is checked, just in case we're using an old device tree.
*
* Note that the 'address' property is supposed to contain a virtual address of
* the register set, but some DTS files have redefined that property to be the
* MAC address.
*
* All-zero MAC addresses are rejected, because those could be properties that
* exist in the firmware tables, but were not updated by the firmware. For
* example, the DTS could define 'mac-address' and 'local-mac-address', with
* zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
* In this case, the real MAC is in 'local-mac-address', and 'mac-address'
* exists but is all zeros.
*/
int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr)
{
if (!fwnode_get_mac_addr(fwnode, "mac-address", addr) ||
!fwnode_get_mac_addr(fwnode, "local-mac-address", addr) ||
!fwnode_get_mac_addr(fwnode, "address", addr))
return 0;
return -ENOENT;
}
EXPORT_SYMBOL(fwnode_get_mac_address);
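/*
 * Illustrative device tree fragment (hypothetical values) matching the
 * lookup order described above: 'local-mac-address' holds the factory
 * default while 'mac-address' may be overwritten by the bootloader.
 *
 *	ethernet@f0000000 {
 *		local-mac-address = [00 04 9f 00 00 01];
 *		mac-address = [00 04 9f 00 10 20];
 *	};
 *
 * fwnode_get_mac_address() returns the 'mac-address' value here because it
 * is checked first.
 */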
/**
* device_get_mac_address - Get the MAC for a given device
* @dev: Pointer to the device
* @addr: Address of buffer to store the MAC in
*/
int device_get_mac_address(struct device *dev, char *addr)
{
return fwnode_get_mac_address(dev_fwnode(dev), addr);
}
EXPORT_SYMBOL(device_get_mac_address);
/**
* device_get_ethdev_address - Set netdev's MAC address from a given device
* @dev: Pointer to the device
* @netdev: Pointer to netdev to write the address to
*
* Wrapper around device_get_mac_address() which writes the address
* directly to netdev->dev_addr.
*/
int device_get_ethdev_address(struct device *dev, struct net_device *netdev)
{
u8 addr[ETH_ALEN];
int ret;
ret = device_get_mac_address(dev, addr);
if (!ret)
eth_hw_addr_set(netdev, addr);
return ret;
}
EXPORT_SYMBOL(device_get_ethdev_address);
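/*
 * Minimal probe-path sketch (names hypothetical): fall back to a random
 * address when the firmware does not provide one.
 *
 *	if (device_get_ethdev_address(&pdev->dev, ndev))
 *		eth_hw_addr_random(ndev);
 */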
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/acpi/device_pm.c - ACPI device power management routines.
*
* Copyright (C) 2012, Intel Corp.
* Author: Rafael J. Wysocki <[email protected]>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#define pr_fmt(fmt) "PM: " fmt
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include "fan.h"
#include "internal.h"
/**
* acpi_power_state_string - String representation of ACPI device power state.
* @state: ACPI device power state to return the string representation of.
*/
const char *acpi_power_state_string(int state)
{
switch (state) {
case ACPI_STATE_D0:
return "D0";
case ACPI_STATE_D1:
return "D1";
case ACPI_STATE_D2:
return "D2";
case ACPI_STATE_D3_HOT:
return "D3hot";
case ACPI_STATE_D3_COLD:
return "D3cold";
default:
return "(unknown)";
}
}
static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state)
{
unsigned long long psc;
acpi_status status;
status = acpi_evaluate_integer(device->handle, "_PSC", NULL, &psc);
if (ACPI_FAILURE(status))
return -ENODEV;
*state = psc;
return 0;
}
/**
* acpi_device_get_power - Get power state of an ACPI device.
* @device: Device to get the power state of.
* @state: Place to store the power state of the device.
*
* This function does not update the device's power.state field, but it may
* update its parent's power.state field (when the parent's power state is
* unknown and the device's power state turns out to be D0).
*
* Also, it does not update power resource reference counters to ensure that
* the power state returned by it will be persistent and it may return a power
* state shallower than previously set by acpi_device_set_power() for @device
* (if that power state depends on any power resources).
*/
int acpi_device_get_power(struct acpi_device *device, int *state)
{
int result = ACPI_STATE_UNKNOWN;
struct acpi_device *parent;
int error;
if (!device || !state)
return -EINVAL;
parent = acpi_dev_parent(device);
if (!device->flags.power_manageable) {
/* TBD: Non-recursive algorithm for walking up hierarchy. */
*state = parent ? parent->power.state : ACPI_STATE_D0;
goto out;
}
/*
* Get the device's power state from power resources settings and _PSC,
* if available.
*/
if (device->power.flags.power_resources) {
error = acpi_power_get_inferred_state(device, &result);
if (error)
return error;
}
if (device->power.flags.explicit_get) {
int psc;
error = acpi_dev_pm_explicit_get(device, &psc);
if (error)
return error;
/*
* The power resources settings may indicate a power state
* shallower than the actual power state of the device, because
* the same power resources may be referenced by other devices.
*
* For systems predating ACPI 4.0 we assume that D3hot is the
* deepest state that can be supported.
*/
if (psc > result && psc < ACPI_STATE_D3_COLD)
result = psc;
else if (result == ACPI_STATE_UNKNOWN)
result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_HOT : psc;
}
/*
* If we were unsure about the device parent's power state up to this
* point, the fact that the device is in D0 implies that the parent has
* to be in D0 too, except if ignore_parent is set.
*/
if (!device->power.flags.ignore_parent && parent &&
parent->power.state == ACPI_STATE_UNKNOWN &&
result == ACPI_STATE_D0)
parent->power.state = ACPI_STATE_D0;
*state = result;
out:
acpi_handle_debug(device->handle, "Power state: %s\n",
acpi_power_state_string(*state));
return 0;
}
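/*
 * Worked example (hypothetical numbers): if the device's power resources
 * infer D2 but _PSC returns 3, acpi_device_get_power() reports D3hot, since
 * shared power resources may keep the device shallower than _PSC indicates.
 * A _PSC value of 4 (D3cold) would not override the inferred state, because
 * the psc < ACPI_STATE_D3_COLD check above filters it out.
 */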
static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
{
if (adev->power.states[state].flags.explicit_set) {
char method[5] = { '_', 'P', 'S', '0' + state, '\0' };
acpi_status status;
status = acpi_evaluate_object(adev->handle, method, NULL, NULL);
if (ACPI_FAILURE(status))
return -ENODEV;
}
return 0;
}
/**
* acpi_device_set_power - Set power state of an ACPI device.
* @device: Device to set the power state of.
* @state: New power state to set.
*
* Callers must ensure that the device is power manageable before using this
* function.
*/
int acpi_device_set_power(struct acpi_device *device, int state)
{
int target_state = state;
int result = 0;
if (!device || !device->flags.power_manageable
|| (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
acpi_handle_debug(device->handle, "Power state change: %s -> %s\n",
acpi_power_state_string(device->power.state),
acpi_power_state_string(state));
/* Make sure this is a valid target state */
/* There is a special case for D0 addressed below. */
if (state > ACPI_STATE_D0 && state == device->power.state)
goto no_change;
if (state == ACPI_STATE_D3_COLD) {
/*
* For transitions to D3cold we need to execute _PS3 and then
* possibly drop references to the power resources in use.
*/
state = ACPI_STATE_D3_HOT;
/* If D3cold is not supported, use D3hot as the target state. */
if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
target_state = state;
} else if (!device->power.states[state].flags.valid) {
acpi_handle_debug(device->handle, "Power state %s not supported\n",
acpi_power_state_string(state));
return -ENODEV;
}
if (!device->power.flags.ignore_parent) {
struct acpi_device *parent;
parent = acpi_dev_parent(device);
if (parent && state < parent->power.state) {
acpi_handle_debug(device->handle,
"Cannot transition to %s for parent in %s\n",
acpi_power_state_string(state),
acpi_power_state_string(parent->power.state));
return -ENODEV;
}
}
/*
* Transition Power
* ----------------
* In accordance with ACPI 6, _PSx is executed before manipulating power
* resources, unless the target state is D0, in which case _PS0 is
* supposed to be executed after turning the power resources on.
*/
if (state > ACPI_STATE_D0) {
/*
* According to ACPI 6, devices cannot go from lower-power
* (deeper) states to higher-power (shallower) states.
*/
if (state < device->power.state) {
acpi_handle_debug(device->handle,
"Cannot transition from %s to %s\n",
acpi_power_state_string(device->power.state),
acpi_power_state_string(state));
return -ENODEV;
}
/*
* If the device goes from D3hot to D3cold, _PS3 has been
* evaluated for it already, so skip it in that case.
*/
if (device->power.state < ACPI_STATE_D3_HOT) {
result = acpi_dev_pm_explicit_set(device, state);
if (result)
goto end;
}
if (device->power.flags.power_resources)
result = acpi_power_transition(device, target_state);
} else {
int cur_state = device->power.state;
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, ACPI_STATE_D0);
if (result)
goto end;
}
if (cur_state == ACPI_STATE_D0) {
int psc;
/* Nothing to do here if _PSC is not present. */
if (!device->power.flags.explicit_get)
goto no_change;
/*
* The power state of the device was set to D0 last
* time, but that might have happened before a
* system-wide transition involving the platform
* firmware, so it may be necessary to evaluate _PS0
* for the device here. However, use extra care here
* and evaluate _PSC to check the device's current power
* state, and only invoke _PS0 if the evaluation of _PSC
* is successful and it returns a power state different
* from D0.
*/
result = acpi_dev_pm_explicit_get(device, &psc);
if (result || psc == ACPI_STATE_D0)
goto no_change;
}
result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
}
end:
if (result) {
acpi_handle_debug(device->handle,
"Failed to change power state to %s\n",
acpi_power_state_string(target_state));
} else {
device->power.state = target_state;
acpi_handle_debug(device->handle, "Power state changed to %s\n",
acpi_power_state_string(target_state));
}
return result;
no_change:
acpi_handle_debug(device->handle, "Already in %s\n",
acpi_power_state_string(state));
return 0;
}
EXPORT_SYMBOL(acpi_device_set_power);
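/*
 * Usage sketch (illustrative, not from this file): a caller that has
 * verified power manageability can request D3cold directly; per the logic
 * above, _PS3 is evaluated first and the power resource references are
 * dropped afterwards:
 *
 *	if (acpi_device_power_manageable(adev))
 *		acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
 */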
int acpi_bus_set_power(acpi_handle handle, int state)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
if (device)
return acpi_device_set_power(device, state);
return -ENODEV;
}
EXPORT_SYMBOL(acpi_bus_set_power);
int acpi_bus_init_power(struct acpi_device *device)
{
int state;
int result;
if (!device)
return -EINVAL;
device->power.state = ACPI_STATE_UNKNOWN;
if (!acpi_device_is_present(device)) {
device->flags.initialized = false;
return -ENXIO;
}
result = acpi_device_get_power(device, &state);
if (result)
return result;
if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) {
/* Reference count the power resources. */
result = acpi_power_on_resources(device, state);
if (result)
return result;
if (state == ACPI_STATE_D0) {
/*
* If _PSC is not present and the state inferred from
* power resources appears to be D0, it still may be
* necessary to execute _PS0 at this point, because
* another device using the same power resources may
* have been put into D0 previously and that's why we
* see D0 here.
*/
result = acpi_dev_pm_explicit_set(device, state);
if (result)
return result;
}
} else if (state == ACPI_STATE_UNKNOWN) {
/*
* No power resources and missing _PSC? Cross fingers and make
		 * it D0 in the hope that this is what the BIOS put the device into.
* [We tried to force D0 here by executing _PS0, but that broke
* Toshiba P870-303 in a nasty way.]
*/
state = ACPI_STATE_D0;
}
device->power.state = state;
return 0;
}
/**
* acpi_device_fix_up_power - Force device with missing _PSC into D0.
* @device: Device object whose power state is to be fixed up.
*
* Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
* are assumed to be put into D0 by the BIOS. However, in some cases that may
* not be the case and this function should be used then.
*/
int acpi_device_fix_up_power(struct acpi_device *device)
{
int ret = 0;
if (!device->power.flags.power_resources
&& !device->power.flags.explicit_get
&& device->power.state == ACPI_STATE_D0)
ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
return ret;
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
static int fix_up_power_if_applicable(struct acpi_device *adev, void *not_used)
{
if (adev->status.present && adev->status.enabled)
acpi_device_fix_up_power(adev);
return 0;
}
/**
* acpi_device_fix_up_power_extended - Force device and its children into D0.
* @adev: Parent device object whose power state is to be fixed up.
*
* Call acpi_device_fix_up_power() for @adev and its children so long as they
* are reported as present and enabled.
*/
void acpi_device_fix_up_power_extended(struct acpi_device *adev)
{
acpi_device_fix_up_power(adev);
acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
/**
* acpi_device_fix_up_power_children - Force a device's children into D0.
* @adev: Parent device object whose children's power state is to be fixed up.
*
* Call acpi_device_fix_up_power() for @adev's children so long as they
* are reported as present and enabled.
*/
void acpi_device_fix_up_power_children(struct acpi_device *adev)
{
acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
int state;
int result;
if (device->power.state == ACPI_STATE_UNKNOWN) {
result = acpi_bus_init_power(device);
if (!result && state_p)
*state_p = device->power.state;
return result;
}
result = acpi_device_get_power(device, &state);
if (result)
return result;
if (state == ACPI_STATE_UNKNOWN) {
state = ACPI_STATE_D0;
result = acpi_device_set_power(device, state);
if (result)
return result;
} else {
if (device->power.flags.power_resources) {
/*
			 * We don't need to actually switch the state, but we need
* to update the power resources' reference counters.
*/
result = acpi_power_transition(device, state);
if (result)
return result;
}
device->power.state = state;
}
if (state_p)
*state_p = state;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_device_update_power);
int acpi_bus_update_power(acpi_handle handle, int *state_p)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
if (device)
return acpi_device_update_power(device, state_p);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(acpi_bus_update_power);
bool acpi_bus_power_manageable(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
return device && device->flags.power_manageable;
}
EXPORT_SYMBOL(acpi_bus_power_manageable);
static int acpi_power_up_if_adr_present(struct acpi_device *adev, void *not_used)
{
if (!(adev->flags.power_manageable && adev->pnp.type.bus_address))
return 0;
acpi_handle_debug(adev->handle, "Power state: %s\n",
acpi_power_state_string(adev->power.state));
if (adev->power.state == ACPI_STATE_D3_COLD)
return acpi_device_set_power(adev, ACPI_STATE_D0);
return 0;
}
/**
 * acpi_dev_power_up_children_with_adr - Power up children with valid _ADR
* @adev: Parent ACPI device object.
*
* Change the power states of the direct children of @adev that are in D3cold
* and hold valid _ADR objects to D0 in order to allow bus (e.g. PCI)
* enumeration code to access them.
*/
void acpi_dev_power_up_children_with_adr(struct acpi_device *adev)
{
acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL);
}
/**
* acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling
* @adev: ACPI companion of the target device.
*
* Evaluate _S0W for @adev and return the value produced by it or return
* ACPI_STATE_UNKNOWN on errors (including _S0W not present).
*/
u8 acpi_dev_power_state_for_wake(struct acpi_device *adev)
{
unsigned long long state;
acpi_status status;
status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
if (ACPI_FAILURE(status))
return ACPI_STATE_UNKNOWN;
return state;
}
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
void acpi_pm_wakeup_event(struct device *dev)
{
pm_wakeup_dev_event(dev, 0, acpi_s2idle_wakeup());
}
EXPORT_SYMBOL_GPL(acpi_pm_wakeup_event);
static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
{
struct acpi_device *adev;
if (val != ACPI_NOTIFY_DEVICE_WAKE)
return;
acpi_handle_debug(handle, "Wake notify\n");
adev = acpi_get_acpi_dev(handle);
if (!adev)
return;
mutex_lock(&acpi_pm_notifier_lock);
if (adev->wakeup.flags.notifier_present) {
pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
if (adev->wakeup.context.func) {
acpi_handle_debug(handle, "Running %pS for %s\n",
adev->wakeup.context.func,
dev_name(adev->wakeup.context.dev));
adev->wakeup.context.func(&adev->wakeup.context);
}
}
mutex_unlock(&acpi_pm_notifier_lock);
acpi_put_acpi_dev(adev);
}
/**
* acpi_add_pm_notifier - Register PM notify handler for given ACPI device.
* @adev: ACPI device to add the notify handler for.
* @dev: Device to generate a wakeup event for while handling the notification.
* @func: Work function to execute when handling the notification.
*
* NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
* PM wakeup events. For example, wakeup events may be generated for bridges
* if one of the devices below the bridge is signaling wakeup, even if the
* bridge itself doesn't have a wakeup GPE associated with it.
*/
acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
void (*func)(struct acpi_device_wakeup_context *context))
{
acpi_status status = AE_ALREADY_EXISTS;
if (!dev && !func)
return AE_BAD_PARAMETER;
mutex_lock(&acpi_pm_notifier_install_lock);
if (adev->wakeup.flags.notifier_present)
goto out;
status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
acpi_pm_notify_handler, NULL);
if (ACPI_FAILURE(status))
goto out;
mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.ws = wakeup_source_register(&adev->dev,
dev_name(&adev->dev));
adev->wakeup.context.dev = dev;
adev->wakeup.context.func = func;
adev->wakeup.flags.notifier_present = true;
mutex_unlock(&acpi_pm_notifier_lock);
out:
mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
/**
* acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
* @adev: ACPI device to remove the notifier from.
*/
acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{
acpi_status status = AE_BAD_PARAMETER;
mutex_lock(&acpi_pm_notifier_install_lock);
if (!adev->wakeup.flags.notifier_present)
goto out;
status = acpi_remove_notify_handler(adev->handle,
ACPI_SYSTEM_NOTIFY,
acpi_pm_notify_handler);
if (ACPI_FAILURE(status))
goto out;
mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.context.func = NULL;
adev->wakeup.context.dev = NULL;
wakeup_source_unregister(adev->wakeup.ws);
adev->wakeup.flags.notifier_present = false;
mutex_unlock(&acpi_pm_notifier_lock);
out:
mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
bool acpi_bus_can_wakeup(acpi_handle handle)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
return device && device->wakeup.flags.valid;
}
EXPORT_SYMBOL(acpi_bus_can_wakeup);
bool acpi_pm_device_can_wakeup(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
return adev ? acpi_device_can_wakeup(adev) : false;
}
/**
* acpi_dev_pm_get_state - Get preferred power state of ACPI device.
* @dev: Device whose preferred target power state to return.
* @adev: ACPI device node corresponding to @dev.
* @target_state: System state to match the resultant device state.
* @d_min_p: Location to store the highest power state available to the device.
* @d_max_p: Location to store the lowest power state available to the device.
*
* Find the lowest power (highest number) and highest power (lowest number) ACPI
* device power states that the device can be in while the system is in the
* state represented by @target_state. Store the integer numbers representing
 * those states in the memory locations pointed to by @d_max_p and @d_min_p,
* respectively.
*
* Callers must ensure that @dev and @adev are valid pointers and that @adev
* actually corresponds to @dev before using this function.
*
* Returns 0 on success or -ENODATA when one of the ACPI methods fails or
* returns a value that doesn't make sense. The memory locations pointed to by
* @d_max_p and @d_min_p are only modified on success.
*/
static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
u32 target_state, int *d_min_p, int *d_max_p)
{
char method[] = { '_', 'S', '0' + target_state, 'D', '\0' };
acpi_handle handle = adev->handle;
unsigned long long ret;
int d_min, d_max;
bool wakeup = false;
bool has_sxd = false;
acpi_status status;
/*
* If the system state is S0, the lowest power state the device can be
* in is D3cold, unless the device has _S0W and is supposed to signal
* wakeup, in which case the return value of _S0W has to be used as the
* lowest power state available to the device.
*/
d_min = ACPI_STATE_D0;
d_max = ACPI_STATE_D3_COLD;
/*
* If present, _SxD methods return the minimum D-state (highest power
* state) we can use for the corresponding S-states. Otherwise, the
* minimum D-state is D0 (ACPI 3.x).
*/
if (target_state > ACPI_STATE_S0) {
/*
* We rely on acpi_evaluate_integer() not clobbering the integer
* provided if AE_NOT_FOUND is returned.
*/
ret = d_min;
status = acpi_evaluate_integer(handle, method, NULL, &ret);
if ((ACPI_FAILURE(status) && status != AE_NOT_FOUND)
|| ret > ACPI_STATE_D3_COLD)
return -ENODATA;
/*
* We need to handle legacy systems where D3hot and D3cold are
* the same and 3 is returned in both cases, so fall back to
* D3cold if D3hot is not a valid state.
*/
if (!adev->power.states[ret].flags.valid) {
if (ret == ACPI_STATE_D3_HOT)
ret = ACPI_STATE_D3_COLD;
else
return -ENODATA;
}
if (status == AE_OK)
has_sxd = true;
d_min = ret;
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
&& adev->wakeup.sleep_state >= target_state;
} else if (device_may_wakeup(dev) && dev->power.wakeirq) {
/*
* The ACPI subsystem doesn't manage the wake bit for IRQs
* defined with ExclusiveAndWake and SharedAndWake. Instead we
* expect them to be managed via the PM subsystem. Drivers
* should call dev_pm_set_wake_irq to register an IRQ as a wake
* source.
*
* If a device has a wake IRQ attached we need to check the
* _S0W method to get the correct wake D-state. Otherwise we
* end up putting the device into D3Cold which will more than
* likely disable wake functionality.
*/
wakeup = true;
} else {
/* ACPI GPE is specified in _PRW. */
wakeup = adev->wakeup.flags.valid;
}
/*
* If _PRW says we can wake up the system from the target sleep state,
* the D-state returned by _SxD is sufficient for that (we assume a
* wakeup-aware driver if wake is set). Still, if _SxW exists
* (ACPI 3.x), it should return the maximum (lowest power) D-state that
* can wake the system. _S0W may be valid, too.
*/
if (wakeup) {
method[3] = 'W';
status = acpi_evaluate_integer(handle, method, NULL, &ret);
if (status == AE_NOT_FOUND) {
/* No _SxW. In this case, the ACPI spec says that we
* must not go into any power state deeper than the
* value returned from _SxD.
*/
if (has_sxd && target_state > ACPI_STATE_S0)
d_max = d_min;
} else if (ACPI_SUCCESS(status) && ret <= ACPI_STATE_D3_COLD) {
/* Fall back to D3cold if ret is not a valid state. */
if (!adev->power.states[ret].flags.valid)
ret = ACPI_STATE_D3_COLD;
d_max = ret > d_min ? ret : d_min;
} else {
return -ENODATA;
}
}
if (d_min_p)
*d_min_p = d_min;
if (d_max_p)
*d_max_p = d_max;
return 0;
}
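/*
 * Worked example (hypothetical firmware values): with a target system state
 * of S3, _S3D returning 1 and a wakeup-enabled device whose _S3W returns 3,
 * the code above yields d_min = D1 and d_max = D3hot, i.e. the device may be
 * placed anywhere in the D1..D3hot range while still able to signal wakeup.
 */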
/**
* acpi_pm_device_sleep_state - Get preferred power state of ACPI device.
* @dev: Device whose preferred target power state to return.
* @d_min_p: Location to store the upper limit of the allowed states range.
* @d_max_in: Deepest low-power state to take into consideration.
* Return value: Preferred power state of the device on success, -ENODEV
* if there's no 'struct acpi_device' for @dev, -EINVAL if @d_max_in is
* incorrect, or -ENODATA on ACPI method failure.
*
* The caller must ensure that @dev is valid before using this function.
*/
int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
{
struct acpi_device *adev;
int ret, d_min, d_max;
if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3_COLD)
return -EINVAL;
if (d_max_in > ACPI_STATE_D2) {
enum pm_qos_flags_status stat;
stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
if (stat == PM_QOS_FLAGS_ALL)
d_max_in = ACPI_STATE_D2;
}
adev = ACPI_COMPANION(dev);
if (!adev) {
dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
ret = acpi_dev_pm_get_state(dev, adev, acpi_target_system_state(),
&d_min, &d_max);
if (ret)
return ret;
if (d_max_in < d_min)
return -EINVAL;
if (d_max > d_max_in) {
for (d_max = d_max_in; d_max > d_min; d_max--) {
if (adev->power.states[d_max].flags.valid)
break;
}
}
if (d_min_p)
*d_min_p = d_min;
return d_max;
}
EXPORT_SYMBOL(acpi_pm_device_sleep_state);
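/*
 * Typical call pattern (sketch; the fallback policy is hypothetical): bus
 * code such as PCI asks for the deepest state it may use without caring
 * about the shallowest one:
 *
 *	int target = acpi_pm_device_sleep_state(dev, NULL, ACPI_STATE_D3_COLD);
 *
 *	if (target < 0)
 *		target = ACPI_STATE_D3_COLD;
 */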
/**
* acpi_pm_notify_work_func - ACPI devices wakeup notification work function.
* @context: Device wakeup context.
*/
static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
{
struct device *dev = context->dev;
if (dev) {
pm_wakeup_event(dev, 0);
pm_request_resume(dev);
}
}
static DEFINE_MUTEX(acpi_wakeup_lock);
static int __acpi_device_wakeup_enable(struct acpi_device *adev,
u32 target_state)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
acpi_status status;
int error = 0;
mutex_lock(&acpi_wakeup_lock);
/*
* If the device wakeup power is already enabled, disable it and enable
* it again in case it depends on the configuration of subordinate
* devices and the conditions have changed since it was enabled last
* time.
*/
if (wakeup->enable_count > 0)
acpi_disable_wakeup_device_power(adev);
error = acpi_enable_wakeup_device_power(adev, target_state);
if (error) {
if (wakeup->enable_count > 0) {
acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
wakeup->enable_count = 0;
}
goto out;
}
if (wakeup->enable_count > 0)
goto inc;
status = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
if (ACPI_FAILURE(status)) {
acpi_disable_wakeup_device_power(adev);
error = -EIO;
goto out;
}
acpi_handle_debug(adev->handle, "GPE%2X enabled for wakeup\n",
(unsigned int)wakeup->gpe_number);
inc:
if (wakeup->enable_count < INT_MAX)
wakeup->enable_count++;
else
acpi_handle_info(adev->handle, "Wakeup enable count out of bounds!\n");
out:
mutex_unlock(&acpi_wakeup_lock);
return error;
}
/**
* acpi_device_wakeup_enable - Enable wakeup functionality for device.
* @adev: ACPI device to enable wakeup functionality for.
* @target_state: State the system is transitioning into.
*
* Enable the GPE associated with @adev so that it can generate wakeup signals
* for the device in response to external (remote) events and enable wakeup
* power for it.
*
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
{
return __acpi_device_wakeup_enable(adev, target_state);
}
/**
* acpi_device_wakeup_disable - Disable wakeup functionality for device.
* @adev: ACPI device to disable wakeup functionality for.
*
* Disable the GPE associated with @adev and disable wakeup power for it.
*
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
static void acpi_device_wakeup_disable(struct acpi_device *adev)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
mutex_lock(&acpi_wakeup_lock);
if (!wakeup->enable_count)
goto out;
acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
acpi_disable_wakeup_device_power(adev);
wakeup->enable_count--;
out:
mutex_unlock(&acpi_wakeup_lock);
}
/**
* acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
* @dev: Device to enable/disable to generate wakeup events.
* @enable: Whether to enable or disable the wakeup functionality.
*/
int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
{
struct acpi_device *adev;
int error;
adev = ACPI_COMPANION(dev);
if (!adev) {
dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
if (!acpi_device_can_wakeup(adev))
return -EINVAL;
if (!enable) {
acpi_device_wakeup_disable(adev);
dev_dbg(dev, "Wakeup disabled by ACPI\n");
return 0;
}
error = __acpi_device_wakeup_enable(adev, acpi_target_system_state());
if (!error)
dev_dbg(dev, "Wakeup enabled by ACPI\n");
return error;
}
EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
* @dev: Device to put into a low-power state.
* @adev: ACPI device node corresponding to @dev.
* @system_state: System state to choose the device state for.
*/
static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev,
u32 system_state)
{
int ret, state;
if (!acpi_device_power_manageable(adev))
return 0;
ret = acpi_dev_pm_get_state(dev, adev, system_state, NULL, &state);
return ret ? ret : acpi_device_set_power(adev, state);
}
/**
* acpi_dev_pm_full_power - Put ACPI device into the full-power state.
* @adev: ACPI device node to put into the full-power state.
*/
static int acpi_dev_pm_full_power(struct acpi_device *adev)
{
return acpi_device_power_manageable(adev) ?
acpi_device_set_power(adev, ACPI_STATE_D0) : 0;
}
/**
* acpi_dev_suspend - Put device into a low-power state using ACPI.
* @dev: Device to put into a low-power state.
* @wakeup: Whether or not to enable wakeup for the device.
*
* Put the given device into a low-power state using the standard ACPI
* mechanism. Set up remote wakeup if desired, choose the state to put the
* device into (this checks if remote wakeup is expected to work too), and set
* the power state of the device.
*/
int acpi_dev_suspend(struct device *dev, bool wakeup)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
u32 target_state = acpi_target_system_state();
int error;
if (!adev)
return 0;
if (wakeup && acpi_device_can_wakeup(adev)) {
error = acpi_device_wakeup_enable(adev, target_state);
if (error)
return -EAGAIN;
} else {
wakeup = false;
}
error = acpi_dev_pm_low_power(dev, adev, target_state);
if (error && wakeup)
acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_suspend);
/**
* acpi_dev_resume - Put device into the full-power state using ACPI.
* @dev: Device to put into the full-power state.
*
* Put the given device into the full-power state using the standard ACPI
* mechanism. Set the power state of the device to ACPI D0 and disable wakeup.
*/
int acpi_dev_resume(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
if (!adev)
return 0;
error = acpi_dev_pm_full_power(adev);
acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume);
/**
* acpi_subsys_runtime_suspend - Suspend device using ACPI.
* @dev: Device to suspend.
*
* Carry out the generic runtime suspend procedure for @dev and use ACPI to put
* it into a runtime low-power state.
*/
int acpi_subsys_runtime_suspend(struct device *dev)
{
int ret = pm_generic_runtime_suspend(dev);
return ret ? ret : acpi_dev_suspend(dev, true);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
/**
* acpi_subsys_runtime_resume - Resume device using ACPI.
* @dev: Device to Resume.
*
* Use ACPI to put the given device into the full-power state and carry out the
* generic runtime resume procedure for it.
*/
int acpi_subsys_runtime_resume(struct device *dev)
{
int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
#ifdef CONFIG_PM_SLEEP
static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
{
u32 sys_target = acpi_target_system_state();
int ret, state;
if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid &&
device_may_wakeup(dev) != !!adev->wakeup.prepare_count))
return true;
if (sys_target == ACPI_STATE_S0)
return false;
if (adev->power.flags.dsw_present)
return true;
ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state);
if (ret)
return true;
return state != adev->power.state;
}
/**
* acpi_subsys_prepare - Prepare device for system transition to a sleep state.
* @dev: Device to prepare.
*/
int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
int ret = dev->driver->pm->prepare(dev);
if (ret < 0)
return ret;
if (!ret && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
return 0;
}
return !acpi_dev_needs_resume(dev, adev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
/**
* acpi_subsys_complete - Finalize device's resume during system resume.
* @dev: Device to handle.
*/
void acpi_subsys_complete(struct device *dev)
{
pm_generic_complete(dev);
/*
* If the device had been runtime-suspended before the system went into
* the sleep state it is going out of and it has never been resumed till
* now, resume it in case the firmware powered it up.
*/
if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
/**
* acpi_subsys_suspend - Run the device driver's suspend callback.
* @dev: Device to handle.
*
* Follow PCI and resume devices from runtime suspend before running their
* system suspend callbacks, unless the driver can cope with runtime-suspended
* devices during system suspend and there are no ACPI-specific reasons for
* resuming them.
*/
int acpi_subsys_suspend(struct device *dev)
{
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
return pm_generic_suspend(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
/**
* acpi_subsys_suspend_late - Suspend device using ACPI.
* @dev: Device to suspend.
*
* Carry out the generic late suspend procedure for @dev and use ACPI to put
* it into a low-power state during system transition into a sleep state.
*/
int acpi_subsys_suspend_late(struct device *dev)
{
int ret;
if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_late(dev);
return ret ? ret : acpi_dev_suspend(dev, device_may_wakeup(dev));
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
/**
* acpi_subsys_suspend_noirq - Run the device driver's "noirq" suspend callback.
* @dev: Device to suspend.
*/
int acpi_subsys_suspend_noirq(struct device *dev)
{
int ret;
if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_suspend_noirq(dev);
if (ret)
return ret;
/*
* If the target system sleep state is suspend-to-idle, it is sufficient
* to check whether or not the device's wakeup settings are good for
* runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
* acpi_subsys_complete() to take care of fixing up the device's state
* anyway, if need be.
*/
if (device_can_wakeup(dev) && !device_may_wakeup(dev))
dev->power.may_skip_resume = false;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
/**
* acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback.
* @dev: Device to handle.
*/
static int acpi_subsys_resume_noirq(struct device *dev)
{
if (dev_pm_skip_resume(dev))
return 0;
return pm_generic_resume_noirq(dev);
}
/**
* acpi_subsys_resume_early - Resume device using ACPI.
* @dev: Device to Resume.
*
* Use ACPI to put the given device into the full-power state and carry out the
* generic early resume procedure for it during system transition into the
 * working state, but only do that if the device either defines an early
 * resume handler or does not define power operations at all. Otherwise
 * powering up of the device is postponed to the normal resume phase.
*/
static int acpi_subsys_resume_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
if (dev_pm_skip_resume(dev))
return 0;
if (pm && !pm->resume_early) {
dev_dbg(dev, "postponing D0 transition to normal resume stage\n");
return 0;
}
ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
/**
* acpi_subsys_resume - Resume device using ACPI.
* @dev: Device to Resume.
*
 * Use ACPI to put the given device into the full-power state if it has not
 * been powered up during the early resume phase, and carry out the generic
 * resume procedure for it during system transition into the working state.
*/
static int acpi_subsys_resume(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret = 0;
if (!dev_pm_skip_resume(dev) && pm && !pm->resume_early) {
dev_dbg(dev, "executing postponed D0 transition\n");
ret = acpi_dev_resume(dev);
}
return ret ? ret : pm_generic_resume(dev);
}
/**
* acpi_subsys_freeze - Run the device driver's freeze callback.
* @dev: Device to handle.
*/
int acpi_subsys_freeze(struct device *dev)
{
/*
* Resume all runtime-suspended devices before creating a snapshot
* image of system memory, because the restore kernel generally cannot
* be expected to always handle them consistently and they need to be
* put into the runtime-active metastate during system resume anyway,
* so it is better to ensure that the state saved in the image will be
* always consistent with that.
*/
pm_runtime_resume(dev);
return pm_generic_freeze(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
/**
* acpi_subsys_restore_early - Restore device using ACPI.
* @dev: Device to restore.
*/
int acpi_subsys_restore_early(struct device *dev)
{
int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_restore_early(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
/**
* acpi_subsys_poweroff - Run the device driver's poweroff callback.
* @dev: Device to handle.
*
* Follow PCI and resume devices from runtime suspend before running their
* system poweroff callbacks, unless the driver can cope with runtime-suspended
* devices during system suspend and there are no ACPI-specific reasons for
* resuming them.
*/
int acpi_subsys_poweroff(struct device *dev)
{
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
return pm_generic_poweroff(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_poweroff);
/**
* acpi_subsys_poweroff_late - Run the device driver's poweroff callback.
* @dev: Device to handle.
*
* Carry out the generic late poweroff procedure for @dev and use ACPI to put
* it into a low-power state during system transition into a sleep state.
*/
static int acpi_subsys_poweroff_late(struct device *dev)
{
int ret;
if (dev_pm_skip_suspend(dev))
return 0;
ret = pm_generic_poweroff_late(dev);
if (ret)
return ret;
return acpi_dev_suspend(dev, device_may_wakeup(dev));
}
/**
* acpi_subsys_poweroff_noirq - Run the driver's "noirq" poweroff callback.
* @dev: Device to suspend.
*/
static int acpi_subsys_poweroff_noirq(struct device *dev)
{
if (dev_pm_skip_suspend(dev))
return 0;
return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
.runtime_suspend = acpi_subsys_runtime_suspend,
.runtime_resume = acpi_subsys_runtime_resume,
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.resume = acpi_subsys_resume,
.suspend_late = acpi_subsys_suspend_late,
.suspend_noirq = acpi_subsys_suspend_noirq,
.resume_noirq = acpi_subsys_resume_noirq,
.resume_early = acpi_subsys_resume_early,
.freeze = acpi_subsys_freeze,
.poweroff = acpi_subsys_poweroff,
.poweroff_late = acpi_subsys_poweroff_late,
.poweroff_noirq = acpi_subsys_poweroff_noirq,
.restore_early = acpi_subsys_restore_early,
#endif
},
};
/**
* acpi_dev_pm_detach - Remove ACPI power management from the device.
* @dev: Device to take care of.
* @power_off: Whether or not to try to remove power from the device.
*
* Remove the device from the general ACPI PM domain and remove its wakeup
* notifier. If @power_off is set, additionally remove power from the device if
* possible.
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
*/
static void acpi_dev_pm_detach(struct device *dev, bool power_off)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
if (adev && dev->pm_domain == &acpi_general_pm_domain) {
dev_pm_domain_set(dev, NULL);
acpi_remove_pm_notifier(adev);
if (power_off) {
/*
* If the device's PM QoS resume latency limit or flags
* have been exposed to user space, they have to be
* hidden at this point, so that they don't affect the
* choice of the low-power state to put the device into.
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
acpi_device_wakeup_disable(adev);
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
}
}
}
/**
* acpi_dev_pm_attach - Prepare device for ACPI power management.
* @dev: Device to prepare.
* @power_on: Whether or not to power on the device.
*
* If @dev has a valid ACPI handle that has a valid struct acpi_device object
* attached to it, install a wakeup notification handler for the device and
* add it to the general ACPI PM domain. If @power_on is set, the device will
* be put into the ACPI D0 state before the function returns.
*
* This assumes that the @dev's bus type uses generic power management callbacks
* (or doesn't use any power management callbacks at all).
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
*/
int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
/*
* Skip devices whose ACPI companions match the device IDs below,
* because they require special power management handling incompatible
* with the generic ACPI PM domain.
*/
static const struct acpi_device_id special_pm_ids[] = {
ACPI_FAN_DEVICE_IDS,
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);
if (!adev || !acpi_match_device_ids(adev, special_pm_ids))
return 0;
/*
* Only attach the power domain to the first device if the
* companion is shared by multiple. This is to prevent doing power
* management twice.
*/
if (!acpi_device_is_first_physical_node(adev, dev))
return 0;
acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
dev_pm_domain_set(dev, &acpi_general_pm_domain);
if (power_on) {
acpi_dev_pm_full_power(adev);
acpi_device_wakeup_disable(adev);
}
dev->pm_domain->detach = acpi_dev_pm_detach;
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
/**
* acpi_storage_d3 - Check if D3 should be used in the suspend path
* @dev: Device to check
*
* Return %true if the platform firmware wants @dev to be programmed
* into D3hot or D3cold (if supported) in the suspend path, or %false
* when there is no specific preference. On some platforms, if this
* hint is ignored, @dev may remain unresponsive after suspending the
* platform as a whole.
*
 * Although the property has "storage" in its name, it is actually
 * applied to the PCIe slot, so the same platform restrictions will
 * likely apply to a non-storage device plugged into that slot.
*/
bool acpi_storage_d3(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
u8 val;
if (force_storage_d3())
return true;
if (!adev)
return false;
if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
&val))
return false;
return val == 1;
}
EXPORT_SYMBOL_GPL(acpi_storage_d3);
/**
* acpi_dev_state_d0 - Tell if the device is in D0 power state
* @dev: Physical device the ACPI power state of which to check
*
* On a system without ACPI, return true. On a system with ACPI, return true if
* the current ACPI power state of the device is D0, or false otherwise.
*
* Note that the power state of a device is not well-defined after it has been
* passed to acpi_device_set_power() and before that function returns, so it is
* not valid to ask for the ACPI power state of the device in that time frame.
*
* This function is intended to be used in a driver's probe or remove
* function. See Documentation/firmware-guide/acpi/non-d0-probe.rst for
* more information.
*/
bool acpi_dev_state_d0(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
if (!adev)
return true;
return adev->power.state == ACPI_STATE_D0;
}
EXPORT_SYMBOL_GPL(acpi_dev_state_d0);
#endif /* CONFIG_PM */
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
/* Completion code size and initial value */
#define COMPLETION_CODE_SIZE 8
#define COMPLETION_CODE_INIT 0
/* SG list header size in bytes */
#define SG_LIST_HDR_SIZE 8
/* Default timeout when waiting for free pending entry in us */
#define CPT_PENTRY_TIMEOUT 1000
#define CPT_PENTRY_STEP 50
/* Default threshold for stopping and resuming sender requests */
#define CPT_IQ_STOP_MARGIN 128
#define CPT_IQ_RESUME_MARGIN 512
#define CPT_DMA_ALIGN 128
void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req)
{
int i;
pr_debug("Gather list size %d\n", req->incnt);
for (i = 0; i < req->incnt; i++) {
pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
req->in[i].size, req->in[i].vptr,
(void *) req->in[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n",
req->in[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
req->in[i].vptr, req->in[i].size, false);
}
pr_debug("Scatter list size %d\n", req->outcnt);
for (i = 0; i < req->outcnt; i++) {
pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
req->out[i].size, req->out[i].vptr,
(void *) req->out[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
req->out[i].vptr, req->out[i].size, false);
}
}
static inline struct otx_cpt_pending_entry *get_free_pending_entry(
struct otx_cpt_pending_queue *q,
int qlen)
{
struct otx_cpt_pending_entry *ent = NULL;
ent = &q->head[q->rear];
if (unlikely(ent->busy))
return NULL;
q->rear++;
if (unlikely(q->rear == qlen))
q->rear = 0;
return ent;
}
static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
{
if (WARN_ON(inc > length))
inc = length;
index += inc;
if (unlikely(index >= length))
index -= length;
return index;
}
static inline void free_pentry(struct otx_cpt_pending_entry *pentry)
{
pentry->completion_addr = NULL;
pentry->info = NULL;
pentry->callback = NULL;
pentry->areq = NULL;
pentry->resume_sender = false;
pentry->busy = false;
}
static inline int setup_sgio_components(struct pci_dev *pdev,
struct otx_cpt_buf_ptr *list,
int buf_count, u8 *buffer)
{
struct otx_cpt_sglist_component *sg_ptr = NULL;
int ret = 0, i, j;
int components;
if (unlikely(!list)) {
dev_err(&pdev->dev, "Input list pointer is NULL\n");
return -EFAULT;
}
for (i = 0; i < buf_count; i++) {
if (likely(list[i].vptr)) {
list[i].dma_addr = dma_map_single(&pdev->dev,
list[i].vptr,
list[i].size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(&pdev->dev,
list[i].dma_addr))) {
dev_err(&pdev->dev, "Dma mapping failed\n");
ret = -EIO;
goto sg_cleanup;
}
}
}
components = buf_count / 4;
sg_ptr = (struct otx_cpt_sglist_component *)buffer;
for (i = 0; i < components; i++) {
sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
sg_ptr++;
}
components = buf_count % 4;
switch (components) {
case 3:
sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
fallthrough;
case 2:
sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
fallthrough;
case 1:
sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
break;
default:
break;
}
return ret;
sg_cleanup:
for (j = 0; j < i; j++) {
if (list[j].dma_addr) {
			dma_unmap_single(&pdev->dev, list[j].dma_addr,
					 list[j].size, DMA_BIDIRECTIONAL);
}
list[j].dma_addr = 0;
}
return ret;
}
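/*
 * Worked example (follows directly from the code above): each
 * otx_cpt_sglist_component holds four size/pointer slots, so buf_count = 6
 * produces one fully populated component followed by a second component
 * carrying the two leftover entries via the fallthrough switch.
 */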
static inline int setup_sgio_list(struct pci_dev *pdev,
struct otx_cpt_info_buffer **pinfo,
struct otx_cpt_req_info *req, gfp_t gfp)
{
u32 dlen, align_dlen, info_len, rlen;
struct otx_cpt_info_buffer *info;
u16 g_sz_bytes, s_sz_bytes;
int align = CPT_DMA_ALIGN;
u32 total_mem_len;
if (unlikely(req->incnt > OTX_CPT_MAX_SG_IN_CNT ||
req->outcnt > OTX_CPT_MAX_SG_OUT_CNT)) {
dev_err(&pdev->dev, "Error too many sg components\n");
return -EINVAL;
}
g_sz_bytes = ((req->incnt + 3) / 4) *
sizeof(struct otx_cpt_sglist_component);
s_sz_bytes = ((req->outcnt + 3) / 4) *
sizeof(struct otx_cpt_sglist_component);
dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
align_dlen = ALIGN(dlen, align);
info_len = ALIGN(sizeof(*info), align);
rlen = ALIGN(sizeof(union otx_cpt_res_s), align);
total_mem_len = align_dlen + info_len + rlen + COMPLETION_CODE_SIZE;
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Memory allocation failed\n");
return -ENOMEM;
}
*pinfo = info;
info->dlen = dlen;
info->in_buffer = (u8 *)info + info_len;
((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
((u16 *)info->in_buffer)[2] = 0;
((u16 *)info->in_buffer)[3] = 0;
/* Setup gather (input) components */
if (setup_sgio_components(pdev, req->in, req->incnt,
&info->in_buffer[8])) {
dev_err(&pdev->dev, "Failed to setup gather list\n");
return -EFAULT;
}
if (setup_sgio_components(pdev, req->out, req->outcnt,
&info->in_buffer[8 + g_sz_bytes])) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
return -EFAULT;
}
info->dma_len = total_mem_len - info_len;
info->dptr_baddr = dma_map_single(&pdev->dev, (void *)info->in_buffer,
info->dma_len, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
return -EIO;
}
/*
* Get buffer for union otx_cpt_res_s response
* structure and its physical address
*/
info->completion_addr = (u64 *)(info->in_buffer + align_dlen);
info->comp_baddr = info->dptr_baddr + align_dlen;
/* Create and initialize RPTR */
info->out_buffer = (u8 *)info->completion_addr + rlen;
info->rptr_baddr = info->comp_baddr + rlen;
*((u64 *) info->out_buffer) = ~((u64) COMPLETION_CODE_INIT);
return 0;
}
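/*
 * Resulting layout of the single kzalloc'd block (offsets as computed
 * above; only the dma_len bytes starting at in_buffer are DMA-mapped):
 *
 *	info                        (info_len bytes, aligned)
 *	in_buffer:  8-byte SG header + gather + scatter components
 *	                            (dlen bytes, padded to align_dlen)
 *	completion_addr/comp_baddr  (rlen bytes, union otx_cpt_res_s)
 *	out_buffer/rptr_baddr       (COMPLETION_CODE_SIZE bytes)
 */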
static void cpt_fill_inst(union otx_cpt_inst_s *inst,
struct otx_cpt_info_buffer *info,
struct otx_cpt_iq_cmd *cmd)
{
inst->u[0] = 0x0;
inst->s.doneint = true;
inst->s.res_addr = (u64)info->comp_baddr;
inst->u[2] = 0x0;
inst->s.wq_ptr = 0;
inst->s.ei0 = cmd->cmd.u64;
inst->s.ei1 = cmd->dptr;
inst->s.ei2 = cmd->rptr;
inst->s.ei3 = cmd->cptr.u64;
}
/*
 * On the OcteonTX platform the db_count argument passed to
 * otx_cptvf_write_vq_doorbell() is the doorbell count. Its valid values are:
 * 0 - one CPT instruction is enqueued but CPT is not informed
 * 1 - one CPT instruction is enqueued and CPT is informed
 */
static void cpt_send_cmd(union otx_cpt_inst_s *cptinst, struct otx_cptvf *cptvf)
{
struct otx_cpt_cmd_qinfo *qinfo = &cptvf->cqinfo;
struct otx_cpt_cmd_queue *queue;
struct otx_cpt_cmd_chunk *curr;
u8 *ent;
queue = &qinfo->queue[0];
/*
* cpt_send_cmd is currently called only from critical section
* therefore no locking is required for accessing instruction queue
*/
ent = &queue->qhead->head[queue->idx * OTX_CPT_INST_SIZE];
memcpy(ent, (void *) cptinst, OTX_CPT_INST_SIZE);
if (++queue->idx >= queue->qhead->size / 64) {
curr = queue->qhead;
if (list_is_last(&curr->nextchunk, &queue->chead))
queue->qhead = queue->base;
else
queue->qhead = list_next_entry(queue->qhead, nextchunk);
queue->idx = 0;
}
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
otx_cptvf_write_vq_doorbell(cptvf, 1);
}
static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
struct otx_cpt_pending_queue *pqueue,
struct otx_cptvf *cptvf)
{
struct otx_cptvf_request *cpt_req = &req->req;
struct otx_cpt_pending_entry *pentry = NULL;
union otx_cpt_ctrl_info *ctrl = &req->ctrl;
struct otx_cpt_info_buffer *info = NULL;
union otx_cpt_res_s *result = NULL;
struct otx_cpt_iq_cmd iq_cmd;
union otx_cpt_inst_s cptinst;
int retry, ret = 0;
u8 resume_sender;
gfp_t gfp;
gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
ret = setup_sgio_list(pdev, &info, req, gfp);
if (unlikely(ret)) {
dev_err(&pdev->dev, "Setting up SG list failed\n");
goto request_cleanup;
}
cpt_req->dlen = info->dlen;
result = (union otx_cpt_res_s *) info->completion_addr;
result->s.compcode = COMPLETION_CODE_INIT;
spin_lock_bh(&pqueue->lock);
pentry = get_free_pending_entry(pqueue, pqueue->qlen);
retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
while (unlikely(!pentry) && retry--) {
spin_unlock_bh(&pqueue->lock);
udelay(CPT_PENTRY_STEP);
spin_lock_bh(&pqueue->lock);
pentry = get_free_pending_entry(pqueue, pqueue->qlen);
}
if (unlikely(!pentry)) {
ret = -ENOSPC;
spin_unlock_bh(&pqueue->lock);
goto request_cleanup;
}
/*
* Check if we are close to filling in entire pending queue,
* if so then tell the sender to stop/sleep by returning -EBUSY
* We do it only for context which can sleep (GFP_KERNEL)
*/
	if (gfp == GFP_KERNEL &&
	    pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN))
		pentry->resume_sender = true;
	else
		pentry->resume_sender = false;
resume_sender = pentry->resume_sender;
pqueue->pending_count++;
pentry->completion_addr = info->completion_addr;
pentry->info = info;
pentry->callback = req->callback;
pentry->areq = req->areq;
pentry->busy = true;
info->pentry = pentry;
info->time_in = jiffies;
info->req = req;
/* Fill in the command */
iq_cmd.cmd.u64 = 0;
iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
iq_cmd.dptr = info->dptr_baddr;
iq_cmd.rptr = info->rptr_baddr;
iq_cmd.cptr.u64 = 0;
iq_cmd.cptr.s.grp = ctrl->s.grp;
/* Fill in the CPT_INST_S type command for HW interpretation */
cpt_fill_inst(&cptinst, info, &iq_cmd);
/* Print debug info if enabled */
otx_cpt_dump_sg_list(pdev, req);
pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX_CPT_INST_SIZE);
print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX_CPT_INST_SIZE, false);
pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
cpt_req->dlen, false);
/* Send CPT command */
cpt_send_cmd(&cptinst, cptvf);
/*
* We allocate and prepare pending queue entry in critical section
* together with submitting CPT instruction to CPT instruction queue
* to make sure that order of CPT requests is the same in both
* pending and instruction queues
*/
spin_unlock_bh(&pqueue->lock);
ret = resume_sender ? -EBUSY : -EINPROGRESS;
return ret;
request_cleanup:
do_request_cleanup(pdev, info);
return ret;
}
int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
int cpu_num)
{
struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
if (!otx_cpt_device_ready(cptvf)) {
dev_err(&pdev->dev, "CPT Device is not ready\n");
return -ENODEV;
}
if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) {
dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n",
cptvf->vfid);
return -EINVAL;
} else if ((cptvf->vftype == OTX_CPT_AE_TYPES) &&
(req->ctrl.s.se_req)) {
dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n",
cptvf->vfid);
return -EINVAL;
}
return process_request(pdev, req, &cptvf->pqinfo.queue[0], cptvf);
}
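/*
 * Illustrative sketch (not part of the driver): how a submitter might
 * handle the return codes of otx_cpt_do_request(). Both -EINPROGRESS
 * and -EBUSY mean the request was queued; -EBUSY additionally asks the
 * sender to back off until its callback is invoked with -EINPROGRESS.
 * The helper names below are hypothetical.
 */
#if 0
static int example_submit(struct pci_dev *pdev, struct otx_cpt_req_info *req)
{
	int ret = otx_cpt_do_request(pdev, req, 0);

	switch (ret) {
	case -EINPROGRESS:	/* queued; completion via req->callback */
		return 0;
	case -EBUSY:		/* queued; stop sending until resumed */
		example_pause_sender();	/* hypothetical throttle helper */
		return 0;
	default:		/* submission failed outright */
		return ret;
	}
}
#endif
/*
 * cpt_process_ccode() interprets the hardware completion word: it
 * returns 1 while the request is still pending (the completion code is
 * still COMPLETION_CODE_INIT), and 0 once processing is final, setting
 * *res_code to 0 on success and leaving the caller's default otherwise.
 */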
static int cpt_process_ccode(struct pci_dev *pdev,
union otx_cpt_res_s *cpt_status,
struct otx_cpt_info_buffer *cpt_info,
struct otx_cpt_req_info *req, u32 *res_code)
{
u8 ccode = cpt_status->s.compcode;
union otx_cpt_error_code ecode;
ecode.u = be64_to_cpup((__be64 *)cpt_info->out_buffer);
switch (ccode) {
case CPT_COMP_E_FAULT:
dev_err(&pdev->dev,
"Request failed with DMA fault\n");
otx_cpt_dump_sg_list(pdev, req);
break;
case CPT_COMP_E_SWERR:
dev_err(&pdev->dev,
"Request failed with software error code %d\n",
ecode.s.ccode);
otx_cpt_dump_sg_list(pdev, req);
break;
case CPT_COMP_E_HWERR:
dev_err(&pdev->dev,
"Request failed with hardware error\n");
otx_cpt_dump_sg_list(pdev, req);
break;
case COMPLETION_CODE_INIT:
/* check for timeout */
if (time_after_eq(jiffies, cpt_info->time_in +
OTX_CPT_COMMAND_TIMEOUT * HZ))
dev_warn(&pdev->dev, "Request timed out 0x%p\n", req);
else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
cpt_info->time_in = jiffies;
cpt_info->extra_time++;
}
return 1;
case CPT_COMP_E_GOOD:
/* Check microcode completion code */
if (ecode.s.ccode) {
			/*
			 * If the requested hmac is truncated and the ucode
			 * returns an s/g write length error, we report
			 * success: the ucode writes as many bytes of the
			 * calculated hmac as fit into the gather buffer and
			 * reports an s/g write length error when the gather
			 * buffer holds fewer bytes than the full hmac size.
			 */
if (req->is_trunc_hmac &&
ecode.s.ccode == ERR_SCATTER_GATHER_WRITE_LENGTH) {
*res_code = 0;
break;
}
dev_err(&pdev->dev,
"Request failed with software error code 0x%x\n",
ecode.s.ccode);
otx_cpt_dump_sg_list(pdev, req);
break;
}
		/* Request has been processed successfully */
*res_code = 0;
break;
default:
dev_err(&pdev->dev, "Request returned invalid status\n");
break;
}
return 0;
}
static inline void process_pending_queue(struct pci_dev *pdev,
struct otx_cpt_pending_queue *pqueue)
{
void (*callback)(int status, void *arg1, void *arg2);
struct otx_cpt_pending_entry *resume_pentry = NULL;
struct otx_cpt_pending_entry *pentry = NULL;
struct otx_cpt_info_buffer *cpt_info = NULL;
union otx_cpt_res_s *cpt_status = NULL;
struct otx_cpt_req_info *req = NULL;
struct crypto_async_request *areq;
u32 res_code, resume_index;
while (1) {
spin_lock_bh(&pqueue->lock);
pentry = &pqueue->head[pqueue->front];
if (WARN_ON(!pentry)) {
spin_unlock_bh(&pqueue->lock);
break;
}
res_code = -EINVAL;
if (unlikely(!pentry->busy)) {
spin_unlock_bh(&pqueue->lock);
break;
}
if (unlikely(!pentry->callback)) {
dev_err(&pdev->dev, "Callback NULL\n");
goto process_pentry;
}
cpt_info = pentry->info;
if (unlikely(!cpt_info)) {
dev_err(&pdev->dev, "Pending entry post arg NULL\n");
goto process_pentry;
}
req = cpt_info->req;
if (unlikely(!req)) {
dev_err(&pdev->dev, "Request NULL\n");
goto process_pentry;
}
cpt_status = (union otx_cpt_res_s *) pentry->completion_addr;
if (unlikely(!cpt_status)) {
dev_err(&pdev->dev, "Completion address NULL\n");
goto process_pentry;
}
if (cpt_process_ccode(pdev, cpt_status, cpt_info, req,
&res_code)) {
spin_unlock_bh(&pqueue->lock);
return;
}
cpt_info->pdev = pdev;
process_pentry:
		/*
		 * Check if we should inform the sending side to resume.
		 * We do it CPT_IQ_RESUME_MARGIN entries before the
		 * pending queue becomes empty.
		 */
resume_index = modulo_inc(pqueue->front, pqueue->qlen,
CPT_IQ_RESUME_MARGIN);
resume_pentry = &pqueue->head[resume_index];
if (resume_pentry &&
resume_pentry->resume_sender) {
resume_pentry->resume_sender = false;
callback = resume_pentry->callback;
areq = resume_pentry->areq;
if (callback) {
spin_unlock_bh(&pqueue->lock);
/*
* EINPROGRESS is an indication for sending
* side that it can resume sending requests
*/
callback(-EINPROGRESS, areq, cpt_info);
spin_lock_bh(&pqueue->lock);
}
}
callback = pentry->callback;
areq = pentry->areq;
free_pentry(pentry);
pqueue->pending_count--;
pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
spin_unlock_bh(&pqueue->lock);
		/*
		 * Call the callback after the current pending entry has
		 * been processed; skip it if the callback pointer is
		 * NULL.
		 */
if (callback)
callback(res_code, areq, cpt_info);
}
}
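/*
 * Note on the ring arithmetic above: modulo_inc() is assumed to advance
 * an index around the circular pending queue, i.e. roughly
 * (index + step) % length. A minimal self-contained sketch of the
 * assumed semantics:
 */
#if 0
static inline u32 example_modulo_inc(u32 index, u32 length, u32 step)
{
	return (index + step) % length;	/* assumed semantics */
}
#endif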
void otx_cpt_post_process(struct otx_cptvf_wqe *wqe)
{
process_pending_queue(wqe->cptvf->pdev, &wqe->cptvf->pqinfo.queue[0]);
}
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains the base functions to manage periodic tick
* related events.
*
* Copyright(C) 2005-2006, Thomas Gleixner <[email protected]>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
*/
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>
#include <asm/irq_regs.h>
#include "tick-internal.h"
/*
* Tick devices
*/
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time. It's updated by the
 * CPU which handles the tick and protected by jiffies_lock. There is
 * no requirement to hold the jiffies seqcount write side for it.
 */
ktime_t tick_next_period;
/*
 * tick_do_timer_cpu is a timer core internal variable which holds the
 * number of the CPU responsible for calling do_timer(), i.e. the
 * timekeeping stuff. This variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion CPUs trying to grab
 *    the timekeeping lock all at once. Only the CPU which is assigned to
 *    do the update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non-existent CPU. So the next CPU which
 *    looks at it will take over and keep the timekeeping alive. The
 *    handover procedure also covers CPU hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
#ifdef CONFIG_NO_HZ_FULL
/*
* tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
* tick_do_timer_cpu and it should be taken over by an eligible secondary
* when one comes online.
*/
static int tick_do_timer_boot_cpu __read_mostly = -1;
#endif
/*
* Debugging: see timer_list.c
*/
struct tick_device *tick_get_device(int cpu)
{
return &per_cpu(tick_cpu_device, cpu);
}
/**
* tick_is_oneshot_available - check for a oneshot capable event device
*/
int tick_is_oneshot_available(void)
{
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
return 0;
if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
return 1;
return tick_broadcast_oneshot_available();
}
/*
* Periodic tick
*/
static void tick_periodic(int cpu)
{
if (READ_ONCE(tick_do_timer_cpu) == cpu) {
raw_spin_lock(&jiffies_lock);
write_seqcount_begin(&jiffies_seq);
/* Keep track of the next tick event */
tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);
do_timer(1);
write_seqcount_end(&jiffies_seq);
raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
update_process_times(user_mode(get_irq_regs()));
profile_tick(CPU_PROFILING);
}
/*
* Event handler for periodic ticks
*/
void tick_handle_periodic(struct clock_event_device *dev)
{
int cpu = smp_processor_id();
ktime_t next = dev->next_event;
tick_periodic(cpu);
/*
* The cpu might have transitioned to HIGHRES or NOHZ mode via
* update_process_times() -> run_local_timers() ->
* hrtimer_run_queues().
*/
if (IS_ENABLED(CONFIG_TICK_ONESHOT) && dev->event_handler != tick_handle_periodic)
return;
if (!clockevent_state_oneshot(dev))
return;
for (;;) {
/*
		 * Setup the next period for devices which do not have
		 * periodic mode:
*/
next = ktime_add_ns(next, TICK_NSEC);
if (!clockevents_program_event(dev, next, false))
return;
/*
* Have to be careful here. If we're in oneshot mode,
* before we call tick_periodic() in a loop, we need
* to be sure we're using a real hardware clocksource.
* Otherwise we could get trapped in an infinite
* loop, as the tick_periodic() increments jiffies,
* which then will increment time, possibly causing
* the loop to trigger again and again.
*/
if (timekeeping_valid_for_hres())
tick_periodic(cpu);
}
}
/*
* Setup the device for a periodic tick
*/
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
tick_set_periodic_handler(dev, broadcast);
/* Broadcast setup ? */
if (!tick_device_is_functional(dev))
return;
if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
!tick_broadcast_oneshot_active()) {
clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
} else {
unsigned int seq;
ktime_t next;
do {
seq = read_seqcount_begin(&jiffies_seq);
next = tick_next_period;
} while (read_seqcount_retry(&jiffies_seq, seq));
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
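		/*
		 * Program the first expiry; if it is already in the
		 * past, advance by one tick period and retry.
		 */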
for (;;) {
if (!clockevents_program_event(dev, next, false))
return;
next = ktime_add_ns(next, TICK_NSEC);
}
}
}
/*
* Setup the tick device
*/
static void tick_setup_device(struct tick_device *td,
struct clock_event_device *newdev, int cpu,
const struct cpumask *cpumask)
{
void (*handler)(struct clock_event_device *) = NULL;
ktime_t next_event = 0;
/*
* First device setup ?
*/
if (!td->evtdev) {
/*
* If no cpu took the do_timer update, assign it to
* this cpu:
*/
if (READ_ONCE(tick_do_timer_cpu) == TICK_DO_TIMER_BOOT) {
WRITE_ONCE(tick_do_timer_cpu, cpu);
tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case the
* first housekeeping secondary will take do_timer()
* from it.
*/
if (tick_nohz_full_cpu(cpu))
tick_do_timer_boot_cpu = cpu;
} else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) {
tick_do_timer_boot_cpu = -1;
/*
* The boot CPU will stay in periodic (NOHZ disabled)
* mode until clocksource_done_booting() called after
* smp_init() selects a high resolution clocksource and
* timekeeping_notify() kicks the NOHZ stuff alive.
*
* So this WRITE_ONCE can only race with the READ_ONCE
* check in tick_periodic() but this race is harmless.
*/
WRITE_ONCE(tick_do_timer_cpu, cpu);
#endif
}
/*
* Startup in periodic mode first.
*/
td->mode = TICKDEV_MODE_PERIODIC;
} else {
handler = td->evtdev->event_handler;
next_event = td->evtdev->next_event;
td->evtdev->event_handler = clockevents_handle_noop;
}
td->evtdev = newdev;
/*
* When the device is not per cpu, pin the interrupt to the
* current cpu:
*/
if (!cpumask_equal(newdev->cpumask, cpumask))
irq_set_affinity(newdev->irq, cpumask);
/*
* When global broadcasting is active, check if the current
* device is registered as a placeholder for broadcast mode.
* This allows us to handle this x86 misfeature in a generic
* way. This function also returns !=0 when we keep the
* current active broadcast state for this CPU.
*/
if (tick_device_uses_broadcast(newdev, cpu))
return;
if (td->mode == TICKDEV_MODE_PERIODIC)
tick_setup_periodic(newdev, 0);
else
tick_setup_oneshot(newdev, handler, next_event);
}
void tick_install_replacement(struct clock_event_device *newdev)
{
struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
int cpu = smp_processor_id();
clockevents_exchange_device(td->evtdev, newdev);
tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
tick_oneshot_notify();
}
static bool tick_check_percpu(struct clock_event_device *curdev,
struct clock_event_device *newdev, int cpu)
{
if (!cpumask_test_cpu(cpu, newdev->cpumask))
return false;
if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
return true;
/* Check if irq affinity can be set */
if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
return false;
/* Prefer an existing cpu local device */
if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
return false;
return true;
}
static bool tick_check_preferred(struct clock_event_device *curdev,
struct clock_event_device *newdev)
{
/* Prefer oneshot capable device */
if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
return false;
if (tick_oneshot_mode_active())
return false;
}
/*
* Use the higher rated one, but prefer a CPU local device with a lower
* rating than a non-CPU local device
*/
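	/*
	 * Example (illustrative): assuming the oneshot check above
	 * passes, a strictly per-CPU device replaces a global device on
	 * this CPU even with a lower rating, because differing cpumasks
	 * take precedence over the rating comparison.
	 */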
return !curdev ||
newdev->rating > curdev->rating ||
!cpumask_equal(curdev->cpumask, newdev->cpumask);
}
/*
* Check whether the new device is a better fit than curdev. curdev
* can be NULL !
*/
bool tick_check_replacement(struct clock_event_device *curdev,
struct clock_event_device *newdev)
{
if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
return false;
return tick_check_preferred(curdev, newdev);
}
/*
 * Check if the newly registered device should be used. Called with
* clockevents_lock held and interrupts disabled.
*/
void tick_check_new_device(struct clock_event_device *newdev)
{
struct clock_event_device *curdev;
struct tick_device *td;
int cpu;
cpu = smp_processor_id();
td = &per_cpu(tick_cpu_device, cpu);
curdev = td->evtdev;
if (!tick_check_replacement(curdev, newdev))
goto out_bc;
if (!try_module_get(newdev->owner))
return;
/*
	 * Replace the existing device, if any, by the new
	 * device. If the current device is the broadcast device, do
* not give it back to the clockevents layer !
*/
if (tick_is_broadcast_device(curdev)) {
clockevents_shutdown(curdev);
curdev = NULL;
}
clockevents_exchange_device(curdev, newdev);
tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
tick_oneshot_notify();
return;
out_bc:
/*
* Can the new device be used as a broadcast device ?
*/
tick_install_broadcast_device(newdev, cpu);
}
/**
* tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
* @state: The target state (enter/exit)
*
 * The system enters/leaves a state where affected clock event devices
 * might stop. Returns 0 on success, -EBUSY if the cpu is used to
 * broadcast wakeups.
*
* Called with interrupts disabled, so clockevents_lock is not
* required here because the local clock event device cannot go away
* under us.
*/
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
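	/*
	 * If the local clock event device keeps ticking in deep idle
	 * states (no C3STOP), broadcast handling is not needed.
	 */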
if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
return __tick_broadcast_oneshot_control(state);
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
#ifdef CONFIG_HOTPLUG_CPU
void tick_assert_timekeeping_handover(void)
{
WARN_ON_ONCE(tick_do_timer_cpu == smp_processor_id());
}
/*
* Stop the tick and transfer the timekeeping job away from a dying cpu.
*/
int tick_cpu_dying(unsigned int dying_cpu)
{
/*
* If the current CPU is the timekeeper, it's the only one that can
* safely hand over its duty. Also all online CPUs are in stop
* machine, guaranteed not to be idle, therefore there is no
* concurrency and it's safe to pick any online successor.
*/
if (tick_do_timer_cpu == dying_cpu)
tick_do_timer_cpu = cpumask_first(cpu_online_mask);
/* Make sure the CPU won't try to retake the timekeeping duty */
tick_sched_timer_dying(dying_cpu);
/* Remove CPU from timer broadcasting */
tick_offline_cpu(dying_cpu);
return 0;
}
/*
* Shutdown an event device on a given cpu:
*
 * This is called on a live CPU, after another CPU has died, so we cannot
* access the hardware device itself.
* We just set the mode and remove it from the lists.
*/
void tick_shutdown(unsigned int cpu)
{
struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
struct clock_event_device *dev = td->evtdev;
td->mode = TICKDEV_MODE_PERIODIC;
if (dev) {
/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
*/
clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
clockevents_exchange_device(dev, NULL);
dev->event_handler = clockevents_handle_noop;
td->evtdev = NULL;
}
}
#endif
/**
* tick_suspend_local - Suspend the local tick device
*
* Called from the local cpu for freeze with interrupts disabled.
*
* No locks required. Nothing can change the per cpu device.
*/
void tick_suspend_local(void)
{
struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
clockevents_shutdown(td->evtdev);
}
/**
* tick_resume_local - Resume the local tick device
*
* Called from the local CPU for unfreeze or XEN resume magic.
*
* No locks required. Nothing can change the per cpu device.
*/
void tick_resume_local(void)
{
struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
bool broadcast = tick_resume_check_broadcast();
clockevents_tick_resume(td->evtdev);
if (!broadcast) {
if (td->mode == TICKDEV_MODE_PERIODIC)
tick_setup_periodic(td->evtdev, 0);
else
tick_resume_oneshot();
}
/*
* Ensure that hrtimers are up to date and the clockevents device
* is reprogrammed correctly when high resolution timers are
* enabled.
*/
hrtimers_resume_local();
}
/**
* tick_suspend - Suspend the tick and the broadcast device
*
* Called from syscore_suspend() via timekeeping_suspend with only one
* CPU online and interrupts disabled or from tick_unfreeze() under
* tick_freeze_lock.
*
* No locks required. Nothing can change the per cpu device.
*/
void tick_suspend(void)
{
tick_suspend_local();
tick_suspend_broadcast();
}
/**
* tick_resume - Resume the tick and the broadcast device
*
* Called from syscore_resume() via timekeeping_resume with only one
* CPU online and interrupts disabled.
*
* No locks required. Nothing can change the per cpu device.
*/
void tick_resume(void)
{
tick_resume_broadcast();
tick_resume_local();
}
#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;
/**
* tick_freeze - Suspend the local tick and (possibly) timekeeping.
*
* Check if this is the last online CPU executing the function and if so,
* suspend timekeeping. Otherwise suspend the local tick.
*
* Call with interrupts disabled. Must be balanced with %tick_unfreeze().
* Interrupts must not be enabled before the subsequent %tick_unfreeze().
*/
void tick_freeze(void)
{
raw_spin_lock(&tick_freeze_lock);
tick_freeze_depth++;
if (tick_freeze_depth == num_online_cpus()) {
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), true);
system_state = SYSTEM_SUSPEND;
sched_clock_suspend();
timekeeping_suspend();
} else {
tick_suspend_local();
}
raw_spin_unlock(&tick_freeze_lock);
}
/**
* tick_unfreeze - Resume the local tick and (possibly) timekeeping.
*
* Check if this is the first CPU executing the function and if so, resume
* timekeeping. Otherwise resume the local tick.
*
* Call with interrupts disabled. Must be balanced with %tick_freeze().
* Interrupts must not be enabled after the preceding %tick_freeze().
*/
void tick_unfreeze(void)
{
raw_spin_lock(&tick_freeze_lock);
if (tick_freeze_depth == num_online_cpus()) {
timekeeping_resume();
sched_clock_resume();
system_state = SYSTEM_RUNNING;
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), false);
} else {
touch_softlockup_watchdog();
tick_resume_local();
}
tick_freeze_depth--;
raw_spin_unlock(&tick_freeze_lock);
}
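/*
 * Illustrative pairing (hypothetical caller, not from this file):
 * tick_freeze() and tick_unfreeze() must be balanced and called with
 * interrupts disabled, e.g. around entering a low power idle state:
 */
#if 0
static void example_freeze_cycle(void)
{
	/* interrupts are assumed to be disabled here */
	tick_freeze();
	/* ... enter the low power state ... */
	tick_unfreeze();
}
#endif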
#endif /* CONFIG_SUSPEND */
/**
* tick_init - initialize the tick control
*/
void __init tick_init(void)
{
tick_broadcast_init();
tick_nohz_init();
}