/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC64_HVTRAP_H
#define _SPARC64_HVTRAP_H

#ifndef __ASSEMBLY__

#include <linux/types.h>

struct hvtramp_mapping {
	__u64		vaddr;
	__u64		tte;
};

struct hvtramp_descr {
	__u32		cpu;
	__u32		num_mappings;
	__u64		fault_info_va;
	__u64		fault_info_pa;
	__u64		thread_reg;
	struct hvtramp_mapping	maps[];
};

void hv_cpu_startup(unsigned long hvdescr_pa);

#endif

#define HVTRAMP_DESCR_CPU		0x00
#define HVTRAMP_DESCR_NUM_MAPPINGS	0x04
#define HVTRAMP_DESCR_FAULT_INFO_VA	0x08
#define HVTRAMP_DESCR_FAULT_INFO_PA	0x10
#define HVTRAMP_DESCR_THREAD_REG	0x18
#define HVTRAMP_DESCR_MAPS		0x20

#define HVTRAMP_MAPPING_VADDR	0x00
#define HVTRAMP_MAPPING_TTE	0x08
#define HVTRAMP_MAPPING_SIZE	0x10

#endif /* _SPARC64_HVTRAP_H */
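The HVTRAMP_DESCR_* and HVTRAMP_MAPPING_* constants above are the assembler-visible offsets of the two C structures. A minimal standalone sketch (not part of the kernel build, using <stdint.h> stand-ins for the __u32/__u64 types) that checks the offsets stay in sync with the C layout:

#include <stddef.h>
#include <stdint.h>

struct hvtramp_mapping {
	uint64_t vaddr;
	uint64_t tte;
};

struct hvtramp_descr {
	uint32_t cpu;
	uint32_t num_mappings;
	uint64_t fault_info_va;
	uint64_t fault_info_pa;
	uint64_t thread_reg;
	struct hvtramp_mapping maps[];
};

/* Each assembler offset constant must match the C layout of the descriptor. */
_Static_assert(offsetof(struct hvtramp_descr, cpu) == 0x00, "HVTRAMP_DESCR_CPU");
_Static_assert(offsetof(struct hvtramp_descr, num_mappings) == 0x04, "HVTRAMP_DESCR_NUM_MAPPINGS");
_Static_assert(offsetof(struct hvtramp_descr, fault_info_va) == 0x08, "HVTRAMP_DESCR_FAULT_INFO_VA");
_Static_assert(offsetof(struct hvtramp_descr, fault_info_pa) == 0x10, "HVTRAMP_DESCR_FAULT_INFO_PA");
_Static_assert(offsetof(struct hvtramp_descr, thread_reg) == 0x18, "HVTRAMP_DESCR_THREAD_REG");
_Static_assert(offsetof(struct hvtramp_descr, maps) == 0x20, "HVTRAMP_DESCR_MAPS");
_Static_assert(sizeof(struct hvtramp_mapping) == 0x10, "HVTRAMP_MAPPING_SIZE");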
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef __DAL_GRPH_OBJECT_DEFS_H__ #define __DAL_GRPH_OBJECT_DEFS_H__ #include "grph_object_id.h" /* ******************************************************************** * ******************************************************************** * * These defines shared between All Graphics Objects * * ******************************************************************** * ******************************************************************** */ #define MAX_CONNECTOR_NUMBER_PER_SLOT (16) #define MAX_BOARD_SLOTS (4) #define INVALID_CONNECTOR_INDEX ((unsigned int)(-1)) /* HPD unit id - HW direct translation */ enum hpd_source_id { HPD_SOURCEID1 = 0, HPD_SOURCEID2, HPD_SOURCEID3, HPD_SOURCEID4, HPD_SOURCEID5, HPD_SOURCEID6, HPD_SOURCEID_COUNT, HPD_SOURCEID_UNKNOWN }; /* DDC unit id - HW direct translation */ enum channel_id { CHANNEL_ID_UNKNOWN = 0, CHANNEL_ID_DDC1, CHANNEL_ID_DDC2, CHANNEL_ID_DDC3, CHANNEL_ID_DDC4, CHANNEL_ID_DDC5, CHANNEL_ID_DDC6, CHANNEL_ID_DDC_VGA, CHANNEL_ID_I2C_PAD, CHANNEL_ID_COUNT }; #define DECODE_CHANNEL_ID(ch_id) \ (ch_id) == CHANNEL_ID_DDC1 ? "CHANNEL_ID_DDC1" : \ (ch_id) == CHANNEL_ID_DDC2 ? "CHANNEL_ID_DDC2" : \ (ch_id) == CHANNEL_ID_DDC3 ? "CHANNEL_ID_DDC3" : \ (ch_id) == CHANNEL_ID_DDC4 ? "CHANNEL_ID_DDC4" : \ (ch_id) == CHANNEL_ID_DDC5 ? "CHANNEL_ID_DDC5" : \ (ch_id) == CHANNEL_ID_DDC6 ? "CHANNEL_ID_DDC6" : \ (ch_id) == CHANNEL_ID_DDC_VGA ? "CHANNEL_ID_DDC_VGA" : \ (ch_id) == CHANNEL_ID_I2C_PAD ? 
"CHANNEL_ID_I2C_PAD" : "Invalid" enum transmitter { TRANSMITTER_UNKNOWN = (-1L), TRANSMITTER_UNIPHY_A, TRANSMITTER_UNIPHY_B, TRANSMITTER_UNIPHY_C, TRANSMITTER_UNIPHY_D, TRANSMITTER_UNIPHY_E, TRANSMITTER_UNIPHY_F, TRANSMITTER_NUTMEG_CRT, TRANSMITTER_TRAVIS_CRT, TRANSMITTER_TRAVIS_LCD, TRANSMITTER_UNIPHY_G, TRANSMITTER_COUNT }; /* Generic source of the synchronisation input/output signal */ /* Can be used for flow control, stereo sync, timing sync, frame sync, etc */ enum sync_source { SYNC_SOURCE_NONE = 0, /* Source based on controllers */ SYNC_SOURCE_CONTROLLER0, SYNC_SOURCE_CONTROLLER1, SYNC_SOURCE_CONTROLLER2, SYNC_SOURCE_CONTROLLER3, SYNC_SOURCE_CONTROLLER4, SYNC_SOURCE_CONTROLLER5, /* Source based on GSL group */ SYNC_SOURCE_GSL_GROUP0, SYNC_SOURCE_GSL_GROUP1, SYNC_SOURCE_GSL_GROUP2, /* Source based on GSL IOs */ /* These IOs normally used as GSL input/output */ SYNC_SOURCE_GSL_IO_FIRST, SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK = SYNC_SOURCE_GSL_IO_FIRST, SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC, SYNC_SOURCE_GSL_IO_SWAPLOCK_A, SYNC_SOURCE_GSL_IO_SWAPLOCK_B, SYNC_SOURCE_GSL_IO_LAST = SYNC_SOURCE_GSL_IO_SWAPLOCK_B, /* Source based on regular IOs */ SYNC_SOURCE_IO_FIRST, SYNC_SOURCE_IO_GENERIC_A = SYNC_SOURCE_IO_FIRST, SYNC_SOURCE_IO_GENERIC_B, SYNC_SOURCE_IO_GENERIC_C, SYNC_SOURCE_IO_GENERIC_D, SYNC_SOURCE_IO_GENERIC_E, SYNC_SOURCE_IO_GENERIC_F, SYNC_SOURCE_IO_HPD1, SYNC_SOURCE_IO_HPD2, SYNC_SOURCE_IO_HSYNC_A, SYNC_SOURCE_IO_VSYNC_A, SYNC_SOURCE_IO_HSYNC_B, SYNC_SOURCE_IO_VSYNC_B, SYNC_SOURCE_IO_LAST = SYNC_SOURCE_IO_VSYNC_B, /* Misc. flow control sources */ SYNC_SOURCE_DUAL_GPU_PIN }; enum tx_ffe_id { TX_FFE0 = 0, TX_FFE1, TX_FFE2, TX_FFE3, TX_FFE_DeEmphasis_Only, TX_FFE_PreShoot_Only, TX_FFE_No_FFE, }; /* connector sizes in millimeters - from BiosParserTypes.hpp */ #define CONNECTOR_SIZE_DVI 40 #define CONNECTOR_SIZE_VGA 32 #define CONNECTOR_SIZE_HDMI 16 #define CONNECTOR_SIZE_DP 16 #define CONNECTOR_SIZE_MINI_DP 9 #define CONNECTOR_SIZE_UNKNOWN 30 enum connector_layout_type { CONNECTOR_LAYOUT_TYPE_UNKNOWN, CONNECTOR_LAYOUT_TYPE_DVI_D, CONNECTOR_LAYOUT_TYPE_DVI_I, CONNECTOR_LAYOUT_TYPE_VGA, CONNECTOR_LAYOUT_TYPE_HDMI, CONNECTOR_LAYOUT_TYPE_DP, CONNECTOR_LAYOUT_TYPE_MINI_DP, }; struct connector_layout_info { struct graphics_object_id connector_id; enum connector_layout_type connector_type; unsigned int length; unsigned int position; /* offset in mm from right side of the board */ }; /* length and width in mm */ struct slot_layout_info { unsigned int length; unsigned int width; unsigned int num_of_connectors; struct connector_layout_info connectors[MAX_CONNECTOR_NUMBER_PER_SLOT]; }; struct board_layout_info { unsigned int num_of_slots; /* indicates valid information in bracket layout structure. */ unsigned int is_number_of_slots_valid : 1; unsigned int is_slots_size_valid : 1; unsigned int is_connector_offsets_valid : 1; unsigned int is_connector_lengths_valid : 1; struct slot_layout_info slots[MAX_BOARD_SLOTS]; }; #endif
/* * Copyright 2015 Chen-Yu Tsai * * Chen-Yu Tsai <[email protected]> * * This file is dual-licensed: you can use it either under the terms * of the GPL or the X11 license, at your option. Note that this dual * licensing only applies to this file, and not this project as a * whole. * * a) This file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This file is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Or, alternatively, * * b) Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ /* * AXP809 Integrated Power Management Chip */ &axp809 { compatible = "x-powers,axp809"; interrupt-controller; #interrupt-cells = <1>; axp_gpio: gpio { compatible = "x-powers,axp809-gpio", "x-powers,axp221-gpio"; gpio-controller; #gpio-cells = <2>; }; };
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2024 Meta #include <test_progs.h> #include "network_helpers.h" #include "sock_iter_batch.skel.h" #define TEST_NS "sock_iter_batch_netns" static const int nr_soreuse = 4; static void do_test(int sock_type, bool onebyone) { int err, i, nread, to_read, total_read, iter_fd = -1; int first_idx, second_idx, indices[nr_soreuse]; struct bpf_link *link = NULL; struct sock_iter_batch *skel; int *fds[2] = {}; skel = sock_iter_batch__open(); if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open")) return; /* Prepare 2 buckets of sockets in the kernel hashtable */ for (i = 0; i < ARRAY_SIZE(fds); i++) { int local_port; fds[i] = start_reuseport_server(AF_INET6, sock_type, "::1", 0, 0, nr_soreuse); if (!ASSERT_OK_PTR(fds[i], "start_reuseport_server")) goto done; local_port = get_socket_local_port(*fds[i]); if (!ASSERT_GE(local_port, 0, "get_socket_local_port")) goto done; skel->rodata->ports[i] = ntohs(local_port); } err = sock_iter_batch__load(skel); if (!ASSERT_OK(err, "sock_iter_batch__load")) goto done; link = bpf_program__attach_iter(sock_type == SOCK_STREAM ? skel->progs.iter_tcp_soreuse : skel->progs.iter_udp_soreuse, NULL); if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter")) goto done; iter_fd = bpf_iter_create(bpf_link__fd(link)); if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create")) goto done; /* Test reading a bucket (either from fds[0] or fds[1]). * Only read "nr_soreuse - 1" number of sockets * from a bucket and leave one socket out from * that bucket on purpose. */ to_read = (nr_soreuse - 1) * sizeof(*indices); total_read = 0; first_idx = -1; do { nread = read(iter_fd, indices, onebyone ? sizeof(*indices) : to_read); if (nread <= 0 || nread % sizeof(*indices)) break; total_read += nread; if (first_idx == -1) first_idx = indices[0]; for (i = 0; i < nread / sizeof(*indices); i++) ASSERT_EQ(indices[i], first_idx, "first_idx"); } while (total_read < to_read); ASSERT_EQ(nread, onebyone ? sizeof(*indices) : to_read, "nread"); ASSERT_EQ(total_read, to_read, "total_read"); free_fds(fds[first_idx], nr_soreuse); fds[first_idx] = NULL; /* Read the "whole" second bucket */ to_read = nr_soreuse * sizeof(*indices); total_read = 0; second_idx = !first_idx; do { nread = read(iter_fd, indices, onebyone ? sizeof(*indices) : to_read); if (nread <= 0 || nread % sizeof(*indices)) break; total_read += nread; for (i = 0; i < nread / sizeof(*indices); i++) ASSERT_EQ(indices[i], second_idx, "second_idx"); } while (total_read <= to_read); ASSERT_EQ(nread, 0, "nread"); /* Both so_reuseport ports should be in different buckets, so * total_read must equal to the expected to_read. * * For a very unlikely case, both ports collide at the same bucket, * the bucket offset (i.e. 3) will be skipped and it cannot * expect the to_read number of bytes. 
*/ if (skel->bss->bucket[0] != skel->bss->bucket[1]) ASSERT_EQ(total_read, to_read, "total_read"); done: for (i = 0; i < ARRAY_SIZE(fds); i++) free_fds(fds[i], nr_soreuse); if (iter_fd >= 0) close(iter_fd); bpf_link__destroy(link); sock_iter_batch__destroy(skel); } void test_sock_iter_batch(void) { struct nstoken *nstoken = NULL; SYS_NOFAIL("ip netns del " TEST_NS); SYS(done, "ip netns add %s", TEST_NS); SYS(done, "ip -net %s link set dev lo up", TEST_NS); nstoken = open_netns(TEST_NS); if (!ASSERT_OK_PTR(nstoken, "open_netns")) goto done; if (test__start_subtest("tcp")) { do_test(SOCK_STREAM, true); do_test(SOCK_STREAM, false); } if (test__start_subtest("udp")) { do_test(SOCK_DGRAM, true); do_test(SOCK_DGRAM, false); } close_netns(nstoken); done: SYS_NOFAIL("ip netns del " TEST_NS); }
/* SPDX-License-Identifier: GPL-2.0 */ #define MMC_STRPCL 0x0000 #define STOP_CLOCK (1 << 0) #define START_CLOCK (2 << 0) #define MMC_STAT 0x0004 #define STAT_END_CMD_RES (1 << 13) #define STAT_PRG_DONE (1 << 12) #define STAT_DATA_TRAN_DONE (1 << 11) #define STAT_CLK_EN (1 << 8) #define STAT_RECV_FIFO_FULL (1 << 7) #define STAT_XMIT_FIFO_EMPTY (1 << 6) #define STAT_RES_CRC_ERR (1 << 5) #define STAT_SPI_READ_ERROR_TOKEN (1 << 4) #define STAT_CRC_READ_ERROR (1 << 3) #define STAT_CRC_WRITE_ERROR (1 << 2) #define STAT_TIME_OUT_RESPONSE (1 << 1) #define STAT_READ_TIME_OUT (1 << 0) #define MMC_CLKRT 0x0008 /* 3 bit */ #define MMC_SPI 0x000c #define SPI_CS_ADDRESS (1 << 3) #define SPI_CS_EN (1 << 2) #define CRC_ON (1 << 1) #define SPI_EN (1 << 0) #define MMC_CMDAT 0x0010 #define CMDAT_SDIO_INT_EN (1 << 11) #define CMDAT_SD_4DAT (1 << 8) #define CMDAT_DMAEN (1 << 7) #define CMDAT_INIT (1 << 6) #define CMDAT_BUSY (1 << 5) #define CMDAT_STREAM (1 << 4) /* 1 = stream */ #define CMDAT_WRITE (1 << 3) /* 1 = write */ #define CMDAT_DATAEN (1 << 2) #define CMDAT_RESP_NONE (0 << 0) #define CMDAT_RESP_SHORT (1 << 0) #define CMDAT_RESP_R2 (2 << 0) #define CMDAT_RESP_R3 (3 << 0) #define MMC_RESTO 0x0014 /* 7 bit */ #define MMC_RDTO 0x0018 /* 16 bit */ #define MMC_BLKLEN 0x001c /* 10 bit */ #define MMC_NOB 0x0020 /* 16 bit */ #define MMC_PRTBUF 0x0024 #define BUF_PART_FULL (1 << 0) #define MMC_I_MASK 0x0028 /*PXA27x MMC interrupts*/ #define SDIO_SUSPEND_ACK (1 << 12) #define SDIO_INT (1 << 11) #define RD_STALLED (1 << 10) #define RES_ERR (1 << 9) #define DAT_ERR (1 << 8) #define TINT (1 << 7) /*PXA2xx MMC interrupts*/ #define TXFIFO_WR_REQ (1 << 6) #define RXFIFO_RD_REQ (1 << 5) #define CLK_IS_OFF (1 << 4) #define STOP_CMD (1 << 3) #define END_CMD_RES (1 << 2) #define PRG_DONE (1 << 1) #define DATA_TRAN_DONE (1 << 0) #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) #define MMC_I_MASK_ALL 0x00001fff #else #define MMC_I_MASK_ALL 0x0000007f #endif #define MMC_I_REG 0x002c /* same as MMC_I_MASK */ #define MMC_CMD 0x0030 #define MMC_ARGH 0x0034 /* 16 bit */ #define MMC_ARGL 0x0038 /* 16 bit */ #define MMC_RES 0x003c /* 16 bit */ #define MMC_RXFIFO 0x0040 /* 8 bit */ #define MMC_TXFIFO 0x0044 /* 8 bit */
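The MMC_CMDAT bit definitions above are OR-ed together per command. A hypothetical helper (not taken from the pxamci driver, and assuming the register definitions above are in scope) sketching how a block-write data command could be composed:

#include <stdint.h>

/* Hypothetical: build the MMC_CMDAT value for a block write with a short
 * (R1-style) response, optionally using DMA and the 4-bit data bus. */
static uint32_t pxamci_cmdat_block_write(int use_dma, int wide_bus)
{
	uint32_t cmdat = CMDAT_DATAEN | CMDAT_WRITE | CMDAT_RESP_SHORT;

	if (use_dma)
		cmdat |= CMDAT_DMAEN;
	if (wide_bus)
		cmdat |= CMDAT_SD_4DAT;

	return cmdat;
}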
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * hd64572.h Description of the Hitachi HD64572 (SCA-II), valid for * CPU modes 0 & 2. * * Author: Ivan Passos <[email protected]> * * Copyright: (c) 2000-2001 Cyclades Corp. * * $Log: hd64572.h,v $ * Revision 3.1 2001/06/15 12:41:10 regina * upping major version number * * Revision 1.1.1.1 2001/06/13 20:24:49 daniela * PC300 initial CVS version (3.4.0-pre1) * * Revision 1.0 2000/01/25 ivan * Initial version. */ #ifndef __HD64572_H #define __HD64572_H /* Illegal Access Register */ #define ILAR 0x00 /* Wait Controller Registers */ #define PABR0L 0x20 /* Physical Addr Boundary Register 0 L */ #define PABR0H 0x21 /* Physical Addr Boundary Register 0 H */ #define PABR1L 0x22 /* Physical Addr Boundary Register 1 L */ #define PABR1H 0x23 /* Physical Addr Boundary Register 1 H */ #define WCRL 0x24 /* Wait Control Register L */ #define WCRM 0x25 /* Wait Control Register M */ #define WCRH 0x26 /* Wait Control Register H */ /* Interrupt Registers */ #define IVR 0x60 /* Interrupt Vector Register */ #define IMVR 0x64 /* Interrupt Modified Vector Register */ #define ITCR 0x68 /* Interrupt Control Register */ #define ISR0 0x6c /* Interrupt Status Register 0 */ #define ISR1 0x70 /* Interrupt Status Register 1 */ #define IER0 0x74 /* Interrupt Enable Register 0 */ #define IER1 0x78 /* Interrupt Enable Register 1 */ /* Register Access Macros (chan is 0 or 1 in _any_ case) */ #define M_REG(reg, chan) (reg + 0x80*chan) /* MSCI */ #define DRX_REG(reg, chan) (reg + 0x40*chan) /* DMA Rx */ #define DTX_REG(reg, chan) (reg + 0x20*(2*chan + 1)) /* DMA Tx */ #define TRX_REG(reg, chan) (reg + 0x20*chan) /* Timer Rx */ #define TTX_REG(reg, chan) (reg + 0x10*(2*chan + 1)) /* Timer Tx */ #define ST_REG(reg, chan) (reg + 0x80*chan) /* Status Cnt */ #define IR0_DRX(val, chan) ((val)<<(8*(chan))) /* Int DMA Rx */ #define IR0_DTX(val, chan) ((val)<<(4*(2*chan + 1))) /* Int DMA Tx */ #define IR0_M(val, chan) ((val)<<(8*(chan))) /* Int MSCI */ /* MSCI Channel Registers */ #define MSCI0_OFFSET 0x00 #define MSCI1_OFFSET 0x80 #define MD0 0x138 /* Mode reg 0 */ #define MD1 0x139 /* Mode reg 1 */ #define MD2 0x13a /* Mode reg 2 */ #define MD3 0x13b /* Mode reg 3 */ #define CTL 0x130 /* Control reg */ #define RXS 0x13c /* RX clock source */ #define TXS 0x13d /* TX clock source */ #define EXS 0x13e /* External clock input selection */ #define TMCT 0x144 /* Time constant (Tx) */ #define TMCR 0x145 /* Time constant (Rx) */ #define CMD 0x128 /* Command reg */ #define ST0 0x118 /* Status reg 0 */ #define ST1 0x119 /* Status reg 1 */ #define ST2 0x11a /* Status reg 2 */ #define ST3 0x11b /* Status reg 3 */ #define ST4 0x11c /* Status reg 4 */ #define FST 0x11d /* frame Status reg */ #define IE0 0x120 /* Interrupt enable reg 0 */ #define IE1 0x121 /* Interrupt enable reg 1 */ #define IE2 0x122 /* Interrupt enable reg 2 */ #define IE4 0x124 /* Interrupt enable reg 4 */ #define FIE 0x125 /* Frame Interrupt enable reg */ #define SA0 0x140 /* Syn Address reg 0 */ #define SA1 0x141 /* Syn Address reg 1 */ #define IDL 0x142 /* Idle register */ #define TRBL 0x100 /* TX/RX buffer reg L */ #define TRBK 0x101 /* TX/RX buffer reg K */ #define TRBJ 0x102 /* TX/RX buffer reg J */ #define TRBH 0x103 /* TX/RX buffer reg H */ #define TRC0 0x148 /* TX Ready control reg 0 */ #define TRC1 0x149 /* TX Ready control reg 1 */ #define RRC 0x14a /* RX Ready control reg */ #define CST0 0x108 /* Current Status Register 0 */ #define CST1 0x109 /* Current Status Register 1 */ #define CST2 0x10a /* Current Status 
Register 2 */ #define CST3 0x10b /* Current Status Register 3 */ #define GPO 0x131 /* General Purpose Output Pin Ctl Reg */ #define TFS 0x14b /* Tx Start Threshold Ctl Reg */ #define TFN 0x143 /* Inter-transmit-frame Time Fill Ctl Reg */ #define TBN 0x110 /* Tx Buffer Number Reg */ #define RBN 0x111 /* Rx Buffer Number Reg */ #define TNR0 0x150 /* Tx DMA Request Ctl Reg 0 */ #define TNR1 0x151 /* Tx DMA Request Ctl Reg 1 */ #define TCR 0x152 /* Tx DMA Critical Request Reg */ #define RNR 0x154 /* Rx DMA Request Ctl Reg */ #define RCR 0x156 /* Rx DMA Critical Request Reg */ /* Timer Registers */ #define TIMER0RX_OFFSET 0x00 #define TIMER0TX_OFFSET 0x10 #define TIMER1RX_OFFSET 0x20 #define TIMER1TX_OFFSET 0x30 #define TCNTL 0x200 /* Timer Upcounter L */ #define TCNTH 0x201 /* Timer Upcounter H */ #define TCONRL 0x204 /* Timer Constant Register L */ #define TCONRH 0x205 /* Timer Constant Register H */ #define TCSR 0x206 /* Timer Control/Status Register */ #define TEPR 0x207 /* Timer Expand Prescale Register */ /* DMA registers */ #define PCR 0x40 /* DMA priority control reg */ #define DRR 0x44 /* DMA reset reg */ #define DMER 0x07 /* DMA Master Enable reg */ #define BTCR 0x08 /* Burst Tx Ctl Reg */ #define BOLR 0x0c /* Back-off Length Reg */ #define DSR_RX(chan) (0x48 + 2*chan) /* DMA Status Reg (Rx) */ #define DSR_TX(chan) (0x49 + 2*chan) /* DMA Status Reg (Tx) */ #define DIR_RX(chan) (0x4c + 2*chan) /* DMA Interrupt Enable Reg (Rx) */ #define DIR_TX(chan) (0x4d + 2*chan) /* DMA Interrupt Enable Reg (Tx) */ #define FCT_RX(chan) (0x50 + 2*chan) /* Frame End Interrupt Counter (Rx) */ #define FCT_TX(chan) (0x51 + 2*chan) /* Frame End Interrupt Counter (Tx) */ #define DMR_RX(chan) (0x54 + 2*chan) /* DMA Mode Reg (Rx) */ #define DMR_TX(chan) (0x55 + 2*chan) /* DMA Mode Reg (Tx) */ #define DCR_RX(chan) (0x58 + 2*chan) /* DMA Command Reg (Rx) */ #define DCR_TX(chan) (0x59 + 2*chan) /* DMA Command Reg (Tx) */ /* DMA Channel Registers */ #define DMAC0RX_OFFSET 0x00 #define DMAC0TX_OFFSET 0x20 #define DMAC1RX_OFFSET 0x40 #define DMAC1TX_OFFSET 0x60 #define DARL 0x80 /* Dest Addr Register L (single-block, RX only) */ #define DARH 0x81 /* Dest Addr Register H (single-block, RX only) */ #define DARB 0x82 /* Dest Addr Register B (single-block, RX only) */ #define DARBH 0x83 /* Dest Addr Register BH (single-block, RX only) */ #define SARL 0x80 /* Source Addr Register L (single-block, TX only) */ #define SARH 0x81 /* Source Addr Register H (single-block, TX only) */ #define SARB 0x82 /* Source Addr Register B (single-block, TX only) */ #define DARBH 0x83 /* Source Addr Register BH (single-block, TX only) */ #define BARL 0x80 /* Buffer Addr Register L (chained-block) */ #define BARH 0x81 /* Buffer Addr Register H (chained-block) */ #define BARB 0x82 /* Buffer Addr Register B (chained-block) */ #define BARBH 0x83 /* Buffer Addr Register BH (chained-block) */ #define CDAL 0x84 /* Current Descriptor Addr Register L */ #define CDAH 0x85 /* Current Descriptor Addr Register H */ #define CDAB 0x86 /* Current Descriptor Addr Register B */ #define CDABH 0x87 /* Current Descriptor Addr Register BH */ #define EDAL 0x88 /* Error Descriptor Addr Register L */ #define EDAH 0x89 /* Error Descriptor Addr Register H */ #define EDAB 0x8a /* Error Descriptor Addr Register B */ #define EDABH 0x8b /* Error Descriptor Addr Register BH */ #define BFLL 0x90 /* RX Buffer Length L (only RX) */ #define BFLH 0x91 /* RX Buffer Length H (only RX) */ #define BCRL 0x8c /* Byte Count Register L */ #define BCRH 0x8d /* Byte Count Register H 
*/ /* Block Descriptor Structure */ typedef struct { unsigned long next; /* pointer to next block descriptor */ unsigned long ptbuf; /* buffer pointer */ unsigned short len; /* data length */ unsigned char status; /* status */ unsigned char filler[5]; /* alignment filler (16 bytes) */ } pcsca_bd_t; /* Block Descriptor Structure */ typedef struct { u32 cp; /* pointer to next block descriptor */ u32 bp; /* buffer pointer */ u16 len; /* data length */ u8 stat; /* status */ u8 unused; /* pads to 4-byte boundary */ }pkt_desc; /* Descriptor Status definitions: Bit Transmission Reception 7 EOM EOM 6 - Short Frame 5 - Abort 4 - Residual bit 3 Underrun Overrun 2 - CRC 1 Ownership Ownership 0 EOT - */ #define DST_EOT 0x01 /* End of transmit command */ #define DST_OSB 0x02 /* Ownership bit */ #define DST_CRC 0x04 /* CRC Error */ #define DST_OVR 0x08 /* Overrun */ #define DST_UDR 0x08 /* Underrun */ #define DST_RBIT 0x10 /* Residual bit */ #define DST_ABT 0x20 /* Abort */ #define DST_SHRT 0x40 /* Short Frame */ #define DST_EOM 0x80 /* End of Message */ /* Packet Descriptor Status bits */ #define ST_TX_EOM 0x80 /* End of frame */ #define ST_TX_UNDRRUN 0x08 #define ST_TX_OWNRSHP 0x02 #define ST_TX_EOT 0x01 /* End of transmission */ #define ST_RX_EOM 0x80 /* End of frame */ #define ST_RX_SHORT 0x40 /* Short frame */ #define ST_RX_ABORT 0x20 /* Abort */ #define ST_RX_RESBIT 0x10 /* Residual bit */ #define ST_RX_OVERRUN 0x08 /* Overrun */ #define ST_RX_CRC 0x04 /* CRC */ #define ST_RX_OWNRSHP 0x02 #define ST_ERROR_MASK 0x7C /* Status Counter Registers */ #define CMCR 0x158 /* Counter Master Ctl Reg */ #define TECNTL 0x160 /* Tx EOM Counter L */ #define TECNTM 0x161 /* Tx EOM Counter M */ #define TECNTH 0x162 /* Tx EOM Counter H */ #define TECCR 0x163 /* Tx EOM Counter Ctl Reg */ #define URCNTL 0x164 /* Underrun Counter L */ #define URCNTH 0x165 /* Underrun Counter H */ #define URCCR 0x167 /* Underrun Counter Ctl Reg */ #define RECNTL 0x168 /* Rx EOM Counter L */ #define RECNTM 0x169 /* Rx EOM Counter M */ #define RECNTH 0x16a /* Rx EOM Counter H */ #define RECCR 0x16b /* Rx EOM Counter Ctl Reg */ #define ORCNTL 0x16c /* Overrun Counter L */ #define ORCNTH 0x16d /* Overrun Counter H */ #define ORCCR 0x16f /* Overrun Counter Ctl Reg */ #define CECNTL 0x170 /* CRC Counter L */ #define CECNTH 0x171 /* CRC Counter H */ #define CECCR 0x173 /* CRC Counter Ctl Reg */ #define ABCNTL 0x174 /* Abort frame Counter L */ #define ABCNTH 0x175 /* Abort frame Counter H */ #define ABCCR 0x177 /* Abort frame Counter Ctl Reg */ #define SHCNTL 0x178 /* Short frame Counter L */ #define SHCNTH 0x179 /* Short frame Counter H */ #define SHCCR 0x17b /* Short frame Counter Ctl Reg */ #define RSCNTL 0x17c /* Residual bit Counter L */ #define RSCNTH 0x17d /* Residual bit Counter H */ #define RSCCR 0x17f /* Residual bit Counter Ctl Reg */ /* Register Programming Constants */ #define IR0_DMIC 0x00000001 #define IR0_DMIB 0x00000002 #define IR0_DMIA 0x00000004 #define IR0_EFT 0x00000008 #define IR0_DMAREQ 0x00010000 #define IR0_TXINT 0x00020000 #define IR0_RXINTB 0x00040000 #define IR0_RXINTA 0x00080000 #define IR0_TXRDY 0x00100000 #define IR0_RXRDY 0x00200000 #define MD0_CRC16_0 0x00 #define MD0_CRC16_1 0x01 #define MD0_CRC32 0x02 #define MD0_CRC_CCITT 0x03 #define MD0_CRCC0 0x04 #define MD0_CRCC1 0x08 #define MD0_AUTO_ENA 0x10 #define MD0_ASYNC 0x00 #define MD0_BY_MSYNC 0x20 #define MD0_BY_BISYNC 0x40 #define MD0_BY_EXT 0x60 #define MD0_BIT_SYNC 0x80 #define MD0_TRANSP 0xc0 #define MD0_HDLC 0x80 /* Bit-sync HDLC mode */ #define 
MD0_CRC_NONE 0x00 #define MD0_CRC_16_0 0x04 #define MD0_CRC_16 0x05 #define MD0_CRC_ITU32 0x06 #define MD0_CRC_ITU 0x07 #define MD1_NOADDR 0x00 #define MD1_SADDR1 0x40 #define MD1_SADDR2 0x80 #define MD1_DADDR 0xc0 #define MD2_NRZI_IEEE 0x40 #define MD2_MANCHESTER 0x80 #define MD2_FM_MARK 0xA0 #define MD2_FM_SPACE 0xC0 #define MD2_LOOPBACK 0x03 /* Local data Loopback */ #define MD2_F_DUPLEX 0x00 #define MD2_AUTO_ECHO 0x01 #define MD2_LOOP_HI_Z 0x02 #define MD2_LOOP_MIR 0x03 #define MD2_ADPLL_X8 0x00 #define MD2_ADPLL_X16 0x08 #define MD2_ADPLL_X32 0x10 #define MD2_NRZ 0x00 #define MD2_NRZI 0x20 #define MD2_NRZ_IEEE 0x40 #define MD2_MANCH 0x00 #define MD2_FM1 0x20 #define MD2_FM0 0x40 #define MD2_FM 0x80 #define CTL_RTS 0x01 #define CTL_DTR 0x02 #define CTL_SYN 0x04 #define CTL_IDLC 0x10 #define CTL_UDRNC 0x20 #define CTL_URSKP 0x40 #define CTL_URCT 0x80 #define CTL_NORTS 0x01 #define CTL_NODTR 0x02 #define CTL_IDLE 0x10 #define RXS_BR0 0x01 #define RXS_BR1 0x02 #define RXS_BR2 0x04 #define RXS_BR3 0x08 #define RXS_ECLK 0x00 #define RXS_ECLK_NS 0x20 #define RXS_IBRG 0x40 #define RXS_PLL1 0x50 #define RXS_PLL2 0x60 #define RXS_PLL3 0x70 #define RXS_DRTXC 0x80 #define TXS_BR0 0x01 #define TXS_BR1 0x02 #define TXS_BR2 0x04 #define TXS_BR3 0x08 #define TXS_ECLK 0x00 #define TXS_IBRG 0x40 #define TXS_RCLK 0x60 #define TXS_DTRXC 0x80 #define EXS_RES0 0x01 #define EXS_RES1 0x02 #define EXS_RES2 0x04 #define EXS_TES0 0x10 #define EXS_TES1 0x20 #define EXS_TES2 0x40 #define CLK_BRG_MASK 0x0F #define CLK_PIN_OUT 0x80 #define CLK_LINE 0x00 /* clock line input */ #define CLK_BRG 0x40 /* internal baud rate generator */ #define CLK_TX_RXCLK 0x60 /* TX clock from RX clock */ #define CMD_RX_RST 0x11 #define CMD_RX_ENA 0x12 #define CMD_RX_DIS 0x13 #define CMD_RX_CRC_INIT 0x14 #define CMD_RX_MSG_REJ 0x15 #define CMD_RX_MP_SRCH 0x16 #define CMD_RX_CRC_EXC 0x17 #define CMD_RX_CRC_FRC 0x18 #define CMD_TX_RST 0x01 #define CMD_TX_ENA 0x02 #define CMD_TX_DISA 0x03 #define CMD_TX_CRC_INIT 0x04 #define CMD_TX_CRC_EXC 0x05 #define CMD_TX_EOM 0x06 #define CMD_TX_ABORT 0x07 #define CMD_TX_MP_ON 0x08 #define CMD_TX_BUF_CLR 0x09 #define CMD_TX_DISB 0x0b #define CMD_CH_RST 0x21 #define CMD_SRCH_MODE 0x31 #define CMD_NOP 0x00 #define CMD_RESET 0x21 #define CMD_TX_ENABLE 0x02 #define CMD_RX_ENABLE 0x12 #define ST0_RXRDY 0x01 #define ST0_TXRDY 0x02 #define ST0_RXINTB 0x20 #define ST0_RXINTA 0x40 #define ST0_TXINT 0x80 #define ST1_IDLE 0x01 #define ST1_ABORT 0x02 #define ST1_CDCD 0x04 #define ST1_CCTS 0x08 #define ST1_SYN_FLAG 0x10 #define ST1_CLMD 0x20 #define ST1_TXIDLE 0x40 #define ST1_UDRN 0x80 #define ST2_CRCE 0x04 #define ST2_ONRN 0x08 #define ST2_RBIT 0x10 #define ST2_ABORT 0x20 #define ST2_SHORT 0x40 #define ST2_EOM 0x80 #define ST3_RX_ENA 0x01 #define ST3_TX_ENA 0x02 #define ST3_DCD 0x04 #define ST3_CTS 0x08 #define ST3_SRCH_MODE 0x10 #define ST3_SLOOP 0x20 #define ST3_GPI 0x80 #define ST4_RDNR 0x01 #define ST4_RDCR 0x02 #define ST4_TDNR 0x04 #define ST4_TDCR 0x08 #define ST4_OCLM 0x20 #define ST4_CFT 0x40 #define ST4_CGPI 0x80 #define FST_CRCEF 0x04 #define FST_OVRNF 0x08 #define FST_RBIF 0x10 #define FST_ABTF 0x20 #define FST_SHRTF 0x40 #define FST_EOMF 0x80 #define IE0_RXRDY 0x01 #define IE0_TXRDY 0x02 #define IE0_RXINTB 0x20 #define IE0_RXINTA 0x40 #define IE0_TXINT 0x80 #define IE0_UDRN 0x00008000 /* TX underrun MSCI interrupt enable */ #define IE0_CDCD 0x00000400 /* CD level change interrupt enable */ #define IE1_IDLD 0x01 #define IE1_ABTD 0x02 #define IE1_CDCD 0x04 #define IE1_CCTS 0x08 #define IE1_SYNCD 0x10 
#define IE1_CLMD 0x20 #define IE1_IDL 0x40 #define IE1_UDRN 0x80 #define IE2_CRCE 0x04 #define IE2_OVRN 0x08 #define IE2_RBIT 0x10 #define IE2_ABT 0x20 #define IE2_SHRT 0x40 #define IE2_EOM 0x80 #define IE4_RDNR 0x01 #define IE4_RDCR 0x02 #define IE4_TDNR 0x04 #define IE4_TDCR 0x08 #define IE4_OCLM 0x20 #define IE4_CFT 0x40 #define IE4_CGPI 0x80 #define FIE_CRCEF 0x04 #define FIE_OVRNF 0x08 #define FIE_RBIF 0x10 #define FIE_ABTF 0x20 #define FIE_SHRTF 0x40 #define FIE_EOMF 0x80 #define DSR_DWE 0x01 #define DSR_DE 0x02 #define DSR_REF 0x04 #define DSR_UDRF 0x04 #define DSR_COA 0x08 #define DSR_COF 0x10 #define DSR_BOF 0x20 #define DSR_EOM 0x40 #define DSR_EOT 0x80 #define DIR_REF 0x04 #define DIR_UDRF 0x04 #define DIR_COA 0x08 #define DIR_COF 0x10 #define DIR_BOF 0x20 #define DIR_EOM 0x40 #define DIR_EOT 0x80 #define DIR_REFE 0x04 #define DIR_UDRFE 0x04 #define DIR_COAE 0x08 #define DIR_COFE 0x10 #define DIR_BOFE 0x20 #define DIR_EOME 0x40 #define DIR_EOTE 0x80 #define DMR_CNTE 0x02 #define DMR_NF 0x04 #define DMR_SEOME 0x08 #define DMR_TMOD 0x10 #define DMER_DME 0x80 /* DMA Master Enable */ #define DCR_SW_ABT 0x01 #define DCR_FCT_CLR 0x02 #define DCR_ABORT 0x01 #define DCR_CLEAR_EOF 0x02 #define PCR_COTE 0x80 #define PCR_PR0 0x01 #define PCR_PR1 0x02 #define PCR_PR2 0x04 #define PCR_CCC 0x08 #define PCR_BRC 0x10 #define PCR_OSB 0x40 #define PCR_BURST 0x80 #endif /* (__HD64572_H) */
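The pkt_desc status byte defined earlier encodes both completion and error information for a received descriptor. A hypothetical receive-side check (not from this header or its users), assuming the pkt_desc type and the ST_* definitions above are in scope:

/* Returns 1 for a complete, error-free received frame, 0 otherwise. */
static int rx_desc_frame_ok(const pkt_desc *desc)
{
	/* No EOM: the frame continues in the next descriptor. */
	if (!(desc->stat & ST_RX_EOM))
		return 0;

	/* ST_ERROR_MASK (0x7c) covers short frame, abort, residual bit,
	 * overrun and CRC errors. */
	return (desc->stat & ST_ERROR_MASK) == 0;
}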
// SPDX-License-Identifier: GPL-2.0-or-later /* * ALSA SoC SPDIF Audio Layer * * Copyright 2015 Andrea Venturi <[email protected]> * Copyright 2015 Marcus Cooper <[email protected]> * * Based on the Allwinner SDK driver, released under the GPL. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/regmap.h> #include <linux/of.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/spinlock.h> #include <sound/asoundef.h> #include <sound/dmaengine_pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #define SUN4I_SPDIF_CTL (0x00) #define SUN4I_SPDIF_CTL_MCLKDIV(v) ((v) << 4) /* v even */ #define SUN4I_SPDIF_CTL_MCLKOUTEN BIT(2) #define SUN4I_SPDIF_CTL_GEN BIT(1) #define SUN4I_SPDIF_CTL_RESET BIT(0) #define SUN4I_SPDIF_TXCFG (0x04) #define SUN4I_SPDIF_TXCFG_SINGLEMOD BIT(31) #define SUN4I_SPDIF_TXCFG_ASS BIT(17) #define SUN4I_SPDIF_TXCFG_NONAUDIO BIT(16) #define SUN4I_SPDIF_TXCFG_TXRATIO(v) ((v) << 4) #define SUN4I_SPDIF_TXCFG_TXRATIO_MASK GENMASK(8, 4) #define SUN4I_SPDIF_TXCFG_FMTRVD GENMASK(3, 2) #define SUN4I_SPDIF_TXCFG_FMT16BIT (0 << 2) #define SUN4I_SPDIF_TXCFG_FMT20BIT (1 << 2) #define SUN4I_SPDIF_TXCFG_FMT24BIT (2 << 2) #define SUN4I_SPDIF_TXCFG_CHSTMODE BIT(1) #define SUN4I_SPDIF_TXCFG_TXEN BIT(0) #define SUN4I_SPDIF_RXCFG (0x08) #define SUN4I_SPDIF_RXCFG_LOCKFLAG BIT(4) #define SUN4I_SPDIF_RXCFG_CHSTSRC BIT(3) #define SUN4I_SPDIF_RXCFG_CHSTCP BIT(1) #define SUN4I_SPDIF_RXCFG_RXEN BIT(0) #define SUN4I_SPDIF_TXFIFO (0x0C) #define SUN4I_SPDIF_RXFIFO (0x10) #define SUN4I_SPDIF_FCTL (0x14) #define SUN4I_SPDIF_FCTL_FIFOSRC BIT(31) #define SUN4I_SPDIF_FCTL_FTX BIT(17) #define SUN4I_SPDIF_FCTL_FRX BIT(16) #define SUN4I_SPDIF_FCTL_TXTL(v) ((v) << 8) #define SUN4I_SPDIF_FCTL_TXTL_MASK GENMASK(12, 8) #define SUN4I_SPDIF_FCTL_RXTL(v) ((v) << 3) #define SUN4I_SPDIF_FCTL_RXTL_MASK GENMASK(7, 3) #define SUN4I_SPDIF_FCTL_TXIM BIT(2) #define SUN4I_SPDIF_FCTL_RXOM(v) ((v) << 0) #define SUN4I_SPDIF_FCTL_RXOM_MASK GENMASK(1, 0) #define SUN50I_H6_SPDIF_FCTL (0x14) #define SUN50I_H6_SPDIF_FCTL_HUB_EN BIT(31) #define SUN50I_H6_SPDIF_FCTL_FTX BIT(30) #define SUN50I_H6_SPDIF_FCTL_FRX BIT(29) #define SUN50I_H6_SPDIF_FCTL_TXTL(v) ((v) << 12) #define SUN50I_H6_SPDIF_FCTL_TXTL_MASK GENMASK(19, 12) #define SUN50I_H6_SPDIF_FCTL_RXTL(v) ((v) << 4) #define SUN50I_H6_SPDIF_FCTL_RXTL_MASK GENMASK(10, 4) #define SUN50I_H6_SPDIF_FCTL_TXIM BIT(2) #define SUN50I_H6_SPDIF_FCTL_RXOM(v) ((v) << 0) #define SUN50I_H6_SPDIF_FCTL_RXOM_MASK GENMASK(1, 0) #define SUN4I_SPDIF_FSTA (0x18) #define SUN4I_SPDIF_FSTA_TXE BIT(14) #define SUN4I_SPDIF_FSTA_TXECNTSHT (8) #define SUN4I_SPDIF_FSTA_RXA BIT(6) #define SUN4I_SPDIF_FSTA_RXACNTSHT (0) #define SUN4I_SPDIF_INT (0x1C) #define SUN4I_SPDIF_INT_RXLOCKEN BIT(18) #define SUN4I_SPDIF_INT_RXUNLOCKEN BIT(17) #define SUN4I_SPDIF_INT_RXPARERREN BIT(16) #define SUN4I_SPDIF_INT_TXDRQEN BIT(7) #define SUN4I_SPDIF_INT_TXUIEN BIT(6) #define SUN4I_SPDIF_INT_TXOIEN BIT(5) #define SUN4I_SPDIF_INT_TXEIEN BIT(4) #define SUN4I_SPDIF_INT_RXDRQEN BIT(2) #define SUN4I_SPDIF_INT_RXOIEN BIT(1) #define SUN4I_SPDIF_INT_RXAIEN BIT(0) #define SUN4I_SPDIF_ISTA (0x20) #define SUN4I_SPDIF_ISTA_RXLOCKSTA BIT(18) #define SUN4I_SPDIF_ISTA_RXUNLOCKSTA BIT(17) #define SUN4I_SPDIF_ISTA_RXPARERRSTA BIT(16) #define SUN4I_SPDIF_ISTA_TXUSTA BIT(6) #define SUN4I_SPDIF_ISTA_TXOSTA BIT(5) #define SUN4I_SPDIF_ISTA_TXESTA BIT(4) 
#define SUN4I_SPDIF_ISTA_RXOSTA BIT(1) #define SUN4I_SPDIF_ISTA_RXASTA BIT(0) #define SUN8I_SPDIF_TXFIFO (0x20) #define SUN4I_SPDIF_TXCNT (0x24) #define SUN4I_SPDIF_RXCNT (0x28) #define SUN4I_SPDIF_TXCHSTA0 (0x2C) #define SUN4I_SPDIF_TXCHSTA0_CLK(v) ((v) << 28) #define SUN4I_SPDIF_TXCHSTA0_SAMFREQ(v) ((v) << 24) #define SUN4I_SPDIF_TXCHSTA0_SAMFREQ_MASK GENMASK(27, 24) #define SUN4I_SPDIF_TXCHSTA0_CHNUM(v) ((v) << 20) #define SUN4I_SPDIF_TXCHSTA0_CHNUM_MASK GENMASK(23, 20) #define SUN4I_SPDIF_TXCHSTA0_SRCNUM(v) ((v) << 16) #define SUN4I_SPDIF_TXCHSTA0_CATACOD(v) ((v) << 8) #define SUN4I_SPDIF_TXCHSTA0_MODE(v) ((v) << 6) #define SUN4I_SPDIF_TXCHSTA0_EMPHASIS(v) ((v) << 3) #define SUN4I_SPDIF_TXCHSTA0_CP BIT(2) #define SUN4I_SPDIF_TXCHSTA0_AUDIO BIT(1) #define SUN4I_SPDIF_TXCHSTA0_PRO BIT(0) #define SUN4I_SPDIF_TXCHSTA1 (0x30) #define SUN4I_SPDIF_TXCHSTA1_CGMSA(v) ((v) << 8) #define SUN4I_SPDIF_TXCHSTA1_ORISAMFREQ(v) ((v) << 4) #define SUN4I_SPDIF_TXCHSTA1_ORISAMFREQ_MASK GENMASK(7, 4) #define SUN4I_SPDIF_TXCHSTA1_SAMWORDLEN(v) ((v) << 1) #define SUN4I_SPDIF_TXCHSTA1_MAXWORDLEN BIT(0) #define SUN4I_SPDIF_RXCHSTA0 (0x34) #define SUN4I_SPDIF_RXCHSTA0_CLK(v) ((v) << 28) #define SUN4I_SPDIF_RXCHSTA0_SAMFREQ(v) ((v) << 24) #define SUN4I_SPDIF_RXCHSTA0_CHNUM(v) ((v) << 20) #define SUN4I_SPDIF_RXCHSTA0_SRCNUM(v) ((v) << 16) #define SUN4I_SPDIF_RXCHSTA0_CATACOD(v) ((v) << 8) #define SUN4I_SPDIF_RXCHSTA0_MODE(v) ((v) << 6) #define SUN4I_SPDIF_RXCHSTA0_EMPHASIS(v) ((v) << 3) #define SUN4I_SPDIF_RXCHSTA0_CP BIT(2) #define SUN4I_SPDIF_RXCHSTA0_AUDIO BIT(1) #define SUN4I_SPDIF_RXCHSTA0_PRO BIT(0) #define SUN4I_SPDIF_RXCHSTA1 (0x38) #define SUN4I_SPDIF_RXCHSTA1_CGMSA(v) ((v) << 8) #define SUN4I_SPDIF_RXCHSTA1_ORISAMFREQ(v) ((v) << 4) #define SUN4I_SPDIF_RXCHSTA1_SAMWORDLEN(v) ((v) << 1) #define SUN4I_SPDIF_RXCHSTA1_MAXWORDLEN BIT(0) /* Defines for Sampling Frequency */ #define SUN4I_SPDIF_SAMFREQ_44_1KHZ 0x0 #define SUN4I_SPDIF_SAMFREQ_NOT_INDICATED 0x1 #define SUN4I_SPDIF_SAMFREQ_48KHZ 0x2 #define SUN4I_SPDIF_SAMFREQ_32KHZ 0x3 #define SUN4I_SPDIF_SAMFREQ_22_05KHZ 0x4 #define SUN4I_SPDIF_SAMFREQ_24KHZ 0x6 #define SUN4I_SPDIF_SAMFREQ_88_2KHZ 0x8 #define SUN4I_SPDIF_SAMFREQ_76_8KHZ 0x9 #define SUN4I_SPDIF_SAMFREQ_96KHZ 0xa #define SUN4I_SPDIF_SAMFREQ_176_4KHZ 0xc #define SUN4I_SPDIF_SAMFREQ_192KHZ 0xe /** * struct sun4i_spdif_quirks - Differences between SoC variants. * * @reg_dac_txdata: TX FIFO offset for DMA config. * @has_reset: SoC needs reset deasserted. * @val_fctl_ftx: TX FIFO flush bitmask. 
*/ struct sun4i_spdif_quirks { unsigned int reg_dac_txdata; bool has_reset; unsigned int val_fctl_ftx; }; struct sun4i_spdif_dev { struct platform_device *pdev; struct clk *spdif_clk; struct clk *apb_clk; struct reset_control *rst; struct snd_soc_dai_driver cpu_dai_drv; struct regmap *regmap; struct snd_dmaengine_dai_dma_data dma_params_tx; const struct sun4i_spdif_quirks *quirks; spinlock_t lock; }; static void sun4i_spdif_configure(struct sun4i_spdif_dev *host) { const struct sun4i_spdif_quirks *quirks = host->quirks; /* soft reset SPDIF */ regmap_write(host->regmap, SUN4I_SPDIF_CTL, SUN4I_SPDIF_CTL_RESET); /* flush TX FIFO */ regmap_update_bits(host->regmap, SUN4I_SPDIF_FCTL, quirks->val_fctl_ftx, quirks->val_fctl_ftx); /* clear TX counter */ regmap_write(host->regmap, SUN4I_SPDIF_TXCNT, 0); } static void sun4i_snd_txctrl_on(struct snd_pcm_substream *substream, struct sun4i_spdif_dev *host) { if (substream->runtime->channels == 1) regmap_update_bits(host->regmap, SUN4I_SPDIF_TXCFG, SUN4I_SPDIF_TXCFG_SINGLEMOD, SUN4I_SPDIF_TXCFG_SINGLEMOD); /* SPDIF TX ENABLE */ regmap_update_bits(host->regmap, SUN4I_SPDIF_TXCFG, SUN4I_SPDIF_TXCFG_TXEN, SUN4I_SPDIF_TXCFG_TXEN); /* DRQ ENABLE */ regmap_update_bits(host->regmap, SUN4I_SPDIF_INT, SUN4I_SPDIF_INT_TXDRQEN, SUN4I_SPDIF_INT_TXDRQEN); /* Global enable */ regmap_update_bits(host->regmap, SUN4I_SPDIF_CTL, SUN4I_SPDIF_CTL_GEN, SUN4I_SPDIF_CTL_GEN); } static void sun4i_snd_txctrl_off(struct snd_pcm_substream *substream, struct sun4i_spdif_dev *host) { /* SPDIF TX DISABLE */ regmap_update_bits(host->regmap, SUN4I_SPDIF_TXCFG, SUN4I_SPDIF_TXCFG_TXEN, 0); /* DRQ DISABLE */ regmap_update_bits(host->regmap, SUN4I_SPDIF_INT, SUN4I_SPDIF_INT_TXDRQEN, 0); /* Global disable */ regmap_update_bits(host->regmap, SUN4I_SPDIF_CTL, SUN4I_SPDIF_CTL_GEN, 0); } static int sun4i_spdif_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct sun4i_spdif_dev *host = snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)); if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) return -EINVAL; sun4i_spdif_configure(host); return 0; } static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { int ret = 0; int fmt; unsigned long rate = params_rate(params); u32 mclk_div = 0; unsigned int mclk = 0; u32 reg_val; struct sun4i_spdif_dev *host = snd_soc_dai_get_drvdata(cpu_dai); struct platform_device *pdev = host->pdev; /* Add the PCM and raw data select interface */ switch (params_channels(params)) { case 1: /* PCM mode */ case 2: fmt = 0; break; case 4: /* raw data mode */ fmt = SUN4I_SPDIF_TXCFG_NONAUDIO; break; default: return -EINVAL; } switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: fmt |= SUN4I_SPDIF_TXCFG_FMT16BIT; break; case SNDRV_PCM_FORMAT_S20_3LE: fmt |= SUN4I_SPDIF_TXCFG_FMT20BIT; break; case SNDRV_PCM_FORMAT_S24_LE: fmt |= SUN4I_SPDIF_TXCFG_FMT24BIT; break; default: return -EINVAL; } switch (rate) { case 22050: case 44100: case 88200: case 176400: mclk = 22579200; break; case 24000: case 32000: case 48000: case 96000: case 192000: mclk = 24576000; break; default: return -EINVAL; } ret = clk_set_rate(host->spdif_clk, mclk); if (ret < 0) { dev_err(&pdev->dev, "Setting SPDIF clock rate for %d Hz failed!\n", mclk); return ret; } regmap_update_bits(host->regmap, SUN4I_SPDIF_FCTL, SUN4I_SPDIF_FCTL_TXIM, SUN4I_SPDIF_FCTL_TXIM); switch (rate) { case 22050: case 24000: mclk_div = 8; break; case 
32000: mclk_div = 6; break; case 44100: case 48000: mclk_div = 4; break; case 88200: case 96000: mclk_div = 2; break; case 176400: case 192000: mclk_div = 1; break; default: return -EINVAL; } reg_val = 0; reg_val |= SUN4I_SPDIF_TXCFG_ASS; reg_val |= fmt; /* set non audio and bit depth */ reg_val |= SUN4I_SPDIF_TXCFG_CHSTMODE; reg_val |= SUN4I_SPDIF_TXCFG_TXRATIO(mclk_div - 1); regmap_write(host->regmap, SUN4I_SPDIF_TXCFG, reg_val); return 0; } static int sun4i_spdif_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { int ret = 0; struct sun4i_spdif_dev *host = snd_soc_dai_get_drvdata(dai); if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) return -EINVAL; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: sun4i_snd_txctrl_on(substream, host); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: sun4i_snd_txctrl_off(substream, host); break; default: ret = -EINVAL; break; } return ret; } static int sun4i_spdif_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int sun4i_spdif_get_status_mask(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { u8 *status = ucontrol->value.iec958.status; status[0] = 0xff; status[1] = 0xff; status[2] = 0xff; status[3] = 0xff; status[4] = 0xff; status[5] = 0x03; return 0; } static int sun4i_spdif_get_status(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct sun4i_spdif_dev *host = snd_soc_dai_get_drvdata(cpu_dai); u8 *status = ucontrol->value.iec958.status; unsigned long flags; unsigned int reg; spin_lock_irqsave(&host->lock, flags); regmap_read(host->regmap, SUN4I_SPDIF_TXCHSTA0, &reg); status[0] = reg & 0xff; status[1] = (reg >> 8) & 0xff; status[2] = (reg >> 16) & 0xff; status[3] = (reg >> 24) & 0xff; regmap_read(host->regmap, SUN4I_SPDIF_TXCHSTA1, &reg); status[4] = reg & 0xff; status[5] = (reg >> 8) & 0x3; spin_unlock_irqrestore(&host->lock, flags); return 0; } static int sun4i_spdif_set_status(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct sun4i_spdif_dev *host = snd_soc_dai_get_drvdata(cpu_dai); u8 *status = ucontrol->value.iec958.status; unsigned long flags; unsigned int reg; bool chg0, chg1; spin_lock_irqsave(&host->lock, flags); reg = (u32)status[3] << 24; reg |= (u32)status[2] << 16; reg |= (u32)status[1] << 8; reg |= (u32)status[0]; regmap_update_bits_check(host->regmap, SUN4I_SPDIF_TXCHSTA0, GENMASK(31,0), reg, &chg0); reg = (u32)status[5] << 8; reg |= (u32)status[4]; regmap_update_bits_check(host->regmap, SUN4I_SPDIF_TXCHSTA1, GENMASK(9,0), reg, &chg1); reg = SUN4I_SPDIF_TXCFG_CHSTMODE; if (status[0] & IEC958_AES0_NONAUDIO) reg |= SUN4I_SPDIF_TXCFG_NONAUDIO; regmap_update_bits(host->regmap, SUN4I_SPDIF_TXCFG, SUN4I_SPDIF_TXCFG_CHSTMODE | SUN4I_SPDIF_TXCFG_NONAUDIO, reg); spin_unlock_irqrestore(&host->lock, flags); return chg0 || chg1; } static struct snd_kcontrol_new sun4i_spdif_controls[] = { { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, MASK), .info = sun4i_spdif_info, .get = sun4i_spdif_get_status_mask }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), .info = sun4i_spdif_info, .get = 
sun4i_spdif_get_status, .put = sun4i_spdif_set_status } }; static int sun4i_spdif_soc_dai_probe(struct snd_soc_dai *dai) { struct sun4i_spdif_dev *host = snd_soc_dai_get_drvdata(dai); snd_soc_dai_init_dma_data(dai, &host->dma_params_tx, NULL); snd_soc_add_dai_controls(dai, sun4i_spdif_controls, ARRAY_SIZE(sun4i_spdif_controls)); return 0; } static const struct snd_soc_dai_ops sun4i_spdif_dai_ops = { .probe = sun4i_spdif_soc_dai_probe, .startup = sun4i_spdif_startup, .trigger = sun4i_spdif_trigger, .hw_params = sun4i_spdif_hw_params, }; static const struct regmap_config sun4i_spdif_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .max_register = SUN4I_SPDIF_RXCHSTA1, }; #define SUN4I_RATES SNDRV_PCM_RATE_8000_192000 #define SUN4I_FORMATS (SNDRV_PCM_FORMAT_S16_LE | \ SNDRV_PCM_FORMAT_S20_3LE | \ SNDRV_PCM_FORMAT_S24_LE) static struct snd_soc_dai_driver sun4i_spdif_dai = { .playback = { .channels_min = 1, .channels_max = 2, .rates = SUN4I_RATES, .formats = SUN4I_FORMATS, }, .ops = &sun4i_spdif_dai_ops, .name = "spdif", }; static const struct sun4i_spdif_quirks sun4i_a10_spdif_quirks = { .reg_dac_txdata = SUN4I_SPDIF_TXFIFO, .val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX, }; static const struct sun4i_spdif_quirks sun6i_a31_spdif_quirks = { .reg_dac_txdata = SUN4I_SPDIF_TXFIFO, .val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX, .has_reset = true, }; static const struct sun4i_spdif_quirks sun8i_h3_spdif_quirks = { .reg_dac_txdata = SUN8I_SPDIF_TXFIFO, .val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX, .has_reset = true, }; static const struct sun4i_spdif_quirks sun50i_h6_spdif_quirks = { .reg_dac_txdata = SUN8I_SPDIF_TXFIFO, .val_fctl_ftx = SUN50I_H6_SPDIF_FCTL_FTX, .has_reset = true, }; static const struct of_device_id sun4i_spdif_of_match[] = { { .compatible = "allwinner,sun4i-a10-spdif", .data = &sun4i_a10_spdif_quirks, }, { .compatible = "allwinner,sun6i-a31-spdif", .data = &sun6i_a31_spdif_quirks, }, { .compatible = "allwinner,sun8i-h3-spdif", .data = &sun8i_h3_spdif_quirks, }, { .compatible = "allwinner,sun50i-h6-spdif", .data = &sun50i_h6_spdif_quirks, }, { .compatible = "allwinner,sun50i-h616-spdif", /* Essentially the same as the H6, but without RX */ .data = &sun50i_h6_spdif_quirks, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match); static const struct snd_soc_component_driver sun4i_spdif_component = { .name = "sun4i-spdif", .legacy_dai_naming = 1, }; static int sun4i_spdif_runtime_suspend(struct device *dev) { struct sun4i_spdif_dev *host = dev_get_drvdata(dev); clk_disable_unprepare(host->spdif_clk); clk_disable_unprepare(host->apb_clk); return 0; } static int sun4i_spdif_runtime_resume(struct device *dev) { struct sun4i_spdif_dev *host = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(host->spdif_clk); if (ret) return ret; ret = clk_prepare_enable(host->apb_clk); if (ret) clk_disable_unprepare(host->spdif_clk); return ret; } static int sun4i_spdif_probe(struct platform_device *pdev) { struct sun4i_spdif_dev *host; struct resource *res; const struct sun4i_spdif_quirks *quirks; int ret; void __iomem *base; dev_dbg(&pdev->dev, "Entered %s\n", __func__); host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); if (!host) return -ENOMEM; host->pdev = pdev; spin_lock_init(&host->lock); /* Initialize this copy of the CPU DAI driver structure */ memcpy(&host->cpu_dai_drv, &sun4i_spdif_dai, sizeof(sun4i_spdif_dai)); host->cpu_dai_drv.name = dev_name(&pdev->dev); /* Get the addresses */ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return 
PTR_ERR(base); quirks = of_device_get_match_data(&pdev->dev); if (quirks == NULL) { dev_err(&pdev->dev, "Failed to determine the quirks to use\n"); return -ENODEV; } host->quirks = quirks; host->regmap = devm_regmap_init_mmio(&pdev->dev, base, &sun4i_spdif_regmap_config); /* Clocks */ host->apb_clk = devm_clk_get(&pdev->dev, "apb"); if (IS_ERR(host->apb_clk)) { dev_err(&pdev->dev, "failed to get a apb clock.\n"); return PTR_ERR(host->apb_clk); } host->spdif_clk = devm_clk_get(&pdev->dev, "spdif"); if (IS_ERR(host->spdif_clk)) { dev_err(&pdev->dev, "failed to get a spdif clock.\n"); return PTR_ERR(host->spdif_clk); } host->dma_params_tx.addr = res->start + quirks->reg_dac_txdata; host->dma_params_tx.maxburst = 8; host->dma_params_tx.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; platform_set_drvdata(pdev, host); if (quirks->has_reset) { host->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); if (PTR_ERR(host->rst) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; dev_err(&pdev->dev, "Failed to get reset: %d\n", ret); return ret; } if (!IS_ERR(host->rst)) reset_control_deassert(host->rst); } ret = devm_snd_soc_register_component(&pdev->dev, &sun4i_spdif_component, &sun4i_spdif_dai, 1); if (ret) return ret; pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = sun4i_spdif_runtime_resume(&pdev->dev); if (ret) goto err_unregister; } ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); if (ret) goto err_suspend; return 0; err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) sun4i_spdif_runtime_suspend(&pdev->dev); err_unregister: pm_runtime_disable(&pdev->dev); return ret; } static void sun4i_spdif_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) sun4i_spdif_runtime_suspend(&pdev->dev); } static const struct dev_pm_ops sun4i_spdif_pm = { SET_RUNTIME_PM_OPS(sun4i_spdif_runtime_suspend, sun4i_spdif_runtime_resume, NULL) }; static struct platform_driver sun4i_spdif_driver = { .driver = { .name = "sun4i-spdif", .of_match_table = sun4i_spdif_of_match, .pm = &sun4i_spdif_pm, }, .probe = sun4i_spdif_probe, .remove = sun4i_spdif_remove, }; module_platform_driver(sun4i_spdif_driver); MODULE_AUTHOR("Marcus Cooper <[email protected]>"); MODULE_AUTHOR("Andrea Venturi <[email protected]>"); MODULE_DESCRIPTION("Allwinner sun4i SPDIF SoC Interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sun4i-spdif");
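In sun4i_spdif_hw_params() above, the parent clock (22.5792 MHz or 24.576 MHz) and mclk_div are chosen per sample rate; the numbers work out so that mclk / mclk_div is always 128 x fs, the S/PDIF biphase rate for a two-subframe frame. A standalone arithmetic check of that inferred relation (this is not driver code):

#include <assert.h>

struct rate_cfg { unsigned int rate, mclk, mclk_div; };

int main(void)
{
	/* Values copied from the two switch statements in sun4i_spdif_hw_params(). */
	static const struct rate_cfg cfg[] = {
		{  22050, 22579200, 8 }, {  24000, 24576000, 8 },
		{  32000, 24576000, 6 },
		{  44100, 22579200, 4 }, {  48000, 24576000, 4 },
		{  88200, 22579200, 2 }, {  96000, 24576000, 2 },
		{ 176400, 22579200, 1 }, { 192000, 24576000, 1 },
	};

	for (unsigned int i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++)
		assert(cfg[i].mclk / cfg[i].mclk_div == 128 * cfg[i].rate);

	return 0;
}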
// SPDX-License-Identifier: (GPL-2.0 OR MIT) /* Statistics for Ocelot switch family * * Copyright (c) 2017 Microsemi Corporation * Copyright 2022 NXP */ #include <linux/ethtool_netlink.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include "ocelot.h" enum ocelot_stat { OCELOT_STAT_RX_OCTETS, OCELOT_STAT_RX_UNICAST, OCELOT_STAT_RX_MULTICAST, OCELOT_STAT_RX_BROADCAST, OCELOT_STAT_RX_SHORTS, OCELOT_STAT_RX_FRAGMENTS, OCELOT_STAT_RX_JABBERS, OCELOT_STAT_RX_CRC_ALIGN_ERRS, OCELOT_STAT_RX_SYM_ERRS, OCELOT_STAT_RX_64, OCELOT_STAT_RX_65_127, OCELOT_STAT_RX_128_255, OCELOT_STAT_RX_256_511, OCELOT_STAT_RX_512_1023, OCELOT_STAT_RX_1024_1526, OCELOT_STAT_RX_1527_MAX, OCELOT_STAT_RX_PAUSE, OCELOT_STAT_RX_CONTROL, OCELOT_STAT_RX_LONGS, OCELOT_STAT_RX_CLASSIFIED_DROPS, OCELOT_STAT_RX_RED_PRIO_0, OCELOT_STAT_RX_RED_PRIO_1, OCELOT_STAT_RX_RED_PRIO_2, OCELOT_STAT_RX_RED_PRIO_3, OCELOT_STAT_RX_RED_PRIO_4, OCELOT_STAT_RX_RED_PRIO_5, OCELOT_STAT_RX_RED_PRIO_6, OCELOT_STAT_RX_RED_PRIO_7, OCELOT_STAT_RX_YELLOW_PRIO_0, OCELOT_STAT_RX_YELLOW_PRIO_1, OCELOT_STAT_RX_YELLOW_PRIO_2, OCELOT_STAT_RX_YELLOW_PRIO_3, OCELOT_STAT_RX_YELLOW_PRIO_4, OCELOT_STAT_RX_YELLOW_PRIO_5, OCELOT_STAT_RX_YELLOW_PRIO_6, OCELOT_STAT_RX_YELLOW_PRIO_7, OCELOT_STAT_RX_GREEN_PRIO_0, OCELOT_STAT_RX_GREEN_PRIO_1, OCELOT_STAT_RX_GREEN_PRIO_2, OCELOT_STAT_RX_GREEN_PRIO_3, OCELOT_STAT_RX_GREEN_PRIO_4, OCELOT_STAT_RX_GREEN_PRIO_5, OCELOT_STAT_RX_GREEN_PRIO_6, OCELOT_STAT_RX_GREEN_PRIO_7, OCELOT_STAT_RX_ASSEMBLY_ERRS, OCELOT_STAT_RX_SMD_ERRS, OCELOT_STAT_RX_ASSEMBLY_OK, OCELOT_STAT_RX_MERGE_FRAGMENTS, OCELOT_STAT_RX_PMAC_OCTETS, OCELOT_STAT_RX_PMAC_UNICAST, OCELOT_STAT_RX_PMAC_MULTICAST, OCELOT_STAT_RX_PMAC_BROADCAST, OCELOT_STAT_RX_PMAC_SHORTS, OCELOT_STAT_RX_PMAC_FRAGMENTS, OCELOT_STAT_RX_PMAC_JABBERS, OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS, OCELOT_STAT_RX_PMAC_SYM_ERRS, OCELOT_STAT_RX_PMAC_64, OCELOT_STAT_RX_PMAC_65_127, OCELOT_STAT_RX_PMAC_128_255, OCELOT_STAT_RX_PMAC_256_511, OCELOT_STAT_RX_PMAC_512_1023, OCELOT_STAT_RX_PMAC_1024_1526, OCELOT_STAT_RX_PMAC_1527_MAX, OCELOT_STAT_RX_PMAC_PAUSE, OCELOT_STAT_RX_PMAC_CONTROL, OCELOT_STAT_RX_PMAC_LONGS, OCELOT_STAT_TX_OCTETS, OCELOT_STAT_TX_UNICAST, OCELOT_STAT_TX_MULTICAST, OCELOT_STAT_TX_BROADCAST, OCELOT_STAT_TX_COLLISION, OCELOT_STAT_TX_DROPS, OCELOT_STAT_TX_PAUSE, OCELOT_STAT_TX_64, OCELOT_STAT_TX_65_127, OCELOT_STAT_TX_128_255, OCELOT_STAT_TX_256_511, OCELOT_STAT_TX_512_1023, OCELOT_STAT_TX_1024_1526, OCELOT_STAT_TX_1527_MAX, OCELOT_STAT_TX_YELLOW_PRIO_0, OCELOT_STAT_TX_YELLOW_PRIO_1, OCELOT_STAT_TX_YELLOW_PRIO_2, OCELOT_STAT_TX_YELLOW_PRIO_3, OCELOT_STAT_TX_YELLOW_PRIO_4, OCELOT_STAT_TX_YELLOW_PRIO_5, OCELOT_STAT_TX_YELLOW_PRIO_6, OCELOT_STAT_TX_YELLOW_PRIO_7, OCELOT_STAT_TX_GREEN_PRIO_0, OCELOT_STAT_TX_GREEN_PRIO_1, OCELOT_STAT_TX_GREEN_PRIO_2, OCELOT_STAT_TX_GREEN_PRIO_3, OCELOT_STAT_TX_GREEN_PRIO_4, OCELOT_STAT_TX_GREEN_PRIO_5, OCELOT_STAT_TX_GREEN_PRIO_6, OCELOT_STAT_TX_GREEN_PRIO_7, OCELOT_STAT_TX_AGED, OCELOT_STAT_TX_MM_HOLD, OCELOT_STAT_TX_MERGE_FRAGMENTS, OCELOT_STAT_TX_PMAC_OCTETS, OCELOT_STAT_TX_PMAC_UNICAST, OCELOT_STAT_TX_PMAC_MULTICAST, OCELOT_STAT_TX_PMAC_BROADCAST, OCELOT_STAT_TX_PMAC_PAUSE, OCELOT_STAT_TX_PMAC_64, OCELOT_STAT_TX_PMAC_65_127, OCELOT_STAT_TX_PMAC_128_255, OCELOT_STAT_TX_PMAC_256_511, OCELOT_STAT_TX_PMAC_512_1023, OCELOT_STAT_TX_PMAC_1024_1526, OCELOT_STAT_TX_PMAC_1527_MAX, OCELOT_STAT_DROP_LOCAL, OCELOT_STAT_DROP_TAIL, OCELOT_STAT_DROP_YELLOW_PRIO_0, OCELOT_STAT_DROP_YELLOW_PRIO_1, OCELOT_STAT_DROP_YELLOW_PRIO_2, 
OCELOT_STAT_DROP_YELLOW_PRIO_3, OCELOT_STAT_DROP_YELLOW_PRIO_4, OCELOT_STAT_DROP_YELLOW_PRIO_5, OCELOT_STAT_DROP_YELLOW_PRIO_6, OCELOT_STAT_DROP_YELLOW_PRIO_7, OCELOT_STAT_DROP_GREEN_PRIO_0, OCELOT_STAT_DROP_GREEN_PRIO_1, OCELOT_STAT_DROP_GREEN_PRIO_2, OCELOT_STAT_DROP_GREEN_PRIO_3, OCELOT_STAT_DROP_GREEN_PRIO_4, OCELOT_STAT_DROP_GREEN_PRIO_5, OCELOT_STAT_DROP_GREEN_PRIO_6, OCELOT_STAT_DROP_GREEN_PRIO_7, OCELOT_NUM_STATS, }; struct ocelot_stat_layout { enum ocelot_reg reg; char name[ETH_GSTRING_LEN]; }; /* 32-bit counter checked for wraparound by ocelot_port_update_stats() * and copied to ocelot->stats. */ #define OCELOT_STAT(kind) \ [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind } /* Same as above, except also exported to ethtool -S. Standard counters should * only be exposed to more specific interfaces rather than by their string name. */ #define OCELOT_STAT_ETHTOOL(kind, ethtool_name) \ [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind, .name = ethtool_name } #define OCELOT_COMMON_STATS \ OCELOT_STAT_ETHTOOL(RX_OCTETS, "rx_octets"), \ OCELOT_STAT_ETHTOOL(RX_UNICAST, "rx_unicast"), \ OCELOT_STAT_ETHTOOL(RX_MULTICAST, "rx_multicast"), \ OCELOT_STAT_ETHTOOL(RX_BROADCAST, "rx_broadcast"), \ OCELOT_STAT_ETHTOOL(RX_SHORTS, "rx_shorts"), \ OCELOT_STAT_ETHTOOL(RX_FRAGMENTS, "rx_fragments"), \ OCELOT_STAT_ETHTOOL(RX_JABBERS, "rx_jabbers"), \ OCELOT_STAT_ETHTOOL(RX_CRC_ALIGN_ERRS, "rx_crc_align_errs"), \ OCELOT_STAT_ETHTOOL(RX_SYM_ERRS, "rx_sym_errs"), \ OCELOT_STAT_ETHTOOL(RX_64, "rx_frames_below_65_octets"), \ OCELOT_STAT_ETHTOOL(RX_65_127, "rx_frames_65_to_127_octets"), \ OCELOT_STAT_ETHTOOL(RX_128_255, "rx_frames_128_to_255_octets"), \ OCELOT_STAT_ETHTOOL(RX_256_511, "rx_frames_256_to_511_octets"), \ OCELOT_STAT_ETHTOOL(RX_512_1023, "rx_frames_512_to_1023_octets"), \ OCELOT_STAT_ETHTOOL(RX_1024_1526, "rx_frames_1024_to_1526_octets"), \ OCELOT_STAT_ETHTOOL(RX_1527_MAX, "rx_frames_over_1526_octets"), \ OCELOT_STAT_ETHTOOL(RX_PAUSE, "rx_pause"), \ OCELOT_STAT_ETHTOOL(RX_CONTROL, "rx_control"), \ OCELOT_STAT_ETHTOOL(RX_LONGS, "rx_longs"), \ OCELOT_STAT_ETHTOOL(RX_CLASSIFIED_DROPS, "rx_classified_drops"), \ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_0, "rx_red_prio_0"), \ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_1, "rx_red_prio_1"), \ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_2, "rx_red_prio_2"), \ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_3, "rx_red_prio_3"), \ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_4, "rx_red_prio_4"), \ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_5, "rx_red_prio_5"), \ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_6, "rx_red_prio_6"), \ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_7, "rx_red_prio_7"), \ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_0, "rx_yellow_prio_0"), \ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_1, "rx_yellow_prio_1"), \ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_2, "rx_yellow_prio_2"), \ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_3, "rx_yellow_prio_3"), \ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_4, "rx_yellow_prio_4"), \ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_5, "rx_yellow_prio_5"), \ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_6, "rx_yellow_prio_6"), \ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_7, "rx_yellow_prio_7"), \ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_0, "rx_green_prio_0"), \ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_1, "rx_green_prio_1"), \ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_2, "rx_green_prio_2"), \ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_3, "rx_green_prio_3"), \ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_4, "rx_green_prio_4"), \ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_5, "rx_green_prio_5"), \ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_6, "rx_green_prio_6"), \ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_7, "rx_green_prio_7"), \ 
OCELOT_STAT_ETHTOOL(TX_OCTETS, "tx_octets"), \ OCELOT_STAT_ETHTOOL(TX_UNICAST, "tx_unicast"), \ OCELOT_STAT_ETHTOOL(TX_MULTICAST, "tx_multicast"), \ OCELOT_STAT_ETHTOOL(TX_BROADCAST, "tx_broadcast"), \ OCELOT_STAT_ETHTOOL(TX_COLLISION, "tx_collision"), \ OCELOT_STAT_ETHTOOL(TX_DROPS, "tx_drops"), \ OCELOT_STAT_ETHTOOL(TX_PAUSE, "tx_pause"), \ OCELOT_STAT_ETHTOOL(TX_64, "tx_frames_below_65_octets"), \ OCELOT_STAT_ETHTOOL(TX_65_127, "tx_frames_65_to_127_octets"), \ OCELOT_STAT_ETHTOOL(TX_128_255, "tx_frames_128_255_octets"), \ OCELOT_STAT_ETHTOOL(TX_256_511, "tx_frames_256_511_octets"), \ OCELOT_STAT_ETHTOOL(TX_512_1023, "tx_frames_512_1023_octets"), \ OCELOT_STAT_ETHTOOL(TX_1024_1526, "tx_frames_1024_1526_octets"), \ OCELOT_STAT_ETHTOOL(TX_1527_MAX, "tx_frames_over_1526_octets"), \ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_0, "tx_yellow_prio_0"), \ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_1, "tx_yellow_prio_1"), \ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_2, "tx_yellow_prio_2"), \ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_3, "tx_yellow_prio_3"), \ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_4, "tx_yellow_prio_4"), \ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_5, "tx_yellow_prio_5"), \ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_6, "tx_yellow_prio_6"), \ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_7, "tx_yellow_prio_7"), \ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_0, "tx_green_prio_0"), \ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_1, "tx_green_prio_1"), \ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_2, "tx_green_prio_2"), \ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_3, "tx_green_prio_3"), \ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_4, "tx_green_prio_4"), \ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_5, "tx_green_prio_5"), \ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_6, "tx_green_prio_6"), \ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_7, "tx_green_prio_7"), \ OCELOT_STAT_ETHTOOL(TX_AGED, "tx_aged"), \ OCELOT_STAT_ETHTOOL(DROP_LOCAL, "drop_local"), \ OCELOT_STAT_ETHTOOL(DROP_TAIL, "drop_tail"), \ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_0, "drop_yellow_prio_0"), \ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_1, "drop_yellow_prio_1"), \ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_2, "drop_yellow_prio_2"), \ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_3, "drop_yellow_prio_3"), \ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_4, "drop_yellow_prio_4"), \ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_5, "drop_yellow_prio_5"), \ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_6, "drop_yellow_prio_6"), \ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_7, "drop_yellow_prio_7"), \ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_0, "drop_green_prio_0"), \ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_1, "drop_green_prio_1"), \ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_2, "drop_green_prio_2"), \ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_3, "drop_green_prio_3"), \ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_4, "drop_green_prio_4"), \ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_5, "drop_green_prio_5"), \ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_6, "drop_green_prio_6"), \ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_7, "drop_green_prio_7") struct ocelot_stats_region { struct list_head node; enum ocelot_reg base; enum ocelot_stat first_stat; int count; u32 *buf; }; static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = { OCELOT_COMMON_STATS, }; static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS] = { OCELOT_COMMON_STATS, OCELOT_STAT(RX_ASSEMBLY_ERRS), OCELOT_STAT(RX_SMD_ERRS), OCELOT_STAT(RX_ASSEMBLY_OK), OCELOT_STAT(RX_MERGE_FRAGMENTS), OCELOT_STAT(TX_MERGE_FRAGMENTS), OCELOT_STAT(TX_MM_HOLD), OCELOT_STAT(RX_PMAC_OCTETS), OCELOT_STAT(RX_PMAC_UNICAST), OCELOT_STAT(RX_PMAC_MULTICAST), OCELOT_STAT(RX_PMAC_BROADCAST), 
OCELOT_STAT(RX_PMAC_SHORTS), OCELOT_STAT(RX_PMAC_FRAGMENTS), OCELOT_STAT(RX_PMAC_JABBERS), OCELOT_STAT(RX_PMAC_CRC_ALIGN_ERRS), OCELOT_STAT(RX_PMAC_SYM_ERRS), OCELOT_STAT(RX_PMAC_64), OCELOT_STAT(RX_PMAC_65_127), OCELOT_STAT(RX_PMAC_128_255), OCELOT_STAT(RX_PMAC_256_511), OCELOT_STAT(RX_PMAC_512_1023), OCELOT_STAT(RX_PMAC_1024_1526), OCELOT_STAT(RX_PMAC_1527_MAX), OCELOT_STAT(RX_PMAC_PAUSE), OCELOT_STAT(RX_PMAC_CONTROL), OCELOT_STAT(RX_PMAC_LONGS), OCELOT_STAT(TX_PMAC_OCTETS), OCELOT_STAT(TX_PMAC_UNICAST), OCELOT_STAT(TX_PMAC_MULTICAST), OCELOT_STAT(TX_PMAC_BROADCAST), OCELOT_STAT(TX_PMAC_PAUSE), OCELOT_STAT(TX_PMAC_64), OCELOT_STAT(TX_PMAC_65_127), OCELOT_STAT(TX_PMAC_128_255), OCELOT_STAT(TX_PMAC_256_511), OCELOT_STAT(TX_PMAC_512_1023), OCELOT_STAT(TX_PMAC_1024_1526), OCELOT_STAT(TX_PMAC_1527_MAX), }; static const struct ocelot_stat_layout * ocelot_get_stats_layout(struct ocelot *ocelot) { if (ocelot->mm_supported) return ocelot_mm_stats_layout; return ocelot_stats_layout; } /* Read the counters from hardware and keep them in region->buf. * Caller must hold &ocelot->stat_view_lock. */ static int ocelot_port_update_stats(struct ocelot *ocelot, int port) { struct ocelot_stats_region *region; int err; /* Configure the port to read the stats from */ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG); list_for_each_entry(region, &ocelot->stats_regions, node) { err = ocelot_bulk_read(ocelot, region->base, region->buf, region->count); if (err) return err; } return 0; } /* Transfer the counters from region->buf to ocelot->stats. * Caller must hold &ocelot->stat_view_lock and &ocelot->stats_lock. */ static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port) { struct ocelot_stats_region *region; int j; list_for_each_entry(region, &ocelot->stats_regions, node) { unsigned int idx = port * OCELOT_NUM_STATS + region->first_stat; for (j = 0; j < region->count; j++) { u64 *stat = &ocelot->stats[idx + j]; u64 val = region->buf[j]; if (val < (*stat & U32_MAX)) *stat += (u64)1 << 32; *stat = (*stat & ~(u64)U32_MAX) + val; } } } static void ocelot_check_stats_work(struct work_struct *work) { struct delayed_work *del_work = to_delayed_work(work); struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); int port, err; mutex_lock(&ocelot->stat_view_lock); for (port = 0; port < ocelot->num_phys_ports; port++) { err = ocelot_port_update_stats(ocelot, port); if (err) break; spin_lock(&ocelot->stats_lock); ocelot_port_transfer_stats(ocelot, port); spin_unlock(&ocelot->stats_lock); } if (!err && ocelot->ops->update_stats) ocelot->ops->update_stats(ocelot); mutex_unlock(&ocelot->stat_view_lock); if (err) dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); } void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) { const struct ocelot_stat_layout *layout; enum ocelot_stat i; if (sset != ETH_SS_STATS) return; layout = ocelot_get_stats_layout(ocelot); for (i = 0; i < OCELOT_NUM_STATS; i++) { if (layout[i].name[0] == '\0') continue; memcpy(data, layout[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } } EXPORT_SYMBOL(ocelot_get_strings); /* Update ocelot->stats for the given port and run the given callback */ static void ocelot_port_stats_run(struct ocelot *ocelot, int port, void *priv, void (*cb)(struct ocelot *ocelot, int port, void *priv)) { int err; mutex_lock(&ocelot->stat_view_lock); err = ocelot_port_update_stats(ocelot, port); if (err) { 
dev_err(ocelot->dev, "Failed to update port %d stats: %pe\n", port, ERR_PTR(err)); goto out_unlock; } spin_lock(&ocelot->stats_lock); ocelot_port_transfer_stats(ocelot, port); cb(ocelot, port, priv); spin_unlock(&ocelot->stats_lock); out_unlock: mutex_unlock(&ocelot->stat_view_lock); } int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) { const struct ocelot_stat_layout *layout; enum ocelot_stat i; int num_stats = 0; if (sset != ETH_SS_STATS) return -EOPNOTSUPP; layout = ocelot_get_stats_layout(ocelot); for (i = 0; i < OCELOT_NUM_STATS; i++) if (layout[i].name[0] != '\0') num_stats++; return num_stats; } EXPORT_SYMBOL(ocelot_get_sset_count); static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port, void *priv) { const struct ocelot_stat_layout *layout; enum ocelot_stat i; u64 *data = priv; layout = ocelot_get_stats_layout(ocelot); /* Copy all supported counters */ for (i = 0; i < OCELOT_NUM_STATS; i++) { int index = port * OCELOT_NUM_STATS + i; if (layout[i].name[0] == '\0') continue; *data++ = ocelot->stats[index]; } } void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) { ocelot_port_stats_run(ocelot, port, data, ocelot_port_ethtool_stats_cb); } EXPORT_SYMBOL(ocelot_get_ethtool_stats); static void ocelot_port_pause_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_pause_stats *pause_stats = priv; pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PAUSE]; pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PAUSE]; } static void ocelot_port_pmac_pause_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_pause_stats *pause_stats = priv; pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PMAC_PAUSE]; pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PMAC_PAUSE]; } static void ocelot_port_mm_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_mm_stats *stats = priv; stats->MACMergeFrameAssErrorCount = s[OCELOT_STAT_RX_ASSEMBLY_ERRS]; stats->MACMergeFrameSmdErrorCount = s[OCELOT_STAT_RX_SMD_ERRS]; stats->MACMergeFrameAssOkCount = s[OCELOT_STAT_RX_ASSEMBLY_OK]; stats->MACMergeFragCountRx = s[OCELOT_STAT_RX_MERGE_FRAGMENTS]; stats->MACMergeFragCountTx = s[OCELOT_STAT_TX_MERGE_FRAGMENTS]; stats->MACMergeHoldCount = s[OCELOT_STAT_TX_MM_HOLD]; } void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port, struct ethtool_pause_stats *pause_stats) { struct net_device *dev; switch (pause_stats->src) { case ETHTOOL_MAC_STATS_SRC_EMAC: ocelot_port_stats_run(ocelot, port, pause_stats, ocelot_port_pause_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_PMAC: if (ocelot->mm_supported) ocelot_port_stats_run(ocelot, port, pause_stats, ocelot_port_pmac_pause_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_AGGREGATE: dev = ocelot->ops->port_to_netdev(ocelot, port); ethtool_aggregate_pause_stats(dev, pause_stats); break; } } EXPORT_SYMBOL_GPL(ocelot_port_get_pause_stats); void ocelot_port_get_mm_stats(struct ocelot *ocelot, int port, struct ethtool_mm_stats *stats) { if (!ocelot->mm_supported) return; ocelot_port_stats_run(ocelot, port, stats, ocelot_port_mm_stats_cb); } EXPORT_SYMBOL_GPL(ocelot_port_get_mm_stats); static const struct ethtool_rmon_hist_range ocelot_rmon_ranges[] = { { 64, 64 }, { 65, 127 }, { 128, 255 }, { 256, 511 }, { 512, 1023 }, { 1024, 1526 }, { 1527, 65535 }, {}, }; static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void 
*priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_rmon_stats *rmon_stats = priv; rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_SHORTS]; rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_LONGS]; rmon_stats->fragments = s[OCELOT_STAT_RX_FRAGMENTS]; rmon_stats->jabbers = s[OCELOT_STAT_RX_JABBERS]; rmon_stats->hist[0] = s[OCELOT_STAT_RX_64]; rmon_stats->hist[1] = s[OCELOT_STAT_RX_65_127]; rmon_stats->hist[2] = s[OCELOT_STAT_RX_128_255]; rmon_stats->hist[3] = s[OCELOT_STAT_RX_256_511]; rmon_stats->hist[4] = s[OCELOT_STAT_RX_512_1023]; rmon_stats->hist[5] = s[OCELOT_STAT_RX_1024_1526]; rmon_stats->hist[6] = s[OCELOT_STAT_RX_1527_MAX]; rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64]; rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127]; rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255]; rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511]; rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023]; rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526]; rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX]; } static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_rmon_stats *rmon_stats = priv; rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_PMAC_SHORTS]; rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_PMAC_LONGS]; rmon_stats->fragments = s[OCELOT_STAT_RX_PMAC_FRAGMENTS]; rmon_stats->jabbers = s[OCELOT_STAT_RX_PMAC_JABBERS]; rmon_stats->hist[0] = s[OCELOT_STAT_RX_PMAC_64]; rmon_stats->hist[1] = s[OCELOT_STAT_RX_PMAC_65_127]; rmon_stats->hist[2] = s[OCELOT_STAT_RX_PMAC_128_255]; rmon_stats->hist[3] = s[OCELOT_STAT_RX_PMAC_256_511]; rmon_stats->hist[4] = s[OCELOT_STAT_RX_PMAC_512_1023]; rmon_stats->hist[5] = s[OCELOT_STAT_RX_PMAC_1024_1526]; rmon_stats->hist[6] = s[OCELOT_STAT_RX_PMAC_1527_MAX]; rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64]; rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127]; rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255]; rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511]; rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023]; rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526]; rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX]; } void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port, struct ethtool_rmon_stats *rmon_stats, const struct ethtool_rmon_hist_range **ranges) { struct net_device *dev; *ranges = ocelot_rmon_ranges; switch (rmon_stats->src) { case ETHTOOL_MAC_STATS_SRC_EMAC: ocelot_port_stats_run(ocelot, port, rmon_stats, ocelot_port_rmon_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_PMAC: if (ocelot->mm_supported) ocelot_port_stats_run(ocelot, port, rmon_stats, ocelot_port_pmac_rmon_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_AGGREGATE: dev = ocelot->ops->port_to_netdev(ocelot, port); ethtool_aggregate_rmon_stats(dev, rmon_stats); break; } } EXPORT_SYMBOL_GPL(ocelot_port_get_rmon_stats); static void ocelot_port_ctrl_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_eth_ctrl_stats *ctrl_stats = priv; ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_CONTROL]; } static void ocelot_port_pmac_ctrl_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_eth_ctrl_stats *ctrl_stats = priv; ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_PMAC_CONTROL]; } void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port, struct ethtool_eth_ctrl_stats *ctrl_stats) { struct net_device *dev; 
switch (ctrl_stats->src) { case ETHTOOL_MAC_STATS_SRC_EMAC: ocelot_port_stats_run(ocelot, port, ctrl_stats, ocelot_port_ctrl_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_PMAC: if (ocelot->mm_supported) ocelot_port_stats_run(ocelot, port, ctrl_stats, ocelot_port_pmac_ctrl_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_AGGREGATE: dev = ocelot->ops->port_to_netdev(ocelot, port); ethtool_aggregate_ctrl_stats(dev, ctrl_stats); break; } } EXPORT_SYMBOL_GPL(ocelot_port_get_eth_ctrl_stats); static void ocelot_port_mac_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_eth_mac_stats *mac_stats = priv; mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_OCTETS]; mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_64] + s[OCELOT_STAT_TX_65_127] + s[OCELOT_STAT_TX_128_255] + s[OCELOT_STAT_TX_256_511] + s[OCELOT_STAT_TX_512_1023] + s[OCELOT_STAT_TX_1024_1526] + s[OCELOT_STAT_TX_1527_MAX]; mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_OCTETS]; mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_GREEN_PRIO_0] + s[OCELOT_STAT_RX_GREEN_PRIO_1] + s[OCELOT_STAT_RX_GREEN_PRIO_2] + s[OCELOT_STAT_RX_GREEN_PRIO_3] + s[OCELOT_STAT_RX_GREEN_PRIO_4] + s[OCELOT_STAT_RX_GREEN_PRIO_5] + s[OCELOT_STAT_RX_GREEN_PRIO_6] + s[OCELOT_STAT_RX_GREEN_PRIO_7] + s[OCELOT_STAT_RX_YELLOW_PRIO_0] + s[OCELOT_STAT_RX_YELLOW_PRIO_1] + s[OCELOT_STAT_RX_YELLOW_PRIO_2] + s[OCELOT_STAT_RX_YELLOW_PRIO_3] + s[OCELOT_STAT_RX_YELLOW_PRIO_4] + s[OCELOT_STAT_RX_YELLOW_PRIO_5] + s[OCELOT_STAT_RX_YELLOW_PRIO_6] + s[OCELOT_STAT_RX_YELLOW_PRIO_7]; mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_MULTICAST]; mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_BROADCAST]; mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_MULTICAST]; mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_BROADCAST]; mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_LONGS]; /* Sadly, C_RX_CRC is the sum of FCS and alignment errors, they are not * counted individually. */ mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS]; mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS]; } static void ocelot_port_pmac_mac_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_eth_mac_stats *mac_stats = priv; mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_PMAC_OCTETS]; mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_PMAC_64] + s[OCELOT_STAT_TX_PMAC_65_127] + s[OCELOT_STAT_TX_PMAC_128_255] + s[OCELOT_STAT_TX_PMAC_256_511] + s[OCELOT_STAT_TX_PMAC_512_1023] + s[OCELOT_STAT_TX_PMAC_1024_1526] + s[OCELOT_STAT_TX_PMAC_1527_MAX]; mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_PMAC_OCTETS]; mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_PMAC_64] + s[OCELOT_STAT_RX_PMAC_65_127] + s[OCELOT_STAT_RX_PMAC_128_255] + s[OCELOT_STAT_RX_PMAC_256_511] + s[OCELOT_STAT_RX_PMAC_512_1023] + s[OCELOT_STAT_RX_PMAC_1024_1526] + s[OCELOT_STAT_RX_PMAC_1527_MAX]; mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_MULTICAST]; mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_BROADCAST]; mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_MULTICAST]; mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_BROADCAST]; mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_PMAC_LONGS]; /* Sadly, C_RX_CRC is the sum of FCS and alignment errors, they are not * counted individually. 
*/ mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS]; mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS]; } void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port, struct ethtool_eth_mac_stats *mac_stats) { struct net_device *dev; switch (mac_stats->src) { case ETHTOOL_MAC_STATS_SRC_EMAC: ocelot_port_stats_run(ocelot, port, mac_stats, ocelot_port_mac_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_PMAC: if (ocelot->mm_supported) ocelot_port_stats_run(ocelot, port, mac_stats, ocelot_port_pmac_mac_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_AGGREGATE: dev = ocelot->ops->port_to_netdev(ocelot, port); ethtool_aggregate_mac_stats(dev, mac_stats); break; } } EXPORT_SYMBOL_GPL(ocelot_port_get_eth_mac_stats); static void ocelot_port_phy_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_eth_phy_stats *phy_stats = priv; phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_SYM_ERRS]; } static void ocelot_port_pmac_phy_stats_cb(struct ocelot *ocelot, int port, void *priv) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; struct ethtool_eth_phy_stats *phy_stats = priv; phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_PMAC_SYM_ERRS]; } void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port, struct ethtool_eth_phy_stats *phy_stats) { struct net_device *dev; switch (phy_stats->src) { case ETHTOOL_MAC_STATS_SRC_EMAC: ocelot_port_stats_run(ocelot, port, phy_stats, ocelot_port_phy_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_PMAC: if (ocelot->mm_supported) ocelot_port_stats_run(ocelot, port, phy_stats, ocelot_port_pmac_phy_stats_cb); break; case ETHTOOL_MAC_STATS_SRC_AGGREGATE: dev = ocelot->ops->port_to_netdev(ocelot, port); ethtool_aggregate_phy_stats(dev, phy_stats); break; } } EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats); void ocelot_port_get_stats64(struct ocelot *ocelot, int port, struct rtnl_link_stats64 *stats) { u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; spin_lock(&ocelot->stats_lock); /* Get Rx stats */ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS]; stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] + s[OCELOT_STAT_RX_FRAGMENTS] + s[OCELOT_STAT_RX_JABBERS] + s[OCELOT_STAT_RX_LONGS] + s[OCELOT_STAT_RX_64] + s[OCELOT_STAT_RX_65_127] + s[OCELOT_STAT_RX_128_255] + s[OCELOT_STAT_RX_256_511] + s[OCELOT_STAT_RX_512_1023] + s[OCELOT_STAT_RX_1024_1526] + s[OCELOT_STAT_RX_1527_MAX]; stats->multicast = s[OCELOT_STAT_RX_MULTICAST]; stats->rx_missed_errors = s[OCELOT_STAT_DROP_TAIL]; stats->rx_dropped = s[OCELOT_STAT_RX_RED_PRIO_0] + s[OCELOT_STAT_RX_RED_PRIO_1] + s[OCELOT_STAT_RX_RED_PRIO_2] + s[OCELOT_STAT_RX_RED_PRIO_3] + s[OCELOT_STAT_RX_RED_PRIO_4] + s[OCELOT_STAT_RX_RED_PRIO_5] + s[OCELOT_STAT_RX_RED_PRIO_6] + s[OCELOT_STAT_RX_RED_PRIO_7] + s[OCELOT_STAT_DROP_LOCAL] + s[OCELOT_STAT_DROP_YELLOW_PRIO_0] + s[OCELOT_STAT_DROP_YELLOW_PRIO_1] + s[OCELOT_STAT_DROP_YELLOW_PRIO_2] + s[OCELOT_STAT_DROP_YELLOW_PRIO_3] + s[OCELOT_STAT_DROP_YELLOW_PRIO_4] + s[OCELOT_STAT_DROP_YELLOW_PRIO_5] + s[OCELOT_STAT_DROP_YELLOW_PRIO_6] + s[OCELOT_STAT_DROP_YELLOW_PRIO_7] + s[OCELOT_STAT_DROP_GREEN_PRIO_0] + s[OCELOT_STAT_DROP_GREEN_PRIO_1] + s[OCELOT_STAT_DROP_GREEN_PRIO_2] + s[OCELOT_STAT_DROP_GREEN_PRIO_3] + s[OCELOT_STAT_DROP_GREEN_PRIO_4] + s[OCELOT_STAT_DROP_GREEN_PRIO_5] + s[OCELOT_STAT_DROP_GREEN_PRIO_6] + s[OCELOT_STAT_DROP_GREEN_PRIO_7]; /* Get Tx stats */ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS]; stats->tx_packets = s[OCELOT_STAT_TX_64] + s[OCELOT_STAT_TX_65_127] + 
s[OCELOT_STAT_TX_128_255] + s[OCELOT_STAT_TX_256_511] + s[OCELOT_STAT_TX_512_1023] + s[OCELOT_STAT_TX_1024_1526] + s[OCELOT_STAT_TX_1527_MAX]; stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] + s[OCELOT_STAT_TX_AGED]; stats->collisions = s[OCELOT_STAT_TX_COLLISION]; spin_unlock(&ocelot->stats_lock); } EXPORT_SYMBOL(ocelot_port_get_stats64); static int ocelot_prepare_stats_regions(struct ocelot *ocelot) { struct ocelot_stats_region *region = NULL; const struct ocelot_stat_layout *layout; enum ocelot_reg last = 0; enum ocelot_stat i; INIT_LIST_HEAD(&ocelot->stats_regions); layout = ocelot_get_stats_layout(ocelot); for (i = 0; i < OCELOT_NUM_STATS; i++) { if (!layout[i].reg) continue; /* enum ocelot_stat must be kept sorted in the same order * as the addresses behind layout[i].reg in order to have * efficient bulking */ if (last) { WARN(ocelot->map[SYS][last & REG_MASK] >= ocelot->map[SYS][layout[i].reg & REG_MASK], "reg 0x%x had address 0x%x but reg 0x%x has address 0x%x, bulking broken!", last, ocelot->map[SYS][last & REG_MASK], layout[i].reg, ocelot->map[SYS][layout[i].reg & REG_MASK]); } if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] == ocelot->map[SYS][last & REG_MASK] + 4) { region->count++; } else { region = devm_kzalloc(ocelot->dev, sizeof(*region), GFP_KERNEL); if (!region) return -ENOMEM; region->base = layout[i].reg; region->first_stat = i; region->count = 1; list_add_tail(&region->node, &ocelot->stats_regions); } last = layout[i].reg; } list_for_each_entry(region, &ocelot->stats_regions, node) { enum ocelot_target target; u32 addr; ocelot_reg_to_target_addr(ocelot, region->base, &target, &addr); dev_dbg(ocelot->dev, "region of %d contiguous counters starting with SYS:STAT:CNT[0x%03x]\n", region->count, addr / 4); region->buf = devm_kcalloc(ocelot->dev, region->count, sizeof(*region->buf), GFP_KERNEL); if (!region->buf) return -ENOMEM; } return 0; } int ocelot_stats_init(struct ocelot *ocelot) { char queue_name[32]; int ret; ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * OCELOT_NUM_STATS, sizeof(u64), GFP_KERNEL); if (!ocelot->stats) return -ENOMEM; snprintf(queue_name, sizeof(queue_name), "%s-stats", dev_name(ocelot->dev)); ocelot->stats_queue = create_singlethread_workqueue(queue_name); if (!ocelot->stats_queue) return -ENOMEM; spin_lock_init(&ocelot->stats_lock); mutex_init(&ocelot->stat_view_lock); ret = ocelot_prepare_stats_regions(ocelot); if (ret) { destroy_workqueue(ocelot->stats_queue); return ret; } INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); return 0; } void ocelot_stats_deinit(struct ocelot *ocelot) { cancel_delayed_work(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); }
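/*
 * Illustrative sketch (not part of the driver): the carry folding done by
 * ocelot_port_transfer_stats() above, shown in isolation. The hardware
 * exposes free-running 32-bit counters; the driver keeps 64-bit software
 * counters and, on each poll, treats a low word that went backwards as
 * exactly one 32-bit wraparound since the previous poll. For example,
 * folding a hardware reading of 0x10 into an accumulated 0x1fffffff0
 * yields 0x200000010. The helper name below is made up for this example.
 */
static inline u64 example_fold_u32_counter(u64 accumulated, u32 hw_val)
{
	/* Low word went backwards: assume a single 32-bit wraparound. */
	if (hw_val < (accumulated & U32_MAX))
		accumulated += (u64)1 << 32;

	/* Keep the accumulated upper word, adopt the fresh low word. */
	return (accumulated & ~(u64)U32_MAX) + hw_val;
}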
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Expected tgid per program index, typically set by the loading test. */
__u32 pids[3];
/* Hit counters: test[i][0] = hits from the expected tgid, test[i][1] = hits from any other task. */
__u32 test[3][2];

static void update_pid(int idx)
{
	/* The upper 32 bits of the helper's return value hold the tgid. */
	__u32 pid = bpf_get_current_pid_tgid() >> 32;

	if (pid == pids[idx])
		test[idx][0]++;
	else
		test[idx][1]++;
}

SEC("uprobe.multi")
int uprobe_multi_0(struct pt_regs *ctx)
{
	update_pid(0);
	return 0;
}

SEC("uprobe.multi")
int uprobe_multi_1(struct pt_regs *ctx)
{
	update_pid(1);
	return 0;
}

SEC("uprobe.multi")
int uprobe_multi_2(struct pt_regs *ctx)
{
	update_pid(2);
	return 0;
}
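/*
 * Minimal userspace sketch (illustrative only, not part of this object):
 * one plausible way to attach the three programs above with libbpf,
 * assuming a libbpf recent enough to provide
 * bpf_program__attach_uprobe_multi() and a generated skeleton header named
 * "uprobe_multi_pid.skel.h". The skeleton name, the traced binary path and
 * the "uprobe_multi_func_*" glob are assumptions made for the example; the
 * other two programs would be attached the same way.
 */
#if 0	/* userspace example, kept out of the BPF object build */
#include <unistd.h>
#include <bpf/libbpf.h>
#include "uprobe_multi_pid.skel.h"

int attach_example(void)
{
	struct uprobe_multi_pid *skel;
	struct bpf_link *link;

	skel = uprobe_multi_pid__open_and_load();
	if (!skel)
		return -1;

	/* Tell the BPF side which tgid index 0 should treat as "expected". */
	skel->bss->pids[0] = getpid();

	/* Attach program 0 to every function matching the glob. */
	link = bpf_program__attach_uprobe_multi(skel->progs.uprobe_multi_0,
						-1 /* all processes */,
						"/proc/self/exe",
						"uprobe_multi_func_*",
						NULL);
	if (!link) {
		uprobe_multi_pid__destroy(skel);
		return -1;
	}
	return 0;
}
#endif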
// SPDX-License-Identifier: GPL-2.0 #include <linux/cache.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/pid_namespace.h> #include "internal.h" /* * /proc/thread_self: */ static const char *proc_thread_self_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { struct pid_namespace *ns = proc_pid_ns(inode->i_sb); pid_t tgid = task_tgid_nr_ns(current, ns); pid_t pid = task_pid_nr_ns(current, ns); char *name; if (!pid) return ERR_PTR(-ENOENT); name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!name)) return dentry ? ERR_PTR(-ENOMEM) : ERR_PTR(-ECHILD); sprintf(name, "%u/task/%u", tgid, pid); set_delayed_call(done, kfree_link, name); return name; } static const struct inode_operations proc_thread_self_inode_operations = { .get_link = proc_thread_self_get_link, }; static unsigned thread_self_inum __ro_after_init; int proc_setup_thread_self(struct super_block *s) { struct inode *root_inode = d_inode(s->s_root); struct proc_fs_info *fs_info = proc_sb_info(s); struct dentry *thread_self; int ret = -ENOMEM; inode_lock(root_inode); thread_self = d_alloc_name(s->s_root, "thread-self"); if (thread_self) { struct inode *inode = new_inode(s); if (inode) { inode->i_ino = thread_self_inum; simple_inode_init_ts(inode); inode->i_mode = S_IFLNK | S_IRWXUGO; inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; inode->i_op = &proc_thread_self_inode_operations; d_add(thread_self, inode); ret = 0; } else { dput(thread_self); } } inode_unlock(root_inode); if (ret) pr_err("proc_fill_super: can't allocate /proc/thread-self\n"); else fs_info->proc_thread_self = thread_self; return ret; } void __init proc_thread_self_init(void) { proc_alloc_inum(&thread_self_inum); }
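/*
 * Illustrative userspace sketch (not part of this file): the link target
 * built by proc_thread_self_get_link() above is what a reader of
 * /proc/thread-self observes, e.g. a thread with tgid 1234 and tid 1237
 * sees it resolve to "1234/task/1237" relative to /proc.
 */
#if 0	/* userspace example only */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t len = readlink("/proc/thread-self", buf, sizeof(buf) - 1);

	if (len < 0)
		return 1;
	buf[len] = '\0';
	printf("/proc/thread-self -> %s\n", buf);	/* e.g. "1234/task/1237" */
	return 0;
}
#endif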
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <[email protected]>
 *
 * membarrier system call
 */

/*
 * For documentation purposes, here are some membarrier ordering
 * scenarios to keep in mind:
 *
 * A) Userspace thread execution after IPI vs membarrier's memory
 *    barrier before sending the IPI
 *
 * Userspace variables:
 *
 * int x = 0, y = 0;
 *
 * The memory barrier at the start of membarrier() on CPU0 is necessary in
 * order to enforce the guarantee that any writes occurring on CPU0 before
 * the membarrier() is executed will be visible to any code executing on
 * CPU1 after the IPI-induced memory barrier:
 *
 *                        CPU0                      CPU1
 *
 *                        x = 1
 *                        membarrier():
 *                          a: smp_mb()
 *                          b: send IPI       IPI-induced mb
 *                          c: smp_mb()
 *                                            r2 = y
 *                                            y = 1
 *                                            barrier()
 *                                            r1 = x
 *
 *                        BUG_ON(r1 == 0 && r2 == 0)
 *
 * The write to y and load from x by CPU1 are unordered by the hardware,
 * so it's possible to have "r1 = x" reordered before "y = 1" at any
 * point after (b). If the memory barrier at (a) is omitted, then "x = 1"
 * can be reordered after (a) (although not after (c)), so we get r1 == 0
 * and r2 == 0. This violates the guarantee that membarrier() is
 * supposed to provide.
 *
 * The timing of the memory barrier at (a) has to ensure that it executes
 * before the IPI-induced memory barrier on CPU1.
 *
 * B) Userspace thread execution before IPI vs membarrier's memory
 *    barrier after completing the IPI
 *
 * Userspace variables:
 *
 * int x = 0, y = 0;
 *
 * The memory barrier at the end of membarrier() on CPU0 is necessary in
 * order to enforce the guarantee that any writes occurring on CPU1 before
 * the membarrier() is executed will be visible to any code executing on
 * CPU0 after the membarrier():
 *
 *                        CPU0                      CPU1
 *
 *                                            x = 1
 *                                            barrier()
 *                                            y = 1
 *                        r2 = y
 *                        membarrier():
 *                          a: smp_mb()
 *                          b: send IPI       IPI-induced mb
 *                          c: smp_mb()
 *                        r1 = x
 *                        BUG_ON(r1 == 0 && r2 == 1)
 *
 * The writes to x and y are unordered by the hardware, so it's possible to
 * have "r2 = 1" even though the write to x doesn't execute until (b). If
 * the memory barrier at (c) is omitted then "r1 = x" can be reordered
 * before (b) (although not before (a)), so we get "r1 = 0". This violates
 * the guarantee that membarrier() is supposed to provide.
 *
 * The timing of the memory barrier at (c) has to ensure that it executes
 * after the IPI-induced memory barrier on CPU1.
 *
 * C) Scheduling userspace thread -> kthread -> userspace thread vs membarrier
 *
 *                           CPU0                     CPU1
 *
 *                           membarrier():
 *                           a: smp_mb()
 *                                                    d: switch to kthread (includes mb)
 *                           b: read rq->curr->mm == NULL
 *                                                    e: switch to user (includes mb)
 *                           c: smp_mb()
 *
 * Using the scenario from (A), we can show that (a) needs to be paired
 * with (e). Using the scenario from (B), we can show that (c) needs to
 * be paired with (d).
 *
 * D) exit_mm vs membarrier
 *
 * Two thread groups are created, A and B. Thread group B is created by
 * issuing clone from group A with flag CLONE_VM set, but not CLONE_THREAD.
 * Let's assume we have a single thread within each thread group (Thread A
 * and Thread B). Thread A runs on CPU0, Thread B runs on CPU1.
 *
 *                        CPU0                      CPU1
 *
 *                        membarrier():
 *                          a: smp_mb()
 *                                                  exit_mm():
 *                                                    d: smp_mb()
 *                                                    e: current->mm = NULL
 *                        b: read rq->curr->mm == NULL
 *                        c: smp_mb()
 *
 * Using scenario (B), we can show that (c) needs to be paired with (d).
* * E) kthread_{use,unuse}_mm vs membarrier * * CPU0 CPU1 * * membarrier(): * a: smp_mb() * kthread_unuse_mm() * d: smp_mb() * e: current->mm = NULL * b: read rq->curr->mm == NULL * kthread_use_mm() * f: current->mm = mm * g: smp_mb() * c: smp_mb() * * Using the scenario from (A), we can show that (a) needs to be paired * with (g). Using the scenario from (B), we can show that (c) needs to * be paired with (d). */ /* * Bitmask made from a "or" of all commands within enum membarrier_cmd, * except MEMBARRIER_CMD_QUERY. */ #ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE #define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \ (MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE \ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE) #else #define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0 #endif #ifdef CONFIG_RSEQ #define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \ (MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ) #else #define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0 #endif #define MEMBARRIER_CMD_BITMASK \ (MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \ | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \ | MEMBARRIER_CMD_PRIVATE_EXPEDITED \ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \ | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \ | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \ | MEMBARRIER_CMD_GET_REGISTRATIONS) static DEFINE_MUTEX(membarrier_ipi_mutex); #define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex) static void ipi_mb(void *info) { smp_mb(); /* IPIs should be serializing but paranoid. */ } static void ipi_sync_core(void *info) { /* * The smp_mb() in membarrier after all the IPIs is supposed to * ensure that memory on remote CPUs that occur before the IPI * become visible to membarrier()'s caller -- see scenario B in * the big comment at the top of this file. * * A sync_core() would provide this guarantee, but * sync_core_before_usermode() might end up being deferred until * after membarrier()'s smp_mb(). */ smp_mb(); /* IPIs should be serializing but paranoid. */ sync_core_before_usermode(); } static void ipi_rseq(void *info) { /* * Ensure that all stores done by the calling thread are visible * to the current task before the current task resumes. We could * probably optimize this away on most architectures, but by the * time we've already sent an IPI, the cost of the extra smp_mb() * is negligible. */ smp_mb(); rseq_preempt(current); } static void ipi_sync_rq_state(void *info) { struct mm_struct *mm = (struct mm_struct *) info; if (current->mm != mm) return; this_cpu_write(runqueues.membarrier_state, atomic_read(&mm->membarrier_state)); /* * Issue a memory barrier after setting * MEMBARRIER_STATE_GLOBAL_EXPEDITED in the current runqueue to * guarantee that no memory access following registration is reordered * before registration. */ smp_mb(); } void membarrier_exec_mmap(struct mm_struct *mm) { /* * Issue a memory barrier before clearing membarrier_state to * guarantee that no memory access prior to exec is reordered after * clearing this state. */ smp_mb(); atomic_set(&mm->membarrier_state, 0); /* * Keep the runqueue membarrier_state in sync with this mm * membarrier_state. 
*/ this_cpu_write(runqueues.membarrier_state, 0); } void membarrier_update_current_mm(struct mm_struct *next_mm) { struct rq *rq = this_rq(); int membarrier_state = 0; if (next_mm) membarrier_state = atomic_read(&next_mm->membarrier_state); if (READ_ONCE(rq->membarrier_state) == membarrier_state) return; WRITE_ONCE(rq->membarrier_state, membarrier_state); } static int membarrier_global_expedited(void) { int cpu; cpumask_var_t tmpmask; if (num_online_cpus() == 1) return 0; /* * Matches memory barriers after rq->curr modification in * scheduler. */ smp_mb(); /* system call entry is not a mb. */ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) return -ENOMEM; SERIALIZE_IPI(); cpus_read_lock(); rcu_read_lock(); for_each_online_cpu(cpu) { struct task_struct *p; /* * Skipping the current CPU is OK even through we can be * migrated at any point. The current CPU, at the point * where we read raw_smp_processor_id(), is ensured to * be in program order with respect to the caller * thread. Therefore, we can skip this CPU from the * iteration. */ if (cpu == raw_smp_processor_id()) continue; if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) & MEMBARRIER_STATE_GLOBAL_EXPEDITED)) continue; /* * Skip the CPU if it runs a kernel thread which is not using * a task mm. */ p = rcu_dereference(cpu_rq(cpu)->curr); if (!p->mm) continue; __cpumask_set_cpu(cpu, tmpmask); } rcu_read_unlock(); preempt_disable(); smp_call_function_many(tmpmask, ipi_mb, NULL, 1); preempt_enable(); free_cpumask_var(tmpmask); cpus_read_unlock(); /* * Memory barrier on the caller thread _after_ we finished * waiting for the last IPI. Matches memory barriers before * rq->curr modification in scheduler. */ smp_mb(); /* exit from system call is not a mb */ return 0; } static int membarrier_private_expedited(int flags, int cpu_id) { cpumask_var_t tmpmask; struct mm_struct *mm = current->mm; smp_call_func_t ipi_func = ipi_mb; if (flags == MEMBARRIER_FLAG_SYNC_CORE) { if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE)) return -EINVAL; if (!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY)) return -EPERM; ipi_func = ipi_sync_core; prepare_sync_core_cmd(mm); } else if (flags == MEMBARRIER_FLAG_RSEQ) { if (!IS_ENABLED(CONFIG_RSEQ)) return -EINVAL; if (!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY)) return -EPERM; ipi_func = ipi_rseq; } else { WARN_ON_ONCE(flags); if (!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) return -EPERM; } if (flags != MEMBARRIER_FLAG_SYNC_CORE && (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)) return 0; /* * Matches memory barriers after rq->curr modification in * scheduler. * * On RISC-V, this barrier pairing is also needed for the * SYNC_CORE command when switching between processes, cf. * the inline comments in membarrier_arch_switch_mm(). */ smp_mb(); /* system call entry is not a mb. 
*/ if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) return -ENOMEM; SERIALIZE_IPI(); cpus_read_lock(); if (cpu_id >= 0) { struct task_struct *p; if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id)) goto out; rcu_read_lock(); p = rcu_dereference(cpu_rq(cpu_id)->curr); if (!p || p->mm != mm) { rcu_read_unlock(); goto out; } rcu_read_unlock(); } else { int cpu; rcu_read_lock(); for_each_online_cpu(cpu) { struct task_struct *p; p = rcu_dereference(cpu_rq(cpu)->curr); if (p && p->mm == mm) __cpumask_set_cpu(cpu, tmpmask); } rcu_read_unlock(); } if (cpu_id >= 0) { /* * smp_call_function_single() will call ipi_func() if cpu_id * is the calling CPU. */ smp_call_function_single(cpu_id, ipi_func, NULL, 1); } else { /* * For regular membarrier, we can save a few cycles by * skipping the current cpu -- we're about to do smp_mb() * below, and if we migrate to a different cpu, this cpu * and the new cpu will execute a full barrier in the * scheduler. * * For SYNC_CORE, we do need a barrier on the current cpu -- * otherwise, if we are migrated and replaced by a different * task in the same mm just before, during, or after * membarrier, we will end up with some thread in the mm * running without a core sync. * * For RSEQ, don't rseq_preempt() the caller. User code * is not supposed to issue syscalls at all from inside an * rseq critical section. */ if (flags != MEMBARRIER_FLAG_SYNC_CORE) { preempt_disable(); smp_call_function_many(tmpmask, ipi_func, NULL, true); preempt_enable(); } else { on_each_cpu_mask(tmpmask, ipi_func, NULL, true); } } out: if (cpu_id < 0) free_cpumask_var(tmpmask); cpus_read_unlock(); /* * Memory barrier on the caller thread _after_ we finished * waiting for the last IPI. Matches memory barriers before * rq->curr modification in scheduler. */ smp_mb(); /* exit from system call is not a mb */ return 0; } static int sync_runqueues_membarrier_state(struct mm_struct *mm) { int membarrier_state = atomic_read(&mm->membarrier_state); cpumask_var_t tmpmask; int cpu; if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) { this_cpu_write(runqueues.membarrier_state, membarrier_state); /* * For single mm user, we can simply issue a memory barrier * after setting MEMBARRIER_STATE_GLOBAL_EXPEDITED in the * mm and in the current runqueue to guarantee that no memory * access following registration is reordered before * registration. */ smp_mb(); return 0; } if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) return -ENOMEM; /* * For mm with multiple users, we need to ensure all future * scheduler executions will observe @mm's new membarrier * state. */ synchronize_rcu(); /* * For each cpu runqueue, if the task's mm match @mm, ensure that all * @mm's membarrier state set bits are also set in the runqueue's * membarrier state. This ensures that a runqueue scheduling * between threads which are users of @mm has its membarrier state * updated. 
*/ SERIALIZE_IPI(); cpus_read_lock(); rcu_read_lock(); for_each_online_cpu(cpu) { struct rq *rq = cpu_rq(cpu); struct task_struct *p; p = rcu_dereference(rq->curr); if (p && p->mm == mm) __cpumask_set_cpu(cpu, tmpmask); } rcu_read_unlock(); on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true); free_cpumask_var(tmpmask); cpus_read_unlock(); return 0; } static int membarrier_register_global_expedited(void) { struct task_struct *p = current; struct mm_struct *mm = p->mm; int ret; if (atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY) return 0; atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state); ret = sync_runqueues_membarrier_state(mm); if (ret) return ret; atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY, &mm->membarrier_state); return 0; } static int membarrier_register_private_expedited(int flags) { struct task_struct *p = current; struct mm_struct *mm = p->mm; int ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY, set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED, ret; if (flags == MEMBARRIER_FLAG_SYNC_CORE) { if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE)) return -EINVAL; ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY; } else if (flags == MEMBARRIER_FLAG_RSEQ) { if (!IS_ENABLED(CONFIG_RSEQ)) return -EINVAL; ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY; } else { WARN_ON_ONCE(flags); } /* * We need to consider threads belonging to different thread * groups, which use the same mm. (CLONE_VM but not * CLONE_THREAD). */ if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state) return 0; if (flags & MEMBARRIER_FLAG_SYNC_CORE) set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE; if (flags & MEMBARRIER_FLAG_RSEQ) set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ; atomic_or(set_state, &mm->membarrier_state); ret = sync_runqueues_membarrier_state(mm); if (ret) return ret; atomic_or(ready_state, &mm->membarrier_state); return 0; } static int membarrier_get_registrations(void) { struct task_struct *p = current; struct mm_struct *mm = p->mm; int registrations_mask = 0, membarrier_state, i; static const int states[] = { MEMBARRIER_STATE_GLOBAL_EXPEDITED | MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY, MEMBARRIER_STATE_PRIVATE_EXPEDITED | MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY, MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE | MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY, MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ | MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY }; static const int registration_cmds[] = { MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ }; BUILD_BUG_ON(ARRAY_SIZE(states) != ARRAY_SIZE(registration_cmds)); membarrier_state = atomic_read(&mm->membarrier_state); for (i = 0; i < ARRAY_SIZE(states); ++i) { if (membarrier_state & states[i]) { registrations_mask |= registration_cmds[i]; membarrier_state &= ~states[i]; } } WARN_ON_ONCE(membarrier_state != 0); return registrations_mask; } /** * sys_membarrier - issue memory barriers on a set of threads * @cmd: Takes command values defined in enum membarrier_cmd. * @flags: Currently needs to be 0 for all commands other than * MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: in the latter * case it can be MEMBARRIER_CMD_FLAG_CPU, indicating that @cpu_id * contains the CPU on which to interrupt (= restart) * the RSEQ critical section. 
* @cpu_id: if @flags == MEMBARRIER_CMD_FLAG_CPU, indicates the cpu on which * RSEQ CS should be interrupted (@cmd must be * MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ). * * If this system call is not implemented, -ENOSYS is returned. If the * command specified does not exist, not available on the running * kernel, or if the command argument is invalid, this system call * returns -EINVAL. For a given command, with flags argument set to 0, * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to * always return the same value until reboot. In addition, it can return * -ENOMEM if there is not enough memory available to perform the system * call. * * All memory accesses performed in program order from each targeted thread * is guaranteed to be ordered with respect to sys_membarrier(). If we use * the semantic "barrier()" to represent a compiler barrier forcing memory * accesses to be performed in program order across the barrier, and * smp_mb() to represent explicit memory barriers forcing full memory * ordering across the barrier, we have the following ordering table for * each pair of barrier(), sys_membarrier() and smp_mb(): * * The pair ordering is detailed as (O: ordered, X: not ordered): * * barrier() smp_mb() sys_membarrier() * barrier() X X O * smp_mb() X O O * sys_membarrier() O O O */ SYSCALL_DEFINE3(membarrier, int, cmd, unsigned int, flags, int, cpu_id) { switch (cmd) { case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: if (unlikely(flags && flags != MEMBARRIER_CMD_FLAG_CPU)) return -EINVAL; break; default: if (unlikely(flags)) return -EINVAL; } if (!(flags & MEMBARRIER_CMD_FLAG_CPU)) cpu_id = -1; switch (cmd) { case MEMBARRIER_CMD_QUERY: { int cmd_mask = MEMBARRIER_CMD_BITMASK; if (tick_nohz_full_enabled()) cmd_mask &= ~MEMBARRIER_CMD_GLOBAL; return cmd_mask; } case MEMBARRIER_CMD_GLOBAL: /* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */ if (tick_nohz_full_enabled()) return -EINVAL; if (num_online_cpus() > 1) synchronize_rcu(); return 0; case MEMBARRIER_CMD_GLOBAL_EXPEDITED: return membarrier_global_expedited(); case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED: return membarrier_register_global_expedited(); case MEMBARRIER_CMD_PRIVATE_EXPEDITED: return membarrier_private_expedited(0, cpu_id); case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED: return membarrier_register_private_expedited(0); case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE: return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE, cpu_id); case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE: return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE); case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, cpu_id); case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ: return membarrier_register_private_expedited(MEMBARRIER_FLAG_RSEQ); case MEMBARRIER_CMD_GET_REGISTRATIONS: return membarrier_get_registrations(); default: return -EINVAL; } }
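/*
 * Minimal userspace sketch (illustrative, not part of this file) of the
 * private expedited command documented above: a process registers once
 * with MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, after which
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED issues memory barriers on all of its
 * running threads. There is no glibc wrapper, so the raw syscall is used;
 * error handling is reduced to the bare minimum.
 */
#if 0	/* userspace example only */
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int private_expedited_barrier(void)
{
	/* One-time registration per mm; repeated calls simply return 0. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
		return -1;

	/* Order prior memory accesses against all threads of this process. */
	return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
}
#endif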
// SPDX-License-Identifier: GPL-2.0-only /* * drivers/hwmon/applesmc.c - driver for Apple's SMC (accelerometer, temperature * sensors, fan control, keyboard backlight control) used in Intel-based Apple * computers. * * Copyright (C) 2007 Nicolas Boichat <[email protected]> * Copyright (C) 2010 Henrik Rydberg <[email protected]> * * Based on hdaps.c driver: * Copyright (C) 2005 Robert Love <[email protected]> * Copyright (C) 2005 Jesper Juhl <[email protected]> * * Fan control based on smcFanControl: * Copyright (C) 2006 Hendrik Holtmann <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/dmi.h> #include <linux/mutex.h> #include <linux/hwmon-sysfs.h> #include <linux/io.h> #include <linux/leds.h> #include <linux/hwmon.h> #include <linux/workqueue.h> #include <linux/err.h> #include <linux/bits.h> /* data port used by Apple SMC */ #define APPLESMC_DATA_PORT 0x300 /* command/status port used by Apple SMC */ #define APPLESMC_CMD_PORT 0x304 #define APPLESMC_NR_PORTS 32 /* 0x300-0x31f */ #define APPLESMC_MAX_DATA_LENGTH 32 /* Apple SMC status bits */ #define SMC_STATUS_AWAITING_DATA BIT(0) /* SMC has data waiting to be read */ #define SMC_STATUS_IB_CLOSED BIT(1) /* Will ignore any input */ #define SMC_STATUS_BUSY BIT(2) /* Command in progress */ /* Initial wait is 8us */ #define APPLESMC_MIN_WAIT 0x0008 #define APPLESMC_READ_CMD 0x10 #define APPLESMC_WRITE_CMD 0x11 #define APPLESMC_GET_KEY_BY_INDEX_CMD 0x12 #define APPLESMC_GET_KEY_TYPE_CMD 0x13 #define KEY_COUNT_KEY "#KEY" /* r-o ui32 */ #define LIGHT_SENSOR_LEFT_KEY "ALV0" /* r-o {alv (6-10 bytes) */ #define LIGHT_SENSOR_RIGHT_KEY "ALV1" /* r-o {alv (6-10 bytes) */ #define BACKLIGHT_KEY "LKSB" /* w-o {lkb (2 bytes) */ #define CLAMSHELL_KEY "MSLD" /* r-o ui8 (unused) */ #define MOTION_SENSOR_X_KEY "MO_X" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_Y_KEY "MO_Y" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_Z_KEY "MO_Z" /* r-o sp78 (2 bytes) */ #define MOTION_SENSOR_KEY "MOCN" /* r/w ui16 */ #define FANS_COUNT "FNum" /* r-o ui8 */ #define FANS_MANUAL "FS! " /* r-w ui16 */ #define FAN_ID_FMT "F%dID" /* r-o char[16] */ #define TEMP_SENSOR_TYPE "sp78" /* List of keys used to read/write fan speeds */ static const char *const fan_speed_fmt[] = { "F%dAc", /* actual speed */ "F%dMn", /* minimum speed (rw) */ "F%dMx", /* maximum speed */ "F%dSf", /* safe speed - not all models */ "F%dTg", /* target speed (manual: rw) */ }; #define INIT_TIMEOUT_MSECS 5000 /* wait up to 5s for device init ... */ #define INIT_WAIT_MSECS 50 /* ... 
in 50ms increments */ #define APPLESMC_POLL_INTERVAL 50 /* msecs */ #define APPLESMC_INPUT_FUZZ 4 /* input event threshold */ #define APPLESMC_INPUT_FLAT 4 #define to_index(attr) (to_sensor_dev_attr(attr)->index & 0xffff) #define to_option(attr) (to_sensor_dev_attr(attr)->index >> 16) /* Dynamic device node attributes */ struct applesmc_dev_attr { struct sensor_device_attribute sda; /* hwmon attributes */ char name[32]; /* room for node file name */ }; /* Dynamic device node group */ struct applesmc_node_group { char *format; /* format string */ void *show; /* show function */ void *store; /* store function */ int option; /* function argument */ struct applesmc_dev_attr *nodes; /* dynamic node array */ }; /* AppleSMC entry - cached register information */ struct applesmc_entry { char key[5]; /* four-letter key code */ u8 valid; /* set when entry is successfully read once */ u8 len; /* bounded by APPLESMC_MAX_DATA_LENGTH */ char type[5]; /* four-letter type code */ u8 flags; /* 0x10: func; 0x40: write; 0x80: read */ }; /* Register lookup and registers common to all SMCs */ static struct applesmc_registers { struct mutex mutex; /* register read/write mutex */ unsigned int key_count; /* number of SMC registers */ unsigned int fan_count; /* number of fans */ unsigned int temp_count; /* number of temperature registers */ unsigned int temp_begin; /* temperature lower index bound */ unsigned int temp_end; /* temperature upper index bound */ unsigned int index_count; /* size of temperature index array */ int num_light_sensors; /* number of light sensors */ bool has_accelerometer; /* has motion sensor */ bool has_key_backlight; /* has keyboard backlight */ bool init_complete; /* true when fully initialized */ struct applesmc_entry *cache; /* cached key entries */ const char **index; /* temperature key index */ } smcreg = { .mutex = __MUTEX_INITIALIZER(smcreg.mutex), }; static const int debug; static struct platform_device *pdev; static s16 rest_x; static s16 rest_y; static u8 backlight_state[2]; static struct device *hwmon_dev; static struct input_dev *applesmc_idev; /* * Last index written to key_at_index sysfs file, and value to use for all other * key_at_index_* sysfs files. */ static unsigned int key_at_index; static struct workqueue_struct *applesmc_led_wq; /* * Wait for specific status bits with a mask on the SMC. * Used before all transactions. * This does 10 fast loops of 8us then exponentially backs off for a * minimum total wait of 262ms. Depending on usleep_range this could * run out past 500ms. */ static int wait_status(u8 val, u8 mask) { u8 status; int us; int i; us = APPLESMC_MIN_WAIT; for (i = 0; i < 24 ; i++) { status = inb(APPLESMC_CMD_PORT); if ((status & mask) == val) return 0; usleep_range(us, us * 2); if (i > 9) us <<= 1; } return -EIO; } /* send_byte - Write to SMC data port. Callers must hold applesmc_lock. */ static int send_byte(u8 cmd, u16 port) { int status; status = wait_status(0, SMC_STATUS_IB_CLOSED); if (status) return status; /* * This needs to be a separate read looking for bit 0x04 * after bit 0x02 falls. If consolidated with the wait above * this extra read may not happen if status returns both * simultaneously and this would appear to be required. */ status = wait_status(SMC_STATUS_BUSY, SMC_STATUS_BUSY); if (status) return status; outb(cmd, port); return 0; } /* send_command - Write a command to the SMC. Callers must hold applesmc_lock. 
*/ static int send_command(u8 cmd) { int ret; ret = wait_status(0, SMC_STATUS_IB_CLOSED); if (ret) return ret; outb(cmd, APPLESMC_CMD_PORT); return 0; } /* * Based on logic from the Apple driver. This is issued before any interaction * If busy is stuck high, issue a read command to reset the SMC state machine. * If busy is stuck high after the command then the SMC is jammed. */ static int smc_sane(void) { int ret; ret = wait_status(0, SMC_STATUS_BUSY); if (!ret) return ret; ret = send_command(APPLESMC_READ_CMD); if (ret) return ret; return wait_status(0, SMC_STATUS_BUSY); } static int send_argument(const char *key) { int i; for (i = 0; i < 4; i++) if (send_byte(key[i], APPLESMC_DATA_PORT)) return -EIO; return 0; } static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) { u8 status, data = 0; int i; int ret; ret = smc_sane(); if (ret) return ret; if (send_command(cmd) || send_argument(key)) { pr_warn("%.4s: read arg fail\n", key); return -EIO; } /* This has no effect on newer (2012) SMCs */ if (send_byte(len, APPLESMC_DATA_PORT)) { pr_warn("%.4s: read len fail\n", key); return -EIO; } for (i = 0; i < len; i++) { if (wait_status(SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY, SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY)) { pr_warn("%.4s: read data[%d] fail\n", key, i); return -EIO; } buffer[i] = inb(APPLESMC_DATA_PORT); } /* Read the data port until bit0 is cleared */ for (i = 0; i < 16; i++) { udelay(APPLESMC_MIN_WAIT); status = inb(APPLESMC_CMD_PORT); if (!(status & SMC_STATUS_AWAITING_DATA)) break; data = inb(APPLESMC_DATA_PORT); } if (i) pr_warn("flushed %d bytes, last value is: %d\n", i, data); return wait_status(0, SMC_STATUS_BUSY); } static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len) { int i; int ret; ret = smc_sane(); if (ret) return ret; if (send_command(cmd) || send_argument(key)) { pr_warn("%s: write arg fail\n", key); return -EIO; } if (send_byte(len, APPLESMC_DATA_PORT)) { pr_warn("%.4s: write len fail\n", key); return -EIO; } for (i = 0; i < len; i++) { if (send_byte(buffer[i], APPLESMC_DATA_PORT)) { pr_warn("%s: write data fail\n", key); return -EIO; } } return wait_status(0, SMC_STATUS_BUSY); } static int read_register_count(unsigned int *count) { __be32 be; int ret; ret = read_smc(APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4); if (ret) return ret; *count = be32_to_cpu(be); return 0; } /* * Serialized I/O * * Returns zero on success or a negative error on failure. * All functions below are concurrency safe - callers should NOT hold lock. 
*/ static int applesmc_read_entry(const struct applesmc_entry *entry, u8 *buf, u8 len) { int ret; if (entry->len != len) return -EINVAL; mutex_lock(&smcreg.mutex); ret = read_smc(APPLESMC_READ_CMD, entry->key, buf, len); mutex_unlock(&smcreg.mutex); return ret; } static int applesmc_write_entry(const struct applesmc_entry *entry, const u8 *buf, u8 len) { int ret; if (entry->len != len) return -EINVAL; mutex_lock(&smcreg.mutex); ret = write_smc(APPLESMC_WRITE_CMD, entry->key, buf, len); mutex_unlock(&smcreg.mutex); return ret; } static const struct applesmc_entry *applesmc_get_entry_by_index(int index) { struct applesmc_entry *cache = &smcreg.cache[index]; u8 key[4], info[6]; __be32 be; int ret = 0; if (cache->valid) return cache; mutex_lock(&smcreg.mutex); if (cache->valid) goto out; be = cpu_to_be32(index); ret = read_smc(APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4); if (ret) goto out; ret = read_smc(APPLESMC_GET_KEY_TYPE_CMD, key, info, 6); if (ret) goto out; memcpy(cache->key, key, 4); cache->len = info[0]; memcpy(cache->type, &info[1], 4); cache->flags = info[5]; cache->valid = true; out: mutex_unlock(&smcreg.mutex); if (ret) return ERR_PTR(ret); return cache; } static int applesmc_get_lower_bound(unsigned int *lo, const char *key) { int begin = 0, end = smcreg.key_count; const struct applesmc_entry *entry; while (begin != end) { int middle = begin + (end - begin) / 2; entry = applesmc_get_entry_by_index(middle); if (IS_ERR(entry)) { *lo = 0; return PTR_ERR(entry); } if (strcmp(entry->key, key) < 0) begin = middle + 1; else end = middle; } *lo = begin; return 0; } static int applesmc_get_upper_bound(unsigned int *hi, const char *key) { int begin = 0, end = smcreg.key_count; const struct applesmc_entry *entry; while (begin != end) { int middle = begin + (end - begin) / 2; entry = applesmc_get_entry_by_index(middle); if (IS_ERR(entry)) { *hi = smcreg.key_count; return PTR_ERR(entry); } if (strcmp(key, entry->key) < 0) end = middle; else begin = middle + 1; } *hi = begin; return 0; } static const struct applesmc_entry *applesmc_get_entry_by_key(const char *key) { int begin, end; int ret; ret = applesmc_get_lower_bound(&begin, key); if (ret) return ERR_PTR(ret); ret = applesmc_get_upper_bound(&end, key); if (ret) return ERR_PTR(ret); if (end - begin != 1) return ERR_PTR(-EINVAL); return applesmc_get_entry_by_index(begin); } static int applesmc_read_key(const char *key, u8 *buffer, u8 len) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry)) return PTR_ERR(entry); return applesmc_read_entry(entry, buffer, len); } static int applesmc_write_key(const char *key, const u8 *buffer, u8 len) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry)) return PTR_ERR(entry); return applesmc_write_entry(entry, buffer, len); } static int applesmc_has_key(const char *key, bool *value) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_key(key); if (IS_ERR(entry) && PTR_ERR(entry) != -EINVAL) return PTR_ERR(entry); *value = !IS_ERR(entry); return 0; } /* * applesmc_read_s16 - Read 16-bit signed big endian register */ static int applesmc_read_s16(const char *key, s16 *value) { u8 buffer[2]; int ret; ret = applesmc_read_key(key, buffer, 2); if (ret) return ret; *value = ((s16)buffer[0] << 8) | buffer[1]; return 0; } /* * applesmc_device_init - initialize the accelerometer. Can sleep. 
*/ static void applesmc_device_init(void) { int total; u8 buffer[2]; if (!smcreg.has_accelerometer) return; for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) { if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) && (buffer[0] != 0x00 || buffer[1] != 0x00)) return; buffer[0] = 0xe0; buffer[1] = 0x00; applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2); msleep(INIT_WAIT_MSECS); } pr_warn("failed to init the device\n"); } static int applesmc_init_index(struct applesmc_registers *s) { const struct applesmc_entry *entry; unsigned int i; if (s->index) return 0; s->index = kcalloc(s->temp_count, sizeof(s->index[0]), GFP_KERNEL); if (!s->index) return -ENOMEM; for (i = s->temp_begin; i < s->temp_end; i++) { entry = applesmc_get_entry_by_index(i); if (IS_ERR(entry)) continue; if (strcmp(entry->type, TEMP_SENSOR_TYPE)) continue; s->index[s->index_count++] = entry->key; } return 0; } /* * applesmc_init_smcreg_try - Try to initialize register cache. Idempotent. */ static int applesmc_init_smcreg_try(void) { struct applesmc_registers *s = &smcreg; bool left_light_sensor = false, right_light_sensor = false; unsigned int count; u8 tmp[1]; int ret; if (s->init_complete) return 0; ret = read_register_count(&count); if (ret) return ret; if (s->cache && s->key_count != count) { pr_warn("key count changed from %d to %d\n", s->key_count, count); kfree(s->cache); s->cache = NULL; } s->key_count = count; if (!s->cache) s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); if (!s->cache) return -ENOMEM; ret = applesmc_read_key(FANS_COUNT, tmp, 1); if (ret) return ret; s->fan_count = tmp[0]; if (s->fan_count > 10) s->fan_count = 10; ret = applesmc_get_lower_bound(&s->temp_begin, "T"); if (ret) return ret; ret = applesmc_get_lower_bound(&s->temp_end, "U"); if (ret) return ret; s->temp_count = s->temp_end - s->temp_begin; ret = applesmc_init_index(s); if (ret) return ret; ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor); if (ret) return ret; ret = applesmc_has_key(LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor); if (ret) return ret; ret = applesmc_has_key(MOTION_SENSOR_KEY, &s->has_accelerometer); if (ret) return ret; ret = applesmc_has_key(BACKLIGHT_KEY, &s->has_key_backlight); if (ret) return ret; s->num_light_sensors = left_light_sensor + right_light_sensor; s->init_complete = true; pr_info("key=%d fan=%d temp=%d index=%d acc=%d lux=%d kbd=%d\n", s->key_count, s->fan_count, s->temp_count, s->index_count, s->has_accelerometer, s->num_light_sensors, s->has_key_backlight); return 0; } static void applesmc_destroy_smcreg(void) { kfree(smcreg.index); smcreg.index = NULL; kfree(smcreg.cache); smcreg.cache = NULL; smcreg.init_complete = false; } /* * applesmc_init_smcreg - Initialize register cache. * * Retries until initialization is successful, or the operation times out. 
* */ static int applesmc_init_smcreg(void) { int ms, ret; for (ms = 0; ms < INIT_TIMEOUT_MSECS; ms += INIT_WAIT_MSECS) { ret = applesmc_init_smcreg_try(); if (!ret) { if (ms) pr_info("init_smcreg() took %d ms\n", ms); return 0; } msleep(INIT_WAIT_MSECS); } applesmc_destroy_smcreg(); return ret; } /* Device model stuff */ static int applesmc_probe(struct platform_device *dev) { int ret; ret = applesmc_init_smcreg(); if (ret) return ret; applesmc_device_init(); return 0; } /* Synchronize device with memorized backlight state */ static int applesmc_pm_resume(struct device *dev) { if (smcreg.has_key_backlight) applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); return 0; } /* Reinitialize device on resume from hibernation */ static int applesmc_pm_restore(struct device *dev) { applesmc_device_init(); return applesmc_pm_resume(dev); } static const struct dev_pm_ops applesmc_pm_ops = { .resume = applesmc_pm_resume, .restore = applesmc_pm_restore, }; static struct platform_driver applesmc_driver = { .probe = applesmc_probe, .driver = { .name = "applesmc", .pm = &applesmc_pm_ops, }, }; /* * applesmc_calibrate - Set our "resting" values. Callers must * hold applesmc_lock. */ static void applesmc_calibrate(void) { applesmc_read_s16(MOTION_SENSOR_X_KEY, &rest_x); applesmc_read_s16(MOTION_SENSOR_Y_KEY, &rest_y); rest_x = -rest_x; } static void applesmc_idev_poll(struct input_dev *idev) { s16 x, y; if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x)) return; if (applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y)) return; x = -x; input_report_abs(idev, ABS_X, x - rest_x); input_report_abs(idev, ABS_Y, y - rest_y); input_sync(idev); } /* Sysfs Files */ static ssize_t applesmc_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "applesmc\n"); } static ssize_t applesmc_position_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; s16 x, y, z; ret = applesmc_read_s16(MOTION_SENSOR_X_KEY, &x); if (ret) goto out; ret = applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y); if (ret) goto out; ret = applesmc_read_s16(MOTION_SENSOR_Z_KEY, &z); if (ret) goto out; out: if (ret) return ret; return sysfs_emit(buf, "(%d,%d,%d)\n", x, y, z); } static ssize_t applesmc_light_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; static int data_length; int ret; u8 left = 0, right = 0; u8 buffer[10]; if (!data_length) { entry = applesmc_get_entry_by_key(LIGHT_SENSOR_LEFT_KEY); if (IS_ERR(entry)) return PTR_ERR(entry); if (entry->len > 10) return -ENXIO; data_length = entry->len; pr_info("light sensor data length set to %d\n", data_length); } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); if (ret) goto out; /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); if (ret) goto out; right = buffer[2]; out: if (ret) return ret; return sysfs_emit(sysfsbuf, "(%d,%d)\n", left, right); } /* Displays sensor key as label */ static ssize_t applesmc_show_sensor_label(struct device *dev, struct device_attribute *devattr, char *sysfsbuf) { const char *key = smcreg.index[to_index(devattr)]; return sysfs_emit(sysfsbuf, "%s\n", key); } /* Displays degree Celsius * 1000 */ static ssize_t applesmc_show_temperature(struct device *dev, struct device_attribute *devattr, char *sysfsbuf) { const char *key = smcreg.index[to_index(devattr)]; int 
ret; s16 value; int temp; ret = applesmc_read_s16(key, &value); if (ret) return ret; temp = 250 * (value >> 6); return sysfs_emit(sysfsbuf, "%d\n", temp); } static ssize_t applesmc_show_fan_speed(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; unsigned int speed = 0; char newkey[5]; u8 buffer[2]; scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); if (ret) return ret; speed = ((buffer[0] << 8 | buffer[1]) >> 2); return sysfs_emit(sysfsbuf, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { int ret; unsigned long speed; char newkey[5]; u8 buffer[2]; if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000) return -EINVAL; /* Bigger than a 14-bit value */ scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], to_index(attr)); buffer[0] = (speed >> 6) & 0xff; buffer[1] = (speed << 2) & 0xff; ret = applesmc_write_key(newkey, buffer, 2); if (ret) return ret; else return count; } static ssize_t applesmc_show_fan_manual(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; u16 manual = 0; u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); if (ret) return ret; manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; return sysfs_emit(sysfsbuf, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { int ret; u8 buffer[2]; unsigned long input; u16 val; if (kstrtoul(sysfsbuf, 10, &input) < 0) return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); if (ret) goto out; val = (buffer[0] << 8 | buffer[1]); if (input) val = val | (0x01 << to_index(attr)); else val = val & ~(0x01 << to_index(attr)); buffer[0] = (val >> 8) & 0xFF; buffer[1] = val & 0xFF; ret = applesmc_write_key(FANS_MANUAL, buffer, 2); out: if (ret) return ret; else return count; } static ssize_t applesmc_show_fan_position(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; char newkey[5]; u8 buffer[17]; scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 16); buffer[16] = 0; if (ret) return ret; return sysfs_emit(sysfsbuf, "%s\n", buffer + 4); } static ssize_t applesmc_calibrate_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { return sysfs_emit(sysfsbuf, "(%d,%d)\n", rest_x, rest_y); } static ssize_t applesmc_calibrate_store(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { applesmc_calibrate(); return count; } static void applesmc_backlight_set(struct work_struct *work) { applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); } static DECLARE_WORK(backlight_work, &applesmc_backlight_set); static void applesmc_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { int ret; backlight_state[0] = value; ret = queue_work(applesmc_led_wq, &backlight_work); if (debug && (!ret)) dev_dbg(led_cdev->dev, "work was already on the queue.\n"); } static ssize_t applesmc_key_count_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { int ret; u8 buffer[4]; u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); if (ret) return ret; count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + ((u32)buffer[2]<<8) + buffer[3]; return sysfs_emit(sysfsbuf, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct 
device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; int ret; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); ret = applesmc_read_entry(entry, sysfsbuf, entry->len); if (ret) return ret; return entry->len; } static ssize_t applesmc_key_at_index_data_length_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return sysfs_emit(sysfsbuf, "%d\n", entry->len); } static ssize_t applesmc_key_at_index_type_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return sysfs_emit(sysfsbuf, "%s\n", entry->type); } static ssize_t applesmc_key_at_index_name_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { const struct applesmc_entry *entry; entry = applesmc_get_entry_by_index(key_at_index); if (IS_ERR(entry)) return PTR_ERR(entry); return sysfs_emit(sysfsbuf, "%s\n", entry->key); } static ssize_t applesmc_key_at_index_show(struct device *dev, struct device_attribute *attr, char *sysfsbuf) { return sysfs_emit(sysfsbuf, "%d\n", key_at_index); } static ssize_t applesmc_key_at_index_store(struct device *dev, struct device_attribute *attr, const char *sysfsbuf, size_t count) { unsigned long newkey; if (kstrtoul(sysfsbuf, 10, &newkey) < 0 || newkey >= smcreg.key_count) return -EINVAL; key_at_index = newkey; return count; } static struct led_classdev applesmc_backlight = { .name = "smc::kbd_backlight", .default_trigger = "nand-disk", .brightness_set = applesmc_brightness_set, }; static struct applesmc_node_group info_group[] = { { "name", applesmc_name_show }, { "key_count", applesmc_key_count_show }, { "key_at_index", applesmc_key_at_index_show, applesmc_key_at_index_store }, { "key_at_index_name", applesmc_key_at_index_name_show }, { "key_at_index_type", applesmc_key_at_index_type_show }, { "key_at_index_data_length", applesmc_key_at_index_data_length_show }, { "key_at_index_data", applesmc_key_at_index_read_show }, { } }; static struct applesmc_node_group accelerometer_group[] = { { "position", applesmc_position_show }, { "calibrate", applesmc_calibrate_show, applesmc_calibrate_store }, { } }; static struct applesmc_node_group light_sensor_group[] = { { "light", applesmc_light_show }, { } }; static struct applesmc_node_group fan_group[] = { { "fan%d_label", applesmc_show_fan_position }, { "fan%d_input", applesmc_show_fan_speed, NULL, 0 }, { "fan%d_min", applesmc_show_fan_speed, applesmc_store_fan_speed, 1 }, { "fan%d_max", applesmc_show_fan_speed, NULL, 2 }, { "fan%d_safe", applesmc_show_fan_speed, NULL, 3 }, { "fan%d_output", applesmc_show_fan_speed, applesmc_store_fan_speed, 4 }, { "fan%d_manual", applesmc_show_fan_manual, applesmc_store_fan_manual }, { } }; static struct applesmc_node_group temp_group[] = { { "temp%d_label", applesmc_show_sensor_label }, { "temp%d_input", applesmc_show_temperature }, { } }; /* Module stuff */ /* * applesmc_destroy_nodes - remove files and free associated memory */ static void applesmc_destroy_nodes(struct applesmc_node_group *groups) { struct applesmc_node_group *grp; struct applesmc_dev_attr *node; for (grp = groups; grp->nodes; grp++) { for (node = grp->nodes; node->sda.dev_attr.attr.name; node++) sysfs_remove_file(&pdev->dev.kobj, &node->sda.dev_attr.attr); 
kfree(grp->nodes); grp->nodes = NULL; } } /* * applesmc_create_nodes - create a two-dimensional group of sysfs files */ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) { struct applesmc_node_group *grp; struct applesmc_dev_attr *node; struct attribute *attr; int ret, i; for (grp = groups; grp->format; grp++) { grp->nodes = kcalloc(num + 1, sizeof(*node), GFP_KERNEL); if (!grp->nodes) { ret = -ENOMEM; goto out; } for (i = 0; i < num; i++) { node = &grp->nodes[i]; scnprintf(node->name, sizeof(node->name), grp->format, i + 1); node->sda.index = (grp->option << 16) | (i & 0xffff); node->sda.dev_attr.show = grp->show; node->sda.dev_attr.store = grp->store; attr = &node->sda.dev_attr.attr; sysfs_attr_init(attr); attr->name = node->name; attr->mode = 0444 | (grp->store ? 0200 : 0); ret = sysfs_create_file(&pdev->dev.kobj, attr); if (ret) { attr->name = NULL; goto out; } } } return 0; out: applesmc_destroy_nodes(groups); return ret; } /* Create accelerometer resources */ static int applesmc_create_accelerometer(void) { int ret; if (!smcreg.has_accelerometer) return 0; ret = applesmc_create_nodes(accelerometer_group, 1); if (ret) goto out; applesmc_idev = input_allocate_device(); if (!applesmc_idev) { ret = -ENOMEM; goto out_sysfs; } /* initial calibrate for the input device */ applesmc_calibrate(); /* initialize the input device */ applesmc_idev->name = "applesmc"; applesmc_idev->id.bustype = BUS_HOST; applesmc_idev->dev.parent = &pdev->dev; input_set_abs_params(applesmc_idev, ABS_X, -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); input_set_abs_params(applesmc_idev, ABS_Y, -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); ret = input_setup_polling(applesmc_idev, applesmc_idev_poll); if (ret) goto out_idev; input_set_poll_interval(applesmc_idev, APPLESMC_POLL_INTERVAL); ret = input_register_device(applesmc_idev); if (ret) goto out_idev; return 0; out_idev: input_free_device(applesmc_idev); out_sysfs: applesmc_destroy_nodes(accelerometer_group); out: pr_warn("driver init failed (ret=%d)!\n", ret); return ret; } /* Release all resources used by the accelerometer */ static void applesmc_release_accelerometer(void) { if (!smcreg.has_accelerometer) return; input_unregister_device(applesmc_idev); applesmc_destroy_nodes(accelerometer_group); } static int applesmc_create_light_sensor(void) { if (!smcreg.num_light_sensors) return 0; return applesmc_create_nodes(light_sensor_group, 1); } static void applesmc_release_light_sensor(void) { if (!smcreg.num_light_sensors) return; applesmc_destroy_nodes(light_sensor_group); } static int applesmc_create_key_backlight(void) { if (!smcreg.has_key_backlight) return 0; applesmc_led_wq = create_singlethread_workqueue("applesmc-led"); if (!applesmc_led_wq) return -ENOMEM; return led_classdev_register(&pdev->dev, &applesmc_backlight); } static void applesmc_release_key_backlight(void) { if (!smcreg.has_key_backlight) return; led_classdev_unregister(&applesmc_backlight); destroy_workqueue(applesmc_led_wq); } static int applesmc_dmi_match(const struct dmi_system_id *id) { return 1; } /* * Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". * So we need to put "Apple MacBook Pro" before "Apple MacBook". 
*/ static const struct dmi_system_id applesmc_whitelist[] __initconst = { { applesmc_dmi_match, "Apple MacBook Air", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, }, { applesmc_dmi_match, "Apple MacBook Pro", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro") }, }, { applesmc_dmi_match, "Apple MacBook", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, }, { applesmc_dmi_match, "Apple Macmini", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini") }, }, { applesmc_dmi_match, "Apple MacPro", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, }, { applesmc_dmi_match, "Apple iMac", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "iMac") }, }, { applesmc_dmi_match, "Apple Xserve", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Xserve") }, }, { .ident = NULL } }; static int __init applesmc_init(void) { int ret; if (!dmi_check_system(applesmc_whitelist)) { pr_warn("supported laptop not found!\n"); ret = -ENODEV; goto out; } if (!request_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS, "applesmc")) { ret = -ENXIO; goto out; } ret = platform_driver_register(&applesmc_driver); if (ret) goto out_region; pdev = platform_device_register_simple("applesmc", APPLESMC_DATA_PORT, NULL, 0); if (IS_ERR(pdev)) { ret = PTR_ERR(pdev); goto out_driver; } /* create register cache */ ret = applesmc_init_smcreg(); if (ret) goto out_device; ret = applesmc_create_nodes(info_group, 1); if (ret) goto out_smcreg; ret = applesmc_create_nodes(fan_group, smcreg.fan_count); if (ret) goto out_info; ret = applesmc_create_nodes(temp_group, smcreg.index_count); if (ret) goto out_fans; ret = applesmc_create_accelerometer(); if (ret) goto out_temperature; ret = applesmc_create_light_sensor(); if (ret) goto out_accelerometer; ret = applesmc_create_key_backlight(); if (ret) goto out_light_sysfs; hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(hwmon_dev)) { ret = PTR_ERR(hwmon_dev); goto out_light_ledclass; } return 0; out_light_ledclass: applesmc_release_key_backlight(); out_light_sysfs: applesmc_release_light_sensor(); out_accelerometer: applesmc_release_accelerometer(); out_temperature: applesmc_destroy_nodes(temp_group); out_fans: applesmc_destroy_nodes(fan_group); out_info: applesmc_destroy_nodes(info_group); out_smcreg: applesmc_destroy_smcreg(); out_device: platform_device_unregister(pdev); out_driver: platform_driver_unregister(&applesmc_driver); out_region: release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); out: pr_warn("driver init failed (ret=%d)!\n", ret); return ret; } static void __exit applesmc_exit(void) { hwmon_device_unregister(hwmon_dev); applesmc_release_key_backlight(); applesmc_release_light_sensor(); applesmc_release_accelerometer(); applesmc_destroy_nodes(temp_group); applesmc_destroy_nodes(fan_group); applesmc_destroy_nodes(info_group); applesmc_destroy_smcreg(); platform_device_unregister(pdev); platform_driver_unregister(&applesmc_driver); release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); } module_init(applesmc_init); module_exit(applesmc_exit); MODULE_AUTHOR("Nicolas Boichat"); MODULE_DESCRIPTION("Apple SMC"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(dmi, applesmc_whitelist);
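/*
 * Standalone illustration (not part of the driver above): the fan-speed
 * sysfs handlers applesmc_show_fan_speed() and applesmc_store_fan_speed()
 * treat the two-byte SMC value as a 14-bit RPM count with two fractional
 * bits -- decode is (b0 << 8 | b1) >> 2, encode is b0 = (rpm >> 6) & 0xff,
 * b1 = (rpm << 2) & 0xff, with rpm < 0x4000.  The sketch below round-trips
 * that encoding in plain userspace C; the helper names and the test value
 * are illustrative only.
 */
#include <stdio.h>

static void fan_speed_encode(unsigned int rpm, unsigned char buf[2])
{
	buf[0] = (rpm >> 6) & 0xff;	/* upper 8 bits of the 14-bit count */
	buf[1] = (rpm << 2) & 0xff;	/* lower 6 bits, above the 2 fraction bits */
}

static unsigned int fan_speed_decode(const unsigned char buf[2])
{
	return ((buf[0] << 8) | buf[1]) >> 2;
}

int main(void)
{
	unsigned char buf[2];
	unsigned int rpm = 1800;	/* arbitrary value below 0x4000 */

	fan_speed_encode(rpm, buf);
	printf("rpm=%u -> bytes %02x %02x -> decoded %u\n",
	       rpm, buf[0], buf[1], fan_speed_decode(buf));
	return 0;
}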
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ /* **************************************************************************** * * "DHRYSTONE" Benchmark Program * ----------------------------- * * Version: C, Version 2.1 * * File: dhry.h (part 1 of 3) * * Date: May 25, 1988 * * Author: Reinhold P. Weicker * Siemens AG, AUT E 51 * Postfach 3220 * 8520 Erlangen * Germany (West) * Phone: [+49]-9131-7-20330 * (8-17 Central European Time) * Usenet: ..!mcsun!unido!estevax!weicker * * Original Version (in Ada) published in * "Communications of the ACM" vol. 27., no. 10 (Oct. 1984), * pp. 1013 - 1030, together with the statistics * on which the distribution of statements etc. is based. * * In this C version, the following C library functions are used: * - strcpy, strcmp (inside the measurement loop) * - printf, scanf (outside the measurement loop) * In addition, Berkeley UNIX system calls "times ()" or "time ()" * are used for execution time measurement. For measurements * on other systems, these calls have to be changed. * * Collection of Results: * Reinhold Weicker (address see above) and * * Rick Richardson * PC Research. Inc. * 94 Apple Orchard Drive * Tinton Falls, NJ 07724 * Phone: (201) 389-8963 (9-17 EST) * Usenet: ...!uunet!pcrat!rick * * Please send results to Rick Richardson and/or Reinhold Weicker. * Complete information should be given on hardware and software used. * Hardware information includes: Machine type, CPU, type and size * of caches; for microprocessors: clock frequency, memory speed * (number of wait states). * Software information includes: Compiler (and runtime library) * manufacturer and version, compilation switches, OS version. * The Operating System version may give an indication about the * compiler; Dhrystone itself performs no OS calls in the measurement loop. * * The complete output generated by the program should be mailed * such that at least some checks for correctness can be made. * *************************************************************************** * * History: This version C/2.1 has been made for two reasons: * * 1) There is an obvious need for a common C version of * Dhrystone, since C is at present the most popular system * programming language for the class of processors * (microcomputers, minicomputers) where Dhrystone is used most. * There should be, as far as possible, only one C version of * Dhrystone such that results can be compared without * restrictions. In the past, the C versions distributed * by Rick Richardson (Version 1.1) and by Reinhold Weicker * had small (though not significant) differences. * * 2) As far as it is possible without changes to the Dhrystone * statistics, optimizing compilers should be prevented from * removing significant statements. * * This C version has been developed in cooperation with * Rick Richardson (Tinton Falls, NJ), it incorporates many * ideas from the "Version 1.1" distributed previously by * him over the UNIX network Usenet. * I also thank Chaim Benedelac (National Semiconductor), * David Ditzel (SUN), Earl Killian and John Mashey (MIPS), * Alan Smith and Rafael Saavedra-Barrera (UC at Berkeley) * for their help with comments on earlier versions of the * benchmark. * * Changes: In the initialization part, this version follows mostly * Rick Richardson's version distributed via Usenet, not the * version distributed earlier via floppy disk by Reinhold Weicker. * As a concession to older compilers, names have been made * unique within the first 8 characters. 
* Inside the measurement loop, this version follows the * version previously distributed by Reinhold Weicker. * * At several places in the benchmark, code has been added, * but within the measurement loop only in branches that * are not executed. The intention is that optimizing compilers * should be prevented from moving code out of the measurement * loop, or from removing code altogether. Since the statements * that are executed within the measurement loop have NOT been * changed, the numbers defining the "Dhrystone distribution" * (distribution of statements, operand types and locality) * still hold. Except for sophisticated optimizing compilers, * execution times for this version should be the same as * for previous versions. * * Since it has proven difficult to subtract the time for the * measurement loop overhead in a correct way, the loop check * has been made a part of the benchmark. This does have * an impact - though a very minor one - on the distribution * statistics which have been updated for this version. * * All changes within the measurement loop are described * and discussed in the companion paper "Rationale for * Dhrystone version 2". * * Because of the self-imposed limitation that the order and * distribution of the executed statements should not be * changed, there are still cases where optimizing compilers * may not generate code for some statements. To a certain * degree, this is unavoidable for small synthetic benchmarks. * Users of the benchmark are advised to check code listings * whether code is generated for all statements of Dhrystone. * * Version 2.1 is identical to version 2.0 distributed via * the UNIX network Usenet in March 1988 except that it corrects * some minor deficiencies that were found by users of version 2.0. * The only change within the measurement loop is that a * non-executed "else" part was added to the "if" statement in * Func_3, and a non-executed "else" part removed from Proc_3. * *************************************************************************** * * Compilation model and measurement (IMPORTANT): * * This C version of Dhrystone consists of three files: * - dhry.h (this file, containing global definitions and comments) * - dhry_1.c (containing the code corresponding to Ada package Pack_1) * - dhry_2.c (containing the code corresponding to Ada package Pack_2) * * The following "ground rules" apply for measurements: * - Separate compilation * - No procedure merging * - Otherwise, compiler optimizations are allowed but should be indicated * - Default results are those without register declarations * See the companion paper "Rationale for Dhrystone Version 2" for a more * detailed discussion of these ground rules. * * For 16-Bit processors (e.g. 80186, 80286), times for all compilation * models ("small", "medium", "large" etc.) should be given if possible, * together with a definition of these models for the compiler system used. * ************************************************************************** * * Dhrystone (C version) statistics: * * [Comment from the first distribution, updated for version 2. * Note that because of language differences, the numbers are slightly * different from the Ada version.] * * The following program contains statements of a high level programming * language (here: C) in a distribution considered representative: * * assignments 52 (51.0 %) * control statements 33 (32.4 %) * procedure, function calls 17 (16.7 %) * * 103 statements are dynamically executed. 
The program is balanced with * respect to the three aspects: * * - statement type * - operand type * - operand locality * operand global, local, parameter, or constant. * * The combination of these three aspects is balanced only approximately. * * 1. Statement Type: * ----------------- number * * V1 = V2 9 * (incl. V1 = F(..) * V = Constant 12 * Assignment, 7 * with array element * Assignment, 6 * with record component * -- * 34 34 * * X = Y +|-|"&&"|"|" Z 5 * X = Y +|-|"==" Constant 6 * X = X +|- 1 3 * X = Y *|/ Z 2 * X = Expression, 1 * two operators * X = Expression, 1 * three operators * -- * 18 18 * * if .... 14 * with "else" 7 * without "else" 7 * executed 3 * not executed 4 * for ... 7 | counted every time * while ... 4 | the loop condition * do ... while 1 | is evaluated * switch ... 1 * break 1 * declaration with 1 * initialization * -- * 34 34 * * P (...) procedure call 11 * user procedure 10 * library procedure 1 * X = F (...) * function call 6 * user function 5 * library function 1 * -- * 17 17 * --- * 103 * * The average number of parameters in procedure or function calls * is 1.82 (not counting the function values as implicit parameters). * * * 2. Operators * ------------ * number approximate * percentage * * Arithmetic 32 50.8 * * + 21 33.3 * - 7 11.1 * * 3 4.8 * / (int div) 1 1.6 * * Comparison 27 42.8 * * == 9 14.3 * /= 4 6.3 * > 1 1.6 * < 3 4.8 * >= 1 1.6 * <= 9 14.3 * * Logic 4 6.3 * * && (AND-THEN) 1 1.6 * | (OR) 1 1.6 * ! (NOT) 2 3.2 * * -- ----- * 63 100.1 * * * 3. Operand Type (counted once per operand reference): * --------------- * number approximate * percentage * * Integer 175 72.3 % * Character 45 18.6 % * Pointer 12 5.0 % * String30 6 2.5 % * Array 2 0.8 % * Record 2 0.8 % * --- ------- * 242 100.0 % * * When there is an access path leading to the final operand (e.g. a record * component), only the final data type on the access path is counted. * * * 4. Operand Locality: * ------------------- * number approximate * percentage * * local variable 114 47.1 % * global variable 22 9.1 % * parameter 45 18.6 % * value 23 9.5 % * reference 22 9.1 % * function result 6 2.5 % * constant 55 22.7 % * --- ------- * 242 100.0 % * * * The program does not compute anything meaningful, but it is syntactically * and semantically correct. All variables have a value assigned to them * before they are used as a source operand. * * There has been no explicit effort to account for the effects of a * cache, or to balance the use of long or short displacements for code or * data. 
* *************************************************************************** */ typedef enum { Ident_1, Ident_2, Ident_3, Ident_4, Ident_5 } Enumeration; /* for boolean and enumeration types in Ada, Pascal */ /* General definitions: */ typedef int One_Thirty; typedef int One_Fifty; typedef char Capital_Letter; typedef int Boolean; typedef char Str_30[31]; typedef int Arr_1_Dim[50]; typedef int Arr_2_Dim[50][50]; typedef struct record { struct record *Ptr_Comp; Enumeration Discr; union { struct { Enumeration Enum_Comp; int Int_Comp; char Str_Comp[31]; } var_1; struct { Enumeration E_Comp_2; char Str_2_Comp[31]; } var_2; struct { char Ch_1_Comp; char Ch_2_Comp; } var_3; } variant; } Rec_Type, *Rec_Pointer; extern int Int_Glob; extern char Ch_1_Glob; void Proc_6(Enumeration Enum_Val_Par, Enumeration *Enum_Ref_Par); void Proc_7(One_Fifty Int_1_Par_Val, One_Fifty Int_2_Par_Val, One_Fifty *Int_Par_Ref); void Proc_8(Arr_1_Dim Arr_1_Par_Ref, Arr_2_Dim Arr_2_Par_Ref, int Int_1_Par_Val, int Int_2_Par_Val); Enumeration Func_1(Capital_Letter Ch_1_Par_Val, Capital_Letter Ch_2_Par_Val); Boolean Func_2(Str_30 Str_1_Par_Ref, Str_30 Str_2_Par_Ref); int dhry(int n);
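/*
 * Minimal illustration (not part of the benchmark): dhry.h above declares
 * the discriminated record Rec_Type, whose Discr field selects one of the
 * variant.var_1/var_2/var_3 arms, plus the entry point dhry(int n).  The
 * sketch below only exercises the type declarations; it assumes the header
 * is saved as "dhry.h" next to this file and performs no timing at all.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "dhry.h"

int main(void)
{
	Rec_Pointer rec = malloc(sizeof(Rec_Type));

	if (!rec)
		return 1;

	rec->Ptr_Comp = NULL;
	rec->Discr = Ident_1;			/* select the var_1 arm */
	rec->variant.var_1.Enum_Comp = Ident_3;
	rec->variant.var_1.Int_Comp = 17;
	strcpy(rec->variant.var_1.Str_Comp, "DHRYSTONE PROGRAM, SOME STRING");

	printf("Discr=%d Enum_Comp=%d Int_Comp=%d Str_Comp=\"%s\"\n",
	       (int)rec->Discr, (int)rec->variant.var_1.Enum_Comp,
	       rec->variant.var_1.Int_Comp, rec->variant.var_1.Str_Comp);

	free(rec);
	return 0;
}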
// SPDX-License-Identifier: GPL-2.0-only /* * Kernel Debug Core * * Maintainer: Jason Wessel <[email protected]> * * Copyright (C) 2000-2001 VERITAS Software Corporation. * Copyright (C) 2002-2004 Timesys Corporation * Copyright (C) 2003-2004 Amit S. Kale <[email protected]> * Copyright (C) 2004 Pavel Machek <[email protected]> * Copyright (C) 2004-2006 Tom Rini <[email protected]> * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd. * Copyright (C) 2005-2009 Wind River Systems, Inc. * Copyright (C) 2007 MontaVista Software, Inc. * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <[email protected]> * * Contributors at various stages not listed above: * Jason Wessel ( [email protected] ) * George Anzinger <[email protected]> * Anurekh Saxena ([email protected]) * Lake Stevens Instrument Division (Glenn Engel) * Jim Kingdon, Cygnus Support. * * Original KGDB stub: David Grothe <[email protected]>, * Tigran Aivazian <[email protected]> */ #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/kgdb.h> #include <linux/kdb.h> #include <linux/serial_core.h> #include <linux/reboot.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <linux/unaligned.h> #include "debug_core.h" #define KGDB_MAX_THREAD_QUERY 17 /* Our I/O buffers. */ static char remcom_in_buffer[BUFMAX]; static char remcom_out_buffer[BUFMAX]; static int gdbstub_use_prev_in_buf; static int gdbstub_prev_in_buf_pos; /* Storage for the registers, in GDB format. */ static unsigned long gdb_regs[(NUMREGBYTES + sizeof(unsigned long) - 1) / sizeof(unsigned long)]; /* * GDB remote protocol parser: */ #ifdef CONFIG_KGDB_KDB static int gdbstub_read_wait(void) { int ret = -1; int i; if (unlikely(gdbstub_use_prev_in_buf)) { if (gdbstub_prev_in_buf_pos < gdbstub_use_prev_in_buf) return remcom_in_buffer[gdbstub_prev_in_buf_pos++]; else gdbstub_use_prev_in_buf = 0; } /* poll any additional I/O interfaces that are defined */ while (ret < 0) for (i = 0; kdb_poll_funcs[i] != NULL; i++) { ret = kdb_poll_funcs[i](); if (ret > 0) break; } return ret; } #else static int gdbstub_read_wait(void) { int ret = dbg_io_ops->read_char(); while (ret == NO_POLL_CHAR) ret = dbg_io_ops->read_char(); return ret; } #endif /* scan for the sequence $<data>#<checksum> */ static void get_packet(char *buffer) { unsigned char checksum; unsigned char xmitcsum; int count; char ch; do { /* * Spin and wait around for the start character, ignore all * other characters: */ while ((ch = (gdbstub_read_wait())) != '$') /* nothing */; kgdb_connected = 1; checksum = 0; xmitcsum = -1; count = 0; /* * now, read until a # or end of buffer is found: */ while (count < (BUFMAX - 1)) { ch = gdbstub_read_wait(); if (ch == '#') break; checksum = checksum + ch; buffer[count] = ch; count = count + 1; } if (ch == '#') { xmitcsum = hex_to_bin(gdbstub_read_wait()) << 4; xmitcsum += hex_to_bin(gdbstub_read_wait()); if (checksum != xmitcsum) /* failed checksum */ dbg_io_ops->write_char('-'); else /* successful transfer */ dbg_io_ops->write_char('+'); if (dbg_io_ops->flush) dbg_io_ops->flush(); } buffer[count] = 0; } while (checksum != xmitcsum); } /* * Send the packet in buffer. * Check for gdb connection if asked for. */ static void put_packet(char *buffer) { unsigned char checksum; int count; char ch; /* * $<packet info>#<checksum>. 
*/ while (1) { dbg_io_ops->write_char('$'); checksum = 0; count = 0; while ((ch = buffer[count])) { dbg_io_ops->write_char(ch); checksum += ch; count++; } dbg_io_ops->write_char('#'); dbg_io_ops->write_char(hex_asc_hi(checksum)); dbg_io_ops->write_char(hex_asc_lo(checksum)); if (dbg_io_ops->flush) dbg_io_ops->flush(); /* Now see what we get in reply. */ ch = gdbstub_read_wait(); if (ch == 3) ch = gdbstub_read_wait(); /* If we get an ACK, we are done. */ if (ch == '+') return; /* * If we get the start of another packet, this means * that GDB is attempting to reconnect. We will NAK * the packet being sent, and stop trying to send this * packet. */ if (ch == '$') { dbg_io_ops->write_char('-'); if (dbg_io_ops->flush) dbg_io_ops->flush(); return; } } } static char gdbmsgbuf[BUFMAX + 1]; void gdbstub_msg_write(const char *s, int len) { char *bufptr; int wcount; int i; if (len == 0) len = strlen(s); /* 'O'utput */ gdbmsgbuf[0] = 'O'; /* Fill and send buffers... */ while (len > 0) { bufptr = gdbmsgbuf + 1; /* Calculate how many this time */ if ((len << 1) > (BUFMAX - 2)) wcount = (BUFMAX - 2) >> 1; else wcount = len; /* Pack in hex chars */ for (i = 0; i < wcount; i++) bufptr = hex_byte_pack(bufptr, s[i]); *bufptr = '\0'; /* Move up */ s += wcount; len -= wcount; /* Write packet */ put_packet(gdbmsgbuf); } } /* * Convert the memory pointed to by mem into hex, placing result in * buf. Return a pointer to the last char put in buf (null). May * return an error. */ char *kgdb_mem2hex(char *mem, char *buf, int count) { char *tmp; int err; /* * We use the upper half of buf as an intermediate buffer for the * raw memory copy. Hex conversion will work against this one. */ tmp = buf + count; err = copy_from_kernel_nofault(tmp, mem, count); if (err) return NULL; while (count > 0) { buf = hex_byte_pack(buf, *tmp); tmp++; count--; } *buf = 0; return buf; } /* * Convert the hex array pointed to by buf into binary to be placed in * mem. Return a pointer to the character AFTER the last byte * written. May return an error. */ int kgdb_hex2mem(char *buf, char *mem, int count) { char *tmp_raw; char *tmp_hex; /* * We use the upper half of buf as an intermediate buffer for the * raw memory that is converted from hex. */ tmp_raw = buf + count * 2; tmp_hex = tmp_raw - 1; while (tmp_hex >= buf) { tmp_raw--; *tmp_raw = hex_to_bin(*tmp_hex--); *tmp_raw |= hex_to_bin(*tmp_hex--) << 4; } return copy_to_kernel_nofault(mem, tmp_raw, count); } /* * While we find nice hex chars, build a long_val. * Return number of chars processed. */ int kgdb_hex2long(char **ptr, unsigned long *long_val) { int hex_val; int num = 0; int negate = 0; *long_val = 0; if (**ptr == '-') { negate = 1; (*ptr)++; } while (**ptr) { hex_val = hex_to_bin(**ptr); if (hex_val < 0) break; *long_val = (*long_val << 4) | hex_val; num++; (*ptr)++; } if (negate) *long_val = -*long_val; return num; } /* * Copy the binary array pointed to by buf into mem. Fix $, #, and * 0x7d escaped with 0x7d. Return -EFAULT on failure or 0 on success. * The input buf is overwritten with the result to write to mem. 
*/ static int kgdb_ebin2mem(char *buf, char *mem, int count) { int size = 0; char *c = buf; while (count-- > 0) { c[size] = *buf++; if (c[size] == 0x7d) c[size] = *buf++ ^ 0x20; size++; } return copy_to_kernel_nofault(mem, c, size); } #if DBG_MAX_REG_NUM > 0 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) { int i; int idx = 0; char *ptr = (char *)gdb_regs; for (i = 0; i < DBG_MAX_REG_NUM; i++) { dbg_get_reg(i, ptr + idx, regs); idx += dbg_reg_def[i].size; } } void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) { int i; int idx = 0; char *ptr = (char *)gdb_regs; for (i = 0; i < DBG_MAX_REG_NUM; i++) { dbg_set_reg(i, ptr + idx, regs); idx += dbg_reg_def[i].size; } } #endif /* DBG_MAX_REG_NUM > 0 */ /* Write memory due to an 'M' or 'X' packet. */ static int write_mem_msg(int binary) { char *ptr = &remcom_in_buffer[1]; unsigned long addr; unsigned long length; int err; if (kgdb_hex2long(&ptr, &addr) > 0 && *(ptr++) == ',' && kgdb_hex2long(&ptr, &length) > 0 && *(ptr++) == ':') { if (binary) err = kgdb_ebin2mem(ptr, (char *)addr, length); else err = kgdb_hex2mem(ptr, (char *)addr, length); if (err) return err; if (CACHE_FLUSH_IS_SAFE) flush_icache_range(addr, addr + length); return 0; } return -EINVAL; } static void error_packet(char *pkt, int error) { error = -error; pkt[0] = 'E'; pkt[1] = hex_asc[(error / 10)]; pkt[2] = hex_asc[(error % 10)]; pkt[3] = '\0'; } /* * Thread ID accessors. We represent a flat TID space to GDB, where * the per CPU idle threads (which under Linux all have PID 0) are * remapped to negative TIDs. */ #define BUF_THREAD_ID_SIZE 8 static char *pack_threadid(char *pkt, unsigned char *id) { unsigned char *limit; int lzero = 1; limit = id + (BUF_THREAD_ID_SIZE / 2); while (id < limit) { if (!lzero || *id != 0) { pkt = hex_byte_pack(pkt, *id); lzero = 0; } id++; } if (lzero) pkt = hex_byte_pack(pkt, 0); return pkt; } static void int_to_threadref(unsigned char *id, int value) { put_unaligned_be32(value, id); } static struct task_struct *getthread(struct pt_regs *regs, int tid) { /* * Non-positive TIDs are remapped to the cpu shadow information */ if (tid == 0 || tid == -1) tid = -atomic_read(&kgdb_active) - 2; if (tid < -1 && tid > -NR_CPUS - 2) { if (kgdb_info[-tid - 2].task) return kgdb_info[-tid - 2].task; else return idle_task(-tid - 2); } if (tid <= 0) { printk(KERN_ERR "KGDB: Internal thread select error\n"); dump_stack(); return NULL; } /* * find_task_by_pid_ns() does not take the tasklist lock anymore * but is nicely RCU locked - hence is a pretty resilient * thing to use: */ return find_task_by_pid_ns(tid, &init_pid_ns); } /* * Remap normal tasks to their real PID, * CPU shadow threads are mapped to -CPU - 2 */ static inline int shadow_pid(int realpid) { if (realpid) return realpid; return -raw_smp_processor_id() - 2; } /* * All the functions that start with gdb_cmd are the various * operations to implement the handlers for the gdbserial protocol * where KGDB is communicating with an external debugger */ /* Handle the '?' status packets */ static void gdb_cmd_status(struct kgdb_state *ks) { /* * We know that this packet is only sent * during initial connect. So to be safe, * we clear out our breakpoints now in case * GDB is reconnecting. 
*/ dbg_remove_all_break(); remcom_out_buffer[0] = 'S'; hex_byte_pack(&remcom_out_buffer[1], ks->signo); } static void gdb_get_regs_helper(struct kgdb_state *ks) { struct task_struct *thread; void *local_debuggerinfo; int i; thread = kgdb_usethread; if (!thread) { thread = kgdb_info[ks->cpu].task; local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo; } else { local_debuggerinfo = NULL; for_each_online_cpu(i) { /* * Try to find the task on some other * or possibly this node if we do not * find the matching task then we try * to approximate the results. */ if (thread == kgdb_info[i].task) local_debuggerinfo = kgdb_info[i].debuggerinfo; } } /* * All threads that don't have debuggerinfo should be * in schedule() sleeping, since all other CPUs * are in kgdb_wait, and thus have debuggerinfo. */ if (local_debuggerinfo) { pt_regs_to_gdb_regs(gdb_regs, local_debuggerinfo); } else { /* * Pull stuff saved during switch_to; nothing * else is accessible (or even particularly * relevant). * * This should be enough for a stack trace. */ sleeping_thread_to_gdb_regs(gdb_regs, thread); } } /* Handle the 'g' get registers request */ static void gdb_cmd_getregs(struct kgdb_state *ks) { gdb_get_regs_helper(ks); kgdb_mem2hex((char *)gdb_regs, remcom_out_buffer, NUMREGBYTES); } /* Handle the 'G' set registers request */ static void gdb_cmd_setregs(struct kgdb_state *ks) { kgdb_hex2mem(&remcom_in_buffer[1], (char *)gdb_regs, NUMREGBYTES); if (kgdb_usethread && kgdb_usethread != current) { error_packet(remcom_out_buffer, -EINVAL); } else { gdb_regs_to_pt_regs(gdb_regs, ks->linux_regs); strcpy(remcom_out_buffer, "OK"); } } /* Handle the 'm' memory read bytes */ static void gdb_cmd_memread(struct kgdb_state *ks) { char *ptr = &remcom_in_buffer[1]; unsigned long length; unsigned long addr; char *err; if (kgdb_hex2long(&ptr, &addr) > 0 && *ptr++ == ',' && kgdb_hex2long(&ptr, &length) > 0) { err = kgdb_mem2hex((char *)addr, remcom_out_buffer, length); if (!err) error_packet(remcom_out_buffer, -EINVAL); } else { error_packet(remcom_out_buffer, -EINVAL); } } /* Handle the 'M' memory write bytes */ static void gdb_cmd_memwrite(struct kgdb_state *ks) { int err = write_mem_msg(0); if (err) error_packet(remcom_out_buffer, err); else strcpy(remcom_out_buffer, "OK"); } #if DBG_MAX_REG_NUM > 0 static char *gdb_hex_reg_helper(int regnum, char *out) { int i; int offset = 0; for (i = 0; i < regnum; i++) offset += dbg_reg_def[i].size; return kgdb_mem2hex((char *)gdb_regs + offset, out, dbg_reg_def[i].size); } /* Handle the 'p' individual register get */ static void gdb_cmd_reg_get(struct kgdb_state *ks) { unsigned long regnum; char *ptr = &remcom_in_buffer[1]; kgdb_hex2long(&ptr, &regnum); if (regnum >= DBG_MAX_REG_NUM) { error_packet(remcom_out_buffer, -EINVAL); return; } gdb_get_regs_helper(ks); gdb_hex_reg_helper(regnum, remcom_out_buffer); } /* Handle the 'P' individual register set */ static void gdb_cmd_reg_set(struct kgdb_state *ks) { unsigned long regnum; char *ptr = &remcom_in_buffer[1]; int i = 0; kgdb_hex2long(&ptr, &regnum); if (*ptr++ != '=' || !(!kgdb_usethread || kgdb_usethread == current) || !dbg_get_reg(regnum, gdb_regs, ks->linux_regs)) { error_packet(remcom_out_buffer, -EINVAL); return; } memset(gdb_regs, 0, sizeof(gdb_regs)); while (i < sizeof(gdb_regs) * 2) if (hex_to_bin(ptr[i]) >= 0) i++; else break; i = i / 2; kgdb_hex2mem(ptr, (char *)gdb_regs, i); dbg_set_reg(regnum, gdb_regs, ks->linux_regs); strcpy(remcom_out_buffer, "OK"); } #endif /* DBG_MAX_REG_NUM > 0 */ /* Handle the 'X' memory binary write bytes */ 
static void gdb_cmd_binwrite(struct kgdb_state *ks) { int err = write_mem_msg(1); if (err) error_packet(remcom_out_buffer, err); else strcpy(remcom_out_buffer, "OK"); } /* Handle the 'D' or 'k', detach or kill packets */ static void gdb_cmd_detachkill(struct kgdb_state *ks) { int error; /* The detach case */ if (remcom_in_buffer[0] == 'D') { error = dbg_remove_all_break(); if (error < 0) { error_packet(remcom_out_buffer, error); } else { strcpy(remcom_out_buffer, "OK"); kgdb_connected = 0; } put_packet(remcom_out_buffer); } else { /* * Assume the kill case, with no exit code checking, * trying to force detach the debugger: */ dbg_remove_all_break(); kgdb_connected = 0; } } /* Handle the 'R' reboot packets */ static int gdb_cmd_reboot(struct kgdb_state *ks) { /* For now, only honor R0 */ if (strcmp(remcom_in_buffer, "R0") == 0) { printk(KERN_CRIT "Executing emergency reboot\n"); strcpy(remcom_out_buffer, "OK"); put_packet(remcom_out_buffer); /* * Execution should not return from * machine_emergency_restart() */ machine_emergency_restart(); kgdb_connected = 0; return 1; } return 0; } /* Handle the 'q' query packets */ static void gdb_cmd_query(struct kgdb_state *ks) { struct task_struct *g; struct task_struct *p; unsigned char thref[BUF_THREAD_ID_SIZE]; char *ptr; int i; int cpu; int finished = 0; switch (remcom_in_buffer[1]) { case 's': case 'f': if (memcmp(remcom_in_buffer + 2, "ThreadInfo", 10)) break; i = 0; remcom_out_buffer[0] = 'm'; ptr = remcom_out_buffer + 1; if (remcom_in_buffer[1] == 'f') { /* Each cpu is a shadow thread */ for_each_online_cpu(cpu) { ks->thr_query = 0; int_to_threadref(thref, -cpu - 2); ptr = pack_threadid(ptr, thref); *(ptr++) = ','; i++; } } for_each_process_thread(g, p) { if (i >= ks->thr_query && !finished) { int_to_threadref(thref, p->pid); ptr = pack_threadid(ptr, thref); *(ptr++) = ','; ks->thr_query++; if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0) finished = 1; } i++; } *(--ptr) = '\0'; break; case 'C': /* Current thread id */ strcpy(remcom_out_buffer, "QC"); ks->threadid = shadow_pid(current->pid); int_to_threadref(thref, ks->threadid); pack_threadid(remcom_out_buffer + 2, thref); break; case 'T': if (memcmp(remcom_in_buffer + 1, "ThreadExtraInfo,", 16)) break; ks->threadid = 0; ptr = remcom_in_buffer + 17; kgdb_hex2long(&ptr, &ks->threadid); if (!getthread(ks->linux_regs, ks->threadid)) { error_packet(remcom_out_buffer, -EINVAL); break; } if ((int)ks->threadid > 0) { kgdb_mem2hex(getthread(ks->linux_regs, ks->threadid)->comm, remcom_out_buffer, 16); } else { static char tmpstr[23 + BUF_THREAD_ID_SIZE]; sprintf(tmpstr, "shadowCPU%d", (int)(-ks->threadid - 2)); kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr)); } break; #ifdef CONFIG_KGDB_KDB case 'R': if (strncmp(remcom_in_buffer, "qRcmd,", 6) == 0) { int len = strlen(remcom_in_buffer + 6); if ((len % 2) != 0) { strcpy(remcom_out_buffer, "E01"); break; } kgdb_hex2mem(remcom_in_buffer + 6, remcom_out_buffer, len); len = len / 2; remcom_out_buffer[len++] = 0; kdb_common_init_state(ks); kdb_parse(remcom_out_buffer); kdb_common_deinit_state(); strcpy(remcom_out_buffer, "OK"); } break; #endif #ifdef CONFIG_HAVE_ARCH_KGDB_QXFER_PKT case 'S': if (!strncmp(remcom_in_buffer, "qSupported:", 11)) strcpy(remcom_out_buffer, kgdb_arch_gdb_stub_feature); break; case 'X': if (!strncmp(remcom_in_buffer, "qXfer:", 6)) kgdb_arch_handle_qxfer_pkt(remcom_in_buffer, remcom_out_buffer); break; #endif default: break; } } /* Handle the 'H' task query packets */ static void gdb_cmd_task(struct kgdb_state *ks) { struct 
task_struct *thread; char *ptr; switch (remcom_in_buffer[1]) { case 'g': ptr = &remcom_in_buffer[2]; kgdb_hex2long(&ptr, &ks->threadid); thread = getthread(ks->linux_regs, ks->threadid); if (!thread && ks->threadid > 0) { error_packet(remcom_out_buffer, -EINVAL); break; } kgdb_usethread = thread; ks->kgdb_usethreadid = ks->threadid; strcpy(remcom_out_buffer, "OK"); break; case 'c': ptr = &remcom_in_buffer[2]; kgdb_hex2long(&ptr, &ks->threadid); if (!ks->threadid) { kgdb_contthread = NULL; } else { thread = getthread(ks->linux_regs, ks->threadid); if (!thread && ks->threadid > 0) { error_packet(remcom_out_buffer, -EINVAL); break; } kgdb_contthread = thread; } strcpy(remcom_out_buffer, "OK"); break; } } /* Handle the 'T' thread query packets */ static void gdb_cmd_thread(struct kgdb_state *ks) { char *ptr = &remcom_in_buffer[1]; struct task_struct *thread; kgdb_hex2long(&ptr, &ks->threadid); thread = getthread(ks->linux_regs, ks->threadid); if (thread) strcpy(remcom_out_buffer, "OK"); else error_packet(remcom_out_buffer, -EINVAL); } /* Handle the 'z' or 'Z' breakpoint remove or set packets */ static void gdb_cmd_break(struct kgdb_state *ks) { /* * Since GDB-5.3, it's been drafted that '0' is a software * breakpoint, '1' is a hardware breakpoint, so let's do that. */ char *bpt_type = &remcom_in_buffer[1]; char *ptr = &remcom_in_buffer[2]; unsigned long addr; unsigned long length; int error = 0; if (arch_kgdb_ops.set_hw_breakpoint && *bpt_type >= '1') { /* Unsupported */ if (*bpt_type > '4') return; } else { if (*bpt_type != '0' && *bpt_type != '1') /* Unsupported. */ return; } /* * Test if this is a hardware breakpoint, and * if we support it: */ if (*bpt_type == '1' && !(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)) /* Unsupported. */ return; if (*(ptr++) != ',') { error_packet(remcom_out_buffer, -EINVAL); return; } if (!kgdb_hex2long(&ptr, &addr)) { error_packet(remcom_out_buffer, -EINVAL); return; } if (*(ptr++) != ',' || !kgdb_hex2long(&ptr, &length)) { error_packet(remcom_out_buffer, -EINVAL); return; } if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0') error = dbg_set_sw_break(addr); else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0') error = dbg_remove_sw_break(addr); else if (remcom_in_buffer[0] == 'Z') error = arch_kgdb_ops.set_hw_breakpoint(addr, (int)length, *bpt_type - '0'); else if (remcom_in_buffer[0] == 'z') error = arch_kgdb_ops.remove_hw_breakpoint(addr, (int) length, *bpt_type - '0'); if (error == 0) strcpy(remcom_out_buffer, "OK"); else error_packet(remcom_out_buffer, error); } /* Handle the 'C' signal / exception passing packets */ static int gdb_cmd_exception_pass(struct kgdb_state *ks) { /* C09 == pass exception * C15 == detach kgdb, pass exception */ if (remcom_in_buffer[1] == '0' && remcom_in_buffer[2] == '9') { ks->pass_exception = 1; remcom_in_buffer[0] = 'c'; } else if (remcom_in_buffer[1] == '1' && remcom_in_buffer[2] == '5') { ks->pass_exception = 1; remcom_in_buffer[0] = 'D'; dbg_remove_all_break(); kgdb_connected = 0; return 1; } else { gdbstub_msg_write("KGDB only knows signal 9 (pass)" " and 15 (pass and disconnect)\n" "Executing a continue without signal passing\n", 0); remcom_in_buffer[0] = 'c'; } /* Indicate fall through */ return -1; } /* * This function performs all gdbserial command processing */ int gdb_serial_stub(struct kgdb_state *ks) { int error = 0; int tmp; /* Initialize comm buffer and globals. 
*/ memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer)); kgdb_usethread = kgdb_info[ks->cpu].task; ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid); ks->pass_exception = 0; if (kgdb_connected) { unsigned char thref[BUF_THREAD_ID_SIZE]; char *ptr; /* Reply to host that an exception has occurred */ ptr = remcom_out_buffer; *ptr++ = 'T'; ptr = hex_byte_pack(ptr, ks->signo); ptr += strlen(strcpy(ptr, "thread:")); int_to_threadref(thref, shadow_pid(current->pid)); ptr = pack_threadid(ptr, thref); *ptr++ = ';'; put_packet(remcom_out_buffer); } while (1) { error = 0; /* Clear the out buffer. */ memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer)); get_packet(remcom_in_buffer); switch (remcom_in_buffer[0]) { case '?': /* gdbserial status */ gdb_cmd_status(ks); break; case 'g': /* return the value of the CPU registers */ gdb_cmd_getregs(ks); break; case 'G': /* set the value of the CPU registers - return OK */ gdb_cmd_setregs(ks); break; case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */ gdb_cmd_memread(ks); break; case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA..AA */ gdb_cmd_memwrite(ks); break; #if DBG_MAX_REG_NUM > 0 case 'p': /* pXX Return gdb register XX (in hex) */ gdb_cmd_reg_get(ks); break; case 'P': /* PXX=aaaa Set gdb register XX to aaaa (in hex) */ gdb_cmd_reg_set(ks); break; #endif /* DBG_MAX_REG_NUM > 0 */ case 'X': /* XAA..AA,LLLL: Write LLLL bytes at address AA..AA */ gdb_cmd_binwrite(ks); break; /* kill or detach. KGDB should treat this like a * continue. */ case 'D': /* Debugger detach */ case 'k': /* Debugger detach via kill */ gdb_cmd_detachkill(ks); goto default_handle; case 'R': /* Reboot */ if (gdb_cmd_reboot(ks)) goto default_handle; break; case 'q': /* query command */ gdb_cmd_query(ks); break; case 'H': /* task related */ gdb_cmd_task(ks); break; case 'T': /* Query thread status */ gdb_cmd_thread(ks); break; case 'z': /* Break point remove */ case 'Z': /* Break point set */ gdb_cmd_break(ks); break; #ifdef CONFIG_KGDB_KDB case '3': /* Escape into back into kdb */ if (remcom_in_buffer[1] == '\0') { gdb_cmd_detachkill(ks); return DBG_PASS_EVENT; } fallthrough; #endif case 'C': /* Exception passing */ tmp = gdb_cmd_exception_pass(ks); if (tmp > 0) goto default_handle; if (tmp == 0) break; fallthrough; /* on tmp < 0 */ case 'c': /* Continue packet */ case 's': /* Single step packet */ if (kgdb_contthread && kgdb_contthread != current) { /* Can't switch threads in kgdb */ error_packet(remcom_out_buffer, -EINVAL); break; } fallthrough; /* to default processing */ default: default_handle: error = kgdb_arch_handle_exception(ks->ex_vector, ks->signo, ks->err_code, remcom_in_buffer, remcom_out_buffer, ks->linux_regs); /* * Leave cmd processing on error, detach, * kill, continue, or single step. 
*/ if (error >= 0 || remcom_in_buffer[0] == 'D' || remcom_in_buffer[0] == 'k') { error = 0; goto kgdb_exit; } } /* reply to the request */ put_packet(remcom_out_buffer); } kgdb_exit: if (ks->pass_exception) error = 1; return error; } int gdbstub_state(struct kgdb_state *ks, char *cmd) { int error; switch (cmd[0]) { case 'e': error = kgdb_arch_handle_exception(ks->ex_vector, ks->signo, ks->err_code, remcom_in_buffer, remcom_out_buffer, ks->linux_regs); return error; case 's': case 'c': strscpy(remcom_in_buffer, cmd, sizeof(remcom_in_buffer)); return 0; case '$': strscpy(remcom_in_buffer, cmd, sizeof(remcom_in_buffer)); gdbstub_use_prev_in_buf = strlen(remcom_in_buffer); gdbstub_prev_in_buf_pos = 0; return 0; } dbg_io_ops->write_char('+'); put_packet(remcom_out_buffer); return 0; } /** * gdbstub_exit - Send an exit message to GDB * @status: The exit code to report. */ void gdbstub_exit(int status) { unsigned char checksum, ch, buffer[3]; int loop; if (!kgdb_connected) return; kgdb_connected = 0; if (!dbg_io_ops || dbg_kdb_mode) return; buffer[0] = 'W'; buffer[1] = hex_asc_hi(status); buffer[2] = hex_asc_lo(status); dbg_io_ops->write_char('$'); checksum = 0; for (loop = 0; loop < 3; loop++) { ch = buffer[loop]; checksum += ch; dbg_io_ops->write_char(ch); } dbg_io_ops->write_char('#'); dbg_io_ops->write_char(hex_asc_hi(checksum)); dbg_io_ops->write_char(hex_asc_lo(checksum)); /* make sure the output is flushed, lest the bootloader clobber it */ if (dbg_io_ops->flush) dbg_io_ops->flush(); }
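/*
 * Standalone illustration (not kernel code): get_packet() and put_packet()
 * above frame every exchange as "$<payload>#<checksum>", where the checksum
 * is the sum of the payload bytes modulo 256 printed as two hex digits, and
 * the peer answers '+' for a good checksum or '-' to request a retransmit.
 * The sketch below builds such a frame in userspace; it does not talk to a
 * real KGDB stub, and the sample payload is just a standard GDB query.
 */
#include <stdio.h>

static void gdb_frame_packet(const char *payload, char *out, size_t outlen)
{
	unsigned char checksum = 0;
	size_t i;

	for (i = 0; payload[i]; i++)
		checksum += (unsigned char)payload[i];

	snprintf(out, outlen, "$%s#%02x", payload, checksum);
}

int main(void)
{
	char pkt[128];

	gdb_frame_packet("qSupported", pkt, sizeof(pkt));
	printf("%s\n", pkt);		/* prints "$qSupported#37" */
	return 0;
}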
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) // // This file is provided under a dual BSD/GPLv2 license. When using or // redistributing this file, you may do so under either license. // // Copyright(c) 2018 Intel Corporation // // Author: Liam Girdwood <[email protected]> // #include <linux/acpi.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <sound/soc-acpi.h> #include <sound/soc-acpi-intel-match.h> #include <sound/sof.h> #include "../intel/common/soc-intel-quirks.h" #include "ops.h" #include "sof-acpi-dev.h" /* platform specific devices */ #include "intel/shim.h" static char *fw_path; module_param(fw_path, charp, 0444); MODULE_PARM_DESC(fw_path, "deprecated - moved to snd-sof module."); static char *tplg_path; module_param(tplg_path, charp, 0444); MODULE_PARM_DESC(tplg_path, "deprecated - moved to snd-sof module."); static int sof_acpi_debug; module_param_named(sof_acpi_debug, sof_acpi_debug, int, 0444); MODULE_PARM_DESC(sof_acpi_debug, "SOF ACPI debug options (0x0 all off)"); #define SOF_ACPI_DISABLE_PM_RUNTIME BIT(0) const struct dev_pm_ops sof_acpi_pm = { SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume) SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume, snd_sof_runtime_idle) }; EXPORT_SYMBOL_NS(sof_acpi_pm, "SND_SOC_SOF_ACPI_DEV"); static void sof_acpi_probe_complete(struct device *dev) { dev_dbg(dev, "Completing SOF ACPI probe"); if (sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME) return; /* allow runtime_pm */ pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS); pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); } int sof_acpi_probe(struct platform_device *pdev, const struct sof_dev_desc *desc) { struct device *dev = &pdev->dev; struct snd_sof_pdata *sof_pdata; dev_dbg(dev, "ACPI DSP detected"); sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL); if (!sof_pdata) return -ENOMEM; if (!desc->ops) { dev_err(dev, "error: no matching ACPI descriptor ops\n"); return -ENODEV; } sof_pdata->desc = desc; sof_pdata->dev = &pdev->dev; sof_pdata->ipc_file_profile_base.ipc_type = desc->ipc_default; sof_pdata->ipc_file_profile_base.fw_path = fw_path; sof_pdata->ipc_file_profile_base.tplg_path = tplg_path; /* set callback to be called on successful device probe to enable runtime_pm */ sof_pdata->sof_probe_complete = sof_acpi_probe_complete; /* call sof helper for DSP hardware probe */ return snd_sof_device_probe(dev, sof_pdata); } EXPORT_SYMBOL_NS(sof_acpi_probe, "SND_SOC_SOF_ACPI_DEV"); void sof_acpi_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; if (!(sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME)) pm_runtime_disable(dev); /* call sof helper for DSP hardware remove */ snd_sof_device_remove(dev); } EXPORT_SYMBOL_NS(sof_acpi_remove, "SND_SOC_SOF_ACPI_DEV"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("SOF support for ACPI platforms");
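/*
 * Userspace sketch (not part of the driver above): sof_acpi_probe_complete()
 * enables runtime PM only after the DSP probe succeeds, and the
 * SOF_ACPI_DISABLE_PM_RUNTIME bit of the sof_acpi_debug module parameter
 * suppresses that entirely.  One way to observe the outcome is to read the
 * device's generic power/control and power/runtime_status sysfs attributes;
 * the concrete platform-device path varies per system, so it is taken from
 * the command line here rather than hard-coded.
 */
#include <stdio.h>

static void print_pm_attr(const char *devpath, const char *attr)
{
	char path[512], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/power/%s", devpath, attr);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-14s: %s", attr, buf);	/* buf keeps its trailing newline */
	fclose(f);
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s /sys/devices/platform/<sof-device>\n", argv[0]);
		return 1;
	}
	print_pm_attr(argv[1], "control");
	print_pm_attr(argv[1], "runtime_status");
	return 0;
}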
// SPDX-License-Identifier: GPL-2.0-only /* esp_scsi.c: ESP SCSI driver. * * Copyright (C) 2007 David S. Miller ([email protected]) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/completion.h> #include <linux/kallsyms.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/irqreturn.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/dma.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> #include "esp_scsi.h" #define DRV_MODULE_NAME "esp" #define PFX DRV_MODULE_NAME ": " #define DRV_VERSION "2.000" #define DRV_MODULE_RELDATE "April 19, 2007" /* SCSI bus reset settle time in seconds. */ static int esp_bus_reset_settle = 3; static u32 esp_debug; #define ESP_DEBUG_INTR 0x00000001 #define ESP_DEBUG_SCSICMD 0x00000002 #define ESP_DEBUG_RESET 0x00000004 #define ESP_DEBUG_MSGIN 0x00000008 #define ESP_DEBUG_MSGOUT 0x00000010 #define ESP_DEBUG_CMDDONE 0x00000020 #define ESP_DEBUG_DISCONNECT 0x00000040 #define ESP_DEBUG_DATASTART 0x00000080 #define ESP_DEBUG_DATADONE 0x00000100 #define ESP_DEBUG_RECONNECT 0x00000200 #define ESP_DEBUG_AUTOSENSE 0x00000400 #define ESP_DEBUG_EVENT 0x00000800 #define ESP_DEBUG_COMMAND 0x00001000 #define esp_log_intr(f, a...) \ do { if (esp_debug & ESP_DEBUG_INTR) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_reset(f, a...) \ do { if (esp_debug & ESP_DEBUG_RESET) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_msgin(f, a...) \ do { if (esp_debug & ESP_DEBUG_MSGIN) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_msgout(f, a...) \ do { if (esp_debug & ESP_DEBUG_MSGOUT) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_cmddone(f, a...) \ do { if (esp_debug & ESP_DEBUG_CMDDONE) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_disconnect(f, a...) \ do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_datastart(f, a...) \ do { if (esp_debug & ESP_DEBUG_DATASTART) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_datadone(f, a...) \ do { if (esp_debug & ESP_DEBUG_DATADONE) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_reconnect(f, a...) \ do { if (esp_debug & ESP_DEBUG_RECONNECT) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_autosense(f, a...) \ do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_event(f, a...) \ do { if (esp_debug & ESP_DEBUG_EVENT) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_command(f, a...) 
\ do { if (esp_debug & ESP_DEBUG_COMMAND) \ shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_read8(REG) esp->ops->esp_read8(esp, REG) #define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG) static void esp_log_fill_regs(struct esp *esp, struct esp_event_ent *p) { p->sreg = esp->sreg; p->seqreg = esp->seqreg; p->sreg2 = esp->sreg2; p->ireg = esp->ireg; p->select_state = esp->select_state; p->event = esp->event; } void scsi_esp_cmd(struct esp *esp, u8 val) { struct esp_event_ent *p; int idx = esp->esp_event_cur; p = &esp->esp_event_log[idx]; p->type = ESP_EVENT_TYPE_CMD; p->val = val; esp_log_fill_regs(esp, p); esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); esp_log_command("cmd[%02x]\n", val); esp_write8(val, ESP_CMD); } EXPORT_SYMBOL(scsi_esp_cmd); static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd) { if (esp->flags & ESP_FLAG_USE_FIFO) { int i; scsi_esp_cmd(esp, ESP_CMD_FLUSH); for (i = 0; i < len; i++) esp_write8(esp->command_block[i], ESP_FDATA); scsi_esp_cmd(esp, cmd); } else { if (esp->rev == FASHME) scsi_esp_cmd(esp, ESP_CMD_FLUSH); cmd |= ESP_CMD_DMA; esp->ops->send_dma_cmd(esp, esp->command_block_dma, len, max_len, 0, cmd); } } static void esp_event(struct esp *esp, u8 val) { struct esp_event_ent *p; int idx = esp->esp_event_cur; p = &esp->esp_event_log[idx]; p->type = ESP_EVENT_TYPE_EVENT; p->val = val; esp_log_fill_regs(esp, p); esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); esp->event = val; } static void esp_dump_cmd_log(struct esp *esp) { int idx = esp->esp_event_cur; int stop = idx; shost_printk(KERN_INFO, esp->host, "Dumping command log\n"); do { struct esp_event_ent *p = &esp->esp_event_log[idx]; shost_printk(KERN_INFO, esp->host, "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] " "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", idx, p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT", p->val, p->sreg, p->seqreg, p->sreg2, p->ireg, p->select_state, p->event); idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); } while (idx != stop); } static void esp_flush_fifo(struct esp *esp) { scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (esp->rev == ESP236) { int lim = 1000; while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { if (--lim == 0) { shost_printk(KERN_ALERT, esp->host, "ESP_FF_BYTES will not clear!\n"); break; } udelay(1); } } } static void hme_read_fifo(struct esp *esp) { int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; int idx = 0; while (fcnt--) { esp->fifo[idx++] = esp_read8(ESP_FDATA); esp->fifo[idx++] = esp_read8(ESP_FDATA); } if (esp->sreg2 & ESP_STAT2_F1BYTE) { esp_write8(0, ESP_FDATA); esp->fifo[idx++] = esp_read8(ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_FLUSH); } esp->fifo_cnt = idx; } static void esp_set_all_config3(struct esp *esp, u8 val) { int i; for (i = 0; i < ESP_MAX_TARGET; i++) esp->target[i].esp_config3 = val; } /* Reset the ESP chip, _not_ the SCSI bus. */ static void esp_reset_esp(struct esp *esp) { /* Now reset the ESP chip */ scsi_esp_cmd(esp, ESP_CMD_RC); scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); if (esp->rev == FAST) esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2); scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); /* This is the only point at which it is reliable to read * the ID-code for a fast ESP chip variants. */ esp->max_period = ((35 * esp->ccycle) / 1000); if (esp->rev == FAST) { u8 family_code = ESP_FAMILY(esp_read8(ESP_UID)); if (family_code == ESP_UID_F236) { esp->rev = FAS236; } else if (family_code == ESP_UID_HME) { esp->rev = FASHME; /* Version is usually '5'. 
*/ } else if (family_code == ESP_UID_FSC) { esp->rev = FSC; /* Enable Active Negation */ esp_write8(ESP_CONFIG4_RADE, ESP_CFG4); } else { esp->rev = FAS100A; } esp->min_period = ((4 * esp->ccycle) / 1000); } else { esp->min_period = ((5 * esp->ccycle) / 1000); } if (esp->rev == FAS236) { /* * The AM53c974 chip returns the same ID as FAS236; * try to configure glitch eater. */ u8 config4 = ESP_CONFIG4_GE1; esp_write8(config4, ESP_CFG4); config4 = esp_read8(ESP_CFG4); if (config4 & ESP_CONFIG4_GE1) { esp->rev = PCSCSI; esp_write8(esp->config4, ESP_CFG4); } } esp->max_period = (esp->max_period + 3)>>2; esp->min_period = (esp->min_period + 3)>>2; esp_write8(esp->config1, ESP_CFG1); switch (esp->rev) { case ESP100: /* nothing to do */ break; case ESP100A: esp_write8(esp->config2, ESP_CFG2); break; case ESP236: /* Slow 236 */ esp_write8(esp->config2, ESP_CFG2); esp->prev_cfg3 = esp->target[0].esp_config3; esp_write8(esp->prev_cfg3, ESP_CFG3); break; case FASHME: esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); fallthrough; case FAS236: case PCSCSI: case FSC: esp_write8(esp->config2, ESP_CFG2); if (esp->rev == FASHME) { u8 cfg3 = esp->target[0].esp_config3; cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; if (esp->scsi_id >= 8) cfg3 |= ESP_CONFIG3_IDBIT3; esp_set_all_config3(esp, cfg3); } else { u32 cfg3 = esp->target[0].esp_config3; cfg3 |= ESP_CONFIG3_FCLK; esp_set_all_config3(esp, cfg3); } esp->prev_cfg3 = esp->target[0].esp_config3; esp_write8(esp->prev_cfg3, ESP_CFG3); if (esp->rev == FASHME) { esp->radelay = 80; } else { if (esp->flags & ESP_FLAG_DIFFERENTIAL) esp->radelay = 0; else esp->radelay = 96; } break; case FAS100A: /* Fast 100a */ esp_write8(esp->config2, ESP_CFG2); esp_set_all_config3(esp, (esp->target[0].esp_config3 | ESP_CONFIG3_FCLOCK)); esp->prev_cfg3 = esp->target[0].esp_config3; esp_write8(esp->prev_cfg3, ESP_CFG3); esp->radelay = 32; break; default: break; } /* Reload the configuration registers */ esp_write8(esp->cfact, ESP_CFACT); esp->prev_stp = 0; esp_write8(esp->prev_stp, ESP_STP); esp->prev_soff = 0; esp_write8(esp->prev_soff, ESP_SOFF); esp_write8(esp->neg_defp, ESP_TIMEO); /* Eat any bitrot in the chip */ esp_read8(ESP_INTRPT); udelay(100); } static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd) { struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); struct scatterlist *sg = scsi_sglist(cmd); int total = 0, i; struct scatterlist *s; if (cmd->sc_data_direction == DMA_NONE) return; if (esp->flags & ESP_FLAG_NO_DMA_MAP) { /* * For pseudo DMA and PIO we need the virtual address instead of * a dma address, so perform an identity mapping. 
*/ spriv->num_sg = scsi_sg_count(cmd); scsi_for_each_sg(cmd, s, spriv->num_sg, i) { s->dma_address = (uintptr_t)sg_virt(s); total += sg_dma_len(s); } } else { spriv->num_sg = scsi_dma_map(cmd); scsi_for_each_sg(cmd, s, spriv->num_sg, i) total += sg_dma_len(s); } spriv->cur_residue = sg_dma_len(sg); spriv->prv_sg = NULL; spriv->cur_sg = sg; spriv->tot_residue = total; } static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, struct scsi_cmnd *cmd) { struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { return ent->sense_dma + (ent->sense_ptr - cmd->sense_buffer); } return sg_dma_address(p->cur_sg) + (sg_dma_len(p->cur_sg) - p->cur_residue); } static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, struct scsi_cmnd *cmd) { struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { return SCSI_SENSE_BUFFERSIZE - (ent->sense_ptr - cmd->sense_buffer); } return p->cur_residue; } static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, struct scsi_cmnd *cmd, unsigned int len) { struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent->sense_ptr += len; return; } p->cur_residue -= len; p->tot_residue -= len; if (p->cur_residue < 0 || p->tot_residue < 0) { shost_printk(KERN_ERR, esp->host, "Data transfer overflow.\n"); shost_printk(KERN_ERR, esp->host, "cur_residue[%d] tot_residue[%d] len[%u]\n", p->cur_residue, p->tot_residue, len); p->cur_residue = 0; p->tot_residue = 0; } if (!p->cur_residue && p->tot_residue) { p->prv_sg = p->cur_sg; p->cur_sg = sg_next(p->cur_sg); p->cur_residue = sg_dma_len(p->cur_sg); } } static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) { if (!(esp->flags & ESP_FLAG_NO_DMA_MAP)) scsi_dma_unmap(cmd); } static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_cmnd *cmd = ent->cmd; struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent->saved_sense_ptr = ent->sense_ptr; return; } ent->saved_cur_residue = spriv->cur_residue; ent->saved_prv_sg = spriv->prv_sg; ent->saved_cur_sg = spriv->cur_sg; ent->saved_tot_residue = spriv->tot_residue; } static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_cmnd *cmd = ent->cmd; struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent->sense_ptr = ent->saved_sense_ptr; return; } spriv->cur_residue = ent->saved_cur_residue; spriv->prv_sg = ent->saved_prv_sg; spriv->cur_sg = ent->saved_cur_sg; spriv->tot_residue = ent->saved_tot_residue; } static void esp_write_tgt_config3(struct esp *esp, int tgt) { if (esp->rev > ESP100A) { u8 val = esp->target[tgt].esp_config3; if (val != esp->prev_cfg3) { esp->prev_cfg3 = val; esp_write8(val, ESP_CFG3); } } } static void esp_write_tgt_sync(struct esp *esp, int tgt) { u8 off = esp->target[tgt].esp_offset; u8 per = esp->target[tgt].esp_period; if (off != esp->prev_soff) { esp->prev_soff = off; esp_write8(off, ESP_SOFF); } if (per != esp->prev_stp) { esp->prev_stp = per; esp_write8(per, ESP_STP); } } static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) { if (esp->rev == FASHME) { /* Arbitrary segment boundaries, 24-bit counts. */ if (dma_len > (1U << 24)) dma_len = (1U << 24); } else { u32 base, end; /* ESP chip limits other variants by 16-bits of transfer * count. 
Actually on FAS100A and FAS236 we could get * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB * in the ESP_CFG2 register but that causes other unwanted * changes so we don't use it currently. */ if (dma_len > (1U << 16)) dma_len = (1U << 16); /* All of the DMA variants hooked up to these chips * cannot handle crossing a 24-bit address boundary. */ base = dma_addr & ((1U << 24) - 1U); end = base + dma_len; if (end > (1U << 24)) end = (1U <<24); dma_len = end - base; } return dma_len; } static int esp_need_to_nego_wide(struct esp_target_data *tp) { struct scsi_target *target = tp->starget; return spi_width(target) != tp->nego_goal_width; } static int esp_need_to_nego_sync(struct esp_target_data *tp) { struct scsi_target *target = tp->starget; /* When offset is zero, period is "don't care". */ if (!spi_offset(target) && !tp->nego_goal_offset) return 0; if (spi_offset(target) == tp->nego_goal_offset && spi_period(target) == tp->nego_goal_period) return 0; return 1; } static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, struct esp_lun_data *lp) { if (!ent->orig_tag[0]) { /* Non-tagged, slot already taken? */ if (lp->non_tagged_cmd) return -EBUSY; if (lp->hold) { /* We are being held by active tagged * commands. */ if (lp->num_tagged) return -EBUSY; /* Tagged commands completed, we can unplug * the queue and run this untagged command. */ lp->hold = 0; } else if (lp->num_tagged) { /* Plug the queue until num_tagged decreases * to zero in esp_free_lun_tag. */ lp->hold = 1; return -EBUSY; } lp->non_tagged_cmd = ent; return 0; } /* Tagged command. Check that it isn't blocked by a non-tagged one. */ if (lp->non_tagged_cmd || lp->hold) return -EBUSY; BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]); lp->tagged_cmds[ent->orig_tag[1]] = ent; lp->num_tagged++; return 0; } static void esp_free_lun_tag(struct esp_cmd_entry *ent, struct esp_lun_data *lp) { if (ent->orig_tag[0]) { BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent); lp->tagged_cmds[ent->orig_tag[1]] = NULL; lp->num_tagged--; } else { BUG_ON(lp->non_tagged_cmd != ent); lp->non_tagged_cmd = NULL; } } static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent) { ent->sense_ptr = ent->cmd->sense_buffer; if (esp->flags & ESP_FLAG_NO_DMA_MAP) { ent->sense_dma = (uintptr_t)ent->sense_ptr; return; } ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); } static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent) { if (!(esp->flags & ESP_FLAG_NO_DMA_MAP)) dma_unmap_single(esp->dev, ent->sense_dma, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); ent->sense_ptr = NULL; } /* When a contingent allegiance condition is created, we force feed a * REQUEST_SENSE command to the device to fetch the sense data. I * tried many other schemes, relying on the scsi error handling layer * to send out the REQUEST_SENSE automatically, but this was difficult * to get right especially in the presence of applications like smartd * which use SG_IO to send out their own REQUEST_SENSE commands. */ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_cmnd *cmd = ent->cmd; struct scsi_device *dev = cmd->device; int tgt, lun; u8 *p, val; tgt = dev->id; lun = dev->lun; if (!ent->sense_ptr) { esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n", tgt, lun); esp_map_sense(esp, ent); } ent->saved_sense_ptr = ent->sense_ptr; esp->active_cmd = ent; p = esp->command_block; esp->msg_out_len = 0; *p++ = IDENTIFY(0, lun); *p++ = REQUEST_SENSE; *p++ = ((dev->scsi_level <= SCSI_2) ? 
(lun << 5) : 0); *p++ = 0; *p++ = 0; *p++ = SCSI_SENSE_BUFFERSIZE; *p++ = 0; esp->select_state = ESP_SELECT_BASIC; val = tgt; if (esp->rev == FASHME) val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; esp_write8(val, ESP_BUSID); esp_write_tgt_sync(esp, tgt); esp_write_tgt_config3(esp, tgt); val = (p - esp->command_block); esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA); } static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) { struct esp_cmd_entry *ent; list_for_each_entry(ent, &esp->queued_cmds, list) { struct scsi_cmnd *cmd = ent->cmd; struct scsi_device *dev = cmd->device; struct esp_lun_data *lp = dev->hostdata; if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent->tag[0] = 0; ent->tag[1] = 0; return ent; } if (!spi_populate_tag_msg(&ent->tag[0], cmd)) { ent->tag[0] = 0; ent->tag[1] = 0; } ent->orig_tag[0] = ent->tag[0]; ent->orig_tag[1] = ent->tag[1]; if (esp_alloc_lun_tag(ent, lp) < 0) continue; return ent; } return NULL; } static void esp_maybe_execute_command(struct esp *esp) { struct esp_target_data *tp; struct scsi_device *dev; struct scsi_cmnd *cmd; struct esp_cmd_entry *ent; bool select_and_stop = false; int tgt, lun, i; u32 val, start_cmd; u8 *p; if (esp->active_cmd || (esp->flags & ESP_FLAG_RESETTING)) return; ent = find_and_prep_issuable_command(esp); if (!ent) return; if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { esp_autosense(esp, ent); return; } cmd = ent->cmd; dev = cmd->device; tgt = dev->id; lun = dev->lun; tp = &esp->target[tgt]; list_move(&ent->list, &esp->active_cmds); esp->active_cmd = ent; esp_map_dma(esp, cmd); esp_save_pointers(esp, ent); if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12)) select_and_stop = true; p = esp->command_block; esp->msg_out_len = 0; if (tp->flags & ESP_TGT_CHECK_NEGO) { /* Need to negotiate. If the target is broken * go for synchronous transfers and non-wide. */ if (tp->flags & ESP_TGT_BROKEN) { tp->flags &= ~ESP_TGT_DISCONNECT; tp->nego_goal_period = 0; tp->nego_goal_offset = 0; tp->nego_goal_width = 0; tp->nego_goal_tags = 0; } /* If the settings are not changing, skip this. */ if (spi_width(tp->starget) == tp->nego_goal_width && spi_period(tp->starget) == tp->nego_goal_period && spi_offset(tp->starget) == tp->nego_goal_offset) { tp->flags &= ~ESP_TGT_CHECK_NEGO; goto build_identify; } if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) { esp->msg_out_len = spi_populate_width_msg(&esp->msg_out[0], (tp->nego_goal_width ? 1 : 0)); tp->flags |= ESP_TGT_NEGO_WIDE; } else if (esp_need_to_nego_sync(tp)) { esp->msg_out_len = spi_populate_sync_msg(&esp->msg_out[0], tp->nego_goal_period, tp->nego_goal_offset); tp->flags |= ESP_TGT_NEGO_SYNC; } else { tp->flags &= ~ESP_TGT_CHECK_NEGO; } /* If there are multiple message bytes, use Select and Stop */ if (esp->msg_out_len) select_and_stop = true; } build_identify: *p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun); if (ent->tag[0] && esp->rev == ESP100) { /* ESP100 lacks select w/atn3 command, use select * and stop instead. 
*/ select_and_stop = true; } if (select_and_stop) { esp->cmd_bytes_left = cmd->cmd_len; esp->cmd_bytes_ptr = &cmd->cmnd[0]; if (ent->tag[0]) { for (i = esp->msg_out_len - 1; i >= 0; i--) esp->msg_out[i + 2] = esp->msg_out[i]; esp->msg_out[0] = ent->tag[0]; esp->msg_out[1] = ent->tag[1]; esp->msg_out_len += 2; } start_cmd = ESP_CMD_SELAS; esp->select_state = ESP_SELECT_MSGOUT; } else { start_cmd = ESP_CMD_SELA; if (ent->tag[0]) { *p++ = ent->tag[0]; *p++ = ent->tag[1]; start_cmd = ESP_CMD_SA3; } for (i = 0; i < cmd->cmd_len; i++) *p++ = cmd->cmnd[i]; esp->select_state = ESP_SELECT_BASIC; } val = tgt; if (esp->rev == FASHME) val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; esp_write8(val, ESP_BUSID); esp_write_tgt_sync(esp, tgt); esp_write_tgt_config3(esp, tgt); val = (p - esp->command_block); if (esp_debug & ESP_DEBUG_SCSICMD) { printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun); for (i = 0; i < cmd->cmd_len; i++) printk("%02x ", cmd->cmnd[i]); printk("]\n"); } esp_send_dma_cmd(esp, val, 16, start_cmd); } static struct esp_cmd_entry *esp_get_ent(struct esp *esp) { struct list_head *head = &esp->esp_cmd_pool; struct esp_cmd_entry *ret; if (list_empty(head)) { ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC); } else { ret = list_entry(head->next, struct esp_cmd_entry, list); list_del(&ret->list); memset(ret, 0, sizeof(*ret)); } return ret; } static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent) { list_add(&ent->list, &esp->esp_cmd_pool); } static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent, struct scsi_cmnd *cmd, unsigned char host_byte) { struct scsi_device *dev = cmd->device; int tgt = dev->id; int lun = dev->lun; esp->active_cmd = NULL; esp_unmap_dma(esp, cmd); esp_free_lun_tag(ent, dev->hostdata); cmd->result = 0; set_host_byte(cmd, host_byte); if (host_byte == DID_OK) set_status_byte(cmd, ent->status); if (ent->eh_done) { complete(ent->eh_done); ent->eh_done = NULL; } if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { esp_unmap_sense(esp, ent); /* Restore the message/status bytes to what we actually * saw originally. Also, report that we are providing * the sense data. */ cmd->result = SAM_STAT_CHECK_CONDITION; ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; if (esp_debug & ESP_DEBUG_AUTOSENSE) { int i; printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ", esp->host->unique_id, tgt, lun); for (i = 0; i < 18; i++) printk("%02x ", cmd->sense_buffer[i]); printk("]\n"); } } scsi_done(cmd); list_del(&ent->list); esp_put_ent(esp, ent); esp_maybe_execute_command(esp); } static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_device *dev = ent->cmd->device; struct esp_lun_data *lp = dev->hostdata; scsi_track_queue_full(dev, lp->num_tagged - 1); } static int esp_queuecommand_lck(struct scsi_cmnd *cmd) { struct scsi_device *dev = cmd->device; struct esp *esp = shost_priv(dev->host); struct esp_cmd_priv *spriv; struct esp_cmd_entry *ent; ent = esp_get_ent(esp); if (!ent) return SCSI_MLQUEUE_HOST_BUSY; ent->cmd = cmd; spriv = ESP_CMD_PRIV(cmd); spriv->num_sg = 0; list_add_tail(&ent->list, &esp->queued_cmds); esp_maybe_execute_command(esp); return 0; } static DEF_SCSI_QCMD(esp_queuecommand) static int esp_check_gross_error(struct esp *esp) { if (esp->sreg & ESP_STAT_SPAM) { /* Gross Error, could be one of: * - top of fifo overwritten * - top of command register overwritten * - DMA programmed with wrong direction * - improper phase change */ shost_printk(KERN_ERR, esp->host, "Gross error sreg[%02x]\n", esp->sreg); /* XXX Reset the chip. 
XXX */ return 1; } return 0; } static int esp_check_spur_intr(struct esp *esp) { switch (esp->rev) { case ESP100: case ESP100A: /* The interrupt pending bit of the status register cannot * be trusted on these revisions. */ esp->sreg &= ~ESP_STAT_INTR; break; default: if (!(esp->sreg & ESP_STAT_INTR)) { if (esp->ireg & ESP_INTR_SR) return 1; /* If the DMA is indicating interrupt pending and the * ESP is not, the only possibility is a DMA error. */ if (!esp->ops->dma_error(esp)) { shost_printk(KERN_ERR, esp->host, "Spurious irq, sreg=%02x.\n", esp->sreg); return -1; } shost_printk(KERN_ERR, esp->host, "DMA error\n"); /* XXX Reset the chip. XXX */ return -1; } break; } return 0; } static void esp_schedule_reset(struct esp *esp) { esp_log_reset("esp_schedule_reset() from %ps\n", __builtin_return_address(0)); esp->flags |= ESP_FLAG_RESETTING; esp_event(esp, ESP_EVENT_RESET); } /* In order to avoid having to add a special half-reconnected state * into the driver we just sit here and poll through the rest of * the reselection process to get the tag message bytes. */ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, struct esp_lun_data *lp) { struct esp_cmd_entry *ent; int i; if (!lp->num_tagged) { shost_printk(KERN_ERR, esp->host, "Reconnect w/num_tagged==0\n"); return NULL; } esp_log_reconnect("reconnect tag, "); for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { if (esp->ops->irq_pending(esp)) break; } if (i == ESP_QUICKIRQ_LIMIT) { shost_printk(KERN_ERR, esp->host, "Reconnect IRQ1 timeout\n"); return NULL; } esp->sreg = esp_read8(ESP_STATUS); esp->ireg = esp_read8(ESP_INTRPT); esp_log_reconnect("IRQ(%d:%x:%x), ", i, esp->ireg, esp->sreg); if (esp->ireg & ESP_INTR_DC) { shost_printk(KERN_ERR, esp->host, "Reconnect, got disconnect.\n"); return NULL; } if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { shost_printk(KERN_ERR, esp->host, "Reconnect, not MIP sreg[%02x].\n", esp->sreg); return NULL; } /* DMA in the tag bytes... */ esp->command_block[0] = 0xff; esp->command_block[1] = 0xff; esp->ops->send_dma_cmd(esp, esp->command_block_dma, 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI); /* ACK the message. */ scsi_esp_cmd(esp, ESP_CMD_MOK); for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) { if (esp->ops->irq_pending(esp)) { esp->sreg = esp_read8(ESP_STATUS); esp->ireg = esp_read8(ESP_INTRPT); if (esp->ireg & ESP_INTR_FDONE) break; } udelay(1); } if (i == ESP_RESELECT_TAG_LIMIT) { shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n"); return NULL; } esp->ops->dma_drain(esp); esp->ops->dma_invalidate(esp); esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n", i, esp->ireg, esp->sreg, esp->command_block[0], esp->command_block[1]); if (esp->command_block[0] < SIMPLE_QUEUE_TAG || esp->command_block[0] > ORDERED_QUEUE_TAG) { shost_printk(KERN_ERR, esp->host, "Reconnect, bad tag type %02x.\n", esp->command_block[0]); return NULL; } ent = lp->tagged_cmds[esp->command_block[1]]; if (!ent) { shost_printk(KERN_ERR, esp->host, "Reconnect, no entry for tag %02x.\n", esp->command_block[1]); return NULL; } return ent; } static int esp_reconnect(struct esp *esp) { struct esp_cmd_entry *ent; struct esp_target_data *tp; struct esp_lun_data *lp; struct scsi_device *dev; int target, lun; BUG_ON(esp->active_cmd); if (esp->rev == FASHME) { /* FASHME puts the target and lun numbers directly * into the fifo. 
*/ target = esp->fifo[0]; lun = esp->fifo[1] & 0x7; } else { u8 bits = esp_read8(ESP_FDATA); /* Older chips put the lun directly into the fifo, but * the target is given as a sample of the arbitration * lines on the bus at reselection time. So we should * see the ID of the ESP and the one reconnecting target * set in the bitmap. */ if (!(bits & esp->scsi_id_mask)) goto do_reset; bits &= ~esp->scsi_id_mask; if (!bits || (bits & (bits - 1))) goto do_reset; target = ffs(bits) - 1; lun = (esp_read8(ESP_FDATA) & 0x7); scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (esp->rev == ESP100) { u8 ireg = esp_read8(ESP_INTRPT); /* This chip has a bug during reselection that can * cause a spurious illegal-command interrupt, which * we simply ACK here. Another possibility is a bus * reset so we must check for that. */ if (ireg & ESP_INTR_SR) goto do_reset; } scsi_esp_cmd(esp, ESP_CMD_NULL); } esp_write_tgt_sync(esp, target); esp_write_tgt_config3(esp, target); scsi_esp_cmd(esp, ESP_CMD_MOK); if (esp->rev == FASHME) esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, ESP_BUSID); tp = &esp->target[target]; dev = __scsi_device_lookup_by_target(tp->starget, lun); if (!dev) { shost_printk(KERN_ERR, esp->host, "Reconnect, no lp tgt[%u] lun[%u]\n", target, lun); goto do_reset; } lp = dev->hostdata; ent = lp->non_tagged_cmd; if (!ent) { ent = esp_reconnect_with_tag(esp, lp); if (!ent) goto do_reset; } esp->active_cmd = ent; esp_event(esp, ESP_EVENT_CHECK_PHASE); esp_restore_pointers(esp, ent); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; return 1; do_reset: esp_schedule_reset(esp); return 0; } static int esp_finish_select(struct esp *esp) { struct esp_cmd_entry *ent; struct scsi_cmnd *cmd; /* No longer selecting. */ esp->select_state = ESP_SELECT_NONE; esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS; ent = esp->active_cmd; cmd = ent->cmd; if (esp->ops->dma_error(esp)) { /* If we see a DMA error during or as a result of selection, * all bets are off. */ esp_schedule_reset(esp); esp_cmd_is_done(esp, ent, cmd, DID_ERROR); return 0; } esp->ops->dma_invalidate(esp); if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { struct esp_target_data *tp = &esp->target[cmd->device->id]; /* Carefully back out of the selection attempt. Release * resources (such as DMA mapping & TAG) and reset state (such * as message out and command delivery variables). */ if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { esp_unmap_dma(esp, cmd); esp_free_lun_tag(ent, cmd->device->hostdata); tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); esp->cmd_bytes_ptr = NULL; esp->cmd_bytes_left = 0; } else { esp_unmap_sense(esp, ent); } /* Now that the state is unwound properly, put back onto * the issue queue. This command is no longer active. */ list_move(&ent->list, &esp->queued_cmds); esp->active_cmd = NULL; /* Return value ignored by caller, it directly invokes * esp_reconnect(). */ return 0; } if (esp->ireg == ESP_INTR_DC) { struct scsi_device *dev = cmd->device; /* Disconnect. Make sure we re-negotiate sync and * wide parameters if this target starts responding * again in the future. */ esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; scsi_esp_cmd(esp, ESP_CMD_ESEL); esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET); return 1; } if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { /* Selection successful. On pre-FAST chips we have * to do a NOP and possibly clean out the FIFO. 
*/ if (esp->rev <= ESP236) { int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; scsi_esp_cmd(esp, ESP_CMD_NULL); if (!fcnt && (!esp->prev_soff || ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) esp_flush_fifo(esp); } /* If we are doing a Select And Stop command, negotiation, etc. * we'll do the right thing as we transition to the next phase. */ esp_event(esp, ESP_EVENT_CHECK_PHASE); return 0; } shost_printk(KERN_INFO, esp->host, "Unexpected selection completion ireg[%x]\n", esp->ireg); esp_schedule_reset(esp); return 0; } static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, struct scsi_cmnd *cmd) { int fifo_cnt, ecount, bytes_sent, flush_fifo; fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) fifo_cnt <<= 1; ecount = 0; if (!(esp->sreg & ESP_STAT_TCNT)) { ecount = ((unsigned int)esp_read8(ESP_TCLOW) | (((unsigned int)esp_read8(ESP_TCMED)) << 8)); if (esp->rev == FASHME) ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB)) ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16; } bytes_sent = esp->data_dma_len; bytes_sent -= ecount; bytes_sent -= esp->send_cmd_residual; /* * The am53c974 has a DMA 'peculiarity'. The doc states: * In some odd byte conditions, one residual byte will * be left in the SCSI FIFO, and the FIFO Flags will * never count to '0 '. When this happens, the residual * byte should be retrieved via PIO following completion * of the BLAST operation. */ if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) { size_t count = 1; size_t offset = bytes_sent; u8 bval = esp_read8(ESP_FDATA); if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) ent->sense_ptr[bytes_sent] = bval; else { struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); u8 *ptr; ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg, &offset, &count); if (likely(ptr)) { *(ptr + offset) = bval; scsi_kunmap_atomic_sg(ptr); } } bytes_sent += fifo_cnt; ent->flags &= ~ESP_CMD_FLAG_RESIDUAL; } if (!(ent->flags & ESP_CMD_FLAG_WRITE)) bytes_sent -= fifo_cnt; flush_fifo = 0; if (!esp->prev_soff) { /* Synchronous data transfer, always flush fifo. */ flush_fifo = 1; } else { if (esp->rev == ESP100) { u32 fflags, phase; /* ESP100 has a chip bug where in the synchronous data * phase it can mistake a final long REQ pulse from the * target as an extra data byte. Fun. * * To detect this case we resample the status register * and fifo flags. If we're still in a data phase and * we see spurious chunks in the fifo, we return error * to the caller which should reset and set things up * such that we only try future transfers to this * target in synchronous mode. */ esp->sreg = esp_read8(ESP_STATUS); phase = esp->sreg & ESP_STAT_PMASK; fflags = esp_read8(ESP_FFLAGS); if ((phase == ESP_DOP && (fflags & ESP_FF_ONOTZERO)) || (phase == ESP_DIP && (fflags & ESP_FF_FBYTES))) return -1; } if (!(ent->flags & ESP_CMD_FLAG_WRITE)) flush_fifo = 1; } if (flush_fifo) esp_flush_fifo(esp); return bytes_sent; } static void esp_setsync(struct esp *esp, struct esp_target_data *tp, u8 scsi_period, u8 scsi_offset, u8 esp_stp, u8 esp_soff) { spi_period(tp->starget) = scsi_period; spi_offset(tp->starget) = scsi_offset; spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 
1 : 0; if (esp_soff) { esp_stp &= 0x1f; esp_soff |= esp->radelay; if (esp->rev >= FAS236) { u8 bit = ESP_CONFIG3_FSCSI; if (esp->rev >= FAS100A) bit = ESP_CONFIG3_FAST; if (scsi_period < 50) { if (esp->rev == FASHME) esp_soff &= ~esp->radelay; tp->esp_config3 |= bit; } else { tp->esp_config3 &= ~bit; } esp->prev_cfg3 = tp->esp_config3; esp_write8(esp->prev_cfg3, ESP_CFG3); } } tp->esp_period = esp->prev_stp = esp_stp; tp->esp_offset = esp->prev_soff = esp_soff; esp_write8(esp_soff, ESP_SOFF); esp_write8(esp_stp, ESP_STP); tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); spi_display_xfer_agreement(tp->starget); } static void esp_msgin_reject(struct esp *esp) { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; struct esp_target_data *tp; int tgt; tgt = cmd->device->id; tp = &esp->target[tgt]; if (tp->flags & ESP_TGT_NEGO_WIDE) { tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE); if (!esp_need_to_nego_sync(tp)) { tp->flags &= ~ESP_TGT_CHECK_NEGO; scsi_esp_cmd(esp, ESP_CMD_RATN); } else { esp->msg_out_len = spi_populate_sync_msg(&esp->msg_out[0], tp->nego_goal_period, tp->nego_goal_offset); tp->flags |= ESP_TGT_NEGO_SYNC; scsi_esp_cmd(esp, ESP_CMD_SATN); } return; } if (tp->flags & ESP_TGT_NEGO_SYNC) { tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); tp->esp_period = 0; tp->esp_offset = 0; esp_setsync(esp, tp, 0, 0, 0, 0); scsi_esp_cmd(esp, ESP_CMD_RATN); return; } shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n"); esp_schedule_reset(esp); } static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) { u8 period = esp->msg_in[3]; u8 offset = esp->msg_in[4]; u8 stp; if (!(tp->flags & ESP_TGT_NEGO_SYNC)) goto do_reject; if (offset > 15) goto do_reject; if (offset) { int one_clock; if (period > esp->max_period) { period = offset = 0; goto do_sdtr; } if (period < esp->min_period) goto do_reject; one_clock = esp->ccycle / 1000; stp = DIV_ROUND_UP(period << 2, one_clock); if (stp && esp->rev >= FAS236) { if (stp >= 50) stp--; } } else { stp = 0; } esp_setsync(esp, tp, period, offset, stp, offset); return; do_reject: esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); return; do_sdtr: tp->nego_goal_period = period; tp->nego_goal_offset = offset; esp->msg_out_len = spi_populate_sync_msg(&esp->msg_out[0], tp->nego_goal_period, tp->nego_goal_offset); scsi_esp_cmd(esp, ESP_CMD_SATN); } static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp) { int size = 8 << esp->msg_in[3]; u8 cfg3; if (esp->rev != FASHME) goto do_reject; if (size != 8 && size != 16) goto do_reject; if (!(tp->flags & ESP_TGT_NEGO_WIDE)) goto do_reject; cfg3 = tp->esp_config3; if (size == 16) { tp->flags |= ESP_TGT_WIDE; cfg3 |= ESP_CONFIG3_EWIDE; } else { tp->flags &= ~ESP_TGT_WIDE; cfg3 &= ~ESP_CONFIG3_EWIDE; } tp->esp_config3 = cfg3; esp->prev_cfg3 = cfg3; esp_write8(cfg3, ESP_CFG3); tp->flags &= ~ESP_TGT_NEGO_WIDE; spi_period(tp->starget) = 0; spi_offset(tp->starget) = 0; if (!esp_need_to_nego_sync(tp)) { tp->flags &= ~ESP_TGT_CHECK_NEGO; scsi_esp_cmd(esp, ESP_CMD_RATN); } else { esp->msg_out_len = spi_populate_sync_msg(&esp->msg_out[0], tp->nego_goal_period, tp->nego_goal_offset); tp->flags |= ESP_TGT_NEGO_SYNC; scsi_esp_cmd(esp, ESP_CMD_SATN); } return; do_reject: esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); } static void esp_msgin_extended(struct esp *esp) { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; struct esp_target_data *tp; int 
tgt = cmd->device->id; tp = &esp->target[tgt]; if (esp->msg_in[2] == EXTENDED_SDTR) { esp_msgin_sdtr(esp, tp); return; } if (esp->msg_in[2] == EXTENDED_WDTR) { esp_msgin_wdtr(esp, tp); return; } shost_printk(KERN_INFO, esp->host, "Unexpected extended msg type %x\n", esp->msg_in[2]); esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); } /* Analyze msgin bytes received from target so far. Return non-zero * if there are more bytes needed to complete the message. */ static int esp_msgin_process(struct esp *esp) { u8 msg0 = esp->msg_in[0]; int len = esp->msg_in_len; if (msg0 & 0x80) { /* Identify */ shost_printk(KERN_INFO, esp->host, "Unexpected msgin identify\n"); return 0; } switch (msg0) { case EXTENDED_MESSAGE: if (len == 1) return 1; if (len < esp->msg_in[1] + 2) return 1; esp_msgin_extended(esp); return 0; case IGNORE_WIDE_RESIDUE: { struct esp_cmd_entry *ent; struct esp_cmd_priv *spriv; if (len == 1) return 1; if (esp->msg_in[1] != 1) goto do_reject; ent = esp->active_cmd; spriv = ESP_CMD_PRIV(ent->cmd); if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) { spriv->cur_sg = spriv->prv_sg; spriv->cur_residue = 1; } else spriv->cur_residue++; spriv->tot_residue++; return 0; } case NOP: return 0; case RESTORE_POINTERS: esp_restore_pointers(esp, esp->active_cmd); return 0; case SAVE_POINTERS: esp_save_pointers(esp, esp->active_cmd); return 0; case COMMAND_COMPLETE: case DISCONNECT: { struct esp_cmd_entry *ent = esp->active_cmd; ent->message = msg0; esp_event(esp, ESP_EVENT_FREE_BUS); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; return 0; } case MESSAGE_REJECT: esp_msgin_reject(esp); return 0; default: do_reject: esp->msg_out[0] = MESSAGE_REJECT; esp->msg_out_len = 1; scsi_esp_cmd(esp, ESP_CMD_SATN); return 0; } } static int esp_process_event(struct esp *esp) { int write, i; again: write = 0; esp_log_event("process event %d phase %x\n", esp->event, esp->sreg & ESP_STAT_PMASK); switch (esp->event) { case ESP_EVENT_CHECK_PHASE: switch (esp->sreg & ESP_STAT_PMASK) { case ESP_DOP: esp_event(esp, ESP_EVENT_DATA_OUT); break; case ESP_DIP: esp_event(esp, ESP_EVENT_DATA_IN); break; case ESP_STATP: esp_flush_fifo(esp); scsi_esp_cmd(esp, ESP_CMD_ICCSEQ); esp_event(esp, ESP_EVENT_STATUS); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; return 1; case ESP_MOP: esp_event(esp, ESP_EVENT_MSGOUT); break; case ESP_MIP: esp_event(esp, ESP_EVENT_MSGIN); break; case ESP_CMDP: esp_event(esp, ESP_EVENT_CMD_START); break; default: shost_printk(KERN_INFO, esp->host, "Unexpected phase, sreg=%02x\n", esp->sreg); esp_schedule_reset(esp); return 0; } goto again; case ESP_EVENT_DATA_IN: write = 1; fallthrough; case ESP_EVENT_DATA_OUT: { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); unsigned int dma_len = esp_cur_dma_len(ent, cmd); if (esp->rev == ESP100) scsi_esp_cmd(esp, ESP_CMD_NULL); if (write) ent->flags |= ESP_CMD_FLAG_WRITE; else ent->flags &= ~ESP_CMD_FLAG_WRITE; if (esp->ops->dma_length_limit) dma_len = esp->ops->dma_length_limit(esp, dma_addr, dma_len); else dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); esp->data_dma_len = dma_len; if (!dma_len) { shost_printk(KERN_ERR, esp->host, "DMA length is zero!\n"); shost_printk(KERN_ERR, esp->host, "cur adr[%08llx] len[%08x]\n", (unsigned long long)esp_cur_dma_addr(ent, cmd), esp_cur_dma_len(ent, cmd)); esp_schedule_reset(esp); return 0; } esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n", (unsigned long long)dma_addr, dma_len, write); 
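		/*
		 * Kick off the data phase: program the bus-specific DMA engine
		 * with the clamped length computed above and issue a Transfer
		 * Information command with the DMA bit set.  Completion (or an
		 * error) shows up as a bus-service interrupt and is handled
		 * under ESP_EVENT_DATA_DONE below.
		 */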
esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, write, ESP_CMD_DMA | ESP_CMD_TI); esp_event(esp, ESP_EVENT_DATA_DONE); break; } case ESP_EVENT_DATA_DONE: { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; int bytes_sent; if (esp->ops->dma_error(esp)) { shost_printk(KERN_INFO, esp->host, "data done, DMA error, resetting\n"); esp_schedule_reset(esp); return 0; } if (ent->flags & ESP_CMD_FLAG_WRITE) { /* XXX parity errors, etc. XXX */ esp->ops->dma_drain(esp); } esp->ops->dma_invalidate(esp); if (esp->ireg != ESP_INTR_BSERV) { /* We should always see exactly a bus-service * interrupt at the end of a successful transfer. */ shost_printk(KERN_INFO, esp->host, "data done, not BSERV, resetting\n"); esp_schedule_reset(esp); return 0; } bytes_sent = esp_data_bytes_sent(esp, ent, cmd); esp_log_datadone("data done flgs[%x] sent[%d]\n", ent->flags, bytes_sent); if (bytes_sent < 0) { /* XXX force sync mode for this target XXX */ esp_schedule_reset(esp); return 0; } esp_advance_dma(esp, ent, cmd, bytes_sent); esp_event(esp, ESP_EVENT_CHECK_PHASE); goto again; } case ESP_EVENT_STATUS: { struct esp_cmd_entry *ent = esp->active_cmd; if (esp->ireg & ESP_INTR_FDONE) { ent->status = esp_read8(ESP_FDATA); ent->message = esp_read8(ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_MOK); } else if (esp->ireg == ESP_INTR_BSERV) { ent->status = esp_read8(ESP_FDATA); ent->message = 0xff; esp_event(esp, ESP_EVENT_MSGIN); return 0; } if (ent->message != COMMAND_COMPLETE) { shost_printk(KERN_INFO, esp->host, "Unexpected message %x in status\n", ent->message); esp_schedule_reset(esp); return 0; } esp_event(esp, ESP_EVENT_FREE_BUS); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; break; } case ESP_EVENT_FREE_BUS: { struct esp_cmd_entry *ent = esp->active_cmd; struct scsi_cmnd *cmd = ent->cmd; if (ent->message == COMMAND_COMPLETE || ent->message == DISCONNECT) scsi_esp_cmd(esp, ESP_CMD_ESEL); if (ent->message == COMMAND_COMPLETE) { esp_log_cmddone("Command done status[%x] message[%x]\n", ent->status, ent->message); if (ent->status == SAM_STAT_TASK_SET_FULL) esp_event_queue_full(esp, ent); if (ent->status == SAM_STAT_CHECK_CONDITION && !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { ent->flags |= ESP_CMD_FLAG_AUTOSENSE; esp_autosense(esp, ent); } else { esp_cmd_is_done(esp, ent, cmd, DID_OK); } } else if (ent->message == DISCONNECT) { esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n", cmd->device->id, ent->tag[0], ent->tag[1]); esp->active_cmd = NULL; esp_maybe_execute_command(esp); } else { shost_printk(KERN_INFO, esp->host, "Unexpected message %x in freebus\n", ent->message); esp_schedule_reset(esp); return 0; } if (esp->active_cmd) esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; break; } case ESP_EVENT_MSGOUT: { scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (esp_debug & ESP_DEBUG_MSGOUT) { int i; printk("ESP: Sending message [ "); for (i = 0; i < esp->msg_out_len; i++) printk("%02x ", esp->msg_out[i]); printk("]\n"); } if (esp->rev == FASHME) { int i; /* Always use the fifo. */ for (i = 0; i < esp->msg_out_len; i++) { esp_write8(esp->msg_out[i], ESP_FDATA); esp_write8(0, ESP_FDATA); } scsi_esp_cmd(esp, ESP_CMD_TI); } else { if (esp->msg_out_len == 1) { esp_write8(esp->msg_out[0], ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_TI); } else if (esp->flags & ESP_FLAG_USE_FIFO) { for (i = 0; i < esp->msg_out_len; i++) esp_write8(esp->msg_out[i], ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_TI); } else { /* Use DMA. 
*/ memcpy(esp->command_block, esp->msg_out, esp->msg_out_len); esp->ops->send_dma_cmd(esp, esp->command_block_dma, esp->msg_out_len, esp->msg_out_len, 0, ESP_CMD_DMA|ESP_CMD_TI); } } esp_event(esp, ESP_EVENT_MSGOUT_DONE); break; } case ESP_EVENT_MSGOUT_DONE: if (esp->rev == FASHME) { scsi_esp_cmd(esp, ESP_CMD_FLUSH); } else { if (esp->msg_out_len > 1) esp->ops->dma_invalidate(esp); /* XXX if the chip went into disconnected mode, * we can't run the phase state machine anyway. */ if (!(esp->ireg & ESP_INTR_DC)) scsi_esp_cmd(esp, ESP_CMD_NULL); } esp->msg_out_len = 0; esp_event(esp, ESP_EVENT_CHECK_PHASE); goto again; case ESP_EVENT_MSGIN: if (esp->ireg & ESP_INTR_BSERV) { if (esp->rev == FASHME) { if (!(esp_read8(ESP_STATUS2) & ESP_STAT2_FEMPTY)) scsi_esp_cmd(esp, ESP_CMD_FLUSH); } else { scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (esp->rev == ESP100) scsi_esp_cmd(esp, ESP_CMD_NULL); } scsi_esp_cmd(esp, ESP_CMD_TI); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; return 1; } if (esp->ireg & ESP_INTR_FDONE) { u8 val; if (esp->rev == FASHME) val = esp->fifo[0]; else val = esp_read8(ESP_FDATA); esp->msg_in[esp->msg_in_len++] = val; esp_log_msgin("Got msgin byte %x\n", val); if (!esp_msgin_process(esp)) esp->msg_in_len = 0; if (esp->rev == FASHME) scsi_esp_cmd(esp, ESP_CMD_FLUSH); scsi_esp_cmd(esp, ESP_CMD_MOK); /* Check whether a bus reset is to be done next */ if (esp->event == ESP_EVENT_RESET) return 0; if (esp->event != ESP_EVENT_FREE_BUS) esp_event(esp, ESP_EVENT_CHECK_PHASE); } else { shost_printk(KERN_INFO, esp->host, "MSGIN neither BSERV not FDON, resetting"); esp_schedule_reset(esp); return 0; } break; case ESP_EVENT_CMD_START: memcpy(esp->command_block, esp->cmd_bytes_ptr, esp->cmd_bytes_left); esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI); esp_event(esp, ESP_EVENT_CMD_DONE); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; break; case ESP_EVENT_CMD_DONE: esp->ops->dma_invalidate(esp); if (esp->ireg & ESP_INTR_BSERV) { esp_event(esp, ESP_EVENT_CHECK_PHASE); goto again; } esp_schedule_reset(esp); return 0; case ESP_EVENT_RESET: scsi_esp_cmd(esp, ESP_CMD_RS); break; default: shost_printk(KERN_INFO, esp->host, "Unexpected event %x, resetting\n", esp->event); esp_schedule_reset(esp); return 0; } return 1; } static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent) { struct scsi_cmnd *cmd = ent->cmd; esp_unmap_dma(esp, cmd); esp_free_lun_tag(ent, cmd->device->hostdata); cmd->result = DID_RESET << 16; if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) esp_unmap_sense(esp, ent); scsi_done(cmd); list_del(&ent->list); esp_put_ent(esp, ent); } static void esp_clear_hold(struct scsi_device *dev, void *data) { struct esp_lun_data *lp = dev->hostdata; BUG_ON(lp->num_tagged); lp->hold = 0; } static void esp_reset_cleanup(struct esp *esp) { struct esp_cmd_entry *ent, *tmp; int i; list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) { struct scsi_cmnd *cmd = ent->cmd; list_del(&ent->list); cmd->result = DID_RESET << 16; scsi_done(cmd); esp_put_ent(esp, ent); } list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) { if (ent == esp->active_cmd) esp->active_cmd = NULL; esp_reset_cleanup_one(esp, ent); } BUG_ON(esp->active_cmd != NULL); /* Force renegotiation of sync/wide transfers. 
*/ for (i = 0; i < ESP_MAX_TARGET; i++) { struct esp_target_data *tp = &esp->target[i]; tp->esp_period = 0; tp->esp_offset = 0; tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE | ESP_CONFIG3_FSCSI | ESP_CONFIG3_FAST); tp->flags &= ~ESP_TGT_WIDE; tp->flags |= ESP_TGT_CHECK_NEGO; if (tp->starget) __starget_for_each_device(tp->starget, NULL, esp_clear_hold); } esp->flags &= ~ESP_FLAG_RESETTING; } /* Runs under host->lock */ static void __esp_interrupt(struct esp *esp) { int finish_reset, intr_done; u8 phase; /* * Once INTRPT is read STATUS and SSTEP are cleared. */ esp->sreg = esp_read8(ESP_STATUS); esp->seqreg = esp_read8(ESP_SSTEP); esp->ireg = esp_read8(ESP_INTRPT); if (esp->flags & ESP_FLAG_RESETTING) { finish_reset = 1; } else { if (esp_check_gross_error(esp)) return; finish_reset = esp_check_spur_intr(esp); if (finish_reset < 0) return; } if (esp->ireg & ESP_INTR_SR) finish_reset = 1; if (finish_reset) { esp_reset_cleanup(esp); if (esp->eh_reset) { complete(esp->eh_reset); esp->eh_reset = NULL; } return; } phase = (esp->sreg & ESP_STAT_PMASK); if (esp->rev == FASHME) { if (((phase != ESP_DIP && phase != ESP_DOP) && esp->select_state == ESP_SELECT_NONE && esp->event != ESP_EVENT_STATUS && esp->event != ESP_EVENT_DATA_DONE) || (esp->ireg & ESP_INTR_RSEL)) { esp->sreg2 = esp_read8(ESP_STATUS2); if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || (esp->sreg2 & ESP_STAT2_F1BYTE)) hme_read_fifo(esp); } } esp_log_intr("intr sreg[%02x] seqreg[%02x] " "sreg2[%02x] ireg[%02x]\n", esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); intr_done = 0; if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { shost_printk(KERN_INFO, esp->host, "unexpected IREG %02x\n", esp->ireg); if (esp->ireg & ESP_INTR_IC) esp_dump_cmd_log(esp); esp_schedule_reset(esp); } else { if (esp->ireg & ESP_INTR_RSEL) { if (esp->active_cmd) (void) esp_finish_select(esp); intr_done = esp_reconnect(esp); } else { /* Some combination of FDONE, BSERV, DC. */ if (esp->select_state != ESP_SELECT_NONE) intr_done = esp_finish_select(esp); } } while (!intr_done) intr_done = esp_process_event(esp); } irqreturn_t scsi_esp_intr(int irq, void *dev_id) { struct esp *esp = dev_id; unsigned long flags; irqreturn_t ret; spin_lock_irqsave(esp->host->host_lock, flags); ret = IRQ_NONE; if (esp->ops->irq_pending(esp)) { ret = IRQ_HANDLED; for (;;) { int i; __esp_interrupt(esp); if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK)) break; esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK; for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { if (esp->ops->irq_pending(esp)) break; } if (i == ESP_QUICKIRQ_LIMIT) break; } } spin_unlock_irqrestore(esp->host->host_lock, flags); return ret; } EXPORT_SYMBOL(scsi_esp_intr); static void esp_get_revision(struct esp *esp) { u8 val; esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); if (esp->config2 == 0) { esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); esp_write8(esp->config2, ESP_CFG2); val = esp_read8(ESP_CFG2); val &= ~ESP_CONFIG2_MAGIC; esp->config2 = 0; if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { /* * If what we write to cfg2 does not come back, * cfg2 is not implemented. * Therefore this must be a plain esp100. */ esp->rev = ESP100; return; } } esp_set_all_config3(esp, 5); esp->prev_cfg3 = 5; esp_write8(esp->config2, ESP_CFG2); esp_write8(0, ESP_CFG3); esp_write8(esp->prev_cfg3, ESP_CFG3); val = esp_read8(ESP_CFG3); if (val != 5) { /* The cfg2 register is implemented, however * cfg3 is not, must be esp100a. 
*/ esp->rev = ESP100A; } else { esp_set_all_config3(esp, 0); esp->prev_cfg3 = 0; esp_write8(esp->prev_cfg3, ESP_CFG3); /* All of cfg{1,2,3} implemented, must be one of * the fas variants, figure out which one. */ if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { esp->rev = FAST; esp->sync_defp = SYNC_DEFP_FAST; } else { esp->rev = ESP236; } } } static void esp_init_swstate(struct esp *esp) { int i; INIT_LIST_HEAD(&esp->queued_cmds); INIT_LIST_HEAD(&esp->active_cmds); INIT_LIST_HEAD(&esp->esp_cmd_pool); /* Start with a clear state, domain validation (via ->slave_configure, * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged * commands. */ for (i = 0 ; i < ESP_MAX_TARGET; i++) { esp->target[i].flags = 0; esp->target[i].nego_goal_period = 0; esp->target[i].nego_goal_offset = 0; esp->target[i].nego_goal_width = 0; esp->target[i].nego_goal_tags = 0; } } /* This places the ESP into a known state at boot time. */ static void esp_bootup_reset(struct esp *esp) { u8 val; /* Reset the DMA */ esp->ops->reset_dma(esp); /* Reset the ESP */ esp_reset_esp(esp); /* Reset the SCSI bus, but tell ESP not to generate an irq */ val = esp_read8(ESP_CFG1); val |= ESP_CONFIG1_SRRDISAB; esp_write8(val, ESP_CFG1); scsi_esp_cmd(esp, ESP_CMD_RS); udelay(400); esp_write8(esp->config1, ESP_CFG1); /* Eat any bitrot in the chip and we are done... */ esp_read8(ESP_INTRPT); } static void esp_set_clock_params(struct esp *esp) { int fhz; u8 ccf; /* This is getting messy but it has to be done correctly or else * you get weird behavior all over the place. We are trying to * basically figure out three pieces of information. * * a) Clock Conversion Factor * * This is a representation of the input crystal clock frequency * going into the ESP on this machine. Any operation whose timing * is longer than 400ns depends on this value being correct. For * example, you'll get blips for arbitration/selection during high * load or with multiple targets if this is not set correctly. * * b) Selection Time-Out * * The ESP isn't very bright and will arbitrate for the bus and try * to select a target forever if you let it. This value tells the * ESP when it has taken too long to negotiate and that it should * interrupt the CPU so we can see what happened. The value is * computed as follows (from NCR/Symbios chip docs). * * (Time Out Period) * (Input Clock) * STO = ---------------------------------- * (8192) * (Clock Conversion Factor) * * We use a time out period of 250ms (ESP_BUS_TIMEOUT). * * c) Imperical constants for synchronous offset and transfer period * register values * * This entails the smallest and largest sync period we could ever * handle on this ESP. */ fhz = esp->cfreq; ccf = ((fhz / 1000000) + 4) / 5; if (ccf == 1) ccf = 2; /* If we can't find anything reasonable, just assume 20MHZ. * This is the clock frequency of the older sun4c's where I've * been unable to find the clock-frequency PROM property. All * other machines provide useful values it seems. */ if (fhz <= 5000000 || ccf < 1 || ccf > 8) { fhz = 20000000; ccf = 4; } esp->cfact = (ccf == 8 ? 
0 : ccf); esp->cfreq = fhz; esp->ccycle = ESP_HZ_TO_CYCLE(fhz); esp->ctick = ESP_TICK(ccf, esp->ccycle); esp->neg_defp = ESP_NEG_DEFP(fhz, ccf); esp->sync_defp = SYNC_DEFP_SLOW; } static const char *esp_chip_names[] = { "ESP100", "ESP100A", "ESP236", "FAS236", "AM53C974", "53CF9x-2", "FAS100A", "FAST", "FASHME", }; static struct scsi_transport_template *esp_transport_template; int scsi_esp_register(struct esp *esp) { static int instance; int err; if (!esp->num_tags) esp->num_tags = ESP_DEFAULT_TAGS; esp->host->transportt = esp_transport_template; esp->host->max_lun = ESP_MAX_LUN; esp->host->cmd_per_lun = 2; esp->host->unique_id = instance; esp_set_clock_params(esp); esp_get_revision(esp); esp_init_swstate(esp); esp_bootup_reset(esp); dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n", esp->host->unique_id, esp->regs, esp->dma_regs, esp->host->irq); dev_printk(KERN_INFO, esp->dev, "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n", esp->host->unique_id, esp_chip_names[esp->rev], esp->cfreq / 1000000, esp->cfact, esp->scsi_id); /* Let the SCSI bus reset settle. */ ssleep(esp_bus_reset_settle); err = scsi_add_host(esp->host, esp->dev); if (err) return err; instance++; scsi_scan_host(esp->host); return 0; } EXPORT_SYMBOL(scsi_esp_register); void scsi_esp_unregister(struct esp *esp) { scsi_remove_host(esp->host); } EXPORT_SYMBOL(scsi_esp_unregister); static int esp_target_alloc(struct scsi_target *starget) { struct esp *esp = shost_priv(dev_to_shost(&starget->dev)); struct esp_target_data *tp = &esp->target[starget->id]; tp->starget = starget; return 0; } static void esp_target_destroy(struct scsi_target *starget) { struct esp *esp = shost_priv(dev_to_shost(&starget->dev)); struct esp_target_data *tp = &esp->target[starget->id]; tp->starget = NULL; } static int esp_slave_alloc(struct scsi_device *dev) { struct esp *esp = shost_priv(dev->host); struct esp_target_data *tp = &esp->target[dev->id]; struct esp_lun_data *lp; lp = kzalloc(sizeof(*lp), GFP_KERNEL); if (!lp) return -ENOMEM; dev->hostdata = lp; spi_min_period(tp->starget) = esp->min_period; spi_max_offset(tp->starget) = 15; if (esp->flags & ESP_FLAG_WIDE_CAPABLE) spi_max_width(tp->starget) = 1; else spi_max_width(tp->starget) = 0; return 0; } static int esp_slave_configure(struct scsi_device *dev) { struct esp *esp = shost_priv(dev->host); struct esp_target_data *tp = &esp->target[dev->id]; if (dev->tagged_supported) scsi_change_queue_depth(dev, esp->num_tags); tp->flags |= ESP_TGT_DISCONNECT; if (!spi_initial_dv(dev->sdev_target)) spi_dv_device(dev); return 0; } static void esp_slave_destroy(struct scsi_device *dev) { struct esp_lun_data *lp = dev->hostdata; kfree(lp); dev->hostdata = NULL; } static int esp_eh_abort_handler(struct scsi_cmnd *cmd) { struct esp *esp = shost_priv(cmd->device->host); struct esp_cmd_entry *ent, *tmp; struct completion eh_done; unsigned long flags; /* XXX This helps a lot with debugging but might be a bit * XXX much for the final driver. 
*/ spin_lock_irqsave(esp->host->host_lock, flags); shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n", cmd, cmd->cmnd[0]); ent = esp->active_cmd; if (ent) shost_printk(KERN_ERR, esp->host, "Current command [%p:%02x]\n", ent->cmd, ent->cmd->cmnd[0]); list_for_each_entry(ent, &esp->queued_cmds, list) { shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n", ent->cmd, ent->cmd->cmnd[0]); } list_for_each_entry(ent, &esp->active_cmds, list) { shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n", ent->cmd, ent->cmd->cmnd[0]); } esp_dump_cmd_log(esp); spin_unlock_irqrestore(esp->host->host_lock, flags); spin_lock_irqsave(esp->host->host_lock, flags); ent = NULL; list_for_each_entry(tmp, &esp->queued_cmds, list) { if (tmp->cmd == cmd) { ent = tmp; break; } } if (ent) { /* Easiest case, we didn't even issue the command * yet so it is trivial to abort. */ list_del(&ent->list); cmd->result = DID_ABORT << 16; scsi_done(cmd); esp_put_ent(esp, ent); goto out_success; } init_completion(&eh_done); ent = esp->active_cmd; if (ent && ent->cmd == cmd) { /* Command is the currently active command on * the bus. If we already have an output message * pending, no dice. */ if (esp->msg_out_len) goto out_failure; /* Send out an abort, encouraging the target to * go to MSGOUT phase by asserting ATN. */ esp->msg_out[0] = ABORT_TASK_SET; esp->msg_out_len = 1; ent->eh_done = &eh_done; scsi_esp_cmd(esp, ESP_CMD_SATN); } else { /* The command is disconnected. This is not easy to * abort. For now we fail and let the scsi error * handling layer go try a scsi bus reset or host * reset. * * What we could do is put together a scsi command * solely for the purpose of sending an abort message * to the target. Coming up with all the code to * cook up scsi commands, special case them everywhere, * etc. is for questionable gain and it would be better * if the generic scsi error handling layer could do at * least some of that for us. * * Anyways this is an area for potential future improvement * in this driver. */ goto out_failure; } spin_unlock_irqrestore(esp->host->host_lock, flags); if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { spin_lock_irqsave(esp->host->host_lock, flags); ent->eh_done = NULL; spin_unlock_irqrestore(esp->host->host_lock, flags); return FAILED; } return SUCCESS; out_success: spin_unlock_irqrestore(esp->host->host_lock, flags); return SUCCESS; out_failure: /* XXX This might be a good location to set ESP_TGT_BROKEN * XXX since we know which target/lun in particular is * XXX causing trouble. */ spin_unlock_irqrestore(esp->host->host_lock, flags); return FAILED; } static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) { struct esp *esp = shost_priv(cmd->device->host); struct completion eh_reset; unsigned long flags; init_completion(&eh_reset); spin_lock_irqsave(esp->host->host_lock, flags); esp->eh_reset = &eh_reset; /* XXX This is too simple... We should add lots of * XXX checks here so that if we find that the chip is * XXX very wedged we return failure immediately so * XXX that we can perform a full chip reset. */ esp->flags |= ESP_FLAG_RESETTING; scsi_esp_cmd(esp, ESP_CMD_RS); spin_unlock_irqrestore(esp->host->host_lock, flags); ssleep(esp_bus_reset_settle); if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { spin_lock_irqsave(esp->host->host_lock, flags); esp->eh_reset = NULL; spin_unlock_irqrestore(esp->host->host_lock, flags); return FAILED; } return SUCCESS; } /* All bets are off, reset the entire device. 
*/ static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd) { struct esp *esp = shost_priv(cmd->device->host); unsigned long flags; spin_lock_irqsave(esp->host->host_lock, flags); esp_bootup_reset(esp); esp_reset_cleanup(esp); spin_unlock_irqrestore(esp->host->host_lock, flags); ssleep(esp_bus_reset_settle); return SUCCESS; } static const char *esp_info(struct Scsi_Host *host) { return "esp"; } const struct scsi_host_template scsi_esp_template = { .module = THIS_MODULE, .name = "esp", .info = esp_info, .queuecommand = esp_queuecommand, .target_alloc = esp_target_alloc, .target_destroy = esp_target_destroy, .slave_alloc = esp_slave_alloc, .slave_configure = esp_slave_configure, .slave_destroy = esp_slave_destroy, .eh_abort_handler = esp_eh_abort_handler, .eh_bus_reset_handler = esp_eh_bus_reset_handler, .eh_host_reset_handler = esp_eh_host_reset_handler, .can_queue = 7, .this_id = 7, .sg_tablesize = SG_ALL, .max_sectors = 0xffff, .skip_settle_delay = 1, .cmd_size = sizeof(struct esp_cmd_priv), }; EXPORT_SYMBOL(scsi_esp_template); static void esp_get_signalling(struct Scsi_Host *host) { struct esp *esp = shost_priv(host); enum spi_signal_type type; if (esp->flags & ESP_FLAG_DIFFERENTIAL) type = SPI_SIGNAL_HVD; else type = SPI_SIGNAL_SE; spi_signalling(host) = type; } static void esp_set_offset(struct scsi_target *target, int offset) { struct Scsi_Host *host = dev_to_shost(target->dev.parent); struct esp *esp = shost_priv(host); struct esp_target_data *tp = &esp->target[target->id]; if (esp->flags & ESP_FLAG_DISABLE_SYNC) tp->nego_goal_offset = 0; else tp->nego_goal_offset = offset; tp->flags |= ESP_TGT_CHECK_NEGO; } static void esp_set_period(struct scsi_target *target, int period) { struct Scsi_Host *host = dev_to_shost(target->dev.parent); struct esp *esp = shost_priv(host); struct esp_target_data *tp = &esp->target[target->id]; tp->nego_goal_period = period; tp->flags |= ESP_TGT_CHECK_NEGO; } static void esp_set_width(struct scsi_target *target, int width) { struct Scsi_Host *host = dev_to_shost(target->dev.parent); struct esp *esp = shost_priv(host); struct esp_target_data *tp = &esp->target[target->id]; tp->nego_goal_width = (width ? 1 : 0); tp->flags |= ESP_TGT_CHECK_NEGO; } static struct spi_function_template esp_transport_ops = { .set_offset = esp_set_offset, .show_offset = 1, .set_period = esp_set_period, .show_period = 1, .set_width = esp_set_width, .show_width = 1, .get_signalling = esp_get_signalling, }; static int __init esp_init(void) { esp_transport_template = spi_attach_transport(&esp_transport_ops); if (!esp_transport_template) return -ENODEV; return 0; } static void __exit esp_exit(void) { spi_release_transport(esp_transport_template); } MODULE_DESCRIPTION("ESP SCSI driver core"); MODULE_AUTHOR("David S. 
Miller <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); module_param(esp_bus_reset_settle, int, 0); MODULE_PARM_DESC(esp_bus_reset_settle, "ESP scsi bus reset delay in seconds"); module_param(esp_debug, int, 0); MODULE_PARM_DESC(esp_debug, "ESP bitmapped debugging message enable value:\n" " 0x00000001 Log interrupt events\n" " 0x00000002 Log scsi commands\n" " 0x00000004 Log resets\n" " 0x00000008 Log message in events\n" " 0x00000010 Log message out events\n" " 0x00000020 Log command completion\n" " 0x00000040 Log disconnects\n" " 0x00000080 Log data start\n" " 0x00000100 Log data done\n" " 0x00000200 Log reconnects\n" " 0x00000400 Log auto-sense data\n" ); module_init(esp_init); module_exit(esp_exit); #ifdef CONFIG_SCSI_ESP_PIO static inline unsigned int esp_wait_for_fifo(struct esp *esp) { int i = 500000; do { unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; if (fbytes) return fbytes; udelay(1); } while (--i); shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n", esp_read8(ESP_STATUS)); return 0; } static inline int esp_wait_for_intr(struct esp *esp) { int i = 500000; do { esp->sreg = esp_read8(ESP_STATUS); if (esp->sreg & ESP_STAT_INTR) return 0; udelay(1); } while (--i); shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n", esp->sreg); return 1; } #define ESP_FIFO_SIZE 16 void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { u8 phase = esp->sreg & ESP_STAT_PMASK; cmd &= ~ESP_CMD_DMA; esp->send_cmd_error = 0; if (write) { u8 *dst = (u8 *)addr; u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV); scsi_esp_cmd(esp, cmd); while (1) { if (!esp_wait_for_fifo(esp)) break; *dst++ = readb(esp->fifo_reg); --esp_count; if (!esp_count) break; if (esp_wait_for_intr(esp)) { esp->send_cmd_error = 1; break; } if ((esp->sreg & ESP_STAT_PMASK) != phase) break; esp->ireg = esp_read8(ESP_INTRPT); if (esp->ireg & mask) { esp->send_cmd_error = 1; break; } if (phase == ESP_MIP) esp_write8(ESP_CMD_MOK, ESP_CMD); esp_write8(ESP_CMD_TI, ESP_CMD); } } else { unsigned int n = ESP_FIFO_SIZE; u8 *src = (u8 *)addr; scsi_esp_cmd(esp, ESP_CMD_FLUSH); if (n > esp_count) n = esp_count; writesb(esp->fifo_reg, src, n); src += n; esp_count -= n; scsi_esp_cmd(esp, cmd); while (esp_count) { if (esp_wait_for_intr(esp)) { esp->send_cmd_error = 1; break; } if ((esp->sreg & ESP_STAT_PMASK) != phase) break; esp->ireg = esp_read8(ESP_INTRPT); if (esp->ireg & ~ESP_INTR_BSERV) { esp->send_cmd_error = 1; break; } n = ESP_FIFO_SIZE - (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES); if (n > esp_count) n = esp_count; writesb(esp->fifo_reg, src, n); src += n; esp_count -= n; esp_write8(ESP_CMD_TI, ESP_CMD); } } esp->send_cmd_residual = esp_count; } EXPORT_SYMBOL(esp_send_pio_cmd); #endif
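/*
 * Worked example (illustrative only, not part of the driver): with an
 * assumed 20 MHz input crystal, esp_set_clock_params() above computes
 *
 *	ccf   = ((20000000 / 1000000) + 4) / 5 = 4	(clock conversion factor)
 *	cfact = 4					(loaded into ESP_CFACT)
 *
 * and the selection time-out formula quoted in that function, using the
 * 250 ms bus time-out, gives
 *
 *	STO = (0.25 s * 20000000 Hz) / (8192 * 4) = 5000000 / 32768 ~= 152.6
 *
 * which is the magnitude of the value derived by ESP_NEG_DEFP() and loaded
 * into ESP_TIMEO via esp->neg_defp in esp_reset_esp().
 */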
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 MediaTek Inc. * * Author: ChiYuan Huang <[email protected]> */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/usb/tcpci.h> #include <linux/usb/tcpm.h> #define MT6360_REG_PHYCTRL1 0x80 #define MT6360_REG_PHYCTRL3 0x82 #define MT6360_REG_PHYCTRL7 0x86 #define MT6360_REG_VCONNCTRL1 0x8C #define MT6360_REG_MODECTRL2 0x8F #define MT6360_REG_SWRESET 0xA0 #define MT6360_REG_DEBCTRL1 0xA1 #define MT6360_REG_DRPCTRL1 0xA2 #define MT6360_REG_DRPCTRL2 0xA3 #define MT6360_REG_I2CTORST 0xBF #define MT6360_REG_PHYCTRL11 0xCA #define MT6360_REG_RXCTRL1 0xCE #define MT6360_REG_RXCTRL2 0xCF #define MT6360_REG_CTDCTRL2 0xEC /* MT6360_REG_VCONNCTRL1 */ #define MT6360_VCONNCL_ENABLE BIT(0) /* MT6360_REG_RXCTRL2 */ #define MT6360_OPEN40M_ENABLE BIT(7) /* MT6360_REG_CTDCTRL2 */ #define MT6360_RPONESHOT_ENABLE BIT(6) struct mt6360_tcpc_info { struct tcpci_data tdata; struct tcpci *tcpci; struct device *dev; int irq; }; static inline int mt6360_tcpc_write16(struct regmap *regmap, unsigned int reg, u16 val) { return regmap_raw_write(regmap, reg, &val, sizeof(u16)); } static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata) { struct regmap *regmap = tdata->regmap; int ret; ret = regmap_write(regmap, MT6360_REG_SWRESET, 0x01); if (ret) return ret; /* after reset command, wait 1~2ms to wait IC action */ usleep_range(1000, 2000); /* write all alert to masked */ ret = mt6360_tcpc_write16(regmap, TCPC_ALERT_MASK, 0); if (ret) return ret; /* config I2C timeout reset enable , and timeout to 200ms */ ret = regmap_write(regmap, MT6360_REG_I2CTORST, 0x8F); if (ret) return ret; /* config CC Detect Debounce : 26.7*val us */ ret = regmap_write(regmap, MT6360_REG_DEBCTRL1, 0x10); if (ret) return ret; /* DRP Toggle Cycle : 51.2 + 6.4*val ms */ ret = regmap_write(regmap, MT6360_REG_DRPCTRL1, 4); if (ret) return ret; /* DRP Duyt Ctrl : dcSRC: /1024 */ ret = mt6360_tcpc_write16(regmap, MT6360_REG_DRPCTRL2, 330); if (ret) return ret; /* Enable VCONN Current Limit function */ ret = regmap_update_bits(regmap, MT6360_REG_VCONNCTRL1, MT6360_VCONNCL_ENABLE, MT6360_VCONNCL_ENABLE); if (ret) return ret; /* Enable cc open 40ms when pmic send vsysuv signal */ ret = regmap_update_bits(regmap, MT6360_REG_RXCTRL2, MT6360_OPEN40M_ENABLE, MT6360_OPEN40M_ENABLE); if (ret) return ret; /* Enable Rpdet oneshot detection */ ret = regmap_update_bits(regmap, MT6360_REG_CTDCTRL2, MT6360_RPONESHOT_ENABLE, MT6360_RPONESHOT_ENABLE); if (ret) return ret; /* BMC PHY */ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL1, 0x3A70); if (ret) return ret; ret = regmap_write(regmap, MT6360_REG_PHYCTRL3, 0x82); if (ret) return ret; ret = regmap_write(regmap, MT6360_REG_PHYCTRL7, 0x36); if (ret) return ret; ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL11, 0x3C60); if (ret) return ret; ret = regmap_write(regmap, MT6360_REG_RXCTRL1, 0xE8); if (ret) return ret; /* Set shipping mode off, AUTOIDLE on */ return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A); } static irqreturn_t mt6360_irq(int irq, void *dev_id) { struct mt6360_tcpc_info *mti = dev_id; return tcpci_irq(mti->tcpci); } static int mt6360_tcpc_probe(struct platform_device *pdev) { struct mt6360_tcpc_info *mti; int ret; mti = devm_kzalloc(&pdev->dev, sizeof(*mti), GFP_KERNEL); if (!mti) return -ENOMEM; mti->dev = &pdev->dev; mti->tdata.regmap = dev_get_regmap(pdev->dev.parent, NULL); if 
(!mti->tdata.regmap) { dev_err(&pdev->dev, "Failed to get parent regmap\n"); return -ENODEV; } mti->irq = platform_get_irq_byname(pdev, "PD_IRQB"); if (mti->irq < 0) return mti->irq; mti->tdata.init = mt6360_tcpc_init; mti->tcpci = tcpci_register_port(&pdev->dev, &mti->tdata); if (IS_ERR(mti->tcpci)) { dev_err(&pdev->dev, "Failed to register tcpci port\n"); return PTR_ERR(mti->tcpci); } ret = devm_request_threaded_irq(mti->dev, mti->irq, NULL, mt6360_irq, IRQF_ONESHOT, dev_name(&pdev->dev), mti); if (ret) { dev_err(mti->dev, "Failed to register irq\n"); tcpci_unregister_port(mti->tcpci); return ret; } device_init_wakeup(&pdev->dev, true); platform_set_drvdata(pdev, mti); return 0; } static void mt6360_tcpc_remove(struct platform_device *pdev) { struct mt6360_tcpc_info *mti = platform_get_drvdata(pdev); disable_irq(mti->irq); tcpci_unregister_port(mti->tcpci); } static int __maybe_unused mt6360_tcpc_suspend(struct device *dev) { struct mt6360_tcpc_info *mti = dev_get_drvdata(dev); if (device_may_wakeup(dev)) enable_irq_wake(mti->irq); return 0; } static int __maybe_unused mt6360_tcpc_resume(struct device *dev) { struct mt6360_tcpc_info *mti = dev_get_drvdata(dev); if (device_may_wakeup(dev)) disable_irq_wake(mti->irq); return 0; } static SIMPLE_DEV_PM_OPS(mt6360_tcpc_pm_ops, mt6360_tcpc_suspend, mt6360_tcpc_resume); static const struct of_device_id __maybe_unused mt6360_tcpc_of_id[] = { { .compatible = "mediatek,mt6360-tcpc", }, {}, }; MODULE_DEVICE_TABLE(of, mt6360_tcpc_of_id); static struct platform_driver mt6360_tcpc_driver = { .driver = { .name = "mt6360-tcpc", .pm = &mt6360_tcpc_pm_ops, .of_match_table = mt6360_tcpc_of_id, }, .probe = mt6360_tcpc_probe, .remove = mt6360_tcpc_remove, }; module_platform_driver(mt6360_tcpc_driver); MODULE_AUTHOR("ChiYuan Huang <[email protected]>"); MODULE_DESCRIPTION("MT6360 USB Type-C Port Controller Interface Driver"); MODULE_LICENSE("GPL v2");
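/*
 * Editor's illustration (not part of the driver): the timing formulas quoted
 * in the comments of mt6360_tcpc_init() above, evaluated for the values the
 * driver actually programs (0x10 into DEBCTRL1, 4 into DRPCTRL1, 330 into
 * DRPCTRL2).  The demo_* helpers are local to this sketch and simply restate
 * those comments as arithmetic.
 */
#include <stdio.h>

/* CC detect debounce: 26.7 us per LSB (MT6360_REG_DEBCTRL1). */
static double demo_cc_debounce_us(unsigned int val)
{
	return 26.7 * val;
}

/* DRP toggle cycle: 51.2 ms + 6.4 ms per LSB (MT6360_REG_DRPCTRL1). */
static double demo_drp_toggle_ms(unsigned int val)
{
	return 51.2 + 6.4 * val;
}

/* DRP duty control: dcSRC expressed as val / 1024 (MT6360_REG_DRPCTRL2). */
static double demo_drp_duty(unsigned int val)
{
	return (double)val / 1024.0;
}

int main(void)
{
	printf("CC debounce : %.1f us\n", demo_cc_debounce_us(0x10));	/* ~427.2 us */
	printf("DRP toggle  : %.1f ms\n", demo_drp_toggle_ms(4));	/* 76.8 ms */
	printf("DRP dcSRC   : %.3f\n", demo_drp_duty(330));		/* ~0.322 */
	return 0;
}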
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_REBALANCE_TYPES_H #define _BCACHEFS_REBALANCE_TYPES_H #include "bbpos_types.h" #include "move_types.h" #define BCH_REBALANCE_STATES() \ x(waiting) \ x(working) \ x(scanning) enum bch_rebalance_states { #define x(t) BCH_REBALANCE_##t, BCH_REBALANCE_STATES() #undef x }; struct bch_fs_rebalance { struct task_struct __rcu *thread; struct bch_pd_controller pd; enum bch_rebalance_states state; u64 wait_iotime_start; u64 wait_iotime_end; u64 wait_wallclock_start; struct bch_move_stats work_stats; struct bbpos scan_start; struct bbpos scan_end; struct bch_move_stats scan_stats; unsigned enabled:1; }; #endif /* _BCACHEFS_REBALANCE_TYPES_H */
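/*
 * Editor's illustration (not part of bcachefs): BCH_REBALANCE_STATES() above
 * is an x-macro, so the same list can be expanded a second time to produce a
 * matching name table.  The DEMO_* names below are local to this sketch;
 * bcachefs' own string table is not shown in this header.
 */
#include <stdio.h>

#define DEMO_REBALANCE_STATES()	\
	x(waiting)		\
	x(working)		\
	x(scanning)

enum demo_rebalance_states {
#define x(t)	DEMO_REBALANCE_##t,
	DEMO_REBALANCE_STATES()
#undef x
};

/* Second expansion of the same list: enum value -> printable name. */
static const char * const demo_rebalance_state_strs[] = {
#define x(t)	#t,
	DEMO_REBALANCE_STATES()
#undef x
};

int main(void)
{
	enum demo_rebalance_states s = DEMO_REBALANCE_working;

	printf("state %d = %s\n", (int)s, demo_rebalance_state_strs[s]);
	return 0;
}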
/* SPDX-License-Identifier: GPL-2.0 */ /****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * ******************************************************************************/ /***************************************************************************** * * Module: __INC_HAL8192CPHYREG_H * * * Note: 1. Define PMAC/BB register map * 2. Define RF register map * 3. PMAC/BB register bit mask. * 4. RF reg bit mask. * 5. Other BB/RF relative definition. * * * Export: Constants, macro, functions(API), global variables(None). * * Abbrev: * * History: * Data Who Remark * 08/07/2007 MHC 1. Porting from 9x series PHYCFG.h. * 2. Reorganize code architecture. *09/25/2008 MH 1. Add RL6052 register definition * *****************************************************************************/ #ifndef __INC_HAL8192CPHYREG_H #define __INC_HAL8192CPHYREG_H /*--------------------------Define Parameters-------------------------------*/ /* */ /* 8192S Register offset definition */ /* */ /* */ /* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */ /* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */ /* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */ /* 3. RF register 0x00-2E */ /* 4. Bit Mask for BB/RF register */ /* 5. Other definition for BB/RF R/W */ /* */ /* */ /* 3. Page8(0x800) */ /* */ #define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting?? */ #define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */ #define rFPGA0_XA_HSSIParameter2 0x824 #define rFPGA0_XB_HSSIParameter1 0x828 #define rFPGA0_XB_HSSIParameter2 0x82c #define rTxAGC_B_Rate18_06 0x830 #define rTxAGC_B_Rate54_24 0x834 #define rTxAGC_B_CCK1_55_Mcs32 0x838 #define rTxAGC_B_Mcs03_Mcs00 0x83c #define rTxAGC_B_Mcs07_Mcs04 0x848 #define rFPGA0_XA_LSSIParameter 0x840 #define rFPGA0_XB_LSSIParameter 0x844 #define rFPGA0_XCD_SwitchControl 0x85c #define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */ #define rFPGA0_XB_RFInterfaceOE 0x864 #define rTxAGC_B_CCK11_A_CCK2_11 0x86c #define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Interface Software Control */ #define rFPGA0_XCD_RFInterfaceSW 0x874 #define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */ #define rFPGA0_XB_LSSIReadBack 0x8a4 #define TransceiverA_HSPI_Readback 0x8b8 /* Transceiver A HSPI Readback */ #define TransceiverB_HSPI_Readback 0x8bc /* Transceiver B HSPI Readback */ /* */ /* 4. Page9(0x900) */ /* */ #define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC RF BW Setting?? */ #define rS0S1_PathSwitch 0x948 /* */ /* 5. PageA(0xA00) */ /* */ /* Set Control channel to upper or lower. These settings are required only for 40MHz */ #define rCCK0_System 0xa00 #define rCCK0_AFESetting 0xa04 /* Disable init gain now Select RX path by RSSI */ /* */ /* PageB(0xB00) */ /* */ #define rConfig_AntA 0xb68 #define rConfig_AntB 0xb6c /* */ /* 6. PageC(0xC00) */ /* */ #define rOFDM0_TRxPathEnable 0xc04 #define rOFDM0_TRMuxPar 0xc08 #define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */ #define rOFDM0_XBRxIQImbalance 0xc1c #define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */ #define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */ #define rOFDM0_AGCRSSITable 0xc78 #define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */ #define rOFDM0_XBTxIQImbalance 0xc88 #define rOFDM0_XCTxAFE 0xc94 #define rOFDM0_XDTxAFE 0xc9c #define rOFDM0_RxIQExtAnta 0xca0 #define rOFDM0_TxPseudoNoiseWgt 0xce4 /* */ /* 7. PageD(0xD00) */ /* */ #define rOFDM1_LSTF 0xd00 /* */ /* 8. 
PageE(0xE00) */ /* */ #define rTxAGC_A_Rate18_06 0xe00 #define rTxAGC_A_Rate54_24 0xe04 #define rTxAGC_A_CCK1_Mcs32 0xe08 #define rTxAGC_A_Mcs03_Mcs00 0xe10 #define rTxAGC_A_Mcs07_Mcs04 0xe14 #define rFPGA0_IQK 0xe28 #define rTx_IQK_Tone_A 0xe30 #define rRx_IQK_Tone_A 0xe34 #define rTx_IQK_PI_A 0xe38 #define rRx_IQK_PI_A 0xe3c #define rTx_IQK 0xe40 #define rRx_IQK 0xe44 #define rIQK_AGC_Pts 0xe48 #define rIQK_AGC_Rsp 0xe4c #define rTx_IQK_Tone_B 0xe50 #define rRx_IQK_Tone_B 0xe54 #define rTx_IQK_PI_B 0xe58 #define rRx_IQK_PI_B 0xe5c #define rBlue_Tooth 0xe6c #define rRx_Wait_CCA 0xe70 #define rTx_CCK_RFON 0xe74 #define rTx_CCK_BBON 0xe78 #define rTx_OFDM_RFON 0xe7c #define rTx_OFDM_BBON 0xe80 #define rTx_To_Rx 0xe84 #define rTx_To_Tx 0xe88 #define rRx_CCK 0xe8c #define rTx_Power_Before_IQK_A 0xe94 #define rTx_Power_After_IQK_A 0xe9c #define rRx_Power_Before_IQK_A_2 0xea4 #define rRx_Power_After_IQK_A_2 0xeac #define rRx_OFDM 0xed0 #define rRx_Wait_RIFS 0xed4 #define rRx_TO_Rx 0xed8 #define rStandby 0xedc #define rSleep 0xee0 #define rPMPD_ANAEN 0xeec /* */ /* RL6052 Register definition */ /* */ #define RF_AC 0x00 /* */ #define RF_TXM_IDAC 0x08 /* */ #define RF_CHNLBW 0x18 /* RF channel and BW switch */ #define RF_RCK_OS 0x30 /* RF TX PA control */ #define RF_TXPA_G1 0x31 /* RF TX PA control */ #define RF_TXPA_G2 0x32 /* RF TX PA control */ #define RF_WE_LUT 0xEF /* 2. Page8(0x800) */ #define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */ #define b3WireDataLength 0x800 /* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */ #define b3WireAddressLength 0x400 #define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */ #define bLSSIReadAddress 0x7f800000 /* T65 RF */ #define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */ #define bLSSIReadBackData 0xfffff /* T65 RF */ /* 4. PageA(0xA00) */ #define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0_System 20/40 switch */ /* */ /* Other Definition */ /* */ /* for PutRegsetting & GetRegSetting BitMask */ #define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */ #define bMaskByte1 0xff00 #define bMaskByte2 0xff0000 #define bMaskByte3 0xff000000 #define bMaskHWord 0xffff0000 #define bMaskLWord 0x0000ffff #define bMaskDWord 0xffffffff #define bMaskH3Bytes 0xffffff00 #define bMask12Bits 0xfff #define bMaskH4Bits 0xf0000000 #define bEnable 0x1 /* Useless */ #define rDPDT_control 0x92c #endif /* __INC_HAL8192SPHYREG_H */
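/*
 * Editor's illustration (not part of the driver): how mask constants such as
 * bMaskByte1 or bMaskLWord above are typically used for masked register
 * reads and read-modify-write updates.  demo_reg stands in for a BB/RF
 * register value and the demo_* helpers are local to this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MASK_BYTE1	0x0000ff00u
#define DEMO_MASK_LWORD	0x0000ffffu

/* Shift implied by a contiguous mask: position of its lowest set bit. */
static unsigned int demo_mask_shift(uint32_t mask)
{
	unsigned int shift = 0;

	while (mask && !(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

/* Extract the field selected by @mask from a raw register value. */
static uint32_t demo_get_field(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> demo_mask_shift(mask);
}

/* Replace the field selected by @mask, leaving the other bits untouched. */
static uint32_t demo_set_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | ((val << demo_mask_shift(mask)) & mask);
}

int main(void)
{
	uint32_t demo_reg = 0x12345678;

	printf("byte1 = 0x%02x\n",
	       (unsigned int)demo_get_field(demo_reg, DEMO_MASK_BYTE1));	/* 0x56 */
	demo_reg = demo_set_field(demo_reg, DEMO_MASK_LWORD, 0xabcd);
	printf("reg   = 0x%08x\n", (unsigned int)demo_reg);			/* 0x1234abcd */
	return 0;
}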
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2018-2020 Broadcom. */ #ifndef BCM_VK_MSG_H #define BCM_VK_MSG_H #include <uapi/linux/misc/bcm_vk.h> #include "bcm_vk_sg.h" /* Single message queue control structure */ struct bcm_vk_msgq { u16 type; /* queue type */ u16 num; /* queue number */ u32 start; /* offset in BAR1 where the queue memory starts */ u32 rd_idx; /* read idx */ u32 wr_idx; /* write idx */ u32 size; /* * size, which is in number of 16byte blocks, * to align with the message data structure. */ u32 nxt; /* * nxt offset to the next msg queue struct. * This is to provide flexibity for alignment purposes. */ /* Least significant 16 bits in below field hold doorbell register offset */ #define DB_SHIFT 16 u32 db_offset; /* queue doorbell register offset in BAR0 */ u32 rsvd; }; /* * Structure to record static info from the msgq sync. We keep local copy * for some of these variables for both performance + checking purpose. */ struct bcm_vk_sync_qinfo { void __iomem *q_start; u32 q_size; u32 q_mask; u32 q_low; u32 q_db_offset; }; #define VK_MSGQ_MAX_NR 4 /* Maximum number of message queues */ /* * message block - basic unit in the message where a message's size is always * N x sizeof(basic_block) */ struct vk_msg_blk { u8 function_id; #define VK_FID_TRANS_BUF 5 #define VK_FID_SHUTDOWN 8 #define VK_FID_INIT 9 u8 size; /* size of the message in number of vk_msg_blk's */ u16 trans_id; /* transport id, queue & msg_id */ u32 context_id; #define VK_NEW_CTX 0 u32 cmd; #define VK_CMD_PLANES_MASK 0x000f /* number of planes to up/download */ #define VK_CMD_UPLOAD 0x0400 /* memory transfer to vk */ #define VK_CMD_DOWNLOAD 0x0500 /* memory transfer from vk */ #define VK_CMD_MASK 0x0f00 /* command mask */ u32 arg; }; /* vk_msg_blk is 16 bytes fixed */ #define VK_MSGQ_BLK_SIZE (sizeof(struct vk_msg_blk)) /* shift for fast division of basic msg blk size */ #define VK_MSGQ_BLK_SZ_SHIFT 4 /* use msg_id 0 for any simplex host2vk communication */ #define VK_SIMPLEX_MSG_ID 0 /* context per session opening of sysfs */ struct bcm_vk_ctx { struct list_head node; /* use for linkage in Hash Table */ unsigned int idx; bool in_use; pid_t pid; u32 hash_idx; u32 q_num; /* queue number used by the stream */ struct miscdevice *miscdev; atomic_t pend_cnt; /* number of items pending to be read from host */ atomic_t dma_cnt; /* any dma transaction outstanding */ wait_queue_head_t rd_wq; }; /* pid hash table entry */ struct bcm_vk_ht_entry { struct list_head head; }; #define VK_DMA_MAX_ADDRS 4 /* Max 4 DMA Addresses */ /* structure for house keeping a single work entry */ struct bcm_vk_wkent { struct list_head node; /* for linking purpose */ struct bcm_vk_ctx *ctx; /* Store up to 4 dma pointers */ struct bcm_vk_dma dma[VK_DMA_MAX_ADDRS]; u32 to_h_blks; /* response */ struct vk_msg_blk *to_h_msg; /* * put the to_v_msg at the end so that we could simply append to_v msg * to the end of the allocated block */ u32 usr_msg_id; u32 to_v_blks; u32 seq_num; struct vk_msg_blk to_v_msg[] __counted_by(to_v_blks); }; /* queue stats counters */ struct bcm_vk_qs_cnts { u32 cnt; /* general counter, used to limit output */ u32 acc_sum; u32 max_occ; /* max during a sampling period */ u32 max_abs; /* the abs max since reset */ }; /* control channel structure for either to_v or to_h communication */ struct bcm_vk_msg_chan { u32 q_nr; /* Mutex to access msgq */ struct mutex msgq_mutex; /* pointing to BAR locations */ struct bcm_vk_msgq __iomem *msgq[VK_MSGQ_MAX_NR]; /* Spinlock to access pending queue */ spinlock_t pendq_lock; /* 
for temporarily storing pending items, one for each queue */
	struct list_head pendq[VK_MSGQ_MAX_NR];
	/* static queue info from the sync */
	struct bcm_vk_sync_qinfo sync_qinfo[VK_MSGQ_MAX_NR];
};

/* total number of message queues allowed by the driver */
#define VK_MSGQ_PER_CHAN_MAX 3
#define VK_MSGQ_NUM_DEFAULT (VK_MSGQ_PER_CHAN_MAX - 1)

/* total number of supported ctx, 32 ctx each for 5 components */
#define VK_CMPT_CTX_MAX (32 * 5)

/* hash table defines to store the opened FDs */
#define VK_PID_HT_SHIFT_BIT 7 /* 128 */
#define VK_PID_HT_SZ BIT(VK_PID_HT_SHIFT_BIT)

/* The following are offsets of DDR info provided by the vk card */
#define VK_BAR0_SEG_SIZE (4 * SZ_1K) /* segment size for BAR0 */

/* shutdown types supported */
#define VK_SHUTDOWN_PID 1
#define VK_SHUTDOWN_GRACEFUL 2

#endif
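/*
 * Editor's illustration (not part of the driver): the block and ring-index
 * arithmetic implied by struct bcm_vk_msgq above - message sizes are kept in
 * 16-byte blocks (VK_MSGQ_BLK_SZ_SHIFT) and rd_idx/wr_idx wrap inside a
 * power-of-two queue through a mask (see struct bcm_vk_sync_qinfo::q_mask).
 * The demo_* names are local to this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BLK_SZ_SHIFT	4			/* 16-byte message blocks */
#define DEMO_BLK_SIZE		(1u << DEMO_BLK_SZ_SHIFT)

/* Round a byte count up to whole message blocks using the shift. */
static uint32_t demo_bytes_to_blks(uint32_t bytes)
{
	return (bytes + DEMO_BLK_SIZE - 1) >> DEMO_BLK_SZ_SHIFT;
}

/* Blocks currently queued, assuming q_size is a power of two and q_mask = q_size - 1. */
static uint32_t demo_msgq_occupied(uint32_t wr_idx, uint32_t rd_idx, uint32_t q_mask)
{
	return (wr_idx - rd_idx) & q_mask;
}

int main(void)
{
	printf("40 bytes -> %u blocks\n",
	       (unsigned int)demo_bytes_to_blks(40));			/* 3 */
	printf("occupied  : %u blocks\n",
	       (unsigned int)demo_msgq_occupied(5, 250, 255));		/* 11 (wrapped) */
	return 0;
}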
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Invensense, Inc. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/sysfs.h> #include <linux/jiffies.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/math64.h> #include <linux/iio/common/inv_sensors_timestamp.h> #include "inv_mpu_iio.h" static int inv_reset_fifo(struct iio_dev *indio_dev) { int result; struct inv_mpu6050_state *st = iio_priv(indio_dev); /* disable fifo and reenable it */ inv_mpu6050_prepare_fifo(st, false); result = inv_mpu6050_prepare_fifo(st, true); if (result) goto reset_fifo_fail; return 0; reset_fifo_fail: dev_err(regmap_get_device(st->map), "reset fifo failed %d\n", result); return regmap_update_bits(st->map, st->reg->int_enable, INV_MPU6050_BIT_DATA_RDY_EN, INV_MPU6050_BIT_DATA_RDY_EN); } /* * inv_mpu6050_read_fifo() - Transfer data from hardware FIFO to KFIFO. */ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct inv_mpu6050_state *st = iio_priv(indio_dev); size_t bytes_per_datum; int result; u16 fifo_count; u32 fifo_period; s64 timestamp; u8 data[INV_MPU6050_OUTPUT_DATA_SIZE]; size_t i, nb; mutex_lock(&st->lock); if (!(st->chip_config.accl_fifo_enable | st->chip_config.gyro_fifo_enable | st->chip_config.magn_fifo_enable)) goto end_session; bytes_per_datum = 0; if (st->chip_config.accl_fifo_enable) bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR; if (st->chip_config.gyro_fifo_enable) bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR; if (st->chip_config.temp_fifo_enable) bytes_per_datum += INV_MPU6050_BYTES_PER_TEMP_SENSOR; if (st->chip_config.magn_fifo_enable) bytes_per_datum += INV_MPU9X50_BYTES_MAGN; /* * read fifo_count register to know how many bytes are inside the FIFO * right now */ result = regmap_bulk_read(st->map, st->reg->fifo_count_h, st->data, INV_MPU6050_FIFO_COUNT_BYTE); if (result) goto end_session; fifo_count = be16_to_cpup((__be16 *)&st->data[0]); /* * Handle fifo overflow by resetting fifo. * Reset if there is only 3 data set free remaining to mitigate * possible delay between reading fifo count and fifo data. 
*/ nb = 3 * bytes_per_datum; if (fifo_count >= st->hw->fifo_size - nb) { dev_warn(regmap_get_device(st->map), "fifo overflow reset\n"); goto flush_fifo; } /* compute and process only all complete datum */ nb = fifo_count / bytes_per_datum; fifo_count = nb * bytes_per_datum; if (nb == 0) goto end_session; /* Each FIFO data contains all sensors, so same number for FIFO and sensor data */ fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider); inv_sensors_timestamp_interrupt(&st->timestamp, 1, pf->timestamp); inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, 1, 0); /* clear internal data buffer for avoiding kernel data leak */ memset(data, 0, sizeof(data)); /* read all data once and process every samples */ result = regmap_noinc_read(st->map, st->reg->fifo_r_w, st->data, fifo_count); if (result) goto flush_fifo; for (i = 0; i < nb; ++i) { /* skip first samples if needed */ if (st->skip_samples) { st->skip_samples--; continue; } memcpy(data, &st->data[i * bytes_per_datum], bytes_per_datum); timestamp = inv_sensors_timestamp_pop(&st->timestamp); iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp); } end_session: mutex_unlock(&st->lock); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; flush_fifo: /* Flush HW and SW FIFOs. */ inv_reset_fifo(indio_dev); mutex_unlock(&st->lock); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; }
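/*
 * Editor's illustration (not part of the driver): the per-sample size
 * bookkeeping used by inv_mpu6050_read_fifo() above - every enabled sensor
 * adds a fixed number of bytes to one FIFO datum, and only whole data sets
 * are consumed.  The 6-byte (3-axis) and 2-byte (temperature) record sizes
 * below are assumptions of this sketch; the driver takes them from its
 * INV_MPU6050_BYTES_PER_* constants, which are defined elsewhere.
 */
#include <stdio.h>

#define DEMO_BYTES_PER_3AXIS	6	/* 3 x 16-bit channels (assumed) */
#define DEMO_BYTES_PER_TEMP	2	/* 1 x 16-bit channel (assumed) */

struct demo_fifo_cfg {
	int accl_enable;
	int gyro_enable;
	int temp_enable;
};

/* Sum the contribution of every enabled sensor, as the interrupt handler does. */
static unsigned int demo_bytes_per_datum(const struct demo_fifo_cfg *cfg)
{
	unsigned int bytes = 0;

	if (cfg->accl_enable)
		bytes += DEMO_BYTES_PER_3AXIS;
	if (cfg->gyro_enable)
		bytes += DEMO_BYTES_PER_3AXIS;
	if (cfg->temp_enable)
		bytes += DEMO_BYTES_PER_TEMP;
	return bytes;
}

int main(void)
{
	struct demo_fifo_cfg cfg = { .accl_enable = 1, .gyro_enable = 1, .temp_enable = 1 };
	unsigned int bpd = demo_bytes_per_datum(&cfg);
	unsigned int fifo_count = 100;	/* bytes reported by the FIFO count register */

	/* Only complete data sets are processed; the remainder stays in the FIFO. */
	printf("bytes/datum : %u\n", bpd);		/* 14 */
	printf("complete    : %u\n", fifo_count / bpd);	/* 7 */
	printf("leftover    : %u\n", fifo_count % bpd);	/* 2 */
	return 0;
}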
/* * Copyright (c) 2005 Intel Inc. All rights reserved. * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved. * Copyright (c) 2014 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/slab.h> #include "mad_priv.h" #include "mad_rmpp.h" enum rmpp_state { RMPP_STATE_ACTIVE, RMPP_STATE_TIMEOUT, RMPP_STATE_COMPLETE }; struct mad_rmpp_recv { struct ib_mad_agent_private *agent; struct list_head list; struct delayed_work timeout_work; struct delayed_work cleanup_work; struct completion comp; enum rmpp_state state; spinlock_t lock; refcount_t refcount; struct ib_ah *ah; struct ib_mad_recv_wc *rmpp_wc; struct ib_mad_recv_buf *cur_seg_buf; int last_ack; int seg_num; int newwin; int repwin; __be64 tid; u32 src_qp; u32 slid; u8 mgmt_class; u8 class_version; u8 method; u8 base_version; }; static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { if (refcount_dec_and_test(&rmpp_recv->refcount)) complete(&rmpp_recv->comp); } static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { deref_rmpp_recv(rmpp_recv); wait_for_completion(&rmpp_recv->comp); rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE); kfree(rmpp_recv); } void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent) { struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { cancel_delayed_work(&rmpp_recv->timeout_work); cancel_delayed_work(&rmpp_recv->cleanup_work); } spin_unlock_irqrestore(&agent->lock, flags); flush_workqueue(agent->qp_info->port_priv->wq); list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv, &agent->rmpp_list, list) { list_del(&rmpp_recv->list); if (rmpp_recv->state != RMPP_STATE_COMPLETE) ib_free_recv_mad(rmpp_recv->rmpp_wc); destroy_rmpp_recv(rmpp_recv); } } static void format_ack(struct ib_mad_send_buf *msg, struct ib_rmpp_mad *data, struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *ack = msg->mad; unsigned long flags; memcpy(ack, &data->mad_hdr, msg->hdr_len); ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP; ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK; ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); spin_lock_irqsave(&rmpp_recv->lock, flags); rmpp_recv->last_ack = rmpp_recv->seg_num; 
ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num); ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin); spin_unlock_irqrestore(&rmpp_recv->lock, flags); } static void ack_recv(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; int ret, hdr_len; hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL, IB_MGMT_BASE_VERSION); if (IS_ERR(msg)) return; format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); msg->ah = rmpp_recv->ah; ret = ib_post_send_mad(msg, NULL); if (ret) ib_free_send_mad(msg); } static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; struct ib_ah *ah; int hdr_len; ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, recv_wc->recv_buf.grh, agent->port_num); if (IS_ERR(ah)) return (void *) ah; hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, 0, GFP_KERNEL, IB_MGMT_BASE_VERSION); if (IS_ERR(msg)) rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); else { msg->ah = ah; msg->context[0] = ah; } return msg; } static void ack_ds_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *recv_wc) { struct ib_mad_send_buf *msg; struct ib_rmpp_mad *rmpp_mad; int ret; msg = alloc_response_msg(&agent->agent, recv_wc); if (IS_ERR(msg)) return; rmpp_mad = msg->mad; memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.seg_num = 0; rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1); ret = ib_post_send_mad(msg, NULL); if (ret) { rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(msg); } } void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) { if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah) rdma_destroy_ah(mad_send_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(mad_send_wc->send_buf); } static void nack_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *recv_wc, u8 rmpp_status) { struct ib_mad_send_buf *msg; struct ib_rmpp_mad *rmpp_mad; int ret; msg = alloc_response_msg(&agent->agent, recv_wc); if (IS_ERR(msg)) return; rmpp_mad = msg->mad; memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION; rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status; rmpp_mad->rmpp_hdr.seg_num = 0; rmpp_mad->rmpp_hdr.paylen_newwin = 0; ret = ib_post_send_mad(msg, NULL); if (ret) { rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(msg); } } static void recv_timeout_handler(struct work_struct *work) { struct mad_rmpp_recv *rmpp_recv = container_of(work, struct mad_rmpp_recv, timeout_work.work); struct ib_mad_recv_wc *rmpp_wc; unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); if (rmpp_recv->state != RMPP_STATE_ACTIVE) { spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); return; } rmpp_recv->state = RMPP_STATE_TIMEOUT; list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); rmpp_wc = 
rmpp_recv->rmpp_wc; nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L); destroy_rmpp_recv(rmpp_recv); ib_free_recv_mad(rmpp_wc); } static void recv_cleanup_handler(struct work_struct *work) { struct mad_rmpp_recv *rmpp_recv = container_of(work, struct mad_rmpp_recv, cleanup_work.work); unsigned long flags; spin_lock_irqsave(&rmpp_recv->agent->lock, flags); list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); destroy_rmpp_recv(rmpp_recv); } static struct mad_rmpp_recv * create_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_hdr *mad_hdr; rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL); if (!rmpp_recv) return NULL; rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, agent->agent.port_num); if (IS_ERR(rmpp_recv->ah)) goto error; rmpp_recv->agent = agent; init_completion(&rmpp_recv->comp); INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); spin_lock_init(&rmpp_recv->lock); rmpp_recv->state = RMPP_STATE_ACTIVE; refcount_set(&rmpp_recv->refcount, 1); rmpp_recv->rmpp_wc = mad_recv_wc; rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf; rmpp_recv->newwin = 1; rmpp_recv->seg_num = 1; rmpp_recv->last_ack = 0; rmpp_recv->repwin = 1; mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; rmpp_recv->tid = mad_hdr->tid; rmpp_recv->src_qp = mad_recv_wc->wc->src_qp; rmpp_recv->slid = mad_recv_wc->wc->slid; rmpp_recv->mgmt_class = mad_hdr->mgmt_class; rmpp_recv->class_version = mad_hdr->class_version; rmpp_recv->method = mad_hdr->method; rmpp_recv->base_version = mad_hdr->base_version; return rmpp_recv; error: kfree(rmpp_recv); return NULL; } static struct mad_rmpp_recv * find_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->tid == mad_hdr->tid && rmpp_recv->src_qp == mad_recv_wc->wc->src_qp && rmpp_recv->slid == mad_recv_wc->wc->slid && rmpp_recv->mgmt_class == mad_hdr->mgmt_class && rmpp_recv->class_version == mad_hdr->class_version && rmpp_recv->method == mad_hdr->method) return rmpp_recv; } return NULL; } static struct mad_rmpp_recv * acquire_rmpp_recv(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); rmpp_recv = find_rmpp_recv(agent, mad_recv_wc); if (rmpp_recv) refcount_inc(&rmpp_recv->refcount); spin_unlock_irqrestore(&agent->lock, flags); return rmpp_recv; } static struct mad_rmpp_recv * insert_rmpp_recv(struct ib_mad_agent_private *agent, struct mad_rmpp_recv *rmpp_recv) { struct mad_rmpp_recv *cur_rmpp_recv; cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc); if (!cur_rmpp_recv) list_add_tail(&rmpp_recv->list, &agent->rmpp_list); return cur_rmpp_recv; } static inline int get_last_flag(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *) seg->mad; return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST; } static inline int get_seg_num(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *) seg->mad; return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); } static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list, 
struct ib_mad_recv_buf *seg) { if (seg->list.next == rmpp_list) return NULL; return container_of(seg->list.next, struct ib_mad_recv_buf, list); } static inline int window_size(struct ib_mad_agent_private *agent) { return max(agent->qp_info->recv_queue.max_active >> 3, 1); } static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list, int seg_num) { struct ib_mad_recv_buf *seg_buf; int cur_seg_num; list_for_each_entry_reverse(seg_buf, rmpp_list, list) { cur_seg_num = get_seg_num(seg_buf); if (seg_num > cur_seg_num) return seg_buf; if (seg_num == cur_seg_num) break; } return NULL; } static void update_seg_num(struct mad_rmpp_recv *rmpp_recv, struct ib_mad_recv_buf *new_buf) { struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list; while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) { rmpp_recv->cur_seg_buf = new_buf; rmpp_recv->seg_num++; new_buf = get_next_seg(rmpp_list, new_buf); } } static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *rmpp_mad; int hdr_size, data_size, pad; bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device, rmpp_recv->agent->qp_info->port_priv->port_num); rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) { data_size = sizeof(struct opa_rmpp_mad) - hdr_size; pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (pad > OPA_MGMT_RMPP_DATA || pad < 0) pad = 0; } else { data_size = sizeof(struct ib_rmpp_mad) - hdr_size; pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (pad > IB_MGMT_RMPP_DATA || pad < 0) pad = 0; } return hdr_size + rmpp_recv->seg_num * data_size - pad; } static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv) { struct ib_mad_recv_wc *rmpp_wc; ack_recv(rmpp_recv, rmpp_recv->rmpp_wc); if (rmpp_recv->seg_num > 1) cancel_delayed_work(&rmpp_recv->timeout_work); rmpp_wc = rmpp_recv->rmpp_wc; rmpp_wc->mad_len = get_mad_len(rmpp_recv); /* 10 seconds until we can find the packet lifetime */ queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq, &rmpp_recv->cleanup_work, msecs_to_jiffies(10000)); return rmpp_wc; } static struct ib_mad_recv_wc * continue_rmpp(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; struct ib_mad_recv_buf *prev_buf; struct ib_mad_recv_wc *done_wc; int seg_num; unsigned long flags; rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc); if (!rmpp_recv) goto drop1; seg_num = get_seg_num(&mad_recv_wc->recv_buf); spin_lock_irqsave(&rmpp_recv->lock, flags); if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) || (seg_num > rmpp_recv->newwin)) goto drop3; if ((seg_num <= rmpp_recv->last_ack) || (rmpp_recv->state == RMPP_STATE_COMPLETE)) { spin_unlock_irqrestore(&rmpp_recv->lock, flags); ack_recv(rmpp_recv, mad_recv_wc); goto drop2; } prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num); if (!prev_buf) goto drop3; done_wc = NULL; list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list); if (rmpp_recv->cur_seg_buf == prev_buf) { update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf); if (get_last_flag(rmpp_recv->cur_seg_buf)) { rmpp_recv->state = RMPP_STATE_COMPLETE; spin_unlock_irqrestore(&rmpp_recv->lock, flags); done_wc = complete_rmpp(rmpp_recv); goto out; } else if (rmpp_recv->seg_num == rmpp_recv->newwin) { rmpp_recv->newwin += window_size(agent); spin_unlock_irqrestore(&rmpp_recv->lock, 
flags); ack_recv(rmpp_recv, mad_recv_wc); goto out; } } spin_unlock_irqrestore(&rmpp_recv->lock, flags); out: deref_rmpp_recv(rmpp_recv); return done_wc; drop3: spin_unlock_irqrestore(&rmpp_recv->lock, flags); drop2: deref_rmpp_recv(rmpp_recv); drop1: ib_free_recv_mad(mad_recv_wc); return NULL; } static struct ib_mad_recv_wc * start_rmpp(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct mad_rmpp_recv *rmpp_recv; unsigned long flags; rmpp_recv = create_rmpp_recv(agent, mad_recv_wc); if (!rmpp_recv) { ib_free_recv_mad(mad_recv_wc); return NULL; } spin_lock_irqsave(&agent->lock, flags); if (insert_rmpp_recv(agent, rmpp_recv)) { spin_unlock_irqrestore(&agent->lock, flags); /* duplicate first MAD */ destroy_rmpp_recv(rmpp_recv); return continue_rmpp(agent, mad_recv_wc); } refcount_inc(&rmpp_recv->refcount); if (get_last_flag(&mad_recv_wc->recv_buf)) { rmpp_recv->state = RMPP_STATE_COMPLETE; spin_unlock_irqrestore(&agent->lock, flags); complete_rmpp(rmpp_recv); } else { spin_unlock_irqrestore(&agent->lock, flags); /* 40 seconds until we can find the packet lifetimes */ queue_delayed_work(agent->qp_info->port_priv->wq, &rmpp_recv->timeout_work, msecs_to_jiffies(40000)); rmpp_recv->newwin += window_size(agent); ack_recv(rmpp_recv, mad_recv_wc); mad_recv_wc = NULL; } deref_rmpp_recv(rmpp_recv); return mad_recv_wc; } static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int timeout; u32 paylen = 0; rmpp_mad = mad_send_wr->send_buf.mad; ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num); if (mad_send_wr->seg_num == 1) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST; paylen = (mad_send_wr->send_buf.seg_count * mad_send_wr->send_buf.seg_rmpp_size) - mad_send_wr->pad; } if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST; paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad; } rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen); /* 2 seconds for an ACK until we can find the packet lifetime */ timeout = mad_send_wr->send_buf.timeout_ms; if (!timeout || timeout > 2000) mad_send_wr->timeout = msecs_to_jiffies(2000); return ib_send_mad(mad_send_wr); } static void abort_send(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc wc; unsigned long flags; spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); if (!mad_send_wr) goto out; /* Unmatched send */ if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&agent->lock, flags); wc.status = IB_WC_REM_ABORT_ERR; wc.vendor_err = rmpp_status; wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &wc); return; out: spin_unlock_irqrestore(&agent->lock, flags); } static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr, int seg_num) { struct list_head *list; wr->last_ack = seg_num; list = &wr->last_ack_seg->list; list_for_each_entry(wr->last_ack_seg, list, list) if (wr->last_ack_seg->num == seg_num) break; } static void process_ds_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc, int newwin) { struct mad_rmpp_recv *rmpp_recv; rmpp_recv = 
find_rmpp_recv(agent, mad_recv_wc); if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE) rmpp_recv->repwin = newwin; } static void process_rmpp_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_wr_private *mad_send_wr; struct ib_rmpp_mad *rmpp_mad; unsigned long flags; int seg_num, newwin, ret; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); return; } seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); if (newwin < seg_num) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); return; } spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); if (!mad_send_wr) { if (!seg_num) process_ds_ack(agent, mad_recv_wc, newwin); goto out; /* Unmatched or DS RMPP ACK */ } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) && (mad_send_wr->timeout)) { spin_unlock_irqrestore(&agent->lock, flags); ack_ds_ack(agent, mad_recv_wc); return; /* Repeated ACK for DS RMPP transaction */ } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ if (seg_num > mad_send_wr->send_buf.seg_count || seg_num > mad_send_wr->newwin) { spin_unlock_irqrestore(&agent->lock, flags); abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); return; } if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack) goto out; /* Old ACK */ if (seg_num > mad_send_wr->last_ack) { adjust_last_ack(mad_send_wr, seg_num); mad_send_wr->retries_left = mad_send_wr->max_retries; } mad_send_wr->newwin = newwin; if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { /* If no response is expected, the ACK completes the send */ if (!mad_send_wr->send_buf.timeout_ms) { struct ib_mad_send_wc wc; ib_mark_mad_done(mad_send_wr); spin_unlock_irqrestore(&agent->lock, flags); wc.status = IB_WC_SUCCESS; wc.vendor_err = 0; wc.send_buf = &mad_send_wr->send_buf; ib_mad_complete_send_wr(mad_send_wr, &wc); return; } if (mad_send_wr->refcount == 1) ib_reset_mad_timeout(mad_send_wr, mad_send_wr->send_buf.timeout_ms); spin_unlock_irqrestore(&agent->lock, flags); ack_ds_ack(agent, mad_recv_wc); return; } else if (mad_send_wr->refcount == 1 && mad_send_wr->seg_num < mad_send_wr->newwin && mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) { /* Send failure will just result in a timeout/retry */ ret = send_next_seg(mad_send_wr); if (ret) goto out; mad_send_wr->refcount++; list_move_tail(&mad_send_wr->agent_list, &mad_send_wr->mad_agent_priv->send_list); } out: spin_unlock_irqrestore(&agent->lock, flags); } static struct ib_mad_recv_wc * process_rmpp_data(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_hdr *rmpp_hdr; u8 rmpp_status; rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr; if (rmpp_hdr->rmpp_status) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS; goto bad; } if (rmpp_hdr->seg_num == cpu_to_be32(1)) { if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) { rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; goto bad; } return start_rmpp(agent, mad_recv_wc); } else { if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) { 
rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; goto bad; } return continue_rmpp(agent, mad_recv_wc); } bad: nack_recv(agent, mad_recv_wc, rmpp_status); ib_free_recv_mad(mad_recv_wc); return NULL; } static void process_rmpp_stop(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); } else abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); } static void process_rmpp_abort(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); } else abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); } struct ib_mad_recv_wc * ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_rmpp_mad *rmpp_mad; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE)) return mad_recv_wc; if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); goto out; } switch (rmpp_mad->rmpp_hdr.rmpp_type) { case IB_MGMT_RMPP_TYPE_DATA: return process_rmpp_data(agent, mad_recv_wc); case IB_MGMT_RMPP_TYPE_ACK: process_rmpp_ack(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_STOP: process_rmpp_stop(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_ABORT: process_rmpp_abort(agent, mad_recv_wc); break; default: abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); break; } out: ib_free_recv_mad(mad_recv_wc); return NULL; } static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv; struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad; struct mad_rmpp_recv *rmpp_recv; struct rdma_ah_attr ah_attr; unsigned long flags; int newwin = 1; if (!(mad_hdr->method & IB_MGMT_METHOD_RESP)) goto out; spin_lock_irqsave(&agent->lock, flags); list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { if (rmpp_recv->tid != mad_hdr->tid || rmpp_recv->mgmt_class != mad_hdr->mgmt_class || rmpp_recv->class_version != mad_hdr->class_version || (rmpp_recv->method & IB_MGMT_METHOD_RESP)) continue; if (rdma_query_ah(mad_send_wr->send_buf.ah, &ah_attr)) continue; if (rmpp_recv->slid == rdma_ah_get_dlid(&ah_attr)) { newwin = rmpp_recv->repwin; break; } } spin_unlock_irqrestore(&agent->lock, flags); out: return newwin; } int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { mad_send_wr->seg_num = 1; return IB_RMPP_RESULT_INTERNAL; } mad_send_wr->newwin = init_newwin(mad_send_wr); /* We need to wait for the final ACK even if there isn't a response */ 
mad_send_wr->refcount += (mad_send_wr->timeout == 0); ret = send_next_seg(mad_send_wr); if (!ret) return IB_RMPP_RESULT_CONSUMED; return ret; } int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ if (mad_send_wc->status != IB_WC_SUCCESS || mad_send_wr->status != IB_WC_SUCCESS) return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */ if (!mad_send_wr->timeout) return IB_RMPP_RESULT_PROCESSED; /* Response received */ if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); return IB_RMPP_RESULT_PROCESSED; /* Send done */ } if (mad_send_wr->seg_num == mad_send_wr->newwin || mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */ ret = send_next_seg(mad_send_wr); if (ret) { mad_send_wc->status = IB_WC_GENERAL_ERR; return IB_RMPP_RESULT_PROCESSED; } return IB_RMPP_RESULT_CONSUMED; } int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; int ret; rmpp_mad = mad_send_wr->send_buf.mad; if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) return IB_RMPP_RESULT_PROCESSED; mad_send_wr->seg_num = mad_send_wr->last_ack; mad_send_wr->cur_seg = mad_send_wr->last_ack_seg; ret = send_next_seg(mad_send_wr); if (ret) return IB_RMPP_RESULT_PROCESSED; return IB_RMPP_RESULT_CONSUMED; }
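/*
 * Editor's illustration (not part of the MAD layer): the paylen_newwin
 * arithmetic used by send_next_seg() above.  The first segment advertises
 * the whole payload (seg_count * seg_rmpp_size - pad) while the last one
 * carries only the bytes left in that segment (seg_rmpp_size - pad).  The
 * segment size and payload length below are example values for this sketch.
 */
#include <stdio.h>

int main(void)
{
	unsigned int seg_rmpp_size = 220;	/* RMPP data bytes per segment (example) */
	unsigned int payload = 1000;		/* total RMPP payload in bytes (example) */

	unsigned int seg_count = (payload + seg_rmpp_size - 1) / seg_rmpp_size;
	unsigned int pad = seg_count * seg_rmpp_size - payload;
	unsigned int first_paylen = seg_count * seg_rmpp_size - pad;	/* == payload */
	unsigned int last_paylen = seg_rmpp_size - pad;			/* tail bytes */

	printf("segments     : %u\n", seg_count);	/* 5 */
	printf("pad          : %u\n", pad);		/* 100 */
	printf("first paylen : %u\n", first_paylen);	/* 1000 */
	printf("last paylen  : %u\n", last_paylen);	/* 120 */
	return 0;
}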
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2015 Broadcom Corporation * */ #include <linux/clk.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/platform_data/bcm7038_wdt.h> #include <linux/pm.h> #include <linux/watchdog.h> #define WDT_START_1 0xff00 #define WDT_START_2 0x00ff #define WDT_STOP_1 0xee00 #define WDT_STOP_2 0x00ee #define WDT_TIMEOUT_REG 0x0 #define WDT_CMD_REG 0x4 #define WDT_MIN_TIMEOUT 1 /* seconds */ #define WDT_DEFAULT_TIMEOUT 30 /* seconds */ #define WDT_DEFAULT_RATE 27000000 struct bcm7038_watchdog { void __iomem *base; struct watchdog_device wdd; u32 rate; struct clk *clk; }; static bool nowayout = WATCHDOG_NOWAYOUT; static inline void bcm7038_wdt_write(u32 value, void __iomem *addr) { /* MIPS chips strapped for BE will automagically configure the * peripheral registers for CPU-native byte order. */ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) __raw_writel(value, addr); else writel_relaxed(value, addr); } static inline u32 bcm7038_wdt_read(void __iomem *addr) { if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return __raw_readl(addr); else return readl_relaxed(addr); } static void bcm7038_wdt_set_timeout_reg(struct watchdog_device *wdog) { struct bcm7038_watchdog *wdt = watchdog_get_drvdata(wdog); u32 timeout; timeout = wdt->rate * wdog->timeout; bcm7038_wdt_write(timeout, wdt->base + WDT_TIMEOUT_REG); } static int bcm7038_wdt_ping(struct watchdog_device *wdog) { struct bcm7038_watchdog *wdt = watchdog_get_drvdata(wdog); bcm7038_wdt_write(WDT_START_1, wdt->base + WDT_CMD_REG); bcm7038_wdt_write(WDT_START_2, wdt->base + WDT_CMD_REG); return 0; } static int bcm7038_wdt_start(struct watchdog_device *wdog) { bcm7038_wdt_set_timeout_reg(wdog); bcm7038_wdt_ping(wdog); return 0; } static int bcm7038_wdt_stop(struct watchdog_device *wdog) { struct bcm7038_watchdog *wdt = watchdog_get_drvdata(wdog); bcm7038_wdt_write(WDT_STOP_1, wdt->base + WDT_CMD_REG); bcm7038_wdt_write(WDT_STOP_2, wdt->base + WDT_CMD_REG); return 0; } static int bcm7038_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t) { /* Can't modify timeout value if watchdog timer is running */ bcm7038_wdt_stop(wdog); wdog->timeout = t; bcm7038_wdt_start(wdog); return 0; } static unsigned int bcm7038_wdt_get_timeleft(struct watchdog_device *wdog) { struct bcm7038_watchdog *wdt = watchdog_get_drvdata(wdog); u32 time_left; time_left = bcm7038_wdt_read(wdt->base + WDT_CMD_REG); return time_left / wdt->rate; } static const struct watchdog_info bcm7038_wdt_info = { .identity = "Broadcom BCM7038 Watchdog Timer", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE }; static const struct watchdog_ops bcm7038_wdt_ops = { .owner = THIS_MODULE, .start = bcm7038_wdt_start, .stop = bcm7038_wdt_stop, .set_timeout = bcm7038_wdt_set_timeout, .get_timeleft = bcm7038_wdt_get_timeleft, }; static int bcm7038_wdt_probe(struct platform_device *pdev) { struct bcm7038_wdt_platform_data *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct bcm7038_watchdog *wdt; const char *clk_name = NULL; int err; wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); if (!wdt) return -ENOMEM; platform_set_drvdata(pdev, wdt); wdt->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(wdt->base)) return PTR_ERR(wdt->base); if (pdata && pdata->clk_name) clk_name = pdata->clk_name; wdt->clk = devm_clk_get_enabled(dev, clk_name); /* If unable to get clock, use default 
frequency */ if (!IS_ERR(wdt->clk)) { wdt->rate = clk_get_rate(wdt->clk); /* Prevent divide-by-zero exception */ if (!wdt->rate) wdt->rate = WDT_DEFAULT_RATE; } else { wdt->rate = WDT_DEFAULT_RATE; wdt->clk = NULL; } wdt->wdd.info = &bcm7038_wdt_info; wdt->wdd.ops = &bcm7038_wdt_ops; wdt->wdd.min_timeout = WDT_MIN_TIMEOUT; wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT; wdt->wdd.max_timeout = 0xffffffff / wdt->rate; wdt->wdd.parent = dev; watchdog_set_drvdata(&wdt->wdd, wdt); watchdog_stop_on_reboot(&wdt->wdd); watchdog_stop_on_unregister(&wdt->wdd); err = devm_watchdog_register_device(dev, &wdt->wdd); if (err) return err; dev_info(dev, "Registered BCM7038 Watchdog\n"); return 0; } static int bcm7038_wdt_suspend(struct device *dev) { struct bcm7038_watchdog *wdt = dev_get_drvdata(dev); if (watchdog_active(&wdt->wdd)) return bcm7038_wdt_stop(&wdt->wdd); return 0; } static int bcm7038_wdt_resume(struct device *dev) { struct bcm7038_watchdog *wdt = dev_get_drvdata(dev); if (watchdog_active(&wdt->wdd)) return bcm7038_wdt_start(&wdt->wdd); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend, bcm7038_wdt_resume); static const struct of_device_id bcm7038_wdt_match[] = { { .compatible = "brcm,bcm6345-wdt" }, { .compatible = "brcm,bcm7038-wdt" }, {}, }; MODULE_DEVICE_TABLE(of, bcm7038_wdt_match); static const struct platform_device_id bcm7038_wdt_devtype[] = { { .name = "bcm63xx-wdt" }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, bcm7038_wdt_devtype); static struct platform_driver bcm7038_wdt_driver = { .probe = bcm7038_wdt_probe, .id_table = bcm7038_wdt_devtype, .driver = { .name = "bcm7038-wdt", .of_match_table = bcm7038_wdt_match, .pm = pm_sleep_ptr(&bcm7038_wdt_pm_ops), } }; module_platform_driver(bcm7038_wdt_driver); module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for Broadcom 7038 SoCs Watchdog"); MODULE_AUTHOR("Justin Chen");
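/*
 * Editor's illustration (not part of the driver): the clock-tick arithmetic
 * behind bcm7038_wdt_set_timeout_reg(), bcm7038_wdt_get_timeleft() and the
 * max_timeout computation in probe above, evaluated at the 27 MHz default
 * rate (WDT_DEFAULT_RATE).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_WDT_RATE	27000000u	/* ticks per second */

int main(void)
{
	uint32_t timeout_s = 30;				/* WDT_DEFAULT_TIMEOUT */
	uint32_t timeout_reg = DEMO_WDT_RATE * timeout_s;	/* value written to WDT_TIMEOUT_REG */
	uint32_t max_timeout_s = 0xffffffffu / DEMO_WDT_RATE;	/* limited by the 32-bit register */

	printf("timeout reg : %u ticks\n", (unsigned int)timeout_reg);			/* 810000000 */
	printf("time left   : %u s\n", (unsigned int)(timeout_reg / DEMO_WDT_RATE));	/* 30 */
	printf("max timeout : %u s\n", (unsigned int)max_timeout_s);			/* 159 */
	return 0;
}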
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H #define _LINUX_BYTEORDER_LITTLE_ENDIAN_H #include <uapi/linux/byteorder/little_endian.h> #ifdef CONFIG_CPU_BIG_ENDIAN #warning inconsistent configuration, CONFIG_CPU_BIG_ENDIAN is set #endif #include <linux/byteorder/generic.h> #endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. */ #ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H #define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H /* GPU_CC clock registers */ #define GPU_CC_AHB_CLK 0 #define GPU_CC_CRC_AHB_CLK 1 #define GPU_CC_CX_APB_CLK 2 #define GPU_CC_CX_GMU_CLK 3 #define GPU_CC_CX_SNOC_DVM_CLK 4 #define GPU_CC_CXO_AON_CLK 5 #define GPU_CC_CXO_CLK 6 #define GPU_CC_GMU_CLK_SRC 7 #define GPU_CC_GX_GMU_CLK 8 #define GPU_CC_PLL1 9 /* GPU_CC Resets */ #define GPUCC_GPU_CC_CX_BCR 0 #define GPUCC_GPU_CC_GFX3D_AON_BCR 1 #define GPUCC_GPU_CC_GMU_BCR 2 #define GPUCC_GPU_CC_GX_BCR 3 #define GPUCC_GPU_CC_SPDM_BCR 4 #define GPUCC_GPU_CC_XO_BCR 5 /* GPU_CC GDSCRs */ #define GPU_CX_GDSC 0 #define GPU_GX_GDSC 1 #endif
/* SPDX-License-Identifier: GPL-2.0
 *
 * mt8186-audsys-clkid.h -- Mediatek 8186 audsys clock id definition
 *
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Jiaxin Yu <[email protected]>
 */

#ifndef _MT8186_AUDSYS_CLKID_H_
#define _MT8186_AUDSYS_CLKID_H_

enum {
	CLK_AUD_AFE,
	CLK_AUD_22M,
	CLK_AUD_24M,
	CLK_AUD_APLL2_TUNER,
	CLK_AUD_APLL_TUNER,
	CLK_AUD_TDM,
	CLK_AUD_ADC,
	CLK_AUD_DAC,
	CLK_AUD_DAC_PREDIS,
	CLK_AUD_TML,
	CLK_AUD_NLE,
	CLK_AUD_I2S1_BCLK,
	CLK_AUD_I2S2_BCLK,
	CLK_AUD_I2S3_BCLK,
	CLK_AUD_I2S4_BCLK,
	CLK_AUD_CONNSYS_I2S_ASRC,
	CLK_AUD_GENERAL1_ASRC,
	CLK_AUD_GENERAL2_ASRC,
	CLK_AUD_DAC_HIRES,
	CLK_AUD_ADC_HIRES,
	CLK_AUD_ADC_HIRES_TML,
	CLK_AUD_ADDA6_ADC,
	CLK_AUD_ADDA6_ADC_HIRES,
	CLK_AUD_3RD_DAC,
	CLK_AUD_3RD_DAC_PREDIS,
	CLK_AUD_3RD_DAC_TML,
	CLK_AUD_3RD_DAC_HIRES,
	CLK_AUD_ETDM_IN1_BCLK,
	CLK_AUD_ETDM_OUT1_BCLK,
	CLK_AUD_NR_CLK,
};

#endif
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2019 Intel Corporation. */ #include <linux/bitfield.h> #include "fm10k_pf.h" #include "fm10k_vf.h" /** * fm10k_reset_hw_pf - PF hardware reset * @hw: pointer to hardware structure * * This function should return the hardware to a state similar to the * one it is in after being powered on. **/ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) { s32 err; u32 reg; u16 i; /* Disable interrupts */ fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL)); /* Lock ITR2 reg 0 into itself and disable interrupt moderation */ fm10k_write_reg(hw, FM10K_ITR2(0), 0); fm10k_write_reg(hw, FM10K_INT_CTRL, 0); /* We assume here Tx and Rx queue 0 are owned by the PF */ /* Shut off VF access to their queues forcing them to queue 0 */ for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) { fm10k_write_reg(hw, FM10K_TQMAP(i), 0); fm10k_write_reg(hw, FM10K_RQMAP(i), 0); } /* shut down all rings */ err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES); if (err == FM10K_ERR_REQUESTS_PENDING) { hw->mac.reset_while_pending++; goto force_reset; } else if (err) { return err; } /* Verify that DMA is no longer active */ reg = fm10k_read_reg(hw, FM10K_DMA_CTRL); if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) return FM10K_ERR_DMA_PENDING; force_reset: /* Inititate data path reset */ reg = FM10K_DMA_CTRL_DATAPATH_RESET; fm10k_write_reg(hw, FM10K_DMA_CTRL, reg); /* Flush write and allow 100us for reset to complete */ fm10k_write_flush(hw); udelay(FM10K_RESET_TIMEOUT); /* Verify we made it out of reset */ reg = fm10k_read_reg(hw, FM10K_IP); if (!(reg & FM10K_IP_NOTINRESET)) return FM10K_ERR_RESET_FAILED; return 0; } /** * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support * @hw: pointer to hardware structure * * Looks at the ARI hierarchy bit to determine whether ARI is supported or not. 
**/ static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw) { u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL); return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI); } /** * fm10k_init_hw_pf - PF hardware initialization * @hw: pointer to hardware structure * **/ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) { u32 dma_ctrl, txqctl; u16 i; /* Establish default VSI as valid */ fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0); fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default), FM10K_DGLORTMAP_ANY); /* Invalidate all other GLORT entries */ for (i = 1; i < FM10K_DGLORT_COUNT; i++) fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE); /* reset ITR2(0) to point to itself */ fm10k_write_reg(hw, FM10K_ITR2(0), 0); /* reset VF ITR2(0) to point to 0 avoid PF registers */ fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0); /* loop through all PF ITR2 registers pointing them to the previous */ for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++) fm10k_write_reg(hw, FM10K_ITR2(i), i - 1); /* Enable interrupt moderator if not already enabled */ fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); /* compute the default txqctl configuration */ txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW | (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT); for (i = 0; i < FM10K_MAX_QUEUES; i++) { /* configure rings for 256 Queue / 32 Descriptor cache mode */ fm10k_write_reg(hw, FM10K_TQDLOC(i), (i * FM10K_TQDLOC_BASE_32_DESC) | FM10K_TQDLOC_SIZE_32_DESC); fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl); /* configure rings to provide TPH processing hints */ fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i), FM10K_TPH_TXCTRL_DESC_TPHEN | FM10K_TPH_TXCTRL_DESC_RROEN | FM10K_TPH_TXCTRL_DESC_WROEN | FM10K_TPH_TXCTRL_DATA_RROEN); fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i), FM10K_TPH_RXCTRL_DESC_TPHEN | FM10K_TPH_RXCTRL_DESC_RROEN | FM10K_TPH_RXCTRL_DATA_WROEN | FM10K_TPH_RXCTRL_HDR_WROEN); } /* set max hold interval to align with 1.024 usec in all modes and * store ITR scale */ switch (hw->bus.speed) { case fm10k_bus_speed_2500: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1; hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1; break; case fm10k_bus_speed_5000: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2; hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2; break; case fm10k_bus_speed_8000: dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3; hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; break; default: dma_ctrl = 0; /* just in case, assume Gen3 ITR scale */ hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; break; } /* Configure TSO flags */ fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW); fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI); /* Enable DMA engine * Set Rx Descriptor size to 32 * Set Minimum MSS to 64 * Set Maximum number of Rx queues to 256 / 32 Descriptor */ dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE | FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 | FM10K_DMA_CTRL_32_DESC; fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl); /* record maximum queue count, we limit ourselves to 128 */ hw->mac.max_queues = FM10K_MAX_QUEUES_PF; /* We support either 64 VFs or 7 VFs depending on if we have ARI */ hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 
64 : 7; return 0; } /** * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table * @hw: pointer to hardware structure * @vid: VLAN ID to add to table * @vsi: Index indicating VF ID or PF ID in table * @set: Indicates if this is a set or clear operation * * This function adds or removes the corresponding VLAN ID from the VLAN * filter table for the corresponding function. In addition to the * standard set/clear that supports one bit a multi-bit write is * supported to set 64 bits at a time. **/ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) { u32 vlan_table, reg, mask, bit, len; /* verify the VSI index is valid */ if (vsi > FM10K_VLAN_TABLE_VSI_MAX) return FM10K_ERR_PARAM; /* VLAN multi-bit write: * The multi-bit write has several parts to it. * 24 16 8 0 * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | RSVD0 | Length |C|RSVD0| VLAN ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * VLAN ID: Vlan Starting value * RSVD0: Reserved section, must be 0 * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message) * Length: Number of times to repeat the bit being set */ len = vid >> 16; vid = (vid << 17) >> 17; /* verify the reserved 0 fields are 0 */ if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; /* Loop through the table updating all required VLANs */ for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32; len < FM10K_VLAN_TABLE_VID_MAX; len -= 32 - bit, reg++, bit = 0) { /* record the initial state of the register */ vlan_table = fm10k_read_reg(hw, reg); /* truncate mask if we are at the start or end of the run */ mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit; /* make necessary modifications to the register */ mask &= set ? ~vlan_table : vlan_table; if (mask) fm10k_write_reg(hw, reg, vlan_table ^ mask); } return 0; } /** * fm10k_read_mac_addr_pf - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the SM_AREA and stores the value. 
**/ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) { u8 perm_addr[ETH_ALEN]; u32 serial_num; serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1)); /* last byte should be all 1's */ if ((~serial_num) << 24) return FM10K_ERR_INVALID_MAC_ADDR; perm_addr[0] = (u8)(serial_num >> 24); perm_addr[1] = (u8)(serial_num >> 16); perm_addr[2] = (u8)(serial_num >> 8); serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0)); /* first byte should be all 1's */ if ((~serial_num) >> 24) return FM10K_ERR_INVALID_MAC_ADDR; perm_addr[3] = (u8)(serial_num >> 16); perm_addr[4] = (u8)(serial_num >> 8); perm_addr[5] = (u8)(serial_num); ether_addr_copy(hw->mac.perm_addr, perm_addr); ether_addr_copy(hw->mac.addr, perm_addr); return 0; } /** * fm10k_glort_valid_pf - Validate that the provided glort is valid * @hw: pointer to the HW structure * @glort: base glort to be validated * * This function will return an error if the provided glort is invalid **/ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) { glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT; return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE); } /** * fm10k_update_xc_addr_pf - Update device addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table * @vid: VLAN ID to add/remove from table * @add: Indicates if this is an add or remove operation * @flags: flags field to indicate add and secure * * This function generates a message to the Switch API requesting * that the given logical port add/remove the given L2 MAC/VLAN address. **/ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, const u8 *mac, u16 vid, bool add, u8 flags) { struct fm10k_mbx_info *mbx = &hw->mbx; struct fm10k_mac_update mac_update; u32 msg[5]; /* clear set bit from VLAN ID */ vid &= ~FM10K_VLAN_CLEAR; /* if glort or VLAN are not valid return error */ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; /* record fields */ mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) | ((u32)mac[3] << 16) | ((u32)mac[4] << 8) | ((u32)mac[5])); mac_update.mac_upper = cpu_to_le16(((u16)mac[0] << 8) | ((u16)mac[1])); mac_update.vlan = cpu_to_le16(vid); mac_update.glort = cpu_to_le16(glort); mac_update.action = add ? 0 : 1; mac_update.flags = flags; /* populate mac_update fields */ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE); fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE, &mac_update, sizeof(mac_update)); /* load onto outgoing mailbox */ return mbx->ops.enqueue_tx(hw, mbx, msg); } /** * fm10k_update_uc_addr_pf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table * @vid: VLAN ID to add/remove from table * @add: Indicates if this is an add or remove operation * @flags: flags field to indicate add and secure * * This function is used to add or remove unicast addresses for * the PF. 
**/ static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort, const u8 *mac, u16 vid, bool add, u8 flags) { /* verify MAC address is valid */ if (!is_valid_ether_addr(mac)) return FM10K_ERR_PARAM; return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags); } /** * fm10k_update_mc_addr_pf - Update device multicast addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table * @vid: VLAN ID to add/remove from table * @add: Indicates if this is an add or remove operation * * This function is used to add or remove multicast MAC addresses for * the PF. **/ static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort, const u8 *mac, u16 vid, bool add) { /* verify multicast address is valid */ if (!is_multicast_ether_addr(mac)) return FM10K_ERR_PARAM; return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0); } /** * fm10k_update_xcast_mode_pf - Request update of multicast mode * @hw: pointer to hardware structure * @glort: base resource tag for this request * @mode: integer value indicating mode being requested * * This function will attempt to request a higher mode for the port * so that it can enable either multicast, multicast promiscuous, or * promiscuous mode of operation. **/ static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[3], xcast_mode; if (mode > FM10K_XCAST_MODE_NONE) return FM10K_ERR_PARAM; /* if glort is not valid return error */ if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; /* write xcast mode as a single u32 value, * lower 16 bits: glort * upper 16 bits: mode */ xcast_mode = ((u32)mode << 16) | glort; /* generate message requesting to change xcast mode */ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES); fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode); /* load onto outgoing mailbox */ return mbx->ops.enqueue_tx(hw, mbx, msg); } /** * fm10k_update_int_moderator_pf - Update interrupt moderator linked list * @hw: pointer to hardware structure * * This function walks through the MSI-X vector table to determine the * number of active interrupts and based on that information updates the * interrupt moderator linked list. **/ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) { u32 i; /* Disable interrupt moderator */ fm10k_write_reg(hw, FM10K_INT_CTRL, 0); /* loop through PF from last to first looking enabled vectors */ for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) { if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i))) break; } /* always reset VFITR2[0] to point to last enabled PF vector */ fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); /* reset ITR2[0] to point to last enabled PF vector */ if (!hw->iov.num_vfs) fm10k_write_reg(hw, FM10K_ITR2(0), i); /* Enable interrupt moderator */ fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); } /** * fm10k_update_lport_state_pf - Notify the switch of a change in port state * @hw: pointer to the HW structure * @glort: base resource tag for this request * @count: number of logical ports being updated * @enable: boolean value indicating enable or disable * * This function is used to add/remove a logical port from the switch. 
**/ static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort, u16 count, bool enable) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[3], lport_msg; /* do nothing if we are being asked to create or destroy 0 ports */ if (!count) return 0; /* if glort is not valid return error */ if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; /* reset multicast mode if deleting lport */ if (!enable) fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE); /* construct the lport message from the 2 pieces of data we have */ lport_msg = ((u32)count << 16) | glort; /* generate lport create/delete message */ fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE : FM10K_PF_MSG_ID_LPORT_DELETE); fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg); /* load onto outgoing mailbox */ return mbx->ops.enqueue_tx(hw, mbx, msg); } /** * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues * @hw: pointer to hardware structure * @dglort: pointer to dglort configuration structure * * Reads the configuration structure contained in dglort_cfg and uses * that information to then populate a DGLORTMAP/DEC entry and the queues * to which it has been assigned. **/ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, struct fm10k_dglort_cfg *dglort) { u16 glort, queue_count, vsi_count, pc_count; u16 vsi, queue, pc, q_idx; u32 txqctl, dglortdec, dglortmap; /* verify the dglort pointer */ if (!dglort) return FM10K_ERR_PARAM; /* verify the dglort values */ if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) || (dglort->vsi_l > 6) || (dglort->vsi_b > 64) || (dglort->queue_l > 8) || (dglort->queue_b >= 256)) return FM10K_ERR_PARAM; /* determine count of VSIs and queues */ queue_count = BIT(dglort->rss_l + dglort->pc_l); vsi_count = BIT(dglort->vsi_l + dglort->queue_l); glort = dglort->glort; q_idx = dglort->queue_b; /* configure SGLORT for queues */ for (vsi = 0; vsi < vsi_count; vsi++, glort++) { for (queue = 0; queue < queue_count; queue++, q_idx++) { if (q_idx >= FM10K_MAX_QUEUES) break; fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort); fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort); } } /* determine count of PCs and queues */ queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l); pc_count = BIT(dglort->pc_l); /* configure PC for Tx queues */ for (pc = 0; pc < pc_count; pc++) { q_idx = pc + dglort->queue_b; for (queue = 0; queue < queue_count; queue++) { if (q_idx >= FM10K_MAX_QUEUES) break; txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx)); txqctl &= ~FM10K_TXQCTL_PC_MASK; txqctl |= pc << FM10K_TXQCTL_PC_SHIFT; fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl); q_idx += pc_count; } } /* configure DGLORTDEC */ dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) | ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) | ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) | ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) | ((u32)(dglort->queue_l)); if (dglort->inner_rss) dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE; /* configure DGLORTMAP */ dglortmap = (dglort->idx == fm10k_dglort_default) ? 
FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO; dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l; dglortmap |= dglort->glort; /* write values to hardware */ fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec); fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap); return 0; } u16 fm10k_queues_per_pool(struct fm10k_hw *hw) { u16 num_pools = hw->iov.num_pools; return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ? 8 : FM10K_MAX_QUEUES_POOL; } u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx) { u16 num_vfs = hw->iov.num_vfs; u16 vf_q_idx = FM10K_MAX_QUEUES; vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx); return vf_q_idx; } static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw) { u16 num_pools = hw->iov.num_pools; return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 : FM10K_MAX_VECTORS_POOL; } static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx) { u16 vf_v_idx = FM10K_MAX_VECTORS_PF; vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx; return vf_v_idx; } /** * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization * @hw: pointer to the HW structure * @num_vfs: number of VFs to be allocated * @num_pools: number of virtualization pools to be allocated * * Allocates queues and traffic classes to virtualization entities to prepare * the PF for SR-IOV and VMDq **/ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, u16 num_pools) { u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx; u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT; int i, j; /* hardware only supports up to 64 pools */ if (num_pools > 64) return FM10K_ERR_PARAM; /* the number of VFs cannot exceed the number of pools */ if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs)) return FM10K_ERR_PARAM; /* record number of virtualization entities */ hw->iov.num_vfs = num_vfs; hw->iov.num_pools = num_pools; /* determine qmap offsets and counts */ qmap_stride = (num_vfs > 8) ? 
32 : 256; qpp = fm10k_queues_per_pool(hw); vpp = fm10k_vectors_per_pool(hw); /* calculate starting index for queues */ vf_q_idx = fm10k_vf_queue_index(hw, 0); qmap_idx = 0; /* establish TCs with -1 credits and no quanta to prevent transmit */ for (i = 0; i < num_vfs; i++) { fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0); fm10k_write_reg(hw, FM10K_TC_RATE(i), 0); fm10k_write_reg(hw, FM10K_TC_CREDIT(i), FM10K_TC_CREDIT_CREDIT_MASK); } /* zero out all mbmem registers */ for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;) fm10k_write_reg(hw, FM10K_MBMEM(i), 0); /* clear event notification of VF FLR */ fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0); fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0); /* loop through unallocated rings assigning them back to PF */ for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) { fm10k_write_reg(hw, FM10K_TXDCTL(i), 0); fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW | vid); fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF); } /* PF should have already updated VFITR2[0] */ /* update all ITR registers to flow to VFITR2[0] */ for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) { if (!(i & (vpp - 1))) fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp); else fm10k_write_reg(hw, FM10K_ITR2(i), i - 1); } /* update PF ITR2[0] to reference the last vector */ fm10k_write_reg(hw, FM10K_ITR2(0), fm10k_vf_vector_index(hw, num_vfs - 1)); /* loop through rings populating rings and TCs */ for (i = 0; i < num_vfs; i++) { /* record index for VF queue 0 for use in end of loop */ vf_q_idx0 = vf_q_idx; for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) { /* assign VF and locked TC to queues */ fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0); fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx), (i << FM10K_TXQCTL_TC_SHIFT) | i | FM10K_TXQCTL_VF | vid); fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx), FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | FM10K_RXDCTL_DROP_ON_EMPTY); fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx), (i << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF); /* map queue pair to VF */ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx); } /* repeat the first ring for all of the remaining VF rings */ for (; j < qmap_stride; j++, qmap_idx++) { fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0); fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0); } } /* loop through remaining indexes assigning all to queue 0 */ while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) { fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0); fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0); qmap_idx++; } return 0; } /** * fm10k_iov_configure_tc_pf - Configure the shaping group for VF * @hw: pointer to the HW structure * @vf_idx: index of VF receiving GLORT * @rate: Rate indicated in Mb/s * * Configured the TC for a given VF to allow only up to a given number * of Mb/s of outgoing Tx throughput. 
 **/
static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
{
	/* configure defaults */
	u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
	u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;

	/* verify vf is in range */
	if (vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* set interval to align with 4.096 usec in all modes */
	switch (hw->bus.speed) {
	case fm10k_bus_speed_2500:
		interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
		break;
	case fm10k_bus_speed_5000:
		interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
		break;
	default:
		break;
	}

	if (rate) {
		if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
			return FM10K_ERR_PARAM;

		/* The quanta is measured in Bytes per 4.096 or 8.192 usec
		 * The rate is provided in Mbits per second
		 * To translate from rate to quanta we need to multiply the
		 * rate by 8.192 usec and divide by 8 bits/byte. To avoid
		 * dealing with floating point we can round the values up
		 * to the nearest whole number ratio which gives us 128 / 125.
		 */
		tc_rate = (rate * 128) / 125;

		/* try to keep the rate limiting accurate by increasing
		 * the number of credits and interval for rates less than 4Gb/s
		 */
		if (rate < 4000)
			interval <<= 1;
		else
			tc_rate >>= 1;
	}

	/* update rate limiter with new values */
	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);

	return 0;
}

/**
 * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
 * @hw: pointer to the HW structure
 * @vf_idx: index of VF receiving GLORT
 *
 * Update the interrupt moderator linked list to include any MSI-X
 * interrupts which the VF has enabled in the MSI-X vector table.
 **/
static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
{
	u16 vf_v_idx, vf_v_limit, i;

	/* verify vf is in range */
	if (vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* determine vector offset and count */
	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);

	/* search for first vector that is not masked */
	for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
			break;
	}

	/* reset linked list so it now includes our active vectors */
	if (vf_idx == (hw->iov.num_vfs - 1))
		fm10k_write_reg(hw, FM10K_ITR2(0), i);
	else
		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i);

	return 0;
}

/**
 * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
 * @hw: pointer to the HW structure
 * @vf_info: pointer to VF information structure
 *
 * Assign a MAC address and default VLAN to a VF and notify it of the update
 **/
static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
						struct fm10k_vf_info *vf_info)
{
	u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
	u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
	s32 err = 0;
	u16 vf_idx, vf_vid;

	/* verify vf is in range */
	if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* determine qmap offsets and counts */
	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
	queues_per_pool = fm10k_queues_per_pool(hw);

	/* calculate starting index for queues */
	vf_idx = vf_info->vf_idx;
	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
	qmap_idx = qmap_stride * vf_idx;

	/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
	 * used here to indicate to the VF that it will not have privilege to
	 * write VLAN_TABLE. All policy is enforced on the PF but this allows
	 * the VF to correctly report errors to userspace requests.
*/ if (vf_info->pf_vid) vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE; else vf_vid = vf_info->sw_vid; /* generate MAC_ADDR request */ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC, vf_info->mac, vf_vid); /* Configure Queue control register with new VLAN ID. The TXQCTL * register is RO from the VF, so the PF must do this even in the * case of notifying the VF of a new VID via the mailbox. */ txqctl = FIELD_PREP(FM10K_TXQCTL_VID_MASK, vf_vid); txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | FM10K_TXQCTL_VF | vf_idx; for (i = 0; i < queues_per_pool; i++) fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl); /* try loading a message onto outgoing mailbox first */ if (vf_info->mbx.ops.enqueue_tx) { err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); if (err != FM10K_MBX_ERR_NO_MBX) return err; err = 0; } /* If we aren't connected to a mailbox, this is most likely because * the VF driver is not running. It should thus be safe to re-map * queues and use the registers to pass the MAC address so that the VF * driver gets correct information during its initialization. */ /* MAP Tx queue back to 0 temporarily, and disable it */ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0); fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0); /* verify ring has disabled before modifying base address registers */ txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx)); for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) { /* limit ourselves to a 1ms timeout */ if (timeout == 10) { err = FM10K_ERR_DMA_PENDING; goto err_out; } usleep_range(100, 200); txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx)); } /* Update base address registers to contain MAC address */ if (is_valid_ether_addr(vf_info->mac)) { tdbal = (((u32)vf_info->mac[3]) << 24) | (((u32)vf_info->mac[4]) << 16) | (((u32)vf_info->mac[5]) << 8); tdbah = (((u32)0xFF) << 24) | (((u32)vf_info->mac[0]) << 16) | (((u32)vf_info->mac[1]) << 8) | ((u32)vf_info->mac[2]); } /* Record the base address into queue 0 */ fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah); /* Provide the VF the ITR scale, using software-defined fields in TDLEN * to pass the information during VF initialization. See definition of * FM10K_TDLEN_ITR_SCALE_SHIFT for more details. 
*/ fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT); err_out: /* restore the queue back to VF ownership */ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); return err; } /** * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF * @hw: pointer to the HW structure * @vf_info: pointer to VF information structure * * Reassign the interrupts and queues to a VF following an FLR **/ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, struct fm10k_vf_info *vf_info) { u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx; u32 tdbal = 0, tdbah = 0, txqctl, rxqctl; u16 vf_v_idx, vf_v_limit, vf_vid; u8 vf_idx = vf_info->vf_idx; int i; /* verify vf is in range */ if (vf_idx >= hw->iov.num_vfs) return FM10K_ERR_PARAM; /* clear event notification of VF FLR */ fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32)); /* force timeout and then disconnect the mailbox */ vf_info->mbx.timeout = 0; if (vf_info->mbx.ops.disconnect) vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); /* determine qmap offsets and counts */ qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256; queues_per_pool = fm10k_queues_per_pool(hw); qmap_idx = qmap_stride * vf_idx; /* make all the queues inaccessible to the VF */ for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) { fm10k_write_reg(hw, FM10K_TQMAP(i), 0); fm10k_write_reg(hw, FM10K_RQMAP(i), 0); } /* calculate starting index for queues */ vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); /* determine correct default VLAN ID */ if (vf_info->pf_vid) vf_vid = vf_info->pf_vid; else vf_vid = vf_info->sw_vid; /* configure Queue control register */ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) | (vf_idx << FM10K_TXQCTL_TC_SHIFT) | FM10K_TXQCTL_VF | vf_idx; rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF; /* stop further DMA and reset queue ownership back to VF */ for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) { fm10k_write_reg(hw, FM10K_TXDCTL(i), 0); fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl); fm10k_write_reg(hw, FM10K_RXDCTL(i), FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | FM10K_RXDCTL_DROP_ON_EMPTY); fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl); } /* reset TC with -1 credits and no quanta to prevent transmit */ fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0); fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0); fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_CREDIT_CREDIT_MASK); /* update our first entry in the table based on previous VF */ if (!vf_idx) hw->mac.ops.update_int_moderator(hw); else hw->iov.ops.assign_int_moderator(hw, vf_idx - 1); /* reset linked list so it now includes our active vectors */ if (vf_idx == (hw->iov.num_vfs - 1)) fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx); else fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx); /* link remaining vectors so that next points to previous */ for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++) fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1); /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */ for (i = FM10K_VFMBMEM_LEN; i--;) fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0); for (i = FM10K_VLAN_TABLE_SIZE; i--;) fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0); for (i = FM10K_RETA_SIZE; i--;) fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0); for (i = FM10K_RSSRK_SIZE; i--;) fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0); fm10k_write_reg(hw, 
FM10K_MRQC(vf_info->vsi), 0); /* Update base address registers to contain MAC address */ if (is_valid_ether_addr(vf_info->mac)) { tdbal = (((u32)vf_info->mac[3]) << 24) | (((u32)vf_info->mac[4]) << 16) | (((u32)vf_info->mac[5]) << 8); tdbah = (((u32)0xFF) << 24) | (((u32)vf_info->mac[0]) << 16) | (((u32)vf_info->mac[1]) << 8) | ((u32)vf_info->mac[2]); } /* map queue pairs back to VF from last to first */ for (i = queues_per_pool; i--;) { fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an * explanation of how TDLEN is used. */ fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx + i), hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT); fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i); fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); } /* repeat the first ring for all the remaining VF rings */ for (i = queues_per_pool; i < qmap_stride; i++) { fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx); fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx); } return 0; } /** * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF * @hw: pointer to hardware structure * @vf_info: pointer to VF information structure * @lport_idx: Logical port offset from the hardware glort * @flags: Set of capability flags to extend port beyond basic functionality * * This function allows enabling a VF port by assigning it a GLORT and * setting the flags so that it can enable an Rx mode. **/ static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw, struct fm10k_vf_info *vf_info, u16 lport_idx, u8 flags) { u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE; /* if glort is not valid return error */ if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE; vf_info->glort = glort; return 0; } /** * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF * @hw: pointer to hardware structure * @vf_info: pointer to VF information structure * * This function disables a VF port by stripping it of a GLORT and * setting the flags so that it cannot enable any Rx mode. **/ static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw, struct fm10k_vf_info *vf_info) { u32 msg[1]; /* need to disable the port if it is already enabled */ if (FM10K_VF_FLAG_ENABLED(vf_info)) { /* notify switch that this port has been disabled */ fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false); /* generate port state response to notify VF it is not ready */ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); } /* clear flags and glort if it exists */ vf_info->vf_flags = 0; vf_info->glort = 0; } /** * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs * @hw: pointer to hardware structure * @q: stats for all queues of a VF * @vf_idx: index of VF * * This function collects queue stats for VFs. 
**/ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, u16 vf_idx) { u32 idx, qpp; /* get stats for all of the queues */ qpp = fm10k_queues_per_pool(hw); idx = fm10k_vf_queue_index(hw, vf_idx); fm10k_update_hw_stats_q(hw, q, idx, qpp); } /** * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF * @hw: Pointer to hardware structure * @results: Pointer array to message, results[0] is pointer to message * @mbx: Pointer to mailbox information structure * * This function is a default handler for MSI-X requests from the VF. The * assumption is that in this case it is acceptable to just directly * hand off the message from the VF to the underlying shared code. **/ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 __always_unused **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; u8 vf_idx = vf_info->vf_idx; return hw->iov.ops.assign_int_moderator(hw, vf_idx); } /** * fm10k_iov_select_vid - Select correct default VLAN ID * @vf_info: pointer to VF information structure * @vid: VLAN ID to correct * * Will report an error if the VLAN ID is out of range. For VID = 0, it will * return either the pf_vid or sw_vid depending on which one is set. */ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) { if (!vid) return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid; else if (vf_info->pf_vid && vid != vf_info->pf_vid) return FM10K_ERR_PARAM; else return vid; } /** * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF * @hw: Pointer to hardware structure * @results: Pointer array to message, results[0] is pointer to message * @mbx: Pointer to mailbox information structure * * This function is a default handler for MAC/VLAN requests from the VF. * The assumption is that in this case it is acceptable to just directly * hand off the message from the VF to the underlying shared code. **/ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; u8 mac[ETH_ALEN]; u32 *result; int err = 0; bool set; u16 vlan; u32 vid; /* we shouldn't be updating rules on a disabled interface */ if (!FM10K_VF_FLAG_ENABLED(vf_info)) err = FM10K_ERR_PARAM; if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { result = results[FM10K_MAC_VLAN_MSG_VLAN]; /* record VLAN id requested */ err = fm10k_tlv_attr_get_u32(result, &vid); if (err) return err; set = !(vid & FM10K_VLAN_CLEAR); vid &= ~FM10K_VLAN_CLEAR; /* if the length field has been set, this is a multi-bit * update request. For multi-bit requests, simply disallow * them when the pf_vid has been set. In this case, the PF * should have already cleared the VLAN_TABLE, and if we * allowed them, it could allow a rogue VF to receive traffic * on a VLAN it was not assigned. In the single-bit case, we * need to modify requests for VLAN 0 to use the default PF or * SW vid when assigned. 
*/ if (vid >> 16) { /* prevent multi-bit requests when PF has * administratively set the VLAN for this VF */ if (vf_info->pf_vid) return FM10K_ERR_PARAM; } else { err = fm10k_iov_select_vid(vf_info, (u16)vid); if (err < 0) return err; vid = err; } /* update VSI info for VF in regards to VLAN table */ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); } if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { result = results[FM10K_MAC_VLAN_MSG_MAC]; /* record unicast MAC address requested */ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); if (err) return err; /* block attempts to set MAC for a locked device */ if (is_valid_ether_addr(vf_info->mac) && !ether_addr_equal(mac, vf_info->mac)) return FM10K_ERR_PARAM; set = !(vlan & FM10K_VLAN_CLEAR); vlan &= ~FM10K_VLAN_CLEAR; err = fm10k_iov_select_vid(vf_info, vlan); if (err < 0) return err; vlan = (u16)err; /* notify switch of request for new unicast address */ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan, set, 0); } if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { result = results[FM10K_MAC_VLAN_MSG_MULTICAST]; /* record multicast MAC address requested */ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); if (err) return err; /* verify that the VF is allowed to request multicast */ if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) return FM10K_ERR_PARAM; set = !(vlan & FM10K_VLAN_CLEAR); vlan &= ~FM10K_VLAN_CLEAR; err = fm10k_iov_select_vid(vf_info, vlan); if (err < 0) return err; vlan = (u16)err; /* notify switch of request for new multicast address */ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan, set); } return err; } /** * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode * @vf_info: VF info structure containing capability flags * @mode: Requested xcast mode * * This function outputs the mode that most closely matches the requested * mode. If not modes match it will request we disable the port **/ static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info, u8 mode) { u8 vf_flags = vf_info->vf_flags; /* match up mode to capabilities as best as possible */ switch (mode) { case FM10K_XCAST_MODE_PROMISC: if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE) return FM10K_XCAST_MODE_PROMISC; fallthrough; case FM10K_XCAST_MODE_ALLMULTI: if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE) return FM10K_XCAST_MODE_ALLMULTI; fallthrough; case FM10K_XCAST_MODE_MULTI: if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE) return FM10K_XCAST_MODE_MULTI; fallthrough; case FM10K_XCAST_MODE_NONE: if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE) return FM10K_XCAST_MODE_NONE; fallthrough; default: break; } /* disable interface as it should not be able to request any */ return FM10K_XCAST_MODE_DISABLE; } /** * fm10k_iov_msg_lport_state_pf - Message handler for port state requests * @hw: Pointer to hardware structure * @results: Pointer array to message, results[0] is pointer to message * @mbx: Pointer to mailbox information structure * * This function is a default handler for port state requests. The port * state requests for now are basic and consist of enabling or disabling * the port. 
**/ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; s32 err = 0; u32 msg[2]; u8 mode = 0; /* verify VF is allowed to enable even minimal mode */ if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)) return FM10K_ERR_PARAM; if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) { u32 *result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; /* XCAST mode update requested */ err = fm10k_tlv_attr_get_u8(result, &mode); if (err) return FM10K_ERR_PARAM; /* prep for possible demotion depending on capabilities */ mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode); /* if mode is not currently enabled, enable it */ if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode))) fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode); /* swap mode back to a bit flag */ mode = FM10K_VF_FLAG_SET_MODE(mode); } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) { /* need to disable the port if it is already enabled */ if (FM10K_VF_FLAG_ENABLED(vf_info)) err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false); /* we need to clear VF_FLAG_ENABLED flags in order to ensure * that we actually re-enable the LPORT state below. Note that * this has no impact if the VF is already disabled, as the * flags are already cleared. */ if (!err) vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info); /* when enabling the port we should reset the rate limiters */ hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate); /* set mode for minimal functionality */ mode = FM10K_VF_FLAG_SET_MODE_NONE; /* generate port state response to notify VF it is ready */ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY); mbx->ops.enqueue_tx(hw, mbx, msg); } /* if enable state toggled note the update */ if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode)) err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1, !!mode); /* if state change succeeded, then update our stored state */ mode |= FM10K_VF_FLAG_CAPABLE(vf_info); if (!err) vf_info->vf_flags = mode; return err; } /** * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF * @hw: pointer to hardware structure * @stats: pointer to the stats structure to update * * This function collects and aggregates global and per queue hardware * statistics. 
**/ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) { u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop; u32 id, id_prev; /* Use Tx queue 0 as a canary to detect a reset */ id = fm10k_read_reg(hw, FM10K_TXQCTL(0)); /* Read Global Statistics */ do { timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT, &stats->timeout); ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur); ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca); um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um); xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec); vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP, &stats->vlan_drop); loopback_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_LOOPBACK_DROP, &stats->loopback_drop); nodesc_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_NODESC_DROP, &stats->nodesc_drop); /* if value has not changed then we have consistent data */ id_prev = id; id = fm10k_read_reg(hw, FM10K_TXQCTL(0)); } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK); /* drop non-ID bits and set VALID ID bit */ id &= FM10K_TXQCTL_ID_MASK; id |= FM10K_STAT_VALID; /* Update Global Statistics */ if (stats->stats_idx == id) { stats->timeout.count += timeout; stats->ur.count += ur; stats->ca.count += ca; stats->um.count += um; stats->xec.count += xec; stats->vlan_drop.count += vlan_drop; stats->loopback_drop.count += loopback_drop; stats->nodesc_drop.count += nodesc_drop; } /* Update bases and record current PF id */ fm10k_update_hw_base_32b(&stats->timeout, timeout); fm10k_update_hw_base_32b(&stats->ur, ur); fm10k_update_hw_base_32b(&stats->ca, ca); fm10k_update_hw_base_32b(&stats->um, um); fm10k_update_hw_base_32b(&stats->xec, xec); fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop); fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop); fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop); stats->stats_idx = id; /* Update Queue Statistics */ fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); } /** * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF * @hw: pointer to hardware structure * @stats: pointer to the stats structure to update * * This function resets the base for global and per queue hardware * statistics. **/ static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) { /* Unbind Global Statistics */ fm10k_unbind_hw_stats_32b(&stats->timeout); fm10k_unbind_hw_stats_32b(&stats->ur); fm10k_unbind_hw_stats_32b(&stats->ca); fm10k_unbind_hw_stats_32b(&stats->um); fm10k_unbind_hw_stats_32b(&stats->xec); fm10k_unbind_hw_stats_32b(&stats->vlan_drop); fm10k_unbind_hw_stats_32b(&stats->loopback_drop); fm10k_unbind_hw_stats_32b(&stats->nodesc_drop); /* Unbind Queue Statistics */ fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); /* Reinitialize bases for all stats */ fm10k_update_hw_stats_pf(hw, stats); } /** * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system * @hw: pointer to hardware structure * @dma_mask: 64 bit DMA mask required for platform * * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order * to limit the access to memory beyond what is physically in the system. 
**/ static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask) { /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */ u32 phyaddr = (u32)(dma_mask >> 32); fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr); } /** * fm10k_get_fault_pf - Record a fault in one of the interface units * @hw: pointer to hardware structure * @type: pointer to fault type register offset * @fault: pointer to memory location to record the fault * * Record the fault register contents to the fault data structure and * clear the entry from the register. * * Returns ERR_PARAM if invalid register is specified or no error is present. **/ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type, struct fm10k_fault *fault) { u32 func; /* verify the fault register is in range and is aligned */ switch (type) { case FM10K_PCA_FAULT: case FM10K_THI_FAULT: case FM10K_FUM_FAULT: break; default: return FM10K_ERR_PARAM; } /* only service faults that are valid */ func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC); if (!(func & FM10K_FAULT_FUNC_VALID)) return FM10K_ERR_PARAM; /* read remaining fields */ fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI); fault->address <<= 32; fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO); /* clear valid bit to allow for next error */ fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID); /* Record which function triggered the error */ if (func & FM10K_FAULT_FUNC_PF) fault->func = 0; else fault->func = 1 + FIELD_GET(FM10K_FAULT_FUNC_VF_MASK, func); /* record fault type */ fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK; return 0; } /** * fm10k_request_lport_map_pf - Request LPORT map from the switch API * @hw: pointer to hardware structure * **/ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[1]; /* issue request asking for LPORT map */ fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP); /* load onto outgoing mailbox */ return mbx->ops.enqueue_tx(hw, mbx, msg); } /** * fm10k_get_host_state_pf - Returns the state of the switch and mailbox * @hw: pointer to hardware structure * @switch_ready: pointer to boolean value that will record switch state * * This function will check the DMA_CTRL2 register and mailbox in order * to determine if the switch is ready for the PF to begin requesting * addresses and mapping traffic to the local interface. **/ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) { u32 dma_ctrl2; /* verify the switch is ready for interaction */ dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) return 0; /* retrieve generic host state info */ return fm10k_get_host_state_generic(hw, switch_ready); } /* This structure defines the attibutes to be parsed below */ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = { FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR, sizeof(struct fm10k_swapi_error)), FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP), FM10K_TLV_ATTR_LAST }; /** * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM * @hw: Pointer to hardware structure * @results: pointer array containing parsed data * @mbx: Pointer to mailbox information structure * * This handler configures the lport mapping based on the reply from the * switch API. 
**/ s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info __always_unused *mbx) { u16 glort, mask; u32 dglort_map; s32 err; err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP], &dglort_map); if (err) return err; /* extract values out of the header */ glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT); mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK); /* verify mask is set and none of the masked bits in glort are set */ if (!mask || (glort & ~mask)) return FM10K_ERR_PARAM; /* verify the mask is contiguous, and that it is 1's followed by 0's */ if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE) return FM10K_ERR_PARAM; /* record the glort, mask, and port count */ hw->mac.dglort_map = dglort_map; return 0; } const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID), FM10K_TLV_ATTR_LAST }; /** * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM * @hw: Pointer to hardware structure * @results: pointer array containing parsed data * @mbx: Pointer to mailbox information structure * * This handler configures the default VLAN for the PF **/ static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info __always_unused *mbx) { u16 glort, pvid; u32 pvid_update; s32 err; err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID], &pvid_update); if (err) return err; /* extract values from the pvid update */ glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT); pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID); /* if glort is not valid return error */ if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; /* verify VLAN ID is valid */ if (pvid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; /* record the port VLAN ID value */ hw->mac.default_vid = pvid; return 0; } /** * fm10k_record_global_table_data - Move global table data to swapi table info * @from: pointer to source table data structure * @to: pointer to destination table info structure * * This function is will copy table_data to the table_info contained in * the hw struct. **/ static void fm10k_record_global_table_data(struct fm10k_global_table_data *from, struct fm10k_swapi_table_info *to) { /* convert from le32 struct to CPU byte ordered values */ to->used = le32_to_cpu(from->used); to->avail = le32_to_cpu(from->avail); } const struct fm10k_tlv_attr fm10k_err_msg_attr[] = { FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR, sizeof(struct fm10k_swapi_error)), FM10K_TLV_ATTR_LAST }; /** * fm10k_msg_err_pf - Message handler for error reply * @hw: Pointer to hardware structure * @results: pointer array containing parsed data * @mbx: Pointer to mailbox information structure * * This handler will capture the data for any error replies to previous * messages that the PF has sent. 
**/ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_swapi_error err_msg; s32 err; /* extract structure from message */ err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR], &err_msg, sizeof(err_msg)); if (err) return err; /* record table status */ fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac); fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop); fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu); /* record SW API status value */ hw->swapi.status = le32_to_cpu(err_msg.status); return 0; } static const struct fm10k_msg_data fm10k_msg_data_pf[] = { FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf), FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf), FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; static const struct fm10k_mac_ops mac_ops_pf = { .get_bus_info = fm10k_get_bus_info_generic, .reset_hw = fm10k_reset_hw_pf, .init_hw = fm10k_init_hw_pf, .start_hw = fm10k_start_hw_generic, .stop_hw = fm10k_stop_hw_generic, .update_vlan = fm10k_update_vlan_pf, .read_mac_addr = fm10k_read_mac_addr_pf, .update_uc_addr = fm10k_update_uc_addr_pf, .update_mc_addr = fm10k_update_mc_addr_pf, .update_xcast_mode = fm10k_update_xcast_mode_pf, .update_int_moderator = fm10k_update_int_moderator_pf, .update_lport_state = fm10k_update_lport_state_pf, .update_hw_stats = fm10k_update_hw_stats_pf, .rebind_hw_stats = fm10k_rebind_hw_stats_pf, .configure_dglort_map = fm10k_configure_dglort_map_pf, .set_dma_mask = fm10k_set_dma_mask_pf, .get_fault = fm10k_get_fault_pf, .get_host_state = fm10k_get_host_state_pf, .request_lport_map = fm10k_request_lport_map_pf, }; static const struct fm10k_iov_ops iov_ops_pf = { .assign_resources = fm10k_iov_assign_resources_pf, .configure_tc = fm10k_iov_configure_tc_pf, .assign_int_moderator = fm10k_iov_assign_int_moderator_pf, .assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf, .reset_resources = fm10k_iov_reset_resources_pf, .set_lport = fm10k_iov_set_lport_pf, .reset_lport = fm10k_iov_reset_lport_pf, .update_stats = fm10k_iov_update_stats_pf, }; static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) { fm10k_get_invariants_generic(hw); return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf); } const struct fm10k_info fm10k_pf_info = { .mac = fm10k_mac_pf, .get_invariants = fm10k_get_invariants_pf, .mac_ops = &mac_ops_pf, .iov_ops = &iov_ops_pf, };
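/*
 * Illustrative sketch (not part of the driver above): the integer
 * rate-to-quanta conversion used by fm10k_iov_configure_tc_pf().  A rate in
 * Mb/s over an 8.192 usec window is rate * 8.192 bits = rate * 1.024 bytes,
 * and 1.024 equals 128/125, which keeps the math in integers.  The function
 * below is hypothetical and exists only to show the arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_rate_to_quanta(uint32_t rate_mbps)
{
	/* bytes transferred per 8.192 usec interval at rate_mbps Mb/s */
	return (rate_mbps * 128) / 125;
}

int main(void)
{
	/* 1000 Mb/s -> 1024 bytes per interval, 4000 Mb/s -> 4096 bytes */
	printf("%u %u\n", example_rate_to_quanta(1000),
	       example_rate_to_quanta(4000));
	return 0;
}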
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/boards/landisk/gio.c - driver for landisk
 *
 * This driver will also support the I-O DATA Device, Inc. LANDISK Board.
 * LANDISK and USL-5P Button, LED and GIO driver drive function.
 *
 * Copyright (C) 2006 kogiidena
 * Copyright (C) 2002 Atom Create Engineering Co., Ltd.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <mach-landisk/mach/gio.h>
#include <mach-landisk/mach/iodata_landisk.h>

#define DEVCOUNT 4
#define GIO_MINOR 2		/* GIO minor no. */

static dev_t dev;
static struct cdev *cdev_p;
static int openCnt;

static int gio_open(struct inode *inode, struct file *filp)
{
	int minor = iminor(inode);
	int ret = -ENOENT;

	preempt_disable();
	if (minor < DEVCOUNT) {
		if (openCnt > 0) {
			ret = -EALREADY;
		} else {
			openCnt++;
			ret = 0;
		}
	}
	preempt_enable();
	return ret;
}

static int gio_close(struct inode *inode, struct file *filp)
{
	int minor = iminor(inode);

	if (minor < DEVCOUNT)
		openCnt--;
	return 0;
}

static long gio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int data;
	static unsigned int addr = 0;

	if (cmd & 0x01) {	/* write */
		if (copy_from_user(&data, (int *)arg, sizeof(int)))
			return -EFAULT;
	}

	switch (cmd) {
	case GIODRV_IOCSGIOSETADDR:	/* address set */
		addr = data;
		break;

	case GIODRV_IOCSGIODATA1:	/* write byte */
		__raw_writeb((unsigned char)(0x0ff & data), addr);
		break;

	case GIODRV_IOCSGIODATA2:	/* write word */
		if (addr & 0x01)
			return -EFAULT;
		__raw_writew((unsigned short int)(0x0ffff & data), addr);
		break;

	case GIODRV_IOCSGIODATA4:	/* write long */
		if (addr & 0x03)
			return -EFAULT;
		__raw_writel(data, addr);
		break;

	case GIODRV_IOCGGIODATA1:	/* read byte */
		data = __raw_readb(addr);
		break;

	case GIODRV_IOCGGIODATA2:	/* read word */
		if (addr & 0x01)
			return -EFAULT;
		data = __raw_readw(addr);
		break;

	case GIODRV_IOCGGIODATA4:	/* read long */
		if (addr & 0x03)
			return -EFAULT;
		data = __raw_readl(addr);
		break;

	default:
		return -EFAULT;
	}

	if ((cmd & 0x01) == 0) {	/* read */
		if (copy_to_user((int *)arg, &data, sizeof(int)))
			return -EFAULT;
	}
	return 0;
}

static const struct file_operations gio_fops = {
	.owner = THIS_MODULE,
	.open = gio_open,		/* open */
	.release = gio_close,		/* release */
	.unlocked_ioctl = gio_ioctl,
	.llseek = noop_llseek,
};

static int __init gio_init(void)
{
	int error;

	printk(KERN_INFO "gio: driver initialized\n");

	openCnt = 0;

	error = alloc_chrdev_region(&dev, 0, DEVCOUNT, "gio");
	if (error < 0) {
		printk(KERN_ERR
		       "gio: Couldn't alloc_chrdev_region, error=%d\n", error);
		return error;
	}

	cdev_p = cdev_alloc();
	if (!cdev_p) {
		printk(KERN_ERR "gio: Couldn't cdev_alloc\n");
		unregister_chrdev_region(dev, DEVCOUNT);
		return -ENOMEM;
	}

	cdev_p->ops = &gio_fops;
	error = cdev_add(cdev_p, dev, DEVCOUNT);
	if (error) {
		printk(KERN_ERR "gio: Couldn't cdev_add, error=%d\n", error);
		cdev_del(cdev_p);
		unregister_chrdev_region(dev, DEVCOUNT);
		return error;
	}

	return 0;
}

static void __exit gio_exit(void)
{
	cdev_del(cdev_p);
	unregister_chrdev_region(dev, DEVCOUNT);
}

module_init(gio_init);
module_exit(gio_exit);

MODULE_LICENSE("GPL");
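/*
 * Hedged user-space sketch of driving the gio ioctl interface above: select a
 * GIO register address with GIODRV_IOCSGIOSETADDR, then read one byte back
 * with GIODRV_IOCGGIODATA1.  The device node path, the example address and
 * the availability of the GIODRV_* ioctl numbers to user space are
 * assumptions made only for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mach/gio.h>		/* assumed user-visible copy of the GIODRV_* numbers */

int main(void)
{
	unsigned int addr = 0xb4000000;	/* hypothetical GIO register address */
	unsigned int data = 0;
	int fd = open("/dev/gio", O_RDWR);	/* assumed device node for minor 0 */

	if (fd < 0)
		return 1;

	/* "write" commands copy the argument in, so this sets the target address */
	if (ioctl(fd, GIODRV_IOCSGIOSETADDR, &addr) == 0 &&
	    ioctl(fd, GIODRV_IOCGGIODATA1, &data) == 0)
		printf("GIO[0x%x] = 0x%02x\n", addr, data & 0xff);

	close(fd);
	return 0;
}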
// SPDX-License-Identifier: GPL-2.0 /* * A sample program to run a User VM on the ACRN hypervisor * * This sample runs in a Service VM, which is a privileged VM of ACRN. * CONFIG_ACRN_HSM needs to be enabled in the Service VM. * * Guest VM code in guest16.s will be executed after the VM launched. * * Copyright (C) 2020 Intel Corporation. All rights reserved. */ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <fcntl.h> #include <unistd.h> #include <signal.h> #include <sys/ioctl.h> #include <linux/acrn.h> #define GUEST_MEMORY_SIZE (1024*1024) void *guest_memory; extern const unsigned char guest16[], guest16_end[]; static char io_request_page[4096] __attribute__((aligned(4096))); static struct acrn_io_request *io_req_buf = (struct acrn_io_request *)io_request_page; __u16 vcpu_num; __u16 vmid; int hsm_fd; int is_running = 1; void vm_exit(int sig) { sig = sig; is_running = 0; ioctl(hsm_fd, ACRN_IOCTL_PAUSE_VM, vmid); ioctl(hsm_fd, ACRN_IOCTL_DESTROY_IOREQ_CLIENT, 0); } int main(int argc, char **argv) { int vcpu_id, ret; struct acrn_vm_creation create_vm = {0}; struct acrn_vm_memmap ram_map = {0}; struct acrn_vcpu_regs regs; struct acrn_io_request *io_req; struct acrn_ioreq_notify __attribute__((aligned(8))) notify; argc = argc; argv = argv; ret = posix_memalign(&guest_memory, 4096, GUEST_MEMORY_SIZE); if (ret < 0) { printf("Not enough memory!\n"); return -1; } hsm_fd = open("/dev/acrn_hsm", O_RDWR|O_CLOEXEC); create_vm.ioreq_buf = (__u64)io_req_buf; ret = ioctl(hsm_fd, ACRN_IOCTL_CREATE_VM, &create_vm); printf("Created VM! [%d]\n", ret); vcpu_num = create_vm.vcpu_num; vmid = create_vm.vmid; /* setup guest memory */ ram_map.type = ACRN_MEMMAP_RAM; ram_map.vma_base = (__u64)guest_memory; ram_map.len = GUEST_MEMORY_SIZE; ram_map.user_vm_pa = 0; ram_map.attr = ACRN_MEM_ACCESS_RWX; ret = ioctl(hsm_fd, ACRN_IOCTL_SET_MEMSEG, &ram_map); printf("Set up VM memory! [%d]\n", ret); memcpy(guest_memory, guest16, guest16_end-guest16); /* setup vcpu registers */ memset(&regs, 0, sizeof(regs)); regs.vcpu_id = 0; regs.vcpu_regs.rip = 0; /* CR0_ET | CR0_NE */ regs.vcpu_regs.cr0 = 0x30U; regs.vcpu_regs.cs_ar = 0x009FU; regs.vcpu_regs.cs_sel = 0xF000U; regs.vcpu_regs.cs_limit = 0xFFFFU; regs.vcpu_regs.cs_base = 0 & 0xFFFF0000UL; regs.vcpu_regs.rip = 0 & 0xFFFFUL; ret = ioctl(hsm_fd, ACRN_IOCTL_SET_VCPU_REGS, &regs); printf("Set up VM BSP registers! [%d]\n", ret); /* create an ioreq client for this VM */ ret = ioctl(hsm_fd, ACRN_IOCTL_CREATE_IOREQ_CLIENT, 0); printf("Created IO request client! [%d]\n", ret); /* run vm */ ret = ioctl(hsm_fd, ACRN_IOCTL_START_VM, vmid); printf("Start VM! [%d]\n", ret); signal(SIGINT, vm_exit); while (is_running) { ret = ioctl(hsm_fd, ACRN_IOCTL_ATTACH_IOREQ_CLIENT, 0); for (vcpu_id = 0; vcpu_id < vcpu_num; vcpu_id++) { io_req = &io_req_buf[vcpu_id]; if ((__sync_add_and_fetch(&io_req->processed, 0) == ACRN_IOREQ_STATE_PROCESSING) && (!io_req->kernel_handled)) if (io_req->type == ACRN_IOREQ_TYPE_PORTIO) { int bytes, port, in; port = io_req->reqs.pio_request.address; bytes = io_req->reqs.pio_request.size; in = (io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ); printf("Guest VM %s PIO[%x] with size[%x]\n", in ? "read" : "write", port, bytes); notify.vmid = vmid; notify.vcpu = vcpu_id; ioctl(hsm_fd, ACRN_IOCTL_NOTIFY_REQUEST_FINISH, &notify); } } } ret = ioctl(hsm_fd, ACRN_IOCTL_DESTROY_VM, NULL); printf("Destroy VM! [%d]\n", ret); close(hsm_fd); free(guest_memory); return 0; }
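/*
 * Hedged follow-on sketch (not part of the sample above): completing a guest
 * port-I/O *read* would also require filling in a value before issuing
 * ACRN_IOCTL_NOTIFY_REQUEST_FINISH; the sample only prints the access.  The
 * pio_request.value field name is taken from <linux/acrn.h> as an assumption,
 * and the all-ones fill simply emulates an unclaimed port.
 */
#include <linux/acrn.h>

void example_complete_pio_read(struct acrn_io_request *io_req)
{
	if (io_req->type == ACRN_IOREQ_TYPE_PORTIO &&
	    io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ)
		io_req->reqs.pio_request.value = ~(__u64)0;	/* reads as all ones */
}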
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) by Jaroslav Kysela <[email protected]> * Takashi Iwai <[email protected]> * * Generic memory allocators */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/dma-map-ops.h> #include <linux/genalloc.h> #include <linux/highmem.h> #include <linux/vmalloc.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> #endif #include <sound/memalloc.h> struct snd_malloc_ops { void *(*alloc)(struct snd_dma_buffer *dmab, size_t size); void (*free)(struct snd_dma_buffer *dmab); dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset); struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset); unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab, unsigned int ofs, unsigned int size); int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area); void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode); }; #define DEFAULT_GFP \ (GFP_KERNEL | \ __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \ __GFP_NOWARN) /* no stack trace print - this call is non-critical */ static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab); static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size) { const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); if (WARN_ON_ONCE(!ops || !ops->alloc)) return NULL; return ops->alloc(dmab, size); } /** * snd_dma_alloc_dir_pages - allocate the buffer area according to the given * type and direction * @type: the DMA buffer type * @device: the device pointer * @dir: DMA direction * @size: the buffer size to allocate * @dmab: buffer allocation record to store the allocated data * * Calls the memory-allocator function for the corresponding * buffer type. * * Return: Zero if the buffer with the given size is allocated successfully, * otherwise a negative value on error. */ int snd_dma_alloc_dir_pages(int type, struct device *device, enum dma_data_direction dir, size_t size, struct snd_dma_buffer *dmab) { if (WARN_ON(!size)) return -ENXIO; if (WARN_ON(!dmab)) return -ENXIO; size = PAGE_ALIGN(size); dmab->dev.type = type; dmab->dev.dev = device; dmab->dev.dir = dir; dmab->bytes = 0; dmab->addr = 0; dmab->private_data = NULL; dmab->area = __snd_dma_alloc_pages(dmab, size); if (!dmab->area) return -ENOMEM; dmab->bytes = size; return 0; } EXPORT_SYMBOL(snd_dma_alloc_dir_pages); /** * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback * @type: the DMA buffer type * @device: the device pointer * @size: the buffer size to allocate * @dmab: buffer allocation record to store the allocated data * * Calls the memory-allocator function for the corresponding * buffer type. When no space is left, this function reduces the size and * tries to allocate again. The size actually allocated is stored in * res_size argument. * * Return: Zero if the buffer with the given size is allocated successfully, * otherwise a negative value on error. */ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size, struct snd_dma_buffer *dmab) { int err; while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) { if (err != -ENOMEM) return err; if (size <= PAGE_SIZE) return -ENOMEM; size >>= 1; size = PAGE_SIZE << get_order(size); } if (! 
dmab->area) return -ENOMEM; return 0; } EXPORT_SYMBOL(snd_dma_alloc_pages_fallback); /** * snd_dma_free_pages - release the allocated buffer * @dmab: the buffer allocation record to release * * Releases the allocated buffer via snd_dma_alloc_pages(). */ void snd_dma_free_pages(struct snd_dma_buffer *dmab) { const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); if (ops && ops->free) ops->free(dmab); } EXPORT_SYMBOL(snd_dma_free_pages); /* called by devres */ static void __snd_release_pages(struct device *dev, void *res) { snd_dma_free_pages(res); } /** * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres * @dev: the device pointer * @type: the DMA buffer type * @dir: DMA direction * @size: the buffer size to allocate * * Allocate buffer pages depending on the given type and manage using devres. * The pages will be released automatically at the device removal. * * Unlike snd_dma_alloc_pages(), this function requires the real device pointer, * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or * SNDRV_DMA_TYPE_VMALLOC type. * * Return: the snd_dma_buffer object at success, or NULL if failed */ struct snd_dma_buffer * snd_devm_alloc_dir_pages(struct device *dev, int type, enum dma_data_direction dir, size_t size) { struct snd_dma_buffer *dmab; int err; if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS || type == SNDRV_DMA_TYPE_VMALLOC)) return NULL; dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL); if (!dmab) return NULL; err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab); if (err < 0) { devres_free(dmab); return NULL; } devres_add(dev, dmab); return dmab; } EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages); /** * snd_dma_buffer_mmap - perform mmap of the given DMA buffer * @dmab: buffer allocation information * @area: VM area information * * Return: zero if successful, or a negative error code */ int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { const struct snd_malloc_ops *ops; if (!dmab) return -ENOENT; ops = snd_dma_get_ops(dmab); if (ops && ops->mmap) return ops->mmap(dmab, area); else return -ENOENT; } EXPORT_SYMBOL(snd_dma_buffer_mmap); #ifdef CONFIG_HAS_DMA /** * snd_dma_buffer_sync - sync DMA buffer between CPU and device * @dmab: buffer allocation information * @mode: sync mode */ void snd_dma_buffer_sync(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode) { const struct snd_malloc_ops *ops; if (!dmab || !dmab->dev.need_sync) return; ops = snd_dma_get_ops(dmab); if (ops && ops->sync) ops->sync(dmab, mode); } EXPORT_SYMBOL_GPL(snd_dma_buffer_sync); #endif /* CONFIG_HAS_DMA */ /** * snd_sgbuf_get_addr - return the physical address at the corresponding offset * @dmab: buffer allocation information * @offset: offset in the ring buffer * * Return: the physical address */ dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset) { const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); if (ops && ops->get_addr) return ops->get_addr(dmab, offset); else return dmab->addr + offset; } EXPORT_SYMBOL(snd_sgbuf_get_addr); /** * snd_sgbuf_get_page - return the physical page at the corresponding offset * @dmab: buffer allocation information * @offset: offset in the ring buffer * * Return: the page pointer */ struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset) { const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); if (ops && ops->get_page) return ops->get_page(dmab, offset); else return virt_to_page(dmab->area + offset); } EXPORT_SYMBOL(snd_sgbuf_get_page); /** * 
snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages * on sg-buffer * @dmab: buffer allocation information * @ofs: offset in the ring buffer * @size: the requested size * * Return: the chunk size */ unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, unsigned int ofs, unsigned int size) { const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); if (ops && ops->get_chunk_size) return ops->get_chunk_size(dmab, ofs, size); else return size; } EXPORT_SYMBOL(snd_sgbuf_get_chunk_size); /* * Continuous pages allocator */ static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr, bool wc) { void *p; gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; again: p = alloc_pages_exact(size, gfp); if (!p) return NULL; *addr = page_to_phys(virt_to_page(p)); if (!dev) return p; if ((*addr + size - 1) & ~dev->coherent_dma_mask) { if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) { gfp |= GFP_DMA32; goto again; } if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) { gfp = (gfp & ~GFP_DMA32) | GFP_DMA; goto again; } } #ifdef CONFIG_X86 if (wc) set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT); #endif return p; } static void do_free_pages(void *p, size_t size, bool wc) { #ifdef CONFIG_X86 if (wc) set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT); #endif free_pages_exact(p, size); } static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size) { return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false); } static void snd_dma_continuous_free(struct snd_dma_buffer *dmab) { do_free_pages(dmab->area, dmab->bytes, false); } static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { return remap_pfn_range(area, area->vm_start, dmab->addr >> PAGE_SHIFT, area->vm_end - area->vm_start, area->vm_page_prot); } static const struct snd_malloc_ops snd_dma_continuous_ops = { .alloc = snd_dma_continuous_alloc, .free = snd_dma_continuous_free, .mmap = snd_dma_continuous_mmap, }; /* * VMALLOC allocator */ static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size) { return vmalloc(size); } static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab) { vfree(dmab->area); } static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { return remap_vmalloc_range(area, dmab->area, 0); } #define get_vmalloc_page_addr(dmab, offset) \ page_to_phys(vmalloc_to_page((dmab)->area + (offset))) static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab, size_t offset) { return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE; } static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab, size_t offset) { return vmalloc_to_page(dmab->area + offset); } static unsigned int snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab, unsigned int ofs, unsigned int size) { unsigned int start, end; unsigned long addr; start = ALIGN_DOWN(ofs, PAGE_SIZE); end = ofs + size - 1; /* the last byte address */ /* check page continuity */ addr = get_vmalloc_page_addr(dmab, start); for (;;) { start += PAGE_SIZE; if (start > end) break; addr += PAGE_SIZE; if (get_vmalloc_page_addr(dmab, start) != addr) return start - ofs; } /* ok, all on continuous pages */ return size; } static const struct snd_malloc_ops snd_dma_vmalloc_ops = { .alloc = snd_dma_vmalloc_alloc, .free = snd_dma_vmalloc_free, .mmap = snd_dma_vmalloc_mmap, .get_addr = snd_dma_vmalloc_get_addr, .get_page = snd_dma_vmalloc_get_page, .get_chunk_size = snd_dma_vmalloc_get_chunk_size, 
}; #ifdef CONFIG_HAS_DMA /* * IRAM allocator */ #ifdef CONFIG_GENERIC_ALLOCATOR static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size) { struct device *dev = dmab->dev.dev; struct gen_pool *pool; void *p; if (dev->of_node) { pool = of_gen_pool_get(dev->of_node, "iram", 0); /* Assign the pool into private_data field */ dmab->private_data = pool; p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE); if (p) return p; } /* Internal memory might have limited size and no enough space, * so if we fail to malloc, try to fetch memory traditionally. */ dmab->dev.type = SNDRV_DMA_TYPE_DEV; return __snd_dma_alloc_pages(dmab, size); } static void snd_dma_iram_free(struct snd_dma_buffer *dmab) { struct gen_pool *pool = dmab->private_data; if (pool && dmab->area) gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); } static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); return remap_pfn_range(area, area->vm_start, dmab->addr >> PAGE_SHIFT, area->vm_end - area->vm_start, area->vm_page_prot); } static const struct snd_malloc_ops snd_dma_iram_ops = { .alloc = snd_dma_iram_alloc, .free = snd_dma_iram_free, .mmap = snd_dma_iram_mmap, }; #endif /* CONFIG_GENERIC_ALLOCATOR */ /* * Coherent device pages allocator */ static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size) { return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); } static void snd_dma_dev_free(struct snd_dma_buffer *dmab) { dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); } static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { return dma_mmap_coherent(dmab->dev.dev, area, dmab->area, dmab->addr, dmab->bytes); } static const struct snd_malloc_ops snd_dma_dev_ops = { .alloc = snd_dma_dev_alloc, .free = snd_dma_dev_free, .mmap = snd_dma_dev_mmap, }; /* * Write-combined pages */ #ifdef CONFIG_SND_DMA_SGBUF /* x86-specific allocations */ static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size) { void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true); if (!p) return NULL; dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL); if (dmab->addr == DMA_MAPPING_ERROR) { do_free_pages(dmab->area, size, true); return NULL; } return p; } static void snd_dma_wc_free(struct snd_dma_buffer *dmab) { dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes, DMA_BIDIRECTIONAL); do_free_pages(dmab->area, dmab->bytes, true); } static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); return dma_mmap_coherent(dmab->dev.dev, area, dmab->area, dmab->addr, dmab->bytes); } #else static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size) { return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); } static void snd_dma_wc_free(struct snd_dma_buffer *dmab) { dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); } static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { return dma_mmap_wc(dmab->dev.dev, area, dmab->area, dmab->addr, dmab->bytes); } #endif static const struct snd_malloc_ops snd_dma_wc_ops = { .alloc = snd_dma_wc_alloc, .free = snd_dma_wc_free, .mmap = snd_dma_wc_mmap, }; /* * Non-contiguous pages allocator */ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size) { struct sg_table *sgt; void *p; sgt = 
dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir, DEFAULT_GFP, 0); if (!sgt) return NULL; dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, sg_dma_address(sgt->sgl)); p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt); if (p) { dmab->private_data = sgt; /* store the first page address for convenience */ dmab->addr = snd_sgbuf_get_addr(dmab, 0); } else { dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir); } return p; } static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab) { dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area); dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data, dmab->dev.dir); } static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { return dma_mmap_noncontiguous(dmab->dev.dev, area, dmab->bytes, dmab->private_data); } static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode) { if (mode == SNDRV_DMA_SYNC_CPU) { if (dmab->dev.dir == DMA_TO_DEVICE) return; invalidate_kernel_vmap_range(dmab->area, dmab->bytes); dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data, dmab->dev.dir); } else { if (dmab->dev.dir == DMA_FROM_DEVICE) return; flush_kernel_vmap_range(dmab->area, dmab->bytes); dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data, dmab->dev.dir); } } static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab, struct sg_page_iter *piter, size_t offset) { struct sg_table *sgt = dmab->private_data; __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents, offset >> PAGE_SHIFT); } static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab, size_t offset) { struct sg_dma_page_iter iter; snd_dma_noncontig_iter_set(dmab, &iter.base, offset); __sg_page_iter_dma_next(&iter); return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE; } static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab, size_t offset) { struct sg_page_iter iter; snd_dma_noncontig_iter_set(dmab, &iter, offset); __sg_page_iter_next(&iter); return sg_page_iter_page(&iter); } static unsigned int snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab, unsigned int ofs, unsigned int size) { struct sg_dma_page_iter iter; unsigned int start, end; unsigned long addr; start = ALIGN_DOWN(ofs, PAGE_SIZE); end = ofs + size - 1; /* the last byte address */ snd_dma_noncontig_iter_set(dmab, &iter.base, start); if (!__sg_page_iter_dma_next(&iter)) return 0; /* check page continuity */ addr = sg_page_iter_dma_address(&iter); for (;;) { start += PAGE_SIZE; if (start > end) break; addr += PAGE_SIZE; if (!__sg_page_iter_dma_next(&iter) || sg_page_iter_dma_address(&iter) != addr) return start - ofs; } /* ok, all on continuous pages */ return size; } static const struct snd_malloc_ops snd_dma_noncontig_ops = { .alloc = snd_dma_noncontig_alloc, .free = snd_dma_noncontig_free, .mmap = snd_dma_noncontig_mmap, .sync = snd_dma_noncontig_sync, .get_addr = snd_dma_noncontig_get_addr, .get_page = snd_dma_noncontig_get_page, .get_chunk_size = snd_dma_noncontig_get_chunk_size, }; #ifdef CONFIG_SND_DMA_SGBUF /* Fallback SG-buffer allocations for x86 */ struct snd_dma_sg_fallback { struct sg_table sgt; /* used by get_addr - must be the first item */ size_t count; struct page **pages; unsigned int *npages; }; static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab, struct snd_dma_sg_fallback *sgbuf) { bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG; size_t i, size; if (sgbuf->pages && sgbuf->npages) { i = 0; while (i < sgbuf->count) { 
size = sgbuf->npages[i]; if (!size) break; do_free_pages(page_address(sgbuf->pages[i]), size << PAGE_SHIFT, wc); i += size; } } kvfree(sgbuf->pages); kvfree(sgbuf->npages); kfree(sgbuf); } /* fallback manual S/G buffer allocations */ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size) { bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG; struct snd_dma_sg_fallback *sgbuf; struct page **pagep, *curp; size_t chunk; dma_addr_t addr; unsigned int idx, npages; void *p; sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL); if (!sgbuf) return NULL; size = PAGE_ALIGN(size); sgbuf->count = size >> PAGE_SHIFT; sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL); sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL); if (!sgbuf->pages || !sgbuf->npages) goto error; pagep = sgbuf->pages; chunk = size; idx = 0; while (size > 0) { chunk = min(size, chunk); p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc); if (!p) { if (chunk <= PAGE_SIZE) goto error; chunk >>= 1; chunk = PAGE_SIZE << get_order(chunk); continue; } size -= chunk; /* fill pages */ npages = chunk >> PAGE_SHIFT; sgbuf->npages[idx] = npages; idx += npages; curp = virt_to_page(p); while (npages--) *pagep++ = curp++; } if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count, 0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL)) goto error; if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0)) goto error_dma_map; p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL); if (!p) goto error_vmap; dmab->private_data = sgbuf; /* store the first page address for convenience */ dmab->addr = snd_sgbuf_get_addr(dmab, 0); return p; error_vmap: dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0); error_dma_map: sg_free_table(&sgbuf->sgt); error: __snd_dma_sg_fallback_free(dmab, sgbuf); return NULL; } static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab) { struct snd_dma_sg_fallback *sgbuf = dmab->private_data; vunmap(dmab->area); dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(&sgbuf->sgt); __snd_dma_sg_fallback_free(dmab, dmab->private_data); } static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { struct snd_dma_sg_fallback *sgbuf = dmab->private_data; if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); return vm_map_pages(area, sgbuf->pages, sgbuf->count); } static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size) { int type = dmab->dev.type; void *p; /* try the standard DMA API allocation at first */ if (type == SNDRV_DMA_TYPE_DEV_WC_SG) dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC; else dmab->dev.type = SNDRV_DMA_TYPE_DEV; p = __snd_dma_alloc_pages(dmab, size); if (p) return p; dmab->dev.type = type; /* restore the type */ return snd_dma_sg_fallback_alloc(dmab, size); } static const struct snd_malloc_ops snd_dma_sg_ops = { .alloc = snd_dma_sg_alloc, .free = snd_dma_sg_fallback_free, .mmap = snd_dma_sg_fallback_mmap, /* reuse noncontig helper */ .get_addr = snd_dma_noncontig_get_addr, /* reuse vmalloc helpers */ .get_page = snd_dma_vmalloc_get_page, .get_chunk_size = snd_dma_vmalloc_get_chunk_size, }; #endif /* CONFIG_SND_DMA_SGBUF */ /* * Non-coherent pages allocator */ static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size) { void *p; p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr, dmab->dev.dir, DEFAULT_GFP); if (p) dmab->dev.need_sync = 
dma_need_sync(dmab->dev.dev, dmab->addr); return p; } static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab) { dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr, dmab->dev.dir); } static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area) { area->vm_page_prot = vm_get_page_prot(area->vm_flags); return dma_mmap_pages(dmab->dev.dev, area, area->vm_end - area->vm_start, virt_to_page(dmab->area)); } static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode) { if (mode == SNDRV_DMA_SYNC_CPU) { if (dmab->dev.dir != DMA_TO_DEVICE) dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr, dmab->bytes, dmab->dev.dir); } else { if (dmab->dev.dir != DMA_FROM_DEVICE) dma_sync_single_for_device(dmab->dev.dev, dmab->addr, dmab->bytes, dmab->dev.dir); } } static const struct snd_malloc_ops snd_dma_noncoherent_ops = { .alloc = snd_dma_noncoherent_alloc, .free = snd_dma_noncoherent_free, .mmap = snd_dma_noncoherent_mmap, .sync = snd_dma_noncoherent_sync, }; #endif /* CONFIG_HAS_DMA */ /* * Entry points */ static const struct snd_malloc_ops *snd_dma_ops[] = { [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops, [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops, #ifdef CONFIG_HAS_DMA [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops, [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops, [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops, [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops, #ifdef CONFIG_SND_DMA_SGBUF [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops, [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops, #endif #ifdef CONFIG_GENERIC_ALLOCATOR [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops, #endif /* CONFIG_GENERIC_ALLOCATOR */ #endif /* CONFIG_HAS_DMA */ }; static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab) { if (WARN_ON_ONCE(!dmab)) return NULL; if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN || dmab->dev.type >= ARRAY_SIZE(snd_dma_ops))) return NULL; return snd_dma_ops[dmab->dev.type]; }
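/*
 * Usage sketch for the allocator API above (illustrative only, not part of the
 * file): a driver-side helper that grabs a DMA buffer, makes CPU-written data
 * visible to the device, and later releases the buffer.  The my_prepare_buffer/
 * my_release_buffer names and the zero-fill are hypothetical; the snd_dma_*
 * calls and field accesses follow the definitions above.
 */
static int my_prepare_buffer(struct device *my_card_dev,
			     struct snd_dma_buffer *dmab, size_t bytes)
{
	int err;

	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, my_card_dev,
				      DMA_BIDIRECTIONAL, bytes, dmab);
	if (err < 0)
		return err;

	/* CPU fills dmab->area ..., then pushes it toward the device */
	memset(dmab->area, 0, dmab->bytes);
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);

	/*
	 * dmab->addr (or snd_sgbuf_get_addr() for SG-backed types) is what the
	 * driver programs into the hardware's DMA registers.
	 */
	return 0;
}

static void my_release_buffer(struct snd_dma_buffer *dmab)
{
	snd_dma_free_pages(dmab);
}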
// SPDX-License-Identifier: GPL-2.0 #include "util/cgroup.h" #include "util/debug.h" #include "util/evlist.h" #include "util/machine.h" #include "util/map.h" #include "util/symbol.h" #include "util/target.h" #include "util/thread.h" #include "util/thread_map.h" #include "util/lock-contention.h" #include <linux/zalloc.h> #include <linux/string.h> #include <bpf/bpf.h> #include <inttypes.h> #include "bpf_skel/lock_contention.skel.h" #include "bpf_skel/lock_data.h" static struct lock_contention_bpf *skel; int lock_contention_prepare(struct lock_contention *con) { int i, fd; int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1; struct evlist *evlist = con->evlist; struct target *target = con->target; skel = lock_contention_bpf__open(); if (!skel) { pr_err("Failed to open lock-contention BPF skeleton\n"); return -1; } bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64)); bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries); bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries); if (con->aggr_mode == LOCK_AGGR_TASK) bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries); else bpf_map__set_max_entries(skel->maps.task_data, 1); if (con->save_callstack) bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries); else bpf_map__set_max_entries(skel->maps.stacks, 1); if (target__has_cpu(target)) { skel->rodata->has_cpu = 1; ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus); } if (target__has_task(target)) { skel->rodata->has_task = 1; ntasks = perf_thread_map__nr(evlist->core.threads); } if (con->filters->nr_types) { skel->rodata->has_type = 1; ntypes = con->filters->nr_types; } if (con->filters->nr_cgrps) { skel->rodata->has_cgroup = 1; ncgrps = con->filters->nr_cgrps; } /* resolve lock name filters to addr */ if (con->filters->nr_syms) { struct symbol *sym; struct map *kmap; unsigned long *addrs; for (i = 0; i < con->filters->nr_syms; i++) { sym = machine__find_kernel_symbol_by_name(con->machine, con->filters->syms[i], &kmap); if (sym == NULL) { pr_warning("ignore unknown symbol: %s\n", con->filters->syms[i]); continue; } addrs = realloc(con->filters->addrs, (con->filters->nr_addrs + 1) * sizeof(*addrs)); if (addrs == NULL) { pr_warning("memory allocation failure\n"); continue; } addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start); con->filters->addrs = addrs; } naddrs = con->filters->nr_addrs; skel->rodata->has_addr = 1; } bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus); bpf_map__set_max_entries(skel->maps.task_filter, ntasks); bpf_map__set_max_entries(skel->maps.type_filter, ntypes); bpf_map__set_max_entries(skel->maps.addr_filter, naddrs); bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps); skel->rodata->stack_skip = con->stack_skip; skel->rodata->aggr_mode = con->aggr_mode; skel->rodata->needs_callstack = con->save_callstack; skel->rodata->lock_owner = con->owner; if (con->aggr_mode == LOCK_AGGR_CGROUP || con->filters->nr_cgrps) { if (cgroup_is_v2("perf_event")) skel->rodata->use_cgroup_v2 = 1; } if (lock_contention_bpf__load(skel) < 0) { pr_err("Failed to load lock-contention BPF skeleton\n"); return -1; } if (target__has_cpu(target)) { u32 cpu; u8 val = 1; fd = bpf_map__fd(skel->maps.cpu_filter); for (i = 0; i < ncpus; i++) { cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu; bpf_map_update_elem(fd, &cpu, &val, BPF_ANY); } } if (target__has_task(target)) { u32 pid; u8 val = 1; fd = bpf_map__fd(skel->maps.task_filter); for (i = 0; i < ntasks; i++) { pid = 
perf_thread_map__pid(evlist->core.threads, i); bpf_map_update_elem(fd, &pid, &val, BPF_ANY); } } if (target__none(target) && evlist->workload.pid > 0) { u32 pid = evlist->workload.pid; u8 val = 1; fd = bpf_map__fd(skel->maps.task_filter); bpf_map_update_elem(fd, &pid, &val, BPF_ANY); } if (con->filters->nr_types) { u8 val = 1; fd = bpf_map__fd(skel->maps.type_filter); for (i = 0; i < con->filters->nr_types; i++) bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY); } if (con->filters->nr_addrs) { u8 val = 1; fd = bpf_map__fd(skel->maps.addr_filter); for (i = 0; i < con->filters->nr_addrs; i++) bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY); } if (con->filters->nr_cgrps) { u8 val = 1; fd = bpf_map__fd(skel->maps.cgroup_filter); for (i = 0; i < con->filters->nr_cgrps; i++) bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY); } if (con->aggr_mode == LOCK_AGGR_CGROUP) read_all_cgroups(&con->cgroups); bpf_program__set_autoload(skel->progs.collect_lock_syms, false); lock_contention_bpf__attach(skel); return 0; } /* * Run the BPF program directly using BPF_PROG_TEST_RUN to update the end * timestamp in ktime so that it can calculate delta easily. */ static void mark_end_timestamp(void) { DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .flags = BPF_F_TEST_RUN_ON_CPU, ); int prog_fd = bpf_program__fd(skel->progs.end_timestamp); bpf_prog_test_run_opts(prog_fd, &opts); } static void update_lock_stat(int map_fd, int pid, u64 end_ts, enum lock_aggr_mode aggr_mode, struct tstamp_data *ts_data) { u64 delta; struct contention_key stat_key = {}; struct contention_data stat_data; if (ts_data->timestamp >= end_ts) return; delta = end_ts - ts_data->timestamp; switch (aggr_mode) { case LOCK_AGGR_CALLER: stat_key.stack_id = ts_data->stack_id; break; case LOCK_AGGR_TASK: stat_key.pid = pid; break; case LOCK_AGGR_ADDR: stat_key.lock_addr_or_cgroup = ts_data->lock; break; case LOCK_AGGR_CGROUP: /* TODO */ return; default: return; } if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0) return; stat_data.total_time += delta; stat_data.count++; if (delta > stat_data.max_time) stat_data.max_time = delta; if (delta < stat_data.min_time) stat_data.min_time = delta; bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST); } /* * Account entries in the tstamp map (which didn't see the corresponding * lock:contention_end tracepoint) using end_ts. */ static void account_end_timestamp(struct lock_contention *con) { int ts_fd, stat_fd; int *prev_key, key; u64 end_ts = skel->bss->end_ts; int total_cpus; enum lock_aggr_mode aggr_mode = con->aggr_mode; struct tstamp_data ts_data, *cpu_data; /* Iterate per-task tstamp map (key = TID) */ ts_fd = bpf_map__fd(skel->maps.tstamp); stat_fd = bpf_map__fd(skel->maps.lock_stat); prev_key = NULL; while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) { if (bpf_map_lookup_elem(ts_fd, &key, &ts_data) == 0) { int pid = key; if (aggr_mode == LOCK_AGGR_TASK && con->owner) pid = ts_data.flags; update_lock_stat(stat_fd, pid, end_ts, aggr_mode, &ts_data); } prev_key = &key; } /* Now it'll check per-cpu tstamp map which doesn't have TID. 
*/ if (aggr_mode == LOCK_AGGR_TASK || aggr_mode == LOCK_AGGR_CGROUP) return; total_cpus = cpu__max_cpu().cpu; ts_fd = bpf_map__fd(skel->maps.tstamp_cpu); cpu_data = calloc(total_cpus, sizeof(*cpu_data)); if (cpu_data == NULL) return; prev_key = NULL; while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) { if (bpf_map_lookup_elem(ts_fd, &key, cpu_data) < 0) goto next; for (int i = 0; i < total_cpus; i++) { if (cpu_data[i].lock == 0) continue; update_lock_stat(stat_fd, -1, end_ts, aggr_mode, &cpu_data[i]); } next: prev_key = &key; } free(cpu_data); } int lock_contention_start(void) { skel->bss->enabled = 1; return 0; } int lock_contention_stop(void) { skel->bss->enabled = 0; mark_end_timestamp(); return 0; } static const char *lock_contention_get_name(struct lock_contention *con, struct contention_key *key, u64 *stack_trace, u32 flags) { int idx = 0; u64 addr; const char *name = ""; static char name_buf[KSYM_NAME_LEN]; struct symbol *sym; struct map *kmap; struct machine *machine = con->machine; if (con->aggr_mode == LOCK_AGGR_TASK) { struct contention_task_data task; int pid = key->pid; int task_fd = bpf_map__fd(skel->maps.task_data); /* do not update idle comm which contains CPU number */ if (pid) { struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid); if (t == NULL) return name; if (!bpf_map_lookup_elem(task_fd, &pid, &task) && thread__set_comm(t, task.comm, /*timestamp=*/0)) name = task.comm; } return name; } if (con->aggr_mode == LOCK_AGGR_ADDR) { int lock_fd = bpf_map__fd(skel->maps.lock_syms); /* per-process locks set upper bits of the flags */ if (flags & LCD_F_MMAP_LOCK) return "mmap_lock"; if (flags & LCD_F_SIGHAND_LOCK) return "siglock"; /* global locks with symbols */ sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap); if (sym) return sym->name; /* try semi-global locks collected separately */ if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) { if (flags == LOCK_CLASS_RQLOCK) return "rq_lock"; } return ""; } if (con->aggr_mode == LOCK_AGGR_CGROUP) { u64 cgrp_id = key->lock_addr_or_cgroup; struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id); if (cgrp) return cgrp->name; snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64 "", cgrp_id); return name_buf; } /* LOCK_AGGR_CALLER: skip lock internal functions */ while (machine__is_lock_function(machine, stack_trace[idx]) && idx < con->max_stack - 1) idx++; addr = stack_trace[idx]; sym = machine__find_kernel_symbol(machine, addr, &kmap); if (sym) { unsigned long offset; offset = map__map_ip(kmap, addr) - sym->start; if (offset == 0) return sym->name; snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset); } else { snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr); } return name_buf; } int lock_contention_read(struct lock_contention *con) { int fd, stack, err = 0; struct contention_key *prev_key, key = {}; struct contention_data data = {}; struct lock_stat *st = NULL; struct machine *machine = con->machine; u64 *stack_trace; size_t stack_size = con->max_stack * sizeof(*stack_trace); fd = bpf_map__fd(skel->maps.lock_stat); stack = bpf_map__fd(skel->maps.stacks); con->fails.task = skel->bss->task_fail; con->fails.stack = skel->bss->stack_fail; con->fails.time = skel->bss->time_fail; con->fails.data = skel->bss->data_fail; stack_trace = zalloc(stack_size); if (stack_trace == NULL) return -1; account_end_timestamp(con); if (con->aggr_mode == LOCK_AGGR_TASK) { struct thread *idle = machine__findnew_thread(machine, /*pid=*/0, /*tid=*/0); 
thread__set_comm(idle, "swapper", /*timestamp=*/0); } if (con->aggr_mode == LOCK_AGGR_ADDR) { DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .flags = BPF_F_TEST_RUN_ON_CPU, ); int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms); bpf_prog_test_run_opts(prog_fd, &opts); } /* make sure it loads the kernel map */ maps__load_first(machine->kmaps); prev_key = NULL; while (!bpf_map_get_next_key(fd, prev_key, &key)) { s64 ls_key; const char *name; /* to handle errors in the loop body */ err = -1; bpf_map_lookup_elem(fd, &key, &data); if (con->save_callstack) { bpf_map_lookup_elem(stack, &key.stack_id, stack_trace); if (!match_callstack_filter(machine, stack_trace)) { con->nr_filtered += data.count; goto next; } } switch (con->aggr_mode) { case LOCK_AGGR_CALLER: ls_key = key.stack_id; break; case LOCK_AGGR_TASK: ls_key = key.pid; break; case LOCK_AGGR_ADDR: case LOCK_AGGR_CGROUP: ls_key = key.lock_addr_or_cgroup; break; default: goto next; } st = lock_stat_find(ls_key); if (st != NULL) { st->wait_time_total += data.total_time; if (st->wait_time_max < data.max_time) st->wait_time_max = data.max_time; if (st->wait_time_min > data.min_time) st->wait_time_min = data.min_time; st->nr_contended += data.count; if (st->nr_contended) st->avg_wait_time = st->wait_time_total / st->nr_contended; goto next; } name = lock_contention_get_name(con, &key, stack_trace, data.flags); st = lock_stat_findnew(ls_key, name, data.flags); if (st == NULL) break; st->nr_contended = data.count; st->wait_time_total = data.total_time; st->wait_time_max = data.max_time; st->wait_time_min = data.min_time; if (data.count) st->avg_wait_time = data.total_time / data.count; if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) { st->callstack = memdup(stack_trace, stack_size); if (st->callstack == NULL) break; } next: prev_key = &key; /* we're fine now, reset the error */ err = 0; } free(stack_trace); return err; } int lock_contention_finish(struct lock_contention *con) { if (skel) { skel->bss->enabled = 0; lock_contention_bpf__destroy(skel); } while (!RB_EMPTY_ROOT(&con->cgroups)) { struct rb_node *node = rb_first(&con->cgroups); struct cgroup *cgrp = rb_entry(node, struct cgroup, node); rb_erase(node, &con->cgroups); cgroup__put(cgrp); } return 0; }
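/*
 * Illustrative call sequence only (roughly how perf's lock contention command
 * is expected to drive the helpers defined above); the wrapper function is
 * hypothetical and the struct lock_contention setup is assumed to be done by
 * the caller, as builtin-lock.c does.
 */
static int run_contention_session(struct lock_contention *con)
{
	int err;

	/* con->evlist, con->target, con->machine, con->filters etc. must
	 * already be populated by the caller. */
	err = lock_contention_prepare(con);	/* open/load/attach the BPF skeleton */
	if (err)
		return err;

	lock_contention_start();		/* skel->bss->enabled = 1 */
	/* ... workload runs; BPF programs fill the lock_stat/tstamp maps ... */
	lock_contention_stop();			/* disable tracing, mark end timestamp */

	err = lock_contention_read(con);	/* fold BPF maps into lock_stat entries */
	lock_contention_finish(con);		/* destroy skeleton, release cgroups */
	return err;
}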
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Linaro. * Viresh Kumar <[email protected]> */ #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include "cpufreq-dt.h" /* * Machines for which the cpufreq device is *always* created, mostly used for * platforms using "operating-points" (V1) property. */ static const struct of_device_id allowlist[] __initconst = { { .compatible = "allwinner,sun4i-a10", }, { .compatible = "allwinner,sun5i-a10s", }, { .compatible = "allwinner,sun5i-a13", }, { .compatible = "allwinner,sun5i-r8", }, { .compatible = "allwinner,sun6i-a31", }, { .compatible = "allwinner,sun6i-a31s", }, { .compatible = "allwinner,sun7i-a20", }, { .compatible = "allwinner,sun8i-a23", }, { .compatible = "allwinner,sun8i-a83t", }, { .compatible = "allwinner,sun8i-h3", }, { .compatible = "apm,xgene-shadowcat", }, { .compatible = "arm,integrator-ap", }, { .compatible = "arm,integrator-cp", }, { .compatible = "hisilicon,hi3660", }, { .compatible = "fsl,imx27", }, { .compatible = "fsl,imx51", }, { .compatible = "fsl,imx53", }, { .compatible = "marvell,berlin", }, { .compatible = "marvell,pxa250", }, { .compatible = "marvell,pxa270", }, { .compatible = "samsung,exynos3250", }, { .compatible = "samsung,exynos4210", }, { .compatible = "samsung,exynos5250", }, #ifndef CONFIG_BL_SWITCHER { .compatible = "samsung,exynos5800", }, #endif { .compatible = "renesas,emev2", }, { .compatible = "renesas,r7s72100", }, { .compatible = "renesas,r8a73a4", }, { .compatible = "renesas,r8a7740", }, { .compatible = "renesas,r8a7742", }, { .compatible = "renesas,r8a7743", }, { .compatible = "renesas,r8a7744", }, { .compatible = "renesas,r8a7745", }, { .compatible = "renesas,r8a7778", }, { .compatible = "renesas,r8a7779", }, { .compatible = "renesas,r8a7790", }, { .compatible = "renesas,r8a7791", }, { .compatible = "renesas,r8a7792", }, { .compatible = "renesas,r8a7793", }, { .compatible = "renesas,r8a7794", }, { .compatible = "renesas,sh73a0", }, { .compatible = "rockchip,rk2928", }, { .compatible = "rockchip,rk3036", }, { .compatible = "rockchip,rk3066a", }, { .compatible = "rockchip,rk3066b", }, { .compatible = "rockchip,rk3188", }, { .compatible = "rockchip,rk3228", }, { .compatible = "rockchip,rk3288", }, { .compatible = "rockchip,rk3328", }, { .compatible = "rockchip,rk3366", }, { .compatible = "rockchip,rk3368", }, { .compatible = "rockchip,rk3399", .data = &(struct cpufreq_dt_platform_data) { .have_governor_per_policy = true, }, }, { .compatible = "st-ericsson,u8500", }, { .compatible = "st-ericsson,u8540", }, { .compatible = "st-ericsson,u9500", }, { .compatible = "st-ericsson,u9540", }, { .compatible = "starfive,jh7110", }, { .compatible = "ti,omap2", }, { .compatible = "ti,omap4", }, { .compatible = "ti,omap5", }, { .compatible = "xlnx,zynq-7000", }, { .compatible = "xlnx,zynqmp", }, { } }; /* * Machines for which the cpufreq device is *not* created, mostly used for * platforms using "operating-points-v2" property. 
*/ static const struct of_device_id blocklist[] __initconst = { { .compatible = "allwinner,sun50i-a100" }, { .compatible = "allwinner,sun50i-h6", }, { .compatible = "allwinner,sun50i-h616", }, { .compatible = "allwinner,sun50i-h618", }, { .compatible = "allwinner,sun50i-h700", }, { .compatible = "apple,arm-platform", }, { .compatible = "arm,vexpress", }, { .compatible = "calxeda,highbank", }, { .compatible = "calxeda,ecx-2000", }, { .compatible = "fsl,imx7ulp", }, { .compatible = "fsl,imx7d", }, { .compatible = "fsl,imx7s", }, { .compatible = "fsl,imx8mq", }, { .compatible = "fsl,imx8mm", }, { .compatible = "fsl,imx8mn", }, { .compatible = "fsl,imx8mp", }, { .compatible = "marvell,armadaxp", }, { .compatible = "mediatek,mt2701", }, { .compatible = "mediatek,mt2712", }, { .compatible = "mediatek,mt7622", }, { .compatible = "mediatek,mt7623", }, { .compatible = "mediatek,mt8167", }, { .compatible = "mediatek,mt817x", }, { .compatible = "mediatek,mt8173", }, { .compatible = "mediatek,mt8176", }, { .compatible = "mediatek,mt8183", }, { .compatible = "mediatek,mt8186", }, { .compatible = "mediatek,mt8365", }, { .compatible = "mediatek,mt8516", }, { .compatible = "nvidia,tegra20", }, { .compatible = "nvidia,tegra30", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra210", }, { .compatible = "nvidia,tegra234", }, { .compatible = "qcom,apq8096", }, { .compatible = "qcom,msm8909", }, { .compatible = "qcom,msm8996", }, { .compatible = "qcom,msm8998", }, { .compatible = "qcom,qcm2290", }, { .compatible = "qcom,qcm6490", }, { .compatible = "qcom,qcs404", }, { .compatible = "qcom,qdu1000", }, { .compatible = "qcom,sa8155p" }, { .compatible = "qcom,sa8540p" }, { .compatible = "qcom,sa8775p" }, { .compatible = "qcom,sc7180", }, { .compatible = "qcom,sc7280", }, { .compatible = "qcom,sc8180x", }, { .compatible = "qcom,sc8280xp", }, { .compatible = "qcom,sdm670", }, { .compatible = "qcom,sdm845", }, { .compatible = "qcom,sdx75", }, { .compatible = "qcom,sm6115", }, { .compatible = "qcom,sm6350", }, { .compatible = "qcom,sm6375", }, { .compatible = "qcom,sm7225", }, { .compatible = "qcom,sm7325", }, { .compatible = "qcom,sm8150", }, { .compatible = "qcom,sm8250", }, { .compatible = "qcom,sm8350", }, { .compatible = "qcom,sm8450", }, { .compatible = "qcom,sm8550", }, { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, { .compatible = "st,stih418", }, { .compatible = "ti,am33xx", }, { .compatible = "ti,am43", }, { .compatible = "ti,dra7", }, { .compatible = "ti,omap3", }, { .compatible = "ti,am625", }, { .compatible = "ti,am62a7", }, { .compatible = "ti,am62p5", }, { .compatible = "qcom,ipq5332", }, { .compatible = "qcom,ipq6018", }, { .compatible = "qcom,ipq8064", }, { .compatible = "qcom,ipq8074", }, { .compatible = "qcom,ipq9574", }, { .compatible = "qcom,apq8064", }, { .compatible = "qcom,msm8974", }, { .compatible = "qcom,msm8960", }, { } }; static bool __init cpu0_node_has_opp_v2_prop(void) { struct device_node *np __free(device_node) = of_cpu_device_node_get(0); bool ret = false; if (of_property_present(np, "operating-points-v2")) ret = true; return ret; } static int __init cpufreq_dt_platdev_init(void) { struct device_node *np __free(device_node) = of_find_node_by_path("/"); const struct of_device_id *match; const void *data = NULL; if (!np) return -ENODEV; match = of_match_node(allowlist, np); if (match) { data = match->data; goto create_pdev; } if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np)) goto create_pdev; return -ENODEV; create_pdev: return 
PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", -1, data, sizeof(struct cpufreq_dt_platform_data))); } core_initcall(cpufreq_dt_platdev_init); MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver"); MODULE_LICENSE("GPL");
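/*
 * Illustration only (hypothetical compatibles, not real boards): the shape of
 * the entries the two tables above expect.  A legacy "operating-points" (v1)
 * platform is listed in the allowlist, optionally with per-policy-governor
 * platform data; a platform with its own cpufreq driver goes on the blocklist;
 * everything else with "operating-points-v2" on cpu0 gets the "cpufreq-dt"
 * device created automatically.
 */
static const struct of_device_id example_entries[] __initconst = {
	/* plain allowlist-style entry */
	{ .compatible = "vendor,soc-with-opp-v1", },
	/* allowlist entry requesting a governor per policy */
	{ .compatible = "vendor,soc-per-policy",
	  .data = &(struct cpufreq_dt_platform_data) {
		.have_governor_per_policy = true,
	  },
	},
	{ }
};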
// SPDX-License-Identifier: GPL-2.0
/*
 * Device Tree file for Seagate NAS 2-Bay (Armada 370 SoC).
 *
 * Copyright (C) 2015 Seagate
 *
 * Author: Vincent Donnefort <[email protected]>
 */

/*
 * Here is some information to help identify the device:
 *
 * Product name			: Seagate NAS 2-Bay
 * Code name (board/PCB)	: Dart 2-Bay
 * Model name (case sticker)	: SRPD20
 * Material desc (product spec)	: STCTxxxxxxx
 */

/dts-v1/;
#include "armada-370-seagate-nas-xbay.dtsi"

/ {
	model = "Seagate NAS 2-Bay (Dart, SRPD20)";
	compatible = "seagate,dart-2", "marvell,armada370", "marvell,armada-370-xp";

	gpio-fan {
		gpio-fan,speed-map = < 0 3>,
				     < 950 2>,
				     <1400 1>,
				     <1800 0>;
	};
};
// SPDX-License-Identifier: GPL-2.0 /* * r8a779g0 Clock Pulse Generator / Module Standby and Software Reset * * Copyright (C) 2022 Renesas Electronics Corp. * * Based on r8a779f0-cpg-mssr.c */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/device.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/soc/renesas/rcar-rst.h> #include <dt-bindings/clock/r8a779g0-cpg-mssr.h> #include "renesas-cpg-mssr.h" #include "rcar-gen4-cpg.h" enum clk_ids { /* Core Clock Outputs exported to DT */ LAST_DT_CORE_CLK = R8A779G0_CLK_CP, /* External Input Clocks */ CLK_EXTAL, CLK_EXTALR, /* Internal Core Clocks */ CLK_MAIN, CLK_PLL1, CLK_PLL2, CLK_PLL3, CLK_PLL4, CLK_PLL5, CLK_PLL6, CLK_PLL1_DIV2, CLK_PLL2_DIV2, CLK_PLL3_DIV2, CLK_PLL4_DIV2, CLK_PLL5_DIV2, CLK_PLL5_DIV4, CLK_PLL6_DIV2, CLK_S0, CLK_S0_VIO, CLK_S0_VC, CLK_S0_HSC, CLK_SASYNCPER, CLK_SV_VIP, CLK_SV_IR, CLK_SDSRC, CLK_RPCSRC, CLK_VIO, CLK_VC, CLK_OCO, /* Module Clocks */ MOD_CLK_BASE }; static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = { /* External Clock Inputs */ DEF_INPUT("extal", CLK_EXTAL), DEF_INPUT("extalr", CLK_EXTALR), /* Internal Core Clocks */ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL), DEF_GEN4_PLL_F8_25(".pll1", 1, CLK_PLL1, CLK_MAIN), DEF_GEN4_PLL_V8_25(".pll2", 2, CLK_PLL2, CLK_MAIN), DEF_GEN4_PLL_V8_25(".pll3", 3, CLK_PLL3, CLK_MAIN), DEF_GEN4_PLL_V8_25(".pll4", 4, CLK_PLL4, CLK_MAIN), DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN), DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN), DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1), DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1), DEF_FIXED(".pll4_div2", CLK_PLL4_DIV2, CLK_PLL4, 2, 1), DEF_FIXED(".pll5_div2", CLK_PLL5_DIV2, CLK_PLL5, 2, 1), DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1), DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 2, 1), DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1), DEF_FIXED(".s0_vio", CLK_S0_VIO, CLK_PLL1_DIV2, 2, 1), DEF_FIXED(".s0_vc", CLK_S0_VC, CLK_PLL1_DIV2, 2, 1), DEF_FIXED(".s0_hsc", CLK_S0_HSC, CLK_PLL1_DIV2, 2, 1), DEF_FIXED(".sasyncper", CLK_SASYNCPER, CLK_PLL5_DIV4, 3, 1), DEF_FIXED(".sv_vip", CLK_SV_VIP, CLK_PLL1, 5, 1), DEF_FIXED(".sv_ir", CLK_SV_IR, CLK_PLL1, 5, 1), DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5), DEF_RATE(".oco", CLK_OCO, 32768), DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5), DEF_FIXED(".vio", CLK_VIO, CLK_PLL5_DIV2, 3, 1), DEF_FIXED(".vc", CLK_VC, CLK_PLL5_DIV2, 3, 1), /* Core Clock Outputs */ DEF_GEN4_Z("z0", R8A779G0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL2, 2, 0), DEF_FIXED("s0d2", R8A779G0_CLK_S0D2, CLK_S0, 2, 1), DEF_FIXED("s0d3", R8A779G0_CLK_S0D3, CLK_S0, 3, 1), DEF_FIXED("s0d4", R8A779G0_CLK_S0D4, CLK_S0, 4, 1), DEF_FIXED("cl16m", R8A779G0_CLK_CL16M, CLK_S0, 48, 1), DEF_FIXED("s0d1_vio", R8A779G0_CLK_S0D1_VIO, CLK_S0_VIO, 1, 1), DEF_FIXED("s0d2_vio", R8A779G0_CLK_S0D2_VIO, CLK_S0_VIO, 2, 1), DEF_FIXED("s0d4_vio", R8A779G0_CLK_S0D4_VIO, CLK_S0_VIO, 4, 1), DEF_FIXED("s0d8_vio", R8A779G0_CLK_S0D8_VIO, CLK_S0_VIO, 8, 1), DEF_FIXED("s0d1_vc", R8A779G0_CLK_S0D1_VC, CLK_S0_VC, 1, 1), DEF_FIXED("s0d2_vc", R8A779G0_CLK_S0D2_VC, CLK_S0_VC, 2, 1), DEF_FIXED("s0d4_vc", R8A779G0_CLK_S0D4_VC, CLK_S0_VC, 4, 1), DEF_FIXED("s0d2_mm", R8A779G0_CLK_S0D2_MM, CLK_S0, 2, 1), DEF_FIXED("s0d4_mm", R8A779G0_CLK_S0D4_MM, CLK_S0, 4, 1), DEF_FIXED("cl16m_mm", R8A779G0_CLK_CL16M_MM, CLK_S0, 48, 1), DEF_FIXED("s0d2_u3dg", 
R8A779G0_CLK_S0D2_U3DG, CLK_S0, 2, 1), DEF_FIXED("s0d4_u3dg", R8A779G0_CLK_S0D4_U3DG, CLK_S0, 4, 1), DEF_FIXED("s0d2_rt", R8A779G0_CLK_S0D2_RT, CLK_S0, 2, 1), DEF_FIXED("s0d3_rt", R8A779G0_CLK_S0D3_RT, CLK_S0, 3, 1), DEF_FIXED("s0d4_rt", R8A779G0_CLK_S0D4_RT, CLK_S0, 4, 1), DEF_FIXED("s0d6_rt", R8A779G0_CLK_S0D6_RT, CLK_S0, 6, 1), DEF_FIXED("s0d24_rt", R8A779G0_CLK_S0D24_RT, CLK_S0, 24, 1), DEF_FIXED("cl16m_rt", R8A779G0_CLK_CL16M_RT, CLK_S0, 48, 1), DEF_FIXED("s0d2_per", R8A779G0_CLK_S0D2_PER, CLK_S0, 2, 1), DEF_FIXED("s0d3_per", R8A779G0_CLK_S0D3_PER, CLK_S0, 3, 1), DEF_FIXED("s0d4_per", R8A779G0_CLK_S0D4_PER, CLK_S0, 4, 1), DEF_FIXED("s0d6_per", R8A779G0_CLK_S0D6_PER, CLK_S0, 6, 1), DEF_FIXED("s0d12_per", R8A779G0_CLK_S0D12_PER, CLK_S0, 12, 1), DEF_FIXED("s0d24_per", R8A779G0_CLK_S0D24_PER, CLK_S0, 24, 1), DEF_FIXED("cl16m_per", R8A779G0_CLK_CL16M_PER, CLK_S0, 48, 1), DEF_FIXED("s0d1_hsc", R8A779G0_CLK_S0D1_HSC, CLK_S0_HSC, 1, 1), DEF_FIXED("s0d2_hsc", R8A779G0_CLK_S0D2_HSC, CLK_S0_HSC, 2, 1), DEF_FIXED("s0d4_hsc", R8A779G0_CLK_S0D4_HSC, CLK_S0_HSC, 4, 1), DEF_FIXED("cl16m_hsc", R8A779G0_CLK_CL16M_HSC, CLK_S0_HSC, 48, 1), DEF_FIXED("s0d2_cc", R8A779G0_CLK_S0D2_CC, CLK_S0, 2, 1), DEF_FIXED("sasyncrt", R8A779G0_CLK_SASYNCRT, CLK_PLL5_DIV4, 48, 1), DEF_FIXED("sasyncperd1",R8A779G0_CLK_SASYNCPERD1, CLK_SASYNCPER,1, 1), DEF_FIXED("sasyncperd2",R8A779G0_CLK_SASYNCPERD2, CLK_SASYNCPER,2, 1), DEF_FIXED("sasyncperd4",R8A779G0_CLK_SASYNCPERD4, CLK_SASYNCPER,4, 1), DEF_FIXED("svd1_ir", R8A779G0_CLK_SVD1_IR, CLK_SV_IR, 1, 1), DEF_FIXED("svd2_ir", R8A779G0_CLK_SVD2_IR, CLK_SV_IR, 2, 1), DEF_FIXED("svd1_vip", R8A779G0_CLK_SVD1_VIP, CLK_SV_VIP, 1, 1), DEF_FIXED("svd2_vip", R8A779G0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1), DEF_FIXED("cbfusa", R8A779G0_CLK_CBFUSA, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A779G0_CLK_CPEX, CLK_EXTAL, 2, 1), DEF_FIXED("cp", R8A779G0_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("viobus", R8A779G0_CLK_VIOBUS, CLK_VIO, 1, 1), DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1), DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1), DEF_FIXED("vcbusd2", R8A779G0_CLK_VCBUSD2, CLK_VC, 2, 1), DEF_DIV6P1("canfd", R8A779G0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR), DEF_DIV6P1("csi", R8A779G0_CLK_CSI, CLK_PLL5_DIV4, CPG_CSICKCR), DEF_FIXED("dsiref", R8A779G0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1), DEF_DIV6P1("dsiext", R8A779G0_CLK_DSIEXT, CLK_PLL5_DIV4, CPG_DSIEXTCKCR), DEF_GEN4_SDH("sd0h", R8A779G0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR), DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, R8A779G0_CLK_SD0H, CPG_SD0CKCR), DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR), DEF_BASE("rpc", R8A779G0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), DEF_BASE("rpcd2", R8A779G0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779G0_CLK_RPC), DEF_GEN4_OSC("osc", R8A779G0_CLK_OSC, CLK_EXTAL, 8), DEF_GEN4_MDSEL("r", R8A779G0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1), }; static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = { DEF_MOD("avb0", 211, R8A779G0_CLK_S0D4_HSC), DEF_MOD("avb1", 212, R8A779G0_CLK_S0D4_HSC), DEF_MOD("avb2", 213, R8A779G0_CLK_S0D4_HSC), DEF_MOD("canfd0", 328, R8A779G0_CLK_SASYNCPERD2), DEF_MOD("csi40", 331, R8A779G0_CLK_CSI), DEF_MOD("csi41", 400, R8A779G0_CLK_CSI), DEF_MOD("dis0", 411, R8A779G0_CLK_VIOBUSD2), DEF_MOD("dsitxlink0", 415, R8A779G0_CLK_VIOBUSD2), DEF_MOD("dsitxlink1", 416, R8A779G0_CLK_VIOBUSD2), DEF_MOD("fcpvd0", 508, R8A779G0_CLK_VIOBUSD2), DEF_MOD("fcpvd1", 509, R8A779G0_CLK_VIOBUSD2), DEF_MOD("hscif0", 514, R8A779G0_CLK_SASYNCPERD1), DEF_MOD("hscif1", 515, R8A779G0_CLK_SASYNCPERD1), 
DEF_MOD("hscif2", 516, R8A779G0_CLK_SASYNCPERD1), DEF_MOD("hscif3", 517, R8A779G0_CLK_SASYNCPERD1), DEF_MOD("i2c0", 518, R8A779G0_CLK_S0D6_PER), DEF_MOD("i2c1", 519, R8A779G0_CLK_S0D6_PER), DEF_MOD("i2c2", 520, R8A779G0_CLK_S0D6_PER), DEF_MOD("i2c3", 521, R8A779G0_CLK_S0D6_PER), DEF_MOD("i2c4", 522, R8A779G0_CLK_S0D6_PER), DEF_MOD("i2c5", 523, R8A779G0_CLK_S0D6_PER), DEF_MOD("irqc", 611, R8A779G0_CLK_CL16M), DEF_MOD("ispcs0", 612, R8A779G0_CLK_S0D2_VIO), DEF_MOD("ispcs1", 613, R8A779G0_CLK_S0D2_VIO), DEF_MOD("msi0", 618, R8A779G0_CLK_MSO), DEF_MOD("msi1", 619, R8A779G0_CLK_MSO), DEF_MOD("msi2", 620, R8A779G0_CLK_MSO), DEF_MOD("msi3", 621, R8A779G0_CLK_MSO), DEF_MOD("msi4", 622, R8A779G0_CLK_MSO), DEF_MOD("msi5", 623, R8A779G0_CLK_MSO), DEF_MOD("pciec0", 624, R8A779G0_CLK_S0D2_HSC), DEF_MOD("pciec1", 625, R8A779G0_CLK_S0D2_HSC), DEF_MOD("pwm", 628, R8A779G0_CLK_SASYNCPERD4), DEF_MOD("rpc-if", 629, R8A779G0_CLK_RPCD2), DEF_MOD("scif0", 702, R8A779G0_CLK_SASYNCPERD4), DEF_MOD("scif1", 703, R8A779G0_CLK_SASYNCPERD4), DEF_MOD("scif3", 704, R8A779G0_CLK_SASYNCPERD4), DEF_MOD("scif4", 705, R8A779G0_CLK_SASYNCPERD4), DEF_MOD("sdhi", 706, R8A779G0_CLK_SD0), DEF_MOD("sys-dmac0", 709, R8A779G0_CLK_S0D6_PER), DEF_MOD("sys-dmac1", 710, R8A779G0_CLK_S0D6_PER), DEF_MOD("tmu0", 713, R8A779G0_CLK_SASYNCRT), DEF_MOD("tmu1", 714, R8A779G0_CLK_SASYNCPERD2), DEF_MOD("tmu2", 715, R8A779G0_CLK_SASYNCPERD2), DEF_MOD("tmu3", 716, R8A779G0_CLK_SASYNCPERD2), DEF_MOD("tmu4", 717, R8A779G0_CLK_SASYNCPERD2), DEF_MOD("tpu0", 718, R8A779G0_CLK_SASYNCPERD4), DEF_MOD("vin00", 730, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin01", 731, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin02", 800, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin03", 801, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin04", 802, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin05", 803, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin06", 804, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin07", 805, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin10", 806, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin11", 807, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin12", 808, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin13", 809, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin14", 810, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin15", 811, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin16", 812, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vin17", 813, R8A779G0_CLK_S0D4_VIO), DEF_MOD("vspd0", 830, R8A779G0_CLK_VIOBUSD2), DEF_MOD("vspd1", 831, R8A779G0_CLK_VIOBUSD2), DEF_MOD("wdt1:wdt0", 907, R8A779G0_CLK_R), DEF_MOD("cmt0", 910, R8A779G0_CLK_R), DEF_MOD("cmt1", 911, R8A779G0_CLK_R), DEF_MOD("cmt2", 912, R8A779G0_CLK_R), DEF_MOD("cmt3", 913, R8A779G0_CLK_R), DEF_MOD("pfc0", 915, R8A779G0_CLK_CP), DEF_MOD("pfc1", 916, R8A779G0_CLK_CP), DEF_MOD("pfc2", 917, R8A779G0_CLK_CP), DEF_MOD("pfc3", 918, R8A779G0_CLK_CP), DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M), DEF_MOD("tsn", 2723, R8A779G0_CLK_S0D4_HSC), DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER), DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER), }; /* * CPG Clock Data */ /* * MD EXTAL PLL1 PLL2 PLL3 PLL4 PLL5 PLL6 OSC * 14 13 (MHz) * ------------------------------------------------------------------------ * 0 0 16.66 / 1 x192 x204 x192 x144 x192 x168 /16 * 0 1 20 / 1 x160 x170 x160 x120 x160 x140 /19 * 1 0 Prohibited setting * 1 1 33.33 / 2 x192 x204 x192 x144 x192 x168 /32 */ #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ (((md) & BIT(13)) >> 13)) static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = { /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */ { 1, 192, 1, 192, 1, 16, }, { 1, 160, 1, 160, 1, 19, }, { 0, 0, 0, 0, 0, 0, }, { 2, 
192, 1, 192, 1, 32, }, }; static int __init r8a779g0_cpg_mssr_init(struct device *dev) { const struct rcar_gen4_cpg_pll_config *cpg_pll_config; u32 cpg_mode; int error; error = rcar_rst_read_mode_pins(&cpg_mode); if (error) return error; cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; if (!cpg_pll_config->extal_div) { dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode); return -EINVAL; } return rcar_gen4_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode); } const struct cpg_mssr_info r8a779g0_cpg_mssr_info __initconst = { /* Core Clocks */ .core_clks = r8a779g0_core_clks, .num_core_clks = ARRAY_SIZE(r8a779g0_core_clks), .last_dt_core_clk = LAST_DT_CORE_CLK, .num_total_core_clks = MOD_CLK_BASE, /* Module Clocks */ .mod_clks = r8a779g0_mod_clks, .num_mod_clks = ARRAY_SIZE(r8a779g0_mod_clks), .num_hw_mod_clks = 30 * 32, /* Callbacks */ .init = r8a779g0_cpg_mssr_init, .cpg_clk_register = rcar_gen4_cpg_clk_register, .reg_layout = CLK_REG_LAYOUT_RCAR_GEN4, };
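/*
 * Illustration only (not used by the driver): how the MD14/MD13 mode pins map
 * onto cpg_pll_configs[] as a 2-bit index, matching the mode-pin table in the
 * comment above.  The helper name is hypothetical.
 */
static inline unsigned int r8a779g0_example_pll_index(u32 cpg_mode)
{
	/*
	 * CPG_PLL_CONFIG_INDEX(): MD14 -> index bit 1, MD13 -> index bit 0.
	 *   MD14=0 MD13=0 -> 0 : 16.66 MHz EXTAL row
	 *   MD14=0 MD13=1 -> 1 : 20 MHz EXTAL row
	 *   MD14=1 MD13=0 -> 2 : prohibited setting (extal_div == 0)
	 *   MD14=1 MD13=1 -> 3 : 33.33 MHz EXTAL row
	 */
	return CPG_PLL_CONFIG_INDEX(cpg_mode);
}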
// SPDX-License-Identifier: GPL-2.0 #include <linux/arm-smccc.h> #include <linux/kernel.h> #include <linux/smp.h> #include <asm/cp15.h> #include <asm/cputype.h> #include <asm/proc-fns.h> #include <asm/spectre.h> #include <asm/system_misc.h> #ifdef CONFIG_ARM_PSCI static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) { struct arm_smccc_res res; arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); switch ((int)res.a0) { case SMCCC_RET_SUCCESS: return SPECTRE_MITIGATED; case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: return SPECTRE_UNAFFECTED; default: return SPECTRE_VULNERABLE; } } #else static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) { return SPECTRE_VULNERABLE; } #endif #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); static void harden_branch_predictor_bpiall(void) { write_sysreg(0, BPIALL); } static void harden_branch_predictor_iciallu(void) { write_sysreg(0, ICIALLU); } static void __maybe_unused call_smc_arch_workaround_1(void) { arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } static void __maybe_unused call_hvc_arch_workaround_1(void) { arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } static unsigned int spectre_v2_install_workaround(unsigned int method) { const char *spectre_v2_method = NULL; int cpu = smp_processor_id(); if (per_cpu(harden_branch_predictor_fn, cpu)) return SPECTRE_MITIGATED; switch (method) { case SPECTRE_V2_METHOD_BPIALL: per_cpu(harden_branch_predictor_fn, cpu) = harden_branch_predictor_bpiall; spectre_v2_method = "BPIALL"; break; case SPECTRE_V2_METHOD_ICIALLU: per_cpu(harden_branch_predictor_fn, cpu) = harden_branch_predictor_iciallu; spectre_v2_method = "ICIALLU"; break; case SPECTRE_V2_METHOD_HVC: per_cpu(harden_branch_predictor_fn, cpu) = call_hvc_arch_workaround_1; cpu_do_switch_mm = cpu_v7_hvc_switch_mm; spectre_v2_method = "hypervisor"; break; case SPECTRE_V2_METHOD_SMC: per_cpu(harden_branch_predictor_fn, cpu) = call_smc_arch_workaround_1; cpu_do_switch_mm = cpu_v7_smc_switch_mm; spectre_v2_method = "firmware"; break; } if (spectre_v2_method) pr_info("CPU%u: Spectre v2: using %s workaround\n", smp_processor_id(), spectre_v2_method); return SPECTRE_MITIGATED; } #else static unsigned int spectre_v2_install_workaround(unsigned int method) { pr_info_once("Spectre V2: workarounds disabled by configuration\n"); return SPECTRE_VULNERABLE; } #endif static void cpu_v7_spectre_v2_init(void) { unsigned int state, method = 0; switch (read_cpuid_part()) { case ARM_CPU_PART_CORTEX_A8: case ARM_CPU_PART_CORTEX_A9: case ARM_CPU_PART_CORTEX_A12: case ARM_CPU_PART_CORTEX_A17: case ARM_CPU_PART_CORTEX_A73: case ARM_CPU_PART_CORTEX_A75: state = SPECTRE_MITIGATED; method = SPECTRE_V2_METHOD_BPIALL; break; case ARM_CPU_PART_CORTEX_A15: case ARM_CPU_PART_BRAHMA_B15: state = SPECTRE_MITIGATED; method = SPECTRE_V2_METHOD_ICIALLU; break; case ARM_CPU_PART_BRAHMA_B53: /* Requires no workaround */ state = SPECTRE_UNAFFECTED; break; default: /* Other ARM CPUs require no workaround */ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) { state = SPECTRE_UNAFFECTED; break; } fallthrough; /* Cortex A57/A72 require firmware 
workaround */ case ARM_CPU_PART_CORTEX_A57: case ARM_CPU_PART_CORTEX_A72: state = spectre_v2_get_cpu_fw_mitigation_state(); if (state != SPECTRE_MITIGATED) break; switch (arm_smccc_1_1_get_conduit()) { case SMCCC_CONDUIT_HVC: method = SPECTRE_V2_METHOD_HVC; break; case SMCCC_CONDUIT_SMC: method = SPECTRE_V2_METHOD_SMC; break; default: state = SPECTRE_VULNERABLE; break; } } if (state == SPECTRE_MITIGATED) state = spectre_v2_install_workaround(method); spectre_v2_update_state(state, method); } #ifdef CONFIG_HARDEN_BRANCH_HISTORY static int spectre_bhb_method; static const char *spectre_bhb_method_name(int method) { switch (method) { case SPECTRE_V2_METHOD_LOOP8: return "loop"; case SPECTRE_V2_METHOD_BPIALL: return "BPIALL"; default: return "unknown"; } } static int spectre_bhb_install_workaround(int method) { if (spectre_bhb_method != method) { if (spectre_bhb_method) { pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n", smp_processor_id()); return SPECTRE_VULNERABLE; } if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE) return SPECTRE_VULNERABLE; spectre_bhb_method = method; pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n", smp_processor_id(), spectre_bhb_method_name(method)); } return SPECTRE_MITIGATED; } #else static int spectre_bhb_install_workaround(int method) { return SPECTRE_VULNERABLE; } #endif static void cpu_v7_spectre_bhb_init(void) { unsigned int state, method = 0; switch (read_cpuid_part()) { case ARM_CPU_PART_CORTEX_A15: case ARM_CPU_PART_BRAHMA_B15: case ARM_CPU_PART_CORTEX_A57: case ARM_CPU_PART_CORTEX_A72: state = SPECTRE_MITIGATED; method = SPECTRE_V2_METHOD_LOOP8; break; case ARM_CPU_PART_CORTEX_A73: case ARM_CPU_PART_CORTEX_A75: state = SPECTRE_MITIGATED; method = SPECTRE_V2_METHOD_BPIALL; break; default: state = SPECTRE_UNAFFECTED; break; } if (state == SPECTRE_MITIGATED) state = spectre_bhb_install_workaround(method); spectre_v2_update_state(state, method); } static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned, u32 mask, const char *msg) { u32 aux_cr; asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr)); if ((aux_cr & mask) != mask) { if (!*warned) pr_err("CPU%u: %s", smp_processor_id(), msg); *warned = true; return false; } return true; } static DEFINE_PER_CPU(bool, spectre_warned); static bool check_spectre_auxcr(bool *warned, u32 bit) { return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) && cpu_v7_check_auxcr_set(warned, bit, "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n"); } void cpu_v7_ca8_ibe(void) { if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6))) cpu_v7_spectre_v2_init(); } void cpu_v7_ca15_ibe(void) { if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0))) cpu_v7_spectre_v2_init(); cpu_v7_spectre_bhb_init(); } void cpu_v7_bugs_init(void) { cpu_v7_spectre_v2_init(); cpu_v7_spectre_bhb_init(); }
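/*
 * Illustration only: how the per-CPU harden_branch_predictor_fn hook installed
 * above is meant to be consumed.  In the kernel the actual consumer lives in
 * arch/arm/include/asm/system_misc.h and is invoked on exception paths that may
 * cross privilege boundaries; the helper below is a sketch of that pattern, not
 * a copy of the header.
 */
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
static inline void example_harden_branch_predictor(void)
{
	harden_branch_predictor_fn_t fn =
		per_cpu(harden_branch_predictor_fn, smp_processor_id());

	if (fn)
		fn();	/* BPIALL, ICIALLU, SMC or HVC, as selected per CPU above */
}
#endif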
/* SPDX-License-Identifier: MIT */ /* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef __DC_OPTC_DCN314_H__ #define __DC_OPTC_DCN314_H__ #include "dcn10/dcn10_optc.h" #define OPTC_COMMON_REG_LIST_DCN3_14(inst) \ SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ SRI(OTG_VUPDATE_PARAM, OTG, inst),\ SRI(OTG_VREADY_PARAM, OTG, inst),\ SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\ SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ SRI(OTG_H_TOTAL, OTG, inst),\ SRI(OTG_H_BLANK_START_END, OTG, inst),\ SRI(OTG_H_SYNC_A, OTG, inst),\ SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ SRI(OTG_H_TIMING_CNTL, OTG, inst),\ SRI(OTG_V_TOTAL, OTG, inst),\ SRI(OTG_V_BLANK_START_END, OTG, inst),\ SRI(OTG_V_SYNC_A, OTG, inst),\ SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ SRI(OTG_CONTROL, OTG, inst),\ SRI(OTG_STEREO_CONTROL, OTG, inst),\ SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ SRI(OTG_STEREO_STATUS, OTG, inst),\ SRI(OTG_V_TOTAL_MAX, OTG, inst),\ SRI(OTG_V_TOTAL_MIN, OTG, inst),\ SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ SRI(OTG_TRIGA_CNTL, OTG, inst),\ SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ SRI(OTG_STATUS, OTG, inst),\ SRI(OTG_STATUS_POSITION, OTG, inst),\ SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ SRI(OTG_M_CONST_DTO0, OTG, inst),\ SRI(OTG_M_CONST_DTO1, OTG, inst),\ SRI(OTG_CLOCK_CONTROL, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ SRI(CONTROL, VTG, inst),\ SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ SRI(OTG_GSL_CONTROL, OTG, inst),\ SRI(OTG_CRC_CNTL, OTG, inst),\ SRI(OTG_CRC0_DATA_RG, OTG, inst),\ SRI(OTG_CRC0_DATA_B, OTG, inst),\ SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ SR(GSL_SOURCE_SELECT),\ SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ SRI(OTG_GSL_WINDOW_X, 
OTG, inst),\ SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ SRI(OTG_DSC_START_POSITION, OTG, inst),\ SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ SRI(OTG_DRR_CONTROL, OTG, inst),\ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst) #define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ 
SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, 
OTG_CRC0_WINDOWA_X_END, mask_sh),\ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ void dcn314_timing_generator_init(struct optc *optc1); #endif /* __DC_OPTC_DCN314_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Intel Tangier pinctrl functions * * Copyright (C) 2016, 2023 Intel Corporation * * Authors: Andy Shevchenko <[email protected]> * Raag Jadav <[email protected]> */ #ifndef PINCTRL_TANGIER_H #define PINCTRL_TANGIER_H #include <linux/spinlock_types.h> #include <linux/types.h> #include <linux/pinctrl/pinctrl.h> #include "pinctrl-intel.h" struct device; struct platform_device; #define TNG_FAMILY_NR 64 #define TNG_FAMILY_LEN 0x400 /** * struct tng_family - Tangier pin family description * @barno: MMIO BAR number where registers for this family reside * @pin_base: Starting pin of pins in this family * @npins: Number of pins in this family * @protected: True if family is protected by access * @regs: Family specific common registers */ struct tng_family { unsigned int barno; unsigned int pin_base; size_t npins; bool protected; void __iomem *regs; }; #define TNG_FAMILY(b, s, e) \ { \ .barno = (b), \ .pin_base = (s), \ .npins = (e) - (s) + 1, \ } #define TNG_FAMILY_PROTECTED(b, s, e) \ { \ .barno = (b), \ .pin_base = (s), \ .npins = (e) - (s) + 1, \ .protected = true, \ } /** * struct tng_pinctrl - Tangier pinctrl private structure * @dev: Pointer to the device structure * @lock: Lock to serialize register access * @pctldesc: Pin controller description * @pctldev: Pointer to the pin controller device * @families: Array of families this pinctrl handles * @nfamilies: Number of families in the array * @functions: Array of functions * @nfunctions: Number of functions in the array * @groups: Array of pin groups * @ngroups: Number of groups in the array * @pins: Array of pins this pinctrl controls * @npins: Number of pins in the array */ struct tng_pinctrl { struct device *dev; raw_spinlock_t lock; struct pinctrl_desc pctldesc; struct pinctrl_dev *pctldev; /* Pin controller configuration */ const struct tng_family *families; size_t nfamilies; const struct intel_function *functions; size_t nfunctions; const struct intel_pingroup *groups; size_t ngroups; const struct pinctrl_pin_desc *pins; size_t npins; }; int devm_tng_pinctrl_probe(struct platform_device *pdev); #endif /* PINCTRL_TANGIER_H */
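/*
 * Illustrative sketch, not part of the header above: how a SoC driver built
 * on this header might describe its pin families with the TNG_FAMILY() and
 * TNG_FAMILY_PROTECTED() helpers defined there.  The BAR numbers and pin
 * ranges below are made up for the example; npins is derived by the macros
 * as (end - start + 1).
 */
static const struct tng_family demo_families[] = {
	TNG_FAMILY(1, 0, 12),		 /* BAR 1, pins 0..12  -> 13 pins */
	TNG_FAMILY(1, 13, 36),		 /* BAR 1, pins 13..36 -> 24 pins */
	TNG_FAMILY_PROTECTED(1, 37, 56), /* access-protected family        */
};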
/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/media/i2c/lm3646.h * * Copyright (C) 2014 Texas Instruments * * Contact: Daniel Jeong <[email protected]> * Ldd-Mlp <[email protected]> */ #ifndef __LM3646_H__ #define __LM3646_H__ #include <media/v4l2-subdev.h> #define LM3646_NAME "lm3646" #define LM3646_I2C_ADDR_REV1 (0x67) #define LM3646_I2C_ADDR_REV0 (0x63) /* TOTAL FLASH Brightness Max * min 93350uA, step 93750uA, max 1499600uA */ #define LM3646_TOTAL_FLASH_BRT_MIN 93350 #define LM3646_TOTAL_FLASH_BRT_STEP 93750 #define LM3646_TOTAL_FLASH_BRT_MAX 1499600 #define LM3646_TOTAL_FLASH_BRT_uA_TO_REG(a) \ ((a) < LM3646_TOTAL_FLASH_BRT_MIN ? 0 : \ ((((a) - LM3646_TOTAL_FLASH_BRT_MIN) / LM3646_TOTAL_FLASH_BRT_STEP))) /* TOTAL TORCH Brightness Max * min 23040uA, step 23430uA, max 187100uA */ #define LM3646_TOTAL_TORCH_BRT_MIN 23040 #define LM3646_TOTAL_TORCH_BRT_STEP 23430 #define LM3646_TOTAL_TORCH_BRT_MAX 187100 #define LM3646_TOTAL_TORCH_BRT_uA_TO_REG(a) \ ((a) < LM3646_TOTAL_TORCH_BRT_MIN ? 0 : \ ((((a) - LM3646_TOTAL_TORCH_BRT_MIN) / LM3646_TOTAL_TORCH_BRT_STEP))) /* LED1 FLASH Brightness * min 23040uA, step 11718uA, max 1499600uA */ #define LM3646_LED1_FLASH_BRT_MIN 23040 #define LM3646_LED1_FLASH_BRT_STEP 11718 #define LM3646_LED1_FLASH_BRT_MAX 1499600 #define LM3646_LED1_FLASH_BRT_uA_TO_REG(a) \ ((a) <= LM3646_LED1_FLASH_BRT_MIN ? 0 : \ ((((a) - LM3646_LED1_FLASH_BRT_MIN) / LM3646_LED1_FLASH_BRT_STEP))+1) /* LED1 TORCH Brightness * min 2530uA, step 1460uA, max 187100uA */ #define LM3646_LED1_TORCH_BRT_MIN 2530 #define LM3646_LED1_TORCH_BRT_STEP 1460 #define LM3646_LED1_TORCH_BRT_MAX 187100 #define LM3646_LED1_TORCH_BRT_uA_TO_REG(a) \ ((a) <= LM3646_LED1_TORCH_BRT_MIN ? 0 : \ ((((a) - LM3646_LED1_TORCH_BRT_MIN) / LM3646_LED1_TORCH_BRT_STEP))+1) /* FLASH TIMEOUT DURATION * min 50ms, step 50ms, max 400ms */ #define LM3646_FLASH_TOUT_MIN 50 #define LM3646_FLASH_TOUT_STEP 50 #define LM3646_FLASH_TOUT_MAX 400 #define LM3646_FLASH_TOUT_ms_TO_REG(a) \ ((a) <= LM3646_FLASH_TOUT_MIN ? 0 : \ (((a) - LM3646_FLASH_TOUT_MIN) / LM3646_FLASH_TOUT_STEP)) /* struct lm3646_platform_data * * @flash_timeout: flash timeout * @led1_flash_brt: led1 flash mode brightness, uA * @led1_torch_brt: led1 torch mode brightness, uA */ struct lm3646_platform_data { u32 flash_timeout; u32 led1_flash_brt; u32 led1_torch_brt; }; #endif /* __LM3646_H__ */
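/*
 * Illustrative sketch, not part of the header above: worked examples of the
 * *_uA_TO_REG() arithmetic defined there.  It relies on the macros from the
 * header; the function name is hypothetical.
 */
static inline void demo_lm3646_brightness_codes(void)
{
	/*
	 * Total flash: (1499600 - 93350) / 93750 = 15, i.e. the maximum
	 * current maps to register code 15 (integer division).
	 */
	unsigned int flash_code = LM3646_TOTAL_FLASH_BRT_uA_TO_REG(1499600);

	/*
	 * LED1 torch: requests at or below the 2530 uA minimum map to 0,
	 * anything above maps to ((a - 2530) / 1460) + 1.
	 */
	unsigned int torch_min  = LM3646_LED1_TORCH_BRT_uA_TO_REG(2530);  /* 0 */
	unsigned int torch_code = LM3646_LED1_TORCH_BRT_uA_TO_REG(10000); /* (7470 / 1460) + 1 = 6 */

	(void)flash_code;
	(void)torch_min;
	(void)torch_code;
}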
// SPDX-License-Identifier: GPL-2.0-only /* * interfaces to Chassis Codes via PDC (firmware) * * Copyright (C) 2002 Laurent Canet <[email protected]> * Copyright (C) 2002-2006 Thibaut VARENE <[email protected]> * * TODO: poll chassis warns, trigger (configurable) machine shutdown when * needed. * Find out how to get Chassis warnings out of PAT boxes? */ #undef PDC_CHASSIS_DEBUG #ifdef PDC_CHASSIS_DEBUG #define DPRINTK(fmt, args...) printk(fmt, ## args) #else #define DPRINTK(fmt, args...) #endif #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/panic_notifier.h> #include <linux/reboot.h> #include <linux/notifier.h> #include <linux/cache.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/pdc_chassis.h> #include <asm/processor.h> #include <asm/pdc.h> #include <asm/pdcpat.h> #include <asm/led.h> #define PDC_CHASSIS_VER "0.05" #ifdef CONFIG_PDC_CHASSIS static unsigned int pdc_chassis_enabled __read_mostly = 1; /** * pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time. * @str: configuration param: 0 to disable chassis log * @return 1 */ static int __init pdc_chassis_setup(char *str) { /*panic_timeout = simple_strtoul(str, NULL, 0);*/ get_option(&str, &pdc_chassis_enabled); return 1; } __setup("pdcchassis=", pdc_chassis_setup); /** * pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility * * Currently, only E class and A180 are known to work with this. * Inspired by Christoph Plattner */ #if 0 static void __init pdc_chassis_checkold(void) { switch(CPU_HVERSION) { case 0x480: /* E25 */ case 0x481: /* E35 */ case 0x482: /* E45 */ case 0x483: /* E55 */ case 0x516: /* A180 */ break; default: break; } DPRINTK(KERN_DEBUG "%s: pdc_chassis_checkold(); pdc_chassis_old = %d\n", __FILE__, pdc_chassis_old); } #endif /** * pdc_chassis_panic_event() - Called by the panic handler. * @this: unused * @event: unused * @ptr: unused * * As soon as a panic occurs, we should inform the PDC. */ static int pdc_chassis_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); return NOTIFY_DONE; } static struct notifier_block pdc_chassis_panic_block = { .notifier_call = pdc_chassis_panic_event, .priority = INT_MAX, }; /** * pdc_chassis_reboot_event() - Called by the reboot handler. * @this: unused * @event: unused * @ptr: unused * * As soon as a reboot occurs, we should inform the PDC. */ static int pdc_chassis_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); return NOTIFY_DONE; } static struct notifier_block pdc_chassis_reboot_block = { .notifier_call = pdc_chassis_reboot_event, .priority = INT_MAX, }; #endif /* CONFIG_PDC_CHASSIS */ /** * parisc_pdc_chassis_init() - Called at boot time. */ void __init parisc_pdc_chassis_init(void) { #ifdef CONFIG_PDC_CHASSIS if (likely(pdc_chassis_enabled)) { DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__); /* Let see if we have something to handle... */ printk(KERN_INFO "Enabling %s chassis codes support v%s\n", is_pdc_pat() ? 
"PDC_PAT" : "regular", PDC_CHASSIS_VER); /* initialize panic notifier chain */ atomic_notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block); /* initialize reboot notifier chain */ register_reboot_notifier(&pdc_chassis_reboot_block); } #endif /* CONFIG_PDC_CHASSIS */ } /** * pdc_chassis_send_status() - Sends a predefined message to the chassis, * and changes the front panel LEDs according to the new system state * @message: Type of message, one of PDC_CHASSIS_DIRECT_* values. * * Only machines with 64 bits PDC PAT and those reported in * pdc_chassis_checkold() are supported atm. * * returns 0 if no error, -1 if no supported PDC is present or invalid message, * else returns the appropriate PDC error code. * * For a list of predefined messages, see asm-parisc/pdc_chassis.h */ int pdc_chassis_send_status(int message) { /* Maybe we should do that in an other way ? */ int retval = 0; #ifdef CONFIG_PDC_CHASSIS if (likely(pdc_chassis_enabled)) { DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message); #ifdef CONFIG_64BIT if (is_pdc_pat()) { switch(message) { case PDC_CHASSIS_DIRECT_BSTART: retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL); break; case PDC_CHASSIS_DIRECT_BCOMPLETE: retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL); break; case PDC_CHASSIS_DIRECT_SHUTDOWN: retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS); break; case PDC_CHASSIS_DIRECT_PANIC: retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC); break; case PDC_CHASSIS_DIRECT_LPMC: retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT); break; case PDC_CHASSIS_DIRECT_HPMC: retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT); break; default: retval = -1; } } else retval = -1; #else if (1) { switch (message) { case PDC_CHASSIS_DIRECT_BSTART: retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_INIT)); break; case PDC_CHASSIS_DIRECT_BCOMPLETE: retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN)); break; case PDC_CHASSIS_DIRECT_SHUTDOWN: retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT)); break; case PDC_CHASSIS_DIRECT_HPMC: case PDC_CHASSIS_DIRECT_PANIC: retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT)); break; case PDC_CHASSIS_DIRECT_LPMC: retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN)); break; default: retval = -1; } } else retval = -1; #endif /* CONFIG_64BIT */ } /* if (pdc_chassis_enabled) */ /* if system has LCD display, update current string */ if (retval != -1 && IS_ENABLED(CONFIG_CHASSIS_LCD_LED)) lcd_print(NULL); #endif /* CONFIG_PDC_CHASSIS */ return retval; } #ifdef CONFIG_PDC_CHASSIS_WARN #ifdef CONFIG_PROC_FS static int pdc_chassis_warn_show(struct seq_file *m, void *v) { unsigned long warn; u32 warnreg; if (pdc_chassis_warn(&warn) != PDC_OK) return -EIO; warnreg = (warn & 0xFFFFFFFF); if ((warnreg >> 24) & 0xFF) seq_printf(m, "Chassis component failure! (eg fan or PSU): 0x%.2x\n", (warnreg >> 24) & 0xFF); seq_printf(m, "Battery: %s\n", (warnreg & 0x04) ? "Low!" : "OK"); seq_printf(m, "Temp low: %s\n", (warnreg & 0x02) ? "Exceeded!" : "OK"); seq_printf(m, "Temp mid: %s\n", (warnreg & 0x01) ? "Exceeded!" 
: "OK"); return 0; } static int __init pdc_chassis_create_procfs(void) { unsigned long test; int ret; ret = pdc_chassis_warn(&test); if ((ret == PDC_BAD_PROC) || (ret == PDC_BAD_OPTION)) { /* seems that some boxes (eg L1000) do not implement this */ printk(KERN_INFO "Chassis warnings not supported.\n"); return 0; } printk(KERN_INFO "Enabling PDC chassis warnings support v%s\n", PDC_CHASSIS_VER); proc_create_single("chassis", 0400, NULL, pdc_chassis_warn_show); return 0; } __initcall(pdc_chassis_create_procfs); #endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PDC_CHASSIS_WARN */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H
#define _ASM_POWERPC_NOHASH_32_PTE_85xx_H
#ifdef __KERNEL__

/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
 * processors
 *
 *   MMU Assist Register 3:
 *
 *       32 33 34 35 36  ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
 *       RPN......................  0  0 U0 U1 U2 U3 UX SX UW SW UR SR
 *
 *   - PRESENT *must* be in the bottom two bits because swap PTEs
 *     use the top 30 bits.
 */

/* Definitions for FSL Book-E Cores */
#define _PAGE_READ	0x00001	/* H: Read permission (SR) */
#define _PAGE_PRESENT	0x00002	/* S: PTE contains a translation */
#define _PAGE_WRITE	0x00004	/* S: Write permission (SW) */
#define _PAGE_DIRTY	0x00008	/* S: Page dirty */
#define _PAGE_EXEC	0x00010	/* H: SX permission */
#define _PAGE_ACCESSED	0x00020	/* S: Page referenced */

#define _PAGE_ENDIAN	0x00040	/* H: E bit */
#define _PAGE_GUARDED	0x00080	/* H: G bit */
#define _PAGE_COHERENT	0x00100	/* H: M bit */
#define _PAGE_NO_CACHE	0x00200	/* H: I bit */
#define _PAGE_WRITETHRU	0x00400	/* H: W bit */
#define _PAGE_SPECIAL	0x00800	/* S: Special page */

#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK	(PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)
#define _PMD_USER	0

#define _PTE_NONE_MASK	0

#define PTE_WIMGE_SHIFT	(6)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
#else
#define _PAGE_BASE	(_PAGE_BASE_NC)
#endif

#include <asm/pgtable-masks.h>

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_85xx_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H
#define _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H

#define THERMOCOUPLE_TYPE_B	0x00
#define THERMOCOUPLE_TYPE_E	0x01
#define THERMOCOUPLE_TYPE_J	0x02
#define THERMOCOUPLE_TYPE_K	0x03
#define THERMOCOUPLE_TYPE_N	0x04
#define THERMOCOUPLE_TYPE_R	0x05
#define THERMOCOUPLE_TYPE_S	0x06
#define THERMOCOUPLE_TYPE_T	0x07

#endif /* _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H */
// SPDX-License-Identifier: GPL-2.0-only /* * Carsten Langgaard, [email protected] * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2008 Dmitri Vorobiev */ #include <linux/cpu.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/of_fdt.h> #include <linux/pci.h> #include <linux/screen_info.h> #include <linux/time.h> #include <linux/dma-map-ops.h> /* for dma_default_coherent */ #include <asm/fw/fw.h> #include <asm/mips-cps.h> #include <asm/mips-boards/generic.h> #include <asm/mips-boards/malta.h> #include <asm/mips-boards/maltaint.h> #include <asm/dma.h> #include <asm/prom.h> #include <asm/traps.h> #ifdef CONFIG_VT #include <linux/console.h> #endif #define ROCIT_CONFIG_GEN0 0x1f403000 #define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7) static struct resource standard_io_resources[] = { { .name = "dma1", .start = 0x00, .end = 0x1f, .flags = IORESOURCE_IO | IORESOURCE_BUSY }, { .name = "timer", .start = 0x40, .end = 0x5f, .flags = IORESOURCE_IO | IORESOURCE_BUSY }, { .name = "keyboard", .start = 0x60, .end = 0x6f, .flags = IORESOURCE_IO | IORESOURCE_BUSY }, { .name = "dma page reg", .start = 0x80, .end = 0x8f, .flags = IORESOURCE_IO | IORESOURCE_BUSY }, { .name = "dma2", .start = 0xc0, .end = 0xdf, .flags = IORESOURCE_IO | IORESOURCE_BUSY }, }; const char *get_system_type(void) { return "MIPS Malta"; } #ifdef CONFIG_BLK_DEV_FD static void __init fd_activate(void) { /* * Activate Floppy Controller in the SMSC FDC37M817 Super I/O * Controller. * Done by YAMON 2.00 onwards */ /* Entering config state. */ SMSC_WRITE(SMSC_CONFIG_ENTER, SMSC_CONFIG_REG); /* Activate floppy controller. */ SMSC_WRITE(SMSC_CONFIG_DEVNUM, SMSC_CONFIG_REG); SMSC_WRITE(SMSC_CONFIG_DEVNUM_FLOPPY, SMSC_DATA_REG); SMSC_WRITE(SMSC_CONFIG_ACTIVATE, SMSC_CONFIG_REG); SMSC_WRITE(SMSC_CONFIG_ACTIVATE_ENABLE, SMSC_DATA_REG); /* Exit config state. 
*/ SMSC_WRITE(SMSC_CONFIG_EXIT, SMSC_CONFIG_REG); } #endif static void __init plat_setup_iocoherency(void) { u32 cfg; if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) { if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; pr_info("Enabled Bonito CPU coherency\n"); dma_default_coherent = true; } if (strstr(fw_getcmdline(), "iobcuncached")) { BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN; BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG & ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); pr_info("Disabled Bonito IOBC coherency\n"); } else { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN; BONITO_PCIMEMBASECFG |= (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); pr_info("Enabled Bonito IOBC coherency\n"); } } else if (mips_cps_numiocu(0) != 0) { /* Nothing special needs to be done to enable coherency */ pr_info("CMP IOCU detected\n"); cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0)); if (cfg & ROCIT_CONFIG_GEN0_PCI_IOCU) dma_default_coherent = true; else pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); } if (dma_default_coherent) pr_info("Hardware DMA cache coherency enabled\n"); else pr_info("Software DMA cache coherency enabled\n"); } static void __init pci_clock_check(void) { unsigned int __iomem *jmpr_p = (unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int)); int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07; static const int pciclocks[] __initconst = { 33, 20, 25, 30, 12, 16, 37, 10 }; int pciclock = pciclocks[jmpr]; char *optptr, *argptr = fw_getcmdline(); /* * If user passed a pci_clock= option, don't tack on another one */ optptr = strstr(argptr, "pci_clock="); if (optptr && (optptr == argptr || optptr[-1] == ' ')) return; if (pciclock != 33) { pr_warn("WARNING: PCI clock is %dMHz, setting pci_clock\n", pciclock); argptr += strlen(argptr); sprintf(argptr, " pci_clock=%d", pciclock); if (pciclock < 20 || pciclock > 66) pr_warn("WARNING: IDE timing calculations will be " "incorrect\n"); } } #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) static void __init screen_info_setup(void) { static struct screen_info si = { .orig_x = 0, .orig_y = 25, .ext_mem_k = 0, .orig_video_page = 0, .orig_video_mode = 0, .orig_video_cols = 80, .unused2 = 0, .orig_video_ega_bx = 0, .unused3 = 0, .orig_video_lines = 25, .orig_video_isVGA = VIDEO_TYPE_VGAC, .orig_video_points = 16 }; vgacon_register_screen(&si); } #endif static void __init bonito_quirks_setup(void) { char *argptr; argptr = fw_getcmdline(); if (strstr(argptr, "debug")) { BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE; pr_info("Enabled Bonito debug mode\n"); } else BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE; } void __init *plat_get_fdt(void) { return (void *)__dtb_start; } void __init plat_mem_setup(void) { unsigned int i; void *fdt = plat_get_fdt(); fdt = malta_dt_shim(fdt); __dt_setup_arch(fdt); if (IS_ENABLED(CONFIG_EVA)) /* EVA has already been configured in mach-malta/kernel-init.h */ pr_info("Enhanced Virtual Addressing (EVA) activated\n"); mips_pcibios_init(); /* Request I/O space for devices used on the Malta board. */ for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++) request_resource(&ioport_resource, standard_io_resources+i); /* * Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge. 
*/ enable_dma(4); if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) bonito_quirks_setup(); plat_setup_iocoherency(); pci_clock_check(); #ifdef CONFIG_BLK_DEV_FD fd_activate(); #endif #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) screen_info_setup(); #endif }
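/*
 * Illustrative sketch, not part of the file above: pci_clock_check() reads
 * bits 4..2 of the Malta jumper register and uses them as an index into a
 * small table of PCI bus clocks (in MHz).  This standalone helper repeats
 * that lookup; the function name is hypothetical.
 */
static int demo_malta_pci_clock_mhz(unsigned int jmprs_reg)
{
	static const int pciclocks[] = { 33, 20, 25, 30, 12, 16, 37, 10 };
	unsigned int jmpr = (jmprs_reg >> 2) & 0x07;

	/* anything other than 33 MHz makes the setup code append pci_clock= */
	return pciclocks[jmpr];
}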
// SPDX-License-Identifier: GPL-2.0-or-later /* Lantiq cpu temperature sensor driver * * Copyright (C) 2017 Florian Eckert <[email protected]> */ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/init.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <lantiq_soc.h> /* gphy1 configuration register contains cpu temperature */ #define CGU_GPHY1_CR 0x0040 #define CGU_TEMP_PD BIT(19) static void ltq_cputemp_enable(void) { ltq_cgu_w32(ltq_cgu_r32(CGU_GPHY1_CR) | CGU_TEMP_PD, CGU_GPHY1_CR); } static void ltq_cputemp_disable(void *data) { ltq_cgu_w32(ltq_cgu_r32(CGU_GPHY1_CR) & ~CGU_TEMP_PD, CGU_GPHY1_CR); } static int ltq_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *temp) { int value; switch (attr) { case hwmon_temp_input: /* get the temperature including one decimal place */ value = (ltq_cgu_r32(CGU_GPHY1_CR) >> 9) & 0x01FF; value = value * 5; /* range -38 to +154 °C, register value zero is -38.0 °C */ value -= 380; /* scale temp to millidegree */ value = value * 100; break; default: return -EOPNOTSUPP; } *temp = value; return 0; } static umode_t ltq_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr, int channel) { if (type != hwmon_temp) return 0; switch (attr) { case hwmon_temp_input: return 0444; default: return 0; } } static const struct hwmon_channel_info * const ltq_info[] = { HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), NULL }; static const struct hwmon_ops ltq_hwmon_ops = { .is_visible = ltq_is_visible, .read = ltq_read, }; static const struct hwmon_chip_info ltq_chip_info = { .ops = &ltq_hwmon_ops, .info = ltq_info, }; static int ltq_cputemp_probe(struct platform_device *pdev) { struct device *hwmon_dev; int err = 0; /* available on vr9 v1.2 SoCs only */ if (ltq_soc_type() != SOC_TYPE_VR9_2) return -ENODEV; err = devm_add_action(&pdev->dev, ltq_cputemp_disable, NULL); if (err) return err; ltq_cputemp_enable(); hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "ltq_cputemp", NULL, &ltq_chip_info, NULL); if (IS_ERR(hwmon_dev)) { dev_err(&pdev->dev, "Failed to register as hwmon device"); return PTR_ERR(hwmon_dev); } return 0; } const struct of_device_id ltq_cputemp_match[] = { { .compatible = "lantiq,cputemp" }, {}, }; MODULE_DEVICE_TABLE(of, ltq_cputemp_match); static struct platform_driver ltq_cputemp_driver = { .probe = ltq_cputemp_probe, .driver = { .name = "ltq-cputemp", .of_match_table = ltq_cputemp_match, }, }; module_platform_driver(ltq_cputemp_driver); MODULE_AUTHOR("Florian Eckert <[email protected]>"); MODULE_DESCRIPTION("Lantiq cpu temperature sensor driver"); MODULE_LICENSE("GPL");
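/*
 * Illustrative sketch, not part of the driver above: the raw-to-millidegree
 * conversion done in ltq_read(), as a standalone helper.  The 9-bit register
 * field counts in 0.5 degC steps from -38.0 degC, so e.g. a raw value of 136
 * gives (136 * 5 - 380) * 100 = 30000, i.e. 30.0 degC.  The function name is
 * hypothetical.
 */
static long demo_ltq_raw_to_millicelsius(unsigned int raw9)
{
	long value = (raw9 & 0x01FF) * 5;	/* tenths of a degree above -38.0 degC */

	value -= 380;				/* apply the -38.0 degC offset */
	return value * 100;			/* tenths of a degree -> millidegrees */
}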
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #include "../display_mode_lib.h" #include "display_mode_vba_20v2.h" #include "../dml_inline_defs.h" /* * NOTE: * This file is gcc-parseable HW gospel, coming straight from HW engineers. * * It doesn't adhere to Linux kernel style and sometimes will do things in odd * ways. Unless there is something clearly wrong with it the code should * remain as-is as it provides us with a guarantee from HW that it is correct. */ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN20_MAX_DSC_IMAGE_WIDTH 5184 #define DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, double ReturnBW, bool DCCEnabledAnyPlane, double ReturnBandwidthToDCN); static unsigned int dscceComputeDelay( unsigned int bpc, double bpp, unsigned int sliceWidth, unsigned int numSlices, enum output_format_class pixelFormat); static unsigned int dscComputeDelay(enum output_format_class pixelFormat); static bool CalculateDelayAfterScaler( struct display_mode_lib *mode_lib, double ReturnBW, double ReadBandwidthPlaneLuma, double ReadBandwidthPlaneChroma, double TotalDataReadBandwidth, double DisplayPipeLineDeliveryTimeLuma, double DisplayPipeLineDeliveryTimeChroma, double DPPCLK, double DISPCLK, double PixelClock, unsigned int DSCDelay, unsigned int DPPPerPlane, bool ScalerEnabled, unsigned int NumberOfCursors, double DPPCLKDelaySubtotal, double DPPCLKDelaySCL, double DPPCLKDelaySCLLBOnly, double DPPCLKDelayCNVCFormater, double DPPCLKDelayCNVCCursor, double DISPCLKDelaySubtotal, unsigned int ScalerRecoutWidth, enum output_format_class OutputFormat, unsigned int HTotal, unsigned int SwathWidthSingleDPPY, double BytePerPixelDETY, double BytePerPixelDETC, unsigned int SwathHeightY, unsigned int SwathHeightC, bool Interlace, bool ProgressiveToInterlaceUnitInOPP, double *DSTXAfterScaler, double *DSTYAfterScaler ); // Super monster function with some 45 argument static bool CalculatePrefetchSchedule( struct display_mode_lib *mode_lib, double DPPCLK, double DISPCLK, double PixelClock, double DCFCLKDeepSleep, unsigned int DPPPerPlane, unsigned int NumberOfCursors, unsigned int VBlank, unsigned int HTotal, unsigned int MaxInterDCNTileRepeaters, unsigned int VStartup, unsigned int PageTableLevels, bool GPUVMEnable, bool DynamicMetadataEnable, unsigned int DynamicMetadataLinesBeforeActiveRequired, unsigned int DynamicMetadataTransmittedBytes, bool DCCEnable, double 
UrgentLatencyPixelDataOnly, double UrgentExtraLatency, double TCalc, unsigned int PDEAndMetaPTEBytesFrame, unsigned int MetaRowByte, unsigned int PixelPTEBytesPerRow, double PrefetchSourceLinesY, unsigned int SwathWidthY, double BytePerPixelDETY, double VInitPreFillY, unsigned int MaxNumSwathY, double PrefetchSourceLinesC, double BytePerPixelDETC, double VInitPreFillC, unsigned int MaxNumSwathC, unsigned int SwathHeightY, unsigned int SwathHeightC, double TWait, bool XFCEnabled, double XFCRemoteSurfaceFlipDelay, bool InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double DSTXAfterScaler, double DSTYAfterScaler, double *DestinationLinesForPrefetch, double *PrefetchBandwidth, double *DestinationLinesToRequestVMInVBlank, double *DestinationLinesToRequestRowInVBlank, double *VRatioPrefetchY, double *VRatioPrefetchC, double *RequiredPrefetchPixDataBW, double *Tno_bw, unsigned int *VUpdateOffsetPix, double *VUpdateWidthPix, double *VReadyOffsetPix); static double RoundToDFSGranularityUp(double Clock, double VCOSpeed); static double RoundToDFSGranularityDown(double Clock, double VCOSpeed); static double CalculatePrefetchSourceLines( struct display_mode_lib *mode_lib, double VRatio, double vtaps, bool Interlace, bool ProgressiveToInterlaceUnitInOPP, unsigned int SwathHeight, unsigned int ViewportYStart, double *VInitPreFill, unsigned int *MaxNumSwath); static unsigned int CalculateVMAndRowBytes( struct display_mode_lib *mode_lib, bool DCCEnable, unsigned int BlockHeight256Bytes, unsigned int BlockWidth256Bytes, enum source_format_class SourcePixelFormat, unsigned int SurfaceTiling, unsigned int BytePerPixel, enum scan_direction_class ScanDirection, unsigned int ViewportWidth, unsigned int ViewportHeight, unsigned int SwathWidthY, bool GPUVMEnable, unsigned int VMMPageSize, unsigned int PTEBufferSizeInRequestsLuma, unsigned int PDEProcessingBufIn64KBReqs, unsigned int Pitch, unsigned int DCCMetaPitch, unsigned int *MacroTileWidth, unsigned int *MetaRowByte, unsigned int *PixelPTEBytesPerRow, bool *PTEBufferSizeNotExceeded, unsigned int *dpte_row_height, unsigned int *meta_row_height); static double CalculateTWait( unsigned int PrefetchMode, double DRAMClockChangeLatency, double UrgentLatencyPixelDataOnly, double SREnterPlusExitTime); static double CalculateRemoteSurfaceFlipDelay( struct display_mode_lib *mode_lib, double VRatio, double SwathWidth, double Bpp, double LineTime, double XFCTSlvVupdateOffset, double XFCTSlvVupdateWidth, double XFCTSlvVreadyOffset, double XFCXBUFLatencyTolerance, double XFCFillBWOverhead, double XFCSlvChunkSize, double XFCBusTransportTime, double TCalc, double TWait, double *SrcActiveDrainRate, double *TInitXFill, double *TslvChk); static void CalculateActiveRowBandwidth( bool GPUVMEnable, enum source_format_class SourcePixelFormat, double VRatio, bool DCCEnable, double LineTime, unsigned int MetaRowByteLuma, unsigned int MetaRowByteChroma, unsigned int meta_row_height_luma, unsigned int meta_row_height_chroma, unsigned int PixelPTEBytesPerRowLuma, unsigned int PixelPTEBytesPerRowChroma, unsigned int dpte_row_height_luma, unsigned int dpte_row_height_chroma, double *meta_row_bw, double *dpte_row_bw, double *qual_row_bw); static void CalculateFlipSchedule( struct display_mode_lib *mode_lib, double UrgentExtraLatency, double UrgentLatencyPixelDataOnly, unsigned int GPUVMMaxPageTableLevels, bool GPUVMEnable, double BandwidthAvailableForImmediateFlip, unsigned int TotImmediateFlipBytes, enum source_format_class SourcePixelFormat, unsigned int ImmediateFlipBytes, 
double LineTime, double VRatio, double Tno_bw, double PDEAndMetaPTEBytesFrame, unsigned int MetaRowByte, unsigned int PixelPTEBytesPerRow, bool DCCEnable, unsigned int dpte_row_height, unsigned int meta_row_height, double qual_row_bw, double *DestinationLinesToRequestVMInImmediateFlip, double *DestinationLinesToRequestRowInImmediateFlip, double *final_flip_bw, bool *ImmediateFlipSupportedForPipe); static double CalculateWriteBackDelay( enum source_format_class WritebackPixelFormat, double WritebackHRatio, double WritebackVRatio, unsigned int WritebackLumaHTaps, unsigned int WritebackLumaVTaps, unsigned int WritebackChromaHTaps, unsigned int WritebackChromaVTaps, unsigned int WritebackDestinationWidth); static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( struct display_mode_lib *mode_lib); void dml20v2_recalculate(struct display_mode_lib *mode_lib) { ModeSupportAndSystemConfiguration(mode_lib); mode_lib->vba.FabricAndDRAMBandwidth = dml_min( mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels * mode_lib->vba.DRAMChannelWidth, mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000.0; PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib); dml20v2_DisplayPipeConfiguration(mode_lib); dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib); } static double adjust_ReturnBW( struct display_mode_lib *mode_lib, double ReturnBW, bool DCCEnabledAnyPlane, double ReturnBandwidthToDCN) { double CriticalCompression; if (DCCEnabledAnyPlane && ReturnBandwidthToDCN > mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0) ReturnBW = dml_min( ReturnBW, ReturnBandwidthToDCN * 4 * (1.0 - mode_lib->vba.UrgentLatencyPixelDataOnly / ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024 / ReturnBandwidthToDCN - mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4) + mode_lib->vba.UrgentLatencyPixelDataOnly)); CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK * mode_lib->vba.UrgentLatencyPixelDataOnly / (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatencyPixelDataOnly + (mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024); if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0) ReturnBW = dml_min( ReturnBW, 4.0 * ReturnBandwidthToDCN * (mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK * mode_lib->vba.UrgentLatencyPixelDataOnly / dml_pow( (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatencyPixelDataOnly + (mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024), 2)); return ReturnBW; } static unsigned int dscceComputeDelay( unsigned int bpc, double bpp, unsigned int sliceWidth, unsigned int numSlices, enum output_format_class pixelFormat) { // valid bpc = source bits per component in the set of {8, 10, 12} // valid bpp = increments of 1/16 of a bit // min = 6/7/8 in N420/N422/444, respectively // max = such that compression is 1:1 //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode) //valid numSlices = number of slices in the horiziontal direction per DSC engine in the set of {1, 2, 3, 4} //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420} // fixed value unsigned int 
rcModelSize = 8192; // N422/N420 operate at 2 pixels per clock unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l, Delay, pixels; if (pixelFormat == dm_n422 || pixelFormat == dm_420) pixelsPerClock = 2; // #all other modes operate at 1 pixel per clock else pixelsPerClock = 1; //initial transmit delay as per PPS initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock); //compute ssm delay if (bpc == 8) D = 81; else if (bpc == 10) D = 89; else D = 113; //divide by pixel per cycle to compute slice width as seen by DSC w = sliceWidth / pixelsPerClock; //422 mode has an additional cycle of delay if (pixelFormat == dm_s422) s = 1; else s = 0; //main calculation for the dscce ix = initalXmitDelay + 45; wx = (w + 2) / 3; p = 3 * wx - w; l0 = ix / w; a = ix + p * l0; ax = (a + 2) / 3 + D + 6 + 1; l = (ax + wx - 1) / wx; if ((ix % w) == 0 && p != 0) lstall = 1; else lstall = 0; Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22; //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels pixels = Delay * 3 * pixelsPerClock; return pixels; } static unsigned int dscComputeDelay(enum output_format_class pixelFormat) { unsigned int Delay = 0; if (pixelFormat == dm_420) { // sfr Delay = Delay + 2; // dsccif Delay = Delay + 0; // dscc - input deserializer Delay = Delay + 3; // dscc gets pixels every other cycle Delay = Delay + 2; // dscc - input cdc fifo Delay = Delay + 12; // dscc gets pixels every other cycle Delay = Delay + 13; // dscc - cdc uncertainty Delay = Delay + 2; // dscc - output cdc fifo Delay = Delay + 7; // dscc gets pixels every other cycle Delay = Delay + 3; // dscc - cdc uncertainty Delay = Delay + 2; // dscc - output serializer Delay = Delay + 1; // sft Delay = Delay + 1; } else if (pixelFormat == dm_n422) { // sfr Delay = Delay + 2; // dsccif Delay = Delay + 1; // dscc - input deserializer Delay = Delay + 5; // dscc - input cdc fifo Delay = Delay + 25; // dscc - cdc uncertainty Delay = Delay + 2; // dscc - output cdc fifo Delay = Delay + 10; // dscc - cdc uncertainty Delay = Delay + 2; // dscc - output serializer Delay = Delay + 1; // sft Delay = Delay + 1; } else { // sfr Delay = Delay + 2; // dsccif Delay = Delay + 0; // dscc - input deserializer Delay = Delay + 3; // dscc - input cdc fifo Delay = Delay + 12; // dscc - cdc uncertainty Delay = Delay + 2; // dscc - output cdc fifo Delay = Delay + 7; // dscc - output serializer Delay = Delay + 1; // dscc - cdc uncertainty Delay = Delay + 2; // sft Delay = Delay + 1; } return Delay; } static bool CalculateDelayAfterScaler( struct display_mode_lib *mode_lib, double ReturnBW, double ReadBandwidthPlaneLuma, double ReadBandwidthPlaneChroma, double TotalDataReadBandwidth, double DisplayPipeLineDeliveryTimeLuma, double DisplayPipeLineDeliveryTimeChroma, double DPPCLK, double DISPCLK, double PixelClock, unsigned int DSCDelay, unsigned int DPPPerPlane, bool ScalerEnabled, unsigned int NumberOfCursors, double DPPCLKDelaySubtotal, double DPPCLKDelaySCL, double DPPCLKDelaySCLLBOnly, double DPPCLKDelayCNVCFormater, double DPPCLKDelayCNVCCursor, double DISPCLKDelaySubtotal, unsigned int ScalerRecoutWidth, enum output_format_class OutputFormat, unsigned int HTotal, unsigned int SwathWidthSingleDPPY, double BytePerPixelDETY, double BytePerPixelDETC, unsigned int SwathHeightY, unsigned int SwathHeightC, bool Interlace, bool ProgressiveToInterlaceUnitInOPP, double *DSTXAfterScaler, double *DSTYAfterScaler ) { unsigned int DPPCycles, DISPCLKCycles; double 
DataFabricLineDeliveryTimeLuma; double DataFabricLineDeliveryTimeChroma; double DSTTotalPixelsAfterScaler; DataFabricLineDeliveryTimeLuma = SwathWidthSingleDPPY * SwathHeightY * dml_ceil(BytePerPixelDETY, 1) / (mode_lib->vba.ReturnBW * ReadBandwidthPlaneLuma / TotalDataReadBandwidth); mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(mode_lib->vba.LastPixelOfLineExtraWatermark, DataFabricLineDeliveryTimeLuma - DisplayPipeLineDeliveryTimeLuma); if (BytePerPixelDETC != 0) { DataFabricLineDeliveryTimeChroma = SwathWidthSingleDPPY / 2 * SwathHeightC * dml_ceil(BytePerPixelDETC, 2) / (mode_lib->vba.ReturnBW * ReadBandwidthPlaneChroma / TotalDataReadBandwidth); mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(mode_lib->vba.LastPixelOfLineExtraWatermark, DataFabricLineDeliveryTimeChroma - DisplayPipeLineDeliveryTimeChroma); } if (ScalerEnabled) DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL; else DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly; DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor; DISPCLKCycles = DISPCLKDelaySubtotal; if (DPPCLK == 0.0 || DISPCLK == 0.0) return true; *DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK + DSCDelay; if (DPPPerPlane > 1) *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth; if (OutputFormat == dm_420 || (Interlace && ProgressiveToInterlaceUnitInOPP)) *DSTYAfterScaler = 1; else *DSTYAfterScaler = 0; DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler; *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1); *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal)); return true; } static bool CalculatePrefetchSchedule( struct display_mode_lib *mode_lib, double DPPCLK, double DISPCLK, double PixelClock, double DCFCLKDeepSleep, unsigned int DPPPerPlane, unsigned int NumberOfCursors, unsigned int VBlank, unsigned int HTotal, unsigned int MaxInterDCNTileRepeaters, unsigned int VStartup, unsigned int PageTableLevels, bool GPUVMEnable, bool DynamicMetadataEnable, unsigned int DynamicMetadataLinesBeforeActiveRequired, unsigned int DynamicMetadataTransmittedBytes, bool DCCEnable, double UrgentLatencyPixelDataOnly, double UrgentExtraLatency, double TCalc, unsigned int PDEAndMetaPTEBytesFrame, unsigned int MetaRowByte, unsigned int PixelPTEBytesPerRow, double PrefetchSourceLinesY, unsigned int SwathWidthY, double BytePerPixelDETY, double VInitPreFillY, unsigned int MaxNumSwathY, double PrefetchSourceLinesC, double BytePerPixelDETC, double VInitPreFillC, unsigned int MaxNumSwathC, unsigned int SwathHeightY, unsigned int SwathHeightC, double TWait, bool XFCEnabled, double XFCRemoteSurfaceFlipDelay, bool InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double DSTXAfterScaler, double DSTYAfterScaler, double *DestinationLinesForPrefetch, double *PrefetchBandwidth, double *DestinationLinesToRequestVMInVBlank, double *DestinationLinesToRequestRowInVBlank, double *VRatioPrefetchY, double *VRatioPrefetchC, double *RequiredPrefetchPixDataBW, double *Tno_bw, unsigned int *VUpdateOffsetPix, double *VUpdateWidthPix, double *VReadyOffsetPix) { bool MyError = false; double TotalRepeaterDelayTime; double Tdm, LineTime, Tsetup; double dst_y_prefetch_equ; double Tsw_oto; double prefetch_bw_oto; double Tvm_oto; double Tr0_oto; double Tpre_oto; double dst_y_prefetch_oto; double TimeForFetchingMetaPTE = 0; double TimeForFetchingRowInVBlank = 0; double LinesToRequestPrefetchPixelData = 0; *VUpdateOffsetPix = 
dml_ceil(HTotal / 4.0, 1); TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK); *VUpdateWidthPix = (14.0 / DCFCLKDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime) * PixelClock; *VReadyOffsetPix = dml_max( 150.0 / DPPCLK, TotalRepeaterDelayTime + 20.0 / DCFCLKDeepSleep + 10.0 / DPPCLK) * PixelClock; Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock; LineTime = (double) HTotal / PixelClock; if (DynamicMetadataEnable) { double Tdmbf, Tdmec, Tdmsks; Tdm = dml_max(0.0, UrgentExtraLatency - TCalc); Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK; Tdmec = LineTime; if (DynamicMetadataLinesBeforeActiveRequired == 0) Tdmsks = VBlank * LineTime / 2.0; else Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime; if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP) Tdmsks = Tdmsks / 2; if (VStartup * LineTime < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) { MyError = true; } } else Tdm = 0; if (GPUVMEnable) { if (PageTableLevels == 4) *Tno_bw = UrgentExtraLatency + UrgentLatencyPixelDataOnly; else if (PageTableLevels == 3) *Tno_bw = UrgentExtraLatency; else *Tno_bw = 0; } else if (DCCEnable) *Tno_bw = LineTime; else *Tno_bw = LineTime / 4; dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime - (Tsetup + Tdm) / LineTime - (DSTYAfterScaler + DSTXAfterScaler / HTotal); Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime; prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2)) / Tsw_oto; if (GPUVMEnable == true) { Tvm_oto = dml_max( *Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto, dml_max( UrgentExtraLatency + UrgentLatencyPixelDataOnly * (PageTableLevels - 1), LineTime / 4.0)); } else Tvm_oto = LineTime / 4.0; if ((GPUVMEnable == true || DCCEnable == true)) { Tr0_oto = dml_max( (MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto, dml_max(UrgentLatencyPixelDataOnly, dml_max(LineTime - Tvm_oto, LineTime / 4))); } else Tr0_oto = LineTime - Tvm_oto; Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto; dst_y_prefetch_oto = Tpre_oto / LineTime; if (dst_y_prefetch_oto < dst_y_prefetch_equ) *DestinationLinesForPrefetch = dst_y_prefetch_oto; else *DestinationLinesForPrefetch = dst_y_prefetch_equ; *DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1) / 4; dml_print("DML: VStartup: %d\n", VStartup); dml_print("DML: TCalc: %f\n", TCalc); dml_print("DML: TWait: %f\n", TWait); dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay); dml_print("DML: LineTime: %f\n", LineTime); dml_print("DML: Tsetup: %f\n", Tsetup); dml_print("DML: Tdm: %f\n", Tdm); dml_print("DML: DSTYAfterScaler: %f\n", DSTYAfterScaler); dml_print("DML: DSTXAfterScaler: %f\n", DSTXAfterScaler); dml_print("DML: HTotal: %d\n", HTotal); *PrefetchBandwidth = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; if (*DestinationLinesForPrefetch > 1) { *PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2)) / (*DestinationLinesForPrefetch * LineTime - *Tno_bw); if (GPUVMEnable) { TimeForFetchingMetaPTE = dml_max( *Tno_bw + 
(double) PDEAndMetaPTEBytesFrame / *PrefetchBandwidth, dml_max( UrgentExtraLatency + UrgentLatencyPixelDataOnly * (PageTableLevels - 1), LineTime / 4)); } else { if (NumberOfCursors > 0 || XFCEnabled) TimeForFetchingMetaPTE = LineTime / 4; else TimeForFetchingMetaPTE = 0.0; } if ((GPUVMEnable == true || DCCEnable == true)) { TimeForFetchingRowInVBlank = dml_max( (MetaRowByte + PixelPTEBytesPerRow) / *PrefetchBandwidth, dml_max( UrgentLatencyPixelDataOnly, dml_max( LineTime - TimeForFetchingMetaPTE, LineTime / 4.0))); } else { if (NumberOfCursors > 0 || XFCEnabled) TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE; else TimeForFetchingRowInVBlank = 0.0; } *DestinationLinesToRequestVMInVBlank = dml_floor( 4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125), 1) / 4.0; *DestinationLinesToRequestRowInVBlank = dml_floor( 4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125), 1) / 4.0; LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ? (*DestinationLinesToRequestVMInVBlank + *DestinationLinesToRequestRowInVBlank) : 0.0); if (LinesToRequestPrefetchPixelData > 0) { *VRatioPrefetchY = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData; *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0); if ((SwathHeightY > 4) && (VInitPreFillY > 3)) { if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) { *VRatioPrefetchY = dml_max( (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData, (double) MaxNumSwathY * SwathHeightY / (LinesToRequestPrefetchPixelData - (VInitPreFillY - 3.0) / 2.0)); *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0); } else { MyError = true; *VRatioPrefetchY = 0; } } *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData; *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0); if ((SwathHeightC > 4)) { if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) { *VRatioPrefetchC = dml_max( *VRatioPrefetchC, (double) MaxNumSwathC * SwathHeightC / (LinesToRequestPrefetchPixelData - (VInitPreFillC - 3.0) / 2.0)); *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0); } else { MyError = true; *VRatioPrefetchC = 0; } } *RequiredPrefetchPixDataBW = DPPPerPlane * ((double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * dml_ceil( BytePerPixelDETY, 1) + (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * dml_ceil( BytePerPixelDETC, 2) / 2) * SwathWidthY / LineTime; } else { MyError = true; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; } } else { MyError = true; } if (MyError) { *PrefetchBandwidth = 0; *DestinationLinesToRequestVMInVBlank = 0; *DestinationLinesToRequestRowInVBlank = 0; *DestinationLinesForPrefetch = 0; *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBW = 0; } return MyError; } static double RoundToDFSGranularityUp(double Clock, double VCOSpeed) { return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1); } static double RoundToDFSGranularityDown(double Clock, double VCOSpeed) { return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1); } static double CalculatePrefetchSourceLines( struct display_mode_lib *mode_lib, double VRatio, double vtaps, bool Interlace, bool ProgressiveToInterlaceUnitInOPP, unsigned int SwathHeight, unsigned int ViewportYStart, double *VInitPreFill, unsigned int *MaxNumSwath) { unsigned int MaxPartialSwath; if (ProgressiveToInterlaceUnitInOPP) *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1); else *VInitPreFill = dml_floor((VRatio + vtaps + 1 + 
Interlace * 0.5 * VRatio) / 2.0, 1); if (!mode_lib->vba.IgnoreViewportPositioning) { *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0; if (*VInitPreFill > 1.0) MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight; else MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2) % SwathHeight; MaxPartialSwath = dml_max(1U, MaxPartialSwath); } else { if (ViewportYStart != 0) dml_print( "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n"); *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1); if (*VInitPreFill > 1.0) MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight; else MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1) % SwathHeight; } return *MaxNumSwath * SwathHeight + MaxPartialSwath; } static unsigned int CalculateVMAndRowBytes( struct display_mode_lib *mode_lib, bool DCCEnable, unsigned int BlockHeight256Bytes, unsigned int BlockWidth256Bytes, enum source_format_class SourcePixelFormat, unsigned int SurfaceTiling, unsigned int BytePerPixel, enum scan_direction_class ScanDirection, unsigned int ViewportWidth, unsigned int ViewportHeight, unsigned int SwathWidth, bool GPUVMEnable, unsigned int VMMPageSize, unsigned int PTEBufferSizeInRequestsLuma, unsigned int PDEProcessingBufIn64KBReqs, unsigned int Pitch, unsigned int DCCMetaPitch, unsigned int *MacroTileWidth, unsigned int *MetaRowByte, unsigned int *PixelPTEBytesPerRow, bool *PTEBufferSizeNotExceeded, unsigned int *dpte_row_height, unsigned int *meta_row_height) { unsigned int MetaRequestHeight; unsigned int MetaRequestWidth; unsigned int MetaSurfWidth; unsigned int MetaSurfHeight; unsigned int MPDEBytesFrame; unsigned int MetaPTEBytesFrame; unsigned int DCCMetaSurfaceBytes; unsigned int MacroTileSizeBytes; unsigned int MacroTileHeight; unsigned int DPDE0BytesFrame; unsigned int ExtraDPDEBytesFrame; unsigned int PDEAndMetaPTEBytesFrame; if (DCCEnable == true) { MetaRequestHeight = 8 * BlockHeight256Bytes; MetaRequestWidth = 8 * BlockWidth256Bytes; if (ScanDirection == dm_horz) { *meta_row_height = MetaRequestHeight; MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth) + MetaRequestWidth; *MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0; } else { *meta_row_height = MetaRequestWidth; MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight) + MetaRequestHeight; *MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0; } if (ScanDirection == dm_horz) { DCCMetaSurfaceBytes = DCCMetaPitch * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes) + 64 * BlockHeight256Bytes) * BytePerPixel / 256; } else { DCCMetaSurfaceBytes = DCCMetaPitch * (dml_ceil( (double) ViewportHeight - 1, 64 * BlockHeight256Bytes) + 64 * BlockHeight256Bytes) * BytePerPixel / 256; } if (GPUVMEnable == true) { MetaPTEBytesFrame = (dml_ceil( (double) (DCCMetaSurfaceBytes - VMMPageSize) / (8 * VMMPageSize), 1) + 1) * 64; MPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 1); } else { MetaPTEBytesFrame = 0; MPDEBytesFrame = 0; } } else { MetaPTEBytesFrame = 0; MPDEBytesFrame = 0; *MetaRowByte = 0; } if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_l_vp) { MacroTileSizeBytes = 256; MacroTileHeight = BlockHeight256Bytes; } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) { 
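	/* 4KB swizzle modes: a macro tile is 4KB and spans four rows of 256B
	 * blocks; the macro tile width is derived further down from
	 * size / bytes-per-pixel / height.
	 */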
MacroTileSizeBytes = 4096; MacroTileHeight = 4 * BlockHeight256Bytes; } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x || SurfaceTiling == dm_sw_64kb_r_x) { MacroTileSizeBytes = 65536; MacroTileHeight = 16 * BlockHeight256Bytes; } else { MacroTileSizeBytes = 262144; MacroTileHeight = 32 * BlockHeight256Bytes; } *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight; if (GPUVMEnable == true && mode_lib->vba.GPUVMMaxPageTableLevels > 1) { if (ScanDirection == dm_horz) { DPDE0BytesFrame = 64 * (dml_ceil( ((Pitch * (dml_ceil( ViewportHeight - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1); } else { DPDE0BytesFrame = 64 * (dml_ceil( ((Pitch * (dml_ceil( (double) SwathWidth - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1); } ExtraDPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 2); } else { DPDE0BytesFrame = 0; ExtraDPDEBytesFrame = 0; } PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame + ExtraDPDEBytesFrame; if (GPUVMEnable == true) { unsigned int PTERequestSize; unsigned int PixelPTEReqHeight; unsigned int PixelPTEReqWidth; double FractionOfPTEReturnDrop; unsigned int EffectivePDEProcessingBufIn64KBReqs; if (SurfaceTiling == dm_sw_linear) { PixelPTEReqHeight = 1; PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel; PTERequestSize = 64; FractionOfPTEReturnDrop = 0; } else if (MacroTileSizeBytes == 4096) { PixelPTEReqHeight = MacroTileHeight; PixelPTEReqWidth = 8 * *MacroTileWidth; PTERequestSize = 64; if (ScanDirection == dm_horz) FractionOfPTEReturnDrop = 0; else FractionOfPTEReturnDrop = 7.0 / 8; } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) { PixelPTEReqHeight = 16 * BlockHeight256Bytes; PixelPTEReqWidth = 16 * BlockWidth256Bytes; PTERequestSize = 128; FractionOfPTEReturnDrop = 0; } else { PixelPTEReqHeight = MacroTileHeight; PixelPTEReqWidth = 8 * *MacroTileWidth; PTERequestSize = 64; FractionOfPTEReturnDrop = 0; } if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2; else EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs; if (SurfaceTiling == dm_sw_linear) { *dpte_row_height = dml_min( 128, 1 << (unsigned int) dml_floor( dml_log2( dml_min( (double) PTEBufferSizeInRequestsLuma * PixelPTEReqWidth, EffectivePDEProcessingBufIn64KBReqs * 65536.0 / BytePerPixel) / Pitch), 1)); *PixelPTEBytesPerRow = PTERequestSize * (dml_ceil( (double) (Pitch * *dpte_row_height - 1) / PixelPTEReqWidth, 1) + 1); } else if (ScanDirection == dm_horz) { *dpte_row_height = PixelPTEReqHeight; *PixelPTEBytesPerRow = PTERequestSize * (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1) + 1); } else { *dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth); *PixelPTEBytesPerRow = PTERequestSize * (dml_ceil( ((double) SwathWidth - 1) / PixelPTEReqHeight, 1) + 1); } if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop) <= 64 * PTEBufferSizeInRequestsLuma) { *PTEBufferSizeNotExceeded = true; } else { *PTEBufferSizeNotExceeded = false; } } else { *PixelPTEBytesPerRow = 0; *PTEBufferSizeNotExceeded = true; } return PDEAndMetaPTEBytesFrame; } static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( struct display_mode_lib 
*mode_lib) { unsigned int j, k; mode_lib->vba.WritebackDISPCLK = 0.0; mode_lib->vba.DISPCLKWithRamping = 0; mode_lib->vba.DISPCLKWithoutRamping = 0; mode_lib->vba.GlobalDPPCLK = 0.0; // dml_ml->vba.DISPCLK and dml_ml->vba.DPPCLK Calculation // for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.WritebackEnable[k]) { mode_lib->vba.WritebackDISPCLK = dml_max( mode_lib->vba.WritebackDISPCLK, CalculateWriteBackDISPCLK( mode_lib->vba.WritebackPixelFormat[k], mode_lib->vba.PixelClock[k], mode_lib->vba.WritebackHRatio[k], mode_lib->vba.WritebackVRatio[k], mode_lib->vba.WritebackLumaHTaps[k], mode_lib->vba.WritebackLumaVTaps[k], mode_lib->vba.WritebackChromaHTaps[k], mode_lib->vba.WritebackChromaVTaps[k], mode_lib->vba.WritebackDestinationWidth[k], mode_lib->vba.HTotal[k], mode_lib->vba.WritebackChromaLineBufferWidth)); } } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.HRatio[k] > 1) { mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min( mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput * mode_lib->vba.HRatio[k] / dml_ceil( mode_lib->vba.htaps[k] / 6.0, 1)); } else { mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min( mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput); } mode_lib->vba.DPPCLKUsingSingleDPPLuma = mode_lib->vba.PixelClock[k] * dml_max( mode_lib->vba.vtaps[k] / 6.0 * dml_min( 1.0, mode_lib->vba.HRatio[k]), dml_max( mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k], 1.0)); if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6) && mode_lib->vba.DPPCLKUsingSingleDPPLuma < 2 * mode_lib->vba.PixelClock[k]) { mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k]; } if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0; mode_lib->vba.DPPCLKUsingSingleDPP[k] = mode_lib->vba.DPPCLKUsingSingleDPPLuma; } else { if (mode_lib->vba.HRatio[k] > 1) { mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min( mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput * mode_lib->vba.HRatio[k] / 2 / dml_ceil( mode_lib->vba.HTAPsChroma[k] / 6.0, 1.0)); } else { mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min( mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput); } mode_lib->vba.DPPCLKUsingSingleDPPChroma = mode_lib->vba.PixelClock[k] * dml_max( mode_lib->vba.VTAPsChroma[k] / 6.0 * dml_min( 1.0, mode_lib->vba.HRatio[k] / 2), dml_max( mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / 4 / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k], 1.0)); if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6) && mode_lib->vba.DPPCLKUsingSingleDPPChroma < 2 * mode_lib->vba.PixelClock[k]) { mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2 * mode_lib->vba.PixelClock[k]; } mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max( mode_lib->vba.DPPCLKUsingSingleDPPLuma, mode_lib->vba.DPPCLKUsingSingleDPPChroma); } } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.BlendingAndTiming[k] != k) continue; if (mode_lib->vba.ODMCombineEnabled[k]) { mode_lib->vba.DISPCLKWithRamping = dml_max( mode_lib->vba.DISPCLKWithRamping, mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100)); mode_lib->vba.DISPCLKWithoutRamping = dml_max( mode_lib->vba.DISPCLKWithoutRamping, mode_lib->vba.PixelClock[k] / 2 * (1 + 
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100)); } else if (!mode_lib->vba.ODMCombineEnabled[k]) { mode_lib->vba.DISPCLKWithRamping = dml_max( mode_lib->vba.DISPCLKWithRamping, mode_lib->vba.PixelClock[k] * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100)); mode_lib->vba.DISPCLKWithoutRamping = dml_max( mode_lib->vba.DISPCLKWithoutRamping, mode_lib->vba.PixelClock[k] * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100)); } } mode_lib->vba.DISPCLKWithRamping = dml_max( mode_lib->vba.DISPCLKWithRamping, mode_lib->vba.WritebackDISPCLK); mode_lib->vba.DISPCLKWithoutRamping = dml_max( mode_lib->vba.DISPCLKWithoutRamping, mode_lib->vba.WritebackDISPCLK); ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0); mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp( mode_lib->vba.DISPCLKWithRamping, mode_lib->vba.DISPCLKDPPCLKVCOSpeed); mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp( mode_lib->vba.DISPCLKWithoutRamping, mode_lib->vba.DISPCLKDPPCLKVCOSpeed); mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.soc.clock_limits[mode_lib->vba.soc.num_states].dispclk_mhz, mode_lib->vba.DISPCLKDPPCLKVCOSpeed); if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) { mode_lib->vba.DISPCLK_calculated = mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity; } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) { mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity; } else { mode_lib->vba.DISPCLK_calculated = mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity; } DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated); for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.DPPPerPlane[k] == 0) { mode_lib->vba.DPPCLK_calculated[k] = 0; } else { mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k] / mode_lib->vba.DPPPerPlane[k] * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100); } mode_lib->vba.GlobalDPPCLK = dml_max( mode_lib->vba.GlobalDPPCLK, mode_lib->vba.DPPCLK_calculated[k]); } mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp( mode_lib->vba.GlobalDPPCLK, mode_lib->vba.DISPCLKDPPCLKVCOSpeed); for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255 * dml_ceil( mode_lib->vba.DPPCLK_calculated[k] * 255 / mode_lib->vba.GlobalDPPCLK, 1); DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]); } // Urgent Watermark mode_lib->vba.DCCEnabledAnyPlane = false; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) if (mode_lib->vba.DCCEnable[k]) mode_lib->vba.DCCEnabledAnyPlane = true; mode_lib->vba.ReturnBandwidthToDCN = dml_min( mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK, mode_lib->vba.FabricAndDRAMBandwidth * 1000) * mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN; mode_lib->vba.ReturnBW = adjust_ReturnBW( mode_lib, mode_lib->vba.ReturnBW, mode_lib->vba.DCCEnabledAnyPlane, mode_lib->vba.ReturnBandwidthToDCN); // Let's do this calculation again?? 
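	/* Second pass: recompute ReturnBandwidthToDCN below without the
	 * PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly
	 * derating and feed that raw value back into adjust_ReturnBW.
	 */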
mode_lib->vba.ReturnBandwidthToDCN = dml_min( mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK, mode_lib->vba.FabricAndDRAMBandwidth * 1000); mode_lib->vba.ReturnBW = adjust_ReturnBW( mode_lib, mode_lib->vba.ReturnBW, mode_lib->vba.DCCEnabledAnyPlane, mode_lib->vba.ReturnBandwidthToDCN); DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK); DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN); DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW); for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { bool MainPlaneDoesODMCombine = false; if (mode_lib->vba.SourceScan[k] == dm_horz) mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k]; else mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) if (mode_lib->vba.BlendingAndTiming[k] == j && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) MainPlaneDoesODMCombine = true; if (MainPlaneDoesODMCombine == true) mode_lib->vba.SwathWidthY[k] = dml_min( (double) mode_lib->vba.SwathWidthSingleDPPY[k], dml_round( mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])); else { if (mode_lib->vba.DPPPerPlane[k] == 0) { mode_lib->vba.SwathWidthY[k] = 0; } else { mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k] / mode_lib->vba.DPPPerPlane[k]; } } } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) { mode_lib->vba.BytePerPixelDETY[k] = 8; mode_lib->vba.BytePerPixelDETC[k] = 0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) { mode_lib->vba.BytePerPixelDETY[k] = 4; mode_lib->vba.BytePerPixelDETC[k] = 0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) { mode_lib->vba.BytePerPixelDETY[k] = 2; mode_lib->vba.BytePerPixelDETC[k] = 0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) { mode_lib->vba.BytePerPixelDETY[k] = 1; mode_lib->vba.BytePerPixelDETC[k] = 0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) { mode_lib->vba.BytePerPixelDETY[k] = 1; mode_lib->vba.BytePerPixelDETC[k] = 2; } else { // dm_420_10 mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0; mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0; } } mode_lib->vba.TotalDataReadBandwidth = 0.0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k] * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k]; mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k] / 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2) / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k] / 2; DTRACE( " read_bw[%i] = %fBps", k, mode_lib->vba.ReadBandwidthPlaneLuma[k] + mode_lib->vba.ReadBandwidthPlaneChroma[k]); mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k] + mode_lib->vba.ReadBandwidthPlaneChroma[k]; } mode_lib->vba.TotalDCCActiveDPP = 0; mode_lib->vba.TotalActiveDPP = 0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + mode_lib->vba.DPPPerPlane[k]; if (mode_lib->vba.DCCEnable[k]) mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + mode_lib->vba.DPPPerPlane[k]; } mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency = 
(mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK + mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly * mode_lib->vba.NumberOfChannels / mode_lib->vba.ReturnBW; mode_lib->vba.LastPixelOfLineExtraWatermark = 0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.VRatio[k] <= 1.0) mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] = (double) mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k] / mode_lib->vba.HRatio[k] / mode_lib->vba.PixelClock[k]; else mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] = (double) mode_lib->vba.SwathWidthY[k] / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] / mode_lib->vba.DPPCLK[k]; if (mode_lib->vba.BytePerPixelDETC[k] == 0) mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0; else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = mode_lib->vba.SwathWidthY[k] / 2.0 * mode_lib->vba.DPPPerPlane[k] / (mode_lib->vba.HRatio[k] / 2.0) / mode_lib->vba.PixelClock[k]; else mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = mode_lib->vba.SwathWidthY[k] / 2.0 / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] / mode_lib->vba.DPPCLK[k]; } mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency + (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte + mode_lib->vba.TotalDCCActiveDPP * mode_lib->vba.MetaChunkSize) * 1024.0 / mode_lib->vba.ReturnBW; if (mode_lib->vba.GPUVMEnable) mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP * mode_lib->vba.PTEGroupSize / mode_lib->vba.ReturnBW; mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatencyPixelDataOnly + mode_lib->vba.LastPixelOfLineExtraWatermark + mode_lib->vba.UrgentExtraLatency; DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency); DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark); mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly; mode_lib->vba.TotalActiveWriteback = 0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.WritebackEnable[k]) mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + mode_lib->vba.ActiveWritebacksPerPlane[k]; } if (mode_lib->vba.TotalActiveWriteback <= 1) mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency; else mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency + mode_lib->vba.WritebackChunkSize * 1024.0 / 32 / mode_lib->vba.SOCCLK; DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark); // NB P-State/DRAM Clock Change Watermark mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency + mode_lib->vba.UrgentWatermark; DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark); DTRACE(" calculating wb pstate watermark"); DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback); DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK); if (mode_lib->vba.TotalActiveWriteback <= 1) mode_lib->vba.WritebackDRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency + mode_lib->vba.WritebackLatency; else mode_lib->vba.WritebackDRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency + mode_lib->vba.WritebackLatency + mode_lib->vba.WritebackChunkSize * 1024.0 / 32 / mode_lib->vba.SOCCLK; DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark); // Stutter Efficiency for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k] / 
mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k]; mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor( mode_lib->vba.LinesInDETY[k], mode_lib->vba.SwathHeightY[k]); mode_lib->vba.FullDETBufferingTimeY[k] = mode_lib->vba.LinesInDETYRoundedDownToSwath[k] * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) / mode_lib->vba.VRatio[k]; if (mode_lib->vba.BytePerPixelDETC[k] > 0) { mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k] / mode_lib->vba.BytePerPixelDETC[k] / (mode_lib->vba.SwathWidthY[k] / 2); mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor( mode_lib->vba.LinesInDETC[k], mode_lib->vba.SwathHeightC[k]); mode_lib->vba.FullDETBufferingTimeC[k] = mode_lib->vba.LinesInDETCRoundedDownToSwath[k] * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) / (mode_lib->vba.VRatio[k] / 2); } else { mode_lib->vba.LinesInDETC[k] = 0; mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0; mode_lib->vba.FullDETBufferingTimeC[k] = 999999; } } mode_lib->vba.MinFullDETBufferingTime = 999999.0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.FullDETBufferingTimeY[k] < mode_lib->vba.MinFullDETBufferingTime) { mode_lib->vba.MinFullDETBufferingTime = mode_lib->vba.FullDETBufferingTimeY[k]; mode_lib->vba.FrameTimeForMinFullDETBufferingTime = (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]; } if (mode_lib->vba.FullDETBufferingTimeC[k] < mode_lib->vba.MinFullDETBufferingTime) { mode_lib->vba.MinFullDETBufferingTime = mode_lib->vba.FullDETBufferingTimeC[k]; mode_lib->vba.FrameTimeForMinFullDETBufferingTime = (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]; } } mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.DCCEnable[k]) { mode_lib->vba.AverageReadBandwidthGBytePerSecond = mode_lib->vba.AverageReadBandwidthGBytePerSecond + mode_lib->vba.ReadBandwidthPlaneLuma[k] / mode_lib->vba.DCCRate[k] / 1000 + mode_lib->vba.ReadBandwidthPlaneChroma[k] / mode_lib->vba.DCCRate[k] / 1000; } else { mode_lib->vba.AverageReadBandwidthGBytePerSecond = mode_lib->vba.AverageReadBandwidthGBytePerSecond + mode_lib->vba.ReadBandwidthPlaneLuma[k] / 1000 + mode_lib->vba.ReadBandwidthPlaneChroma[k] / 1000; } if (mode_lib->vba.DCCEnable[k]) { mode_lib->vba.AverageReadBandwidthGBytePerSecond = mode_lib->vba.AverageReadBandwidthGBytePerSecond + mode_lib->vba.ReadBandwidthPlaneLuma[k] / 1000 / 256 + mode_lib->vba.ReadBandwidthPlaneChroma[k] / 1000 / 256; } if (mode_lib->vba.GPUVMEnable) { mode_lib->vba.AverageReadBandwidthGBytePerSecond = mode_lib->vba.AverageReadBandwidthGBytePerSecond + mode_lib->vba.ReadBandwidthPlaneLuma[k] / 1000 / 512 + mode_lib->vba.ReadBandwidthPlaneChroma[k] / 1000 / 512; } } mode_lib->vba.PartOfBurstThatFitsInROB = dml_min( mode_lib->vba.MinFullDETBufferingTime * mode_lib->vba.TotalDataReadBandwidth, mode_lib->vba.ROBBufferSizeInKByte * 1024 * mode_lib->vba.TotalDataReadBandwidth / (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)); mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB * (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000) / mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW + (mode_lib->vba.MinFullDETBufferingTime * mode_lib->vba.TotalDataReadBandwidth - mode_lib->vba.PartOfBurstThatFitsInROB) / (mode_lib->vba.DCFCLK * 64); if (mode_lib->vba.TotalActiveWriteback == 0) { mode_lib->vba.StutterEfficiencyNotIncludingVBlank 
= (1 - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime) / mode_lib->vba.MinFullDETBufferingTime) * 100; } else { mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0; } mode_lib->vba.SmallestVBlank = 999999; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]; } else { mode_lib->vba.VBlankTime = 0; } mode_lib->vba.SmallestVBlank = dml_min( mode_lib->vba.SmallestVBlank, mode_lib->vba.VBlankTime); } mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100 * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime - mode_lib->vba.SmallestVBlank) + mode_lib->vba.SmallestVBlank) / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100; // dml_ml->vba.DCFCLK Deep Sleep mode_lib->vba.DCFCLKDeepSleep = 8.0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) { if (mode_lib->vba.BytePerPixelDETC[k] > 0) { mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max( 1.1 * mode_lib->vba.SwathWidthY[k] * dml_ceil( mode_lib->vba.BytePerPixelDETY[k], 1) / 32 / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], 1.1 * mode_lib->vba.SwathWidthY[k] / 2.0 * dml_ceil( mode_lib->vba.BytePerPixelDETC[k], 2) / 32 / mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]); } else mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * mode_lib->vba.SwathWidthY[k] * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0 / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k]; mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max( mode_lib->vba.DCFCLKDeepSleepPerPlane[k], mode_lib->vba.PixelClock[k] / 16.0); mode_lib->vba.DCFCLKDeepSleep = dml_max( mode_lib->vba.DCFCLKDeepSleep, mode_lib->vba.DCFCLKDeepSleepPerPlane[k]); DTRACE( " dcfclk_deepsleep_per_plane[%i] = %fMHz", k, mode_lib->vba.DCFCLKDeepSleepPerPlane[k]); } DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFCLKDeepSleep); // Stutter Watermark mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime + mode_lib->vba.LastPixelOfLineExtraWatermark + mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFCLKDeepSleep; mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime + mode_lib->vba.LastPixelOfLineExtraWatermark + mode_lib->vba.UrgentExtraLatency; DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark); DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark); // Urgent Latency Supported for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.EffectiveDETPlusLBLinesLuma = dml_floor( mode_lib->vba.LinesInDETY[k] + dml_min( mode_lib->vba.LinesInDETY[k] * mode_lib->vba.DPPCLK[k] * mode_lib->vba.BytePerPixelDETY[k] * mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] / (mode_lib->vba.ReturnBW / mode_lib->vba.DPPPerPlane[k]), (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma), mode_lib->vba.SwathHeightY[k]); mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) / mode_lib->vba.VRatio[k] - mode_lib->vba.EffectiveDETPlusLBLinesLuma * mode_lib->vba.SwathWidthY[k] * mode_lib->vba.BytePerPixelDETY[k] / (mode_lib->vba.ReturnBW / mode_lib->vba.DPPPerPlane[k]); if (mode_lib->vba.BytePerPixelDETC[k] > 0) { mode_lib->vba.EffectiveDETPlusLBLinesChroma = dml_floor( mode_lib->vba.LinesInDETC[k] + dml_min( mode_lib->vba.LinesInDETC[k] * 
mode_lib->vba.DPPCLK[k] * mode_lib->vba.BytePerPixelDETC[k] * mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] / (mode_lib->vba.ReturnBW / mode_lib->vba.DPPPerPlane[k]), (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma), mode_lib->vba.SwathHeightC[k]); mode_lib->vba.UrgentLatencySupportUsChroma = mode_lib->vba.EffectiveDETPlusLBLinesChroma * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) / (mode_lib->vba.VRatio[k] / 2) - mode_lib->vba.EffectiveDETPlusLBLinesChroma * (mode_lib->vba.SwathWidthY[k] / 2) * mode_lib->vba.BytePerPixelDETC[k] / (mode_lib->vba.ReturnBW / mode_lib->vba.DPPPerPlane[k]); mode_lib->vba.UrgentLatencySupportUs[k] = dml_min( mode_lib->vba.UrgentLatencySupportUsLuma, mode_lib->vba.UrgentLatencySupportUsChroma); } else { mode_lib->vba.UrgentLatencySupportUs[k] = mode_lib->vba.UrgentLatencySupportUsLuma; } } mode_lib->vba.MinUrgentLatencySupportUs = 999999; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.MinUrgentLatencySupportUs = dml_min( mode_lib->vba.MinUrgentLatencySupportUs, mode_lib->vba.UrgentLatencySupportUs[k]); } // Non-Urgent Latency Tolerance mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs - mode_lib->vba.UrgentWatermark; // DSCCLK for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) { mode_lib->vba.DSCCLK_calculated[k] = 0.0; } else { if (mode_lib->vba.OutputFormat[k] == dm_420 || mode_lib->vba.OutputFormat[k] == dm_n422) mode_lib->vba.DSCFormatFactor = 2; else mode_lib->vba.DSCFormatFactor = 1; if (mode_lib->vba.ODMCombineEnabled[k]) mode_lib->vba.DSCCLK_calculated[k] = mode_lib->vba.PixelClockBackEnd[k] / 6 / mode_lib->vba.DSCFormatFactor / (1 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100); else mode_lib->vba.DSCCLK_calculated[k] = mode_lib->vba.PixelClockBackEnd[k] / 3 / mode_lib->vba.DSCFormatFactor / (1 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100); } } // DSC Delay // TODO for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { double bpp = mode_lib->vba.OutputBpp[k]; unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k]; if (mode_lib->vba.DSCEnabled[k] && bpp != 0) { if (!mode_lib->vba.ODMCombineEnabled[k]) { mode_lib->vba.DSCDelay[k] = dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], bpp, dml_ceil( (double) mode_lib->vba.HActive[k] / mode_lib->vba.NumberOfDSCSlices[k], 1), slices, mode_lib->vba.OutputFormat[k]) + dscComputeDelay( mode_lib->vba.OutputFormat[k]); } else { mode_lib->vba.DSCDelay[k] = 2 * (dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], bpp, dml_ceil( (double) mode_lib->vba.HActive[k] / mode_lib->vba.NumberOfDSCSlices[k], 1), slices / 2.0, mode_lib->vba.OutputFormat[k]) + dscComputeDelay( mode_lib->vba.OutputFormat[k])); } mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.PixelClockBackEnd[k]; } else { mode_lib->vba.DSCDelay[k] = 0; } } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes if (j != k && mode_lib->vba.BlendingAndTiming[k] == j && mode_lib->vba.DSCEnabled[j]) mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j]; // Prefetch for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { unsigned int PDEAndMetaPTEBytesFrameY; unsigned int PixelPTEBytesPerRowY; unsigned int MetaRowByteY; unsigned int MetaRowByteC; unsigned int PDEAndMetaPTEBytesFrameC; unsigned int PixelPTEBytesPerRowC; 
Calculate256BBlockSizes( mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.SurfaceTiling[k], dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1), dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2), &mode_lib->vba.BlockHeight256BytesY[k], &mode_lib->vba.BlockHeight256BytesC[k], &mode_lib->vba.BlockWidth256BytesY[k], &mode_lib->vba.BlockWidth256BytesC[k]); PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes( mode_lib, mode_lib->vba.DCCEnable[k], mode_lib->vba.BlockHeight256BytesY[k], mode_lib->vba.BlockWidth256BytesY[k], mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.SurfaceTiling[k], dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1), mode_lib->vba.SourceScan[k], mode_lib->vba.ViewportWidth[k], mode_lib->vba.ViewportHeight[k], mode_lib->vba.SwathWidthY[k], mode_lib->vba.GPUVMEnable, mode_lib->vba.VMMPageSize, mode_lib->vba.PTEBufferSizeInRequestsLuma, mode_lib->vba.PDEProcessingBufIn64KBReqs, mode_lib->vba.PitchY[k], mode_lib->vba.DCCMetaPitchY[k], &mode_lib->vba.MacroTileWidthY[k], &MetaRowByteY, &PixelPTEBytesPerRowY, &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0], &mode_lib->vba.dpte_row_height[k], &mode_lib->vba.meta_row_height[k]); mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.vtaps[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, mode_lib->vba.SwathHeightY[k], mode_lib->vba.ViewportYStartY[k], &mode_lib->vba.VInitPreFillY[k], &mode_lib->vba.MaxNumSwathY[k]); if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) { PDEAndMetaPTEBytesFrameC = CalculateVMAndRowBytes( mode_lib, mode_lib->vba.DCCEnable[k], mode_lib->vba.BlockHeight256BytesC[k], mode_lib->vba.BlockWidth256BytesC[k], mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.SurfaceTiling[k], dml_ceil( mode_lib->vba.BytePerPixelDETC[k], 2), mode_lib->vba.SourceScan[k], mode_lib->vba.ViewportWidth[k] / 2, mode_lib->vba.ViewportHeight[k] / 2, mode_lib->vba.SwathWidthY[k] / 2, mode_lib->vba.GPUVMEnable, mode_lib->vba.VMMPageSize, mode_lib->vba.PTEBufferSizeInRequestsLuma, mode_lib->vba.PDEProcessingBufIn64KBReqs, mode_lib->vba.PitchC[k], 0, &mode_lib->vba.MacroTileWidthC[k], &MetaRowByteC, &PixelPTEBytesPerRowC, &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0], &mode_lib->vba.dpte_row_height_chroma[k], &mode_lib->vba.meta_row_height_chroma[k]); mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k] / 2, mode_lib->vba.VTAPsChroma[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, mode_lib->vba.SwathHeightC[k], mode_lib->vba.ViewportYStartC[k], &mode_lib->vba.VInitPreFillC[k], &mode_lib->vba.MaxNumSwathC[k]); } else { PixelPTEBytesPerRowC = 0; PDEAndMetaPTEBytesFrameC = 0; MetaRowByteC = 0; mode_lib->vba.MaxNumSwathC[k] = 0; mode_lib->vba.PrefetchSourceLinesC[k] = 0; } mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC; mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + PDEAndMetaPTEBytesFrameC; mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC; CalculateActiveRowBandwidth( mode_lib->vba.GPUVMEnable, mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.VRatio[k], mode_lib->vba.DCCEnable[k], mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], MetaRowByteY, MetaRowByteC, mode_lib->vba.meta_row_height[k], 
mode_lib->vba.meta_row_height_chroma[k], PixelPTEBytesPerRowY, PixelPTEBytesPerRowC, mode_lib->vba.dpte_row_height[k], mode_lib->vba.dpte_row_height_chroma[k], &mode_lib->vba.meta_row_bw[k], &mode_lib->vba.dpte_row_bw[k], &mode_lib->vba.qual_row_bw[k]); } mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFCLKDeepSleep; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.BlendingAndTiming[k] == k) { if (mode_lib->vba.WritebackEnable[k] == true) { mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = mode_lib->vba.WritebackLatency + CalculateWriteBackDelay( mode_lib->vba.WritebackPixelFormat[k], mode_lib->vba.WritebackHRatio[k], mode_lib->vba.WritebackVRatio[k], mode_lib->vba.WritebackLumaHTaps[k], mode_lib->vba.WritebackLumaVTaps[k], mode_lib->vba.WritebackChromaHTaps[k], mode_lib->vba.WritebackChromaVTaps[k], mode_lib->vba.WritebackDestinationWidth[k]) / mode_lib->vba.DISPCLK; } else mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0; for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[j] == k && mode_lib->vba.WritebackEnable[j] == true) { mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = dml_max( mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k], mode_lib->vba.WritebackLatency + CalculateWriteBackDelay( mode_lib->vba.WritebackPixelFormat[j], mode_lib->vba.WritebackHRatio[j], mode_lib->vba.WritebackVRatio[j], mode_lib->vba.WritebackLumaHTaps[j], mode_lib->vba.WritebackLumaVTaps[j], mode_lib->vba.WritebackChromaHTaps[j], mode_lib->vba.WritebackChromaVTaps[j], mode_lib->vba.WritebackDestinationWidth[j]) / mode_lib->vba.DISPCLK); } } } } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) if (mode_lib->vba.BlendingAndTiming[k] == j) mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j]; mode_lib->vba.VStartupLines = 13; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.MaxVStartupLines[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max( 1.0, dml_ceil( mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1)); } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) mode_lib->vba.MaximumMaxVStartupLines = dml_max( mode_lib->vba.MaximumMaxVStartupLines, mode_lib->vba.MaxVStartupLines[k]); for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.cursor_bw[k] = 0.0; for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j) mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j] * mode_lib->vba.CursorBPP[k][j] / 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k]; } do { double MaxTotalRDBandwidth = 0; bool DestinationLineTimesForPrefetchLessThan2 = false; bool VRatioPrefetchMoreThan4 = false; bool prefetch_vm_bw_valid = true; bool prefetch_row_bw_valid = true; double TWait = CalculateTWait( mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb], mode_lib->vba.DRAMClockChangeLatency, mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.SREnterPlusExitTime); for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.XFCEnabled[k] == true) { mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.SwathWidthY[k], dml_ceil( mode_lib->vba.BytePerPixelDETY[k], 1), mode_lib->vba.HTotal[k] / 
mode_lib->vba.PixelClock[k], mode_lib->vba.XFCTSlvVupdateOffset, mode_lib->vba.XFCTSlvVupdateWidth, mode_lib->vba.XFCTSlvVreadyOffset, mode_lib->vba.XFCXBUFLatencyTolerance, mode_lib->vba.XFCFillBWOverhead, mode_lib->vba.XFCSlvChunkSize, mode_lib->vba.XFCBusTransportTime, mode_lib->vba.TCalc, TWait, &mode_lib->vba.SrcActiveDrainRate, &mode_lib->vba.TInitXFill, &mode_lib->vba.TslvChk); } else { mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0; } CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBW, mode_lib->vba.ReadBandwidthPlaneLuma[k], mode_lib->vba.ReadBandwidthPlaneChroma[k], mode_lib->vba.TotalDataReadBandwidth, mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k], mode_lib->vba.DPPCLK[k], mode_lib->vba.DISPCLK, mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelay[k], mode_lib->vba.DPPPerPlane[k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k], mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal, mode_lib->vba.SwathWidthY[k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.HTotal[k], mode_lib->vba.SwathWidthSingleDPPY[k], mode_lib->vba.BytePerPixelDETY[k], mode_lib->vba.BytePerPixelDETC[k], mode_lib->vba.SwathHeightY[k], mode_lib->vba.SwathHeightC[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, &mode_lib->vba.DSTXAfterScaler[k], &mode_lib->vba.DSTYAfterScaler[k]); mode_lib->vba.ErrorResult[k] = CalculatePrefetchSchedule( mode_lib, mode_lib->vba.DPPCLK[k], mode_lib->vba.DISPCLK, mode_lib->vba.PixelClock[k], mode_lib->vba.DCFCLKDeepSleep, mode_lib->vba.DPPPerPlane[k], mode_lib->vba.NumberOfCursors[k], mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.MaxInterDCNTileRepeaters, dml_min( mode_lib->vba.VStartupLines, mode_lib->vba.MaxVStartupLines[k]), mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, mode_lib->vba.DynamicMetadataEnable[k], mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], mode_lib->vba.DynamicMetadataTransmittedBytes[k], mode_lib->vba.DCCEnable[k], mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.UrgentExtraLatency, mode_lib->vba.TCalc, mode_lib->vba.PDEAndMetaPTEBytesFrame[k], mode_lib->vba.MetaRowByte[k], mode_lib->vba.PixelPTEBytesPerRow[k], mode_lib->vba.PrefetchSourceLinesY[k], mode_lib->vba.SwathWidthY[k], mode_lib->vba.BytePerPixelDETY[k], mode_lib->vba.VInitPreFillY[k], mode_lib->vba.MaxNumSwathY[k], mode_lib->vba.PrefetchSourceLinesC[k], mode_lib->vba.BytePerPixelDETC[k], mode_lib->vba.VInitPreFillC[k], mode_lib->vba.MaxNumSwathC[k], mode_lib->vba.SwathHeightY[k], mode_lib->vba.SwathHeightC[k], TWait, mode_lib->vba.XFCEnabled[k], mode_lib->vba.XFCRemoteSurfaceFlipDelay, mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, mode_lib->vba.DSTXAfterScaler[k], mode_lib->vba.DSTYAfterScaler[k], &mode_lib->vba.DestinationLinesForPrefetch[k], &mode_lib->vba.PrefetchBandwidth[k], &mode_lib->vba.DestinationLinesToRequestVMInVBlank[k], &mode_lib->vba.DestinationLinesToRequestRowInVBlank[k], &mode_lib->vba.VRatioPrefetchY[k], &mode_lib->vba.VRatioPrefetchC[k], &mode_lib->vba.RequiredPrefetchPixDataBWLuma[k], &mode_lib->vba.Tno_bw[k], &mode_lib->vba.VUpdateOffsetPix[k], &mode_lib->vba.VUpdateWidthPix[k], &mode_lib->vba.VReadyOffsetPix[k]); if (mode_lib->vba.BlendingAndTiming[k] == k) { 
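			/* This plane owns its timing: cap the requested VStartupLines
			 * at the per-plane maximum; a non-zero
			 * VStartupRequiredWhenNotEnoughTimeForDynamicMetadata overrides
			 * that choice.
			 */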
mode_lib->vba.VStartup[k] = dml_min( mode_lib->vba.VStartupLines, mode_lib->vba.MaxVStartupLines[k]); if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata != 0) { mode_lib->vba.VStartup[k] = mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata; } } else { mode_lib->vba.VStartup[k] = dml_min( mode_lib->vba.VStartupLines, mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]); } } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0) mode_lib->vba.prefetch_vm_bw[k] = 0; else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) { mode_lib->vba.prefetch_vm_bw[k] = (double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k] / (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); } else { mode_lib->vba.prefetch_vm_bw[k] = 0; prefetch_vm_bw_valid = false; } if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k] == 0) mode_lib->vba.prefetch_row_bw[k] = 0; else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) { mode_lib->vba.prefetch_row_bw[k] = (double) (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]) / (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); } else { mode_lib->vba.prefetch_row_bw[k] = 0; prefetch_row_bw_valid = false; } MaxTotalRDBandwidth = MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k] + dml_max( mode_lib->vba.prefetch_vm_bw[k], dml_max( mode_lib->vba.prefetch_row_bw[k], dml_max( mode_lib->vba.ReadBandwidthPlaneLuma[k] + mode_lib->vba.ReadBandwidthPlaneChroma[k], mode_lib->vba.RequiredPrefetchPixDataBWLuma[k]) + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k])); if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2) DestinationLineTimesForPrefetchLessThan2 = true; if (mode_lib->vba.VRatioPrefetchY[k] > 4 || mode_lib->vba.VRatioPrefetchC[k] > 4) VRatioPrefetchMoreThan4 = true; } if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid && prefetch_row_bw_valid && !VRatioPrefetchMoreThan4 && !DestinationLineTimesForPrefetchLessThan2) mode_lib->vba.PrefetchModeSupported = true; else { mode_lib->vba.PrefetchModeSupported = false; dml_print( "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. 
Results are NOT valid\n"); } if (mode_lib->vba.PrefetchModeSupported == true) { double final_flip_bw[DC__NUM_DPP__MAX]; unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX]; double total_dcn_read_bw_with_flip = 0; mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip - mode_lib->vba.cursor_bw[k] - dml_max( mode_lib->vba.ReadBandwidthPlaneLuma[k] + mode_lib->vba.ReadBandwidthPlaneChroma[k] + mode_lib->vba.qual_row_bw[k], mode_lib->vba.PrefetchBandwidth[k]); } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { ImmediateFlipBytes[k] = 0; if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { ImmediateFlipBytes[k] = mode_lib->vba.PDEAndMetaPTEBytesFrame[k] + mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]; } } mode_lib->vba.TotImmediateFlipBytes = 0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes + ImmediateFlipBytes[k]; } } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { CalculateFlipSchedule( mode_lib, mode_lib->vba.UrgentExtraLatency, mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, mode_lib->vba.BandwidthAvailableForImmediateFlip, mode_lib->vba.TotImmediateFlipBytes, mode_lib->vba.SourcePixelFormat[k], ImmediateFlipBytes[k], mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], mode_lib->vba.VRatio[k], mode_lib->vba.Tno_bw[k], mode_lib->vba.PDEAndMetaPTEBytesFrame[k], mode_lib->vba.MetaRowByte[k], mode_lib->vba.PixelPTEBytesPerRow[k], mode_lib->vba.DCCEnable[k], mode_lib->vba.dpte_row_height[k], mode_lib->vba.meta_row_height[k], mode_lib->vba.qual_row_bw[k], &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k], &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k], &final_flip_bw[k], &mode_lib->vba.ImmediateFlipSupportedForPipe[k]); } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { total_dcn_read_bw_with_flip = total_dcn_read_bw_with_flip + mode_lib->vba.cursor_bw[k] + dml_max( mode_lib->vba.prefetch_vm_bw[k], dml_max( mode_lib->vba.prefetch_row_bw[k], final_flip_bw[k] + dml_max( mode_lib->vba.ReadBandwidthPlaneLuma[k] + mode_lib->vba.ReadBandwidthPlaneChroma[k], mode_lib->vba.RequiredPrefetchPixDataBWLuma[k]))); } mode_lib->vba.ImmediateFlipSupported = true; if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) { mode_lib->vba.ImmediateFlipSupported = false; } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) { mode_lib->vba.ImmediateFlipSupported = false; } } } else { mode_lib->vba.ImmediateFlipSupported = false; } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.ErrorResult[k]) { mode_lib->vba.PrefetchModeSupported = false; dml_print( "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. 
Results are NOT valid\n"); } } mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1; } while (!((mode_lib->vba.PrefetchModeSupported && (!mode_lib->vba.ImmediateFlipSupport || mode_lib->vba.ImmediateFlipSupported)) || mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines)); //Display Pipeline Delivery Time in Prefetch for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.VRatioPrefetchY[k] <= 1) { mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k] / mode_lib->vba.HRatio[k] / mode_lib->vba.PixelClock[k]; } else { mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->vba.SwathWidthY[k] / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] / mode_lib->vba.DPPCLK[k]; } if (mode_lib->vba.BytePerPixelDETC[k] == 0) { mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0; } else { if (mode_lib->vba.VRatioPrefetchC[k] <= 1) { mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k] / mode_lib->vba.HRatio[k] / mode_lib->vba.PixelClock[k]; } else { mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->vba.SwathWidthY[k] / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] / mode_lib->vba.DPPCLK[k]; } } } // Min TTUVBlank for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true; mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true; mode_lib->vba.MinTTUVBlank[k] = dml_max( mode_lib->vba.DRAMClockChangeWatermark, dml_max( mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark)); } else if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 1) { mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false; mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true; mode_lib->vba.MinTTUVBlank[k] = dml_max( mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark); } else { mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false; mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false; mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark; } if (!mode_lib->vba.DynamicMetadataEnable[k]) mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc + mode_lib->vba.MinTTUVBlank[k]; } // DCC Configuration mode_lib->vba.ActiveDPPs = 0; // NB P-State/DRAM Clock Change Support for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k]; } for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { double EffectiveLBLatencyHidingY; double EffectiveLBLatencyHidingC; double DPPOutputBufferLinesY; double DPPOutputBufferLinesC; double DPPOPPBufferingY; double MaxDETBufferingTimeY; double ActiveDRAMClockChangeLatencyMarginY; mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min( mode_lib->vba.MaxLineBufferLines, (unsigned int) dml_floor( (double) mode_lib->vba.LineBufferSize / mode_lib->vba.LBBitPerPixel[k] / (mode_lib->vba.SwathWidthY[k] / dml_max( mode_lib->vba.HRatio[k], 1.0)), 1)) - (mode_lib->vba.vtaps[k] - 1); mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min( mode_lib->vba.MaxLineBufferLines, (unsigned int) dml_floor( (double) mode_lib->vba.LineBufferSize / mode_lib->vba.LBBitPerPixel[k] / (mode_lib->vba.SwathWidthY[k] / 2.0 / dml_max( mode_lib->vba.HRatio[k] / 2, 1.0)), 1)) - (mode_lib->vba.VTAPsChroma[k] - 1); 
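		/* Convert the line-buffer latency hiding from source lines into
		 * time (line time / VRatio), then combine it with DPP/OPP output
		 * buffering and the remaining DET buffering time to form the
		 * active-state DRAM clock-change latency margin for this plane.
		 */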
EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / mode_lib->vba.VRatio[k] * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC / (mode_lib->vba.VRatio[k] / 2) * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) { DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels / mode_lib->vba.SwathWidthY[k]; } else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) { DPPOutputBufferLinesY = 0.5; } else { DPPOutputBufferLinesY = 1; } if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) { DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels / (mode_lib->vba.SwathWidthY[k] / 2); } else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) { DPPOutputBufferLinesC = 0.5; } else { DPPOutputBufferLinesC = 1; } DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines); MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k] + (mode_lib->vba.LinesInDETY[k] - mode_lib->vba.LinesInDETYRoundedDownToSwath[k]) / mode_lib->vba.SwathHeightY[k] * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY + MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark; if (mode_lib->vba.ActiveDPPs > 1) { ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1)) * mode_lib->vba.SwathHeightY[k] * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); } if (mode_lib->vba.BytePerPixelDETC[k] > 0) { double DPPOPPBufferingC = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * (DPPOutputBufferLinesC + mode_lib->vba.OPPOutputBufferLines); double MaxDETBufferingTimeC = mode_lib->vba.FullDETBufferingTimeC[k] + (mode_lib->vba.LinesInDETC[k] - mode_lib->vba.LinesInDETCRoundedDownToSwath[k]) / mode_lib->vba.SwathHeightC[k] * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC + EffectiveLBLatencyHidingC + MaxDETBufferingTimeC - mode_lib->vba.DRAMClockChangeWatermark; if (mode_lib->vba.ActiveDPPs > 1) { ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1)) * mode_lib->vba.SwathHeightC[k] * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); } mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min( ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC); } else { mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY; } if (mode_lib->vba.WritebackEnable[k]) { double WritebackDRAMClockChangeLatencyMargin; if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) { WritebackDRAMClockChangeLatencyMargin = (double) (mode_lib->vba.WritebackInterfaceLumaBufferSize + mode_lib->vba.WritebackInterfaceChromaBufferSize) / (mode_lib->vba.WritebackDestinationWidth[k] * mode_lib->vba.WritebackDestinationHeight[k] / (mode_lib->vba.WritebackSourceHeight[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * 4) - mode_lib->vba.WritebackDRAMClockChangeWatermark; } else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) { WritebackDRAMClockChangeLatencyMargin = dml_min( (double) mode_lib->vba.WritebackInterfaceLumaBufferSize * 8.0 / 10, 2.0 * 
mode_lib->vba.WritebackInterfaceChromaBufferSize * 8 / 10) / (mode_lib->vba.WritebackDestinationWidth[k] * mode_lib->vba.WritebackDestinationHeight[k] / (mode_lib->vba.WritebackSourceHeight[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])) - mode_lib->vba.WritebackDRAMClockChangeWatermark; } else { WritebackDRAMClockChangeLatencyMargin = dml_min( (double) mode_lib->vba.WritebackInterfaceLumaBufferSize, 2.0 * mode_lib->vba.WritebackInterfaceChromaBufferSize) / (mode_lib->vba.WritebackDestinationWidth[k] * mode_lib->vba.WritebackDestinationHeight[k] / (mode_lib->vba.WritebackSourceHeight[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])) - mode_lib->vba.WritebackDRAMClockChangeWatermark; } mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min( mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin); } } { float SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999; int PlaneWithMinActiveDRAMClockChangeMargin = -1; mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < mode_lib->vba.MinActiveDRAMClockChangeMargin) { mode_lib->vba.MinActiveDRAMClockChangeMargin = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]; if (mode_lib->vba.BlendingAndTiming[k] == k) { PlaneWithMinActiveDRAMClockChangeMargin = k; } else { for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[k] == j) { PlaneWithMinActiveDRAMClockChangeMargin = j; } } } } } mode_lib->vba.MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (mode_lib->vba.BlendingAndTiming[k] == k)) && !(mode_lib->vba.BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) { SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]; } } if (mode_lib->vba.DRAMClockChangeSupportsVActive && mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { mode_lib->vba.DRAMClockChangeWatermark += 25; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { if (mode_lib->vba.DRAMClockChangeWatermark > dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark)) mode_lib->vba.MinTTUVBlank[k] += 25; } } mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else if (mode_lib->vba.DummyPStateCheck && mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if ((mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1 || (SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0 && mode_lib->vba.AllowDramClockChangeOneDisplayVactive)) && mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) { mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported; } } } else { mode_lib->vba.DRAMClockChangeSupport[0][0] = 
dm_dram_clock_change_unsupported; } } } for (k = 0; k <= mode_lib->vba.soc.num_states; k++) for (j = 0; j < 2; j++) mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0]; //XFC Parameters: for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.XFCEnabled[k] == true) { double TWait; mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset; mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth; mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset; TWait = CalculateTWait( mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb], mode_lib->vba.DRAMClockChangeLatency, mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.SREnterPlusExitTime); mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.SwathWidthY[k], dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1), mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], mode_lib->vba.XFCTSlvVupdateOffset, mode_lib->vba.XFCTSlvVupdateWidth, mode_lib->vba.XFCTSlvVreadyOffset, mode_lib->vba.XFCXBUFLatencyTolerance, mode_lib->vba.XFCFillBWOverhead, mode_lib->vba.XFCSlvChunkSize, mode_lib->vba.XFCBusTransportTime, mode_lib->vba.TCalc, TWait, &mode_lib->vba.SrcActiveDrainRate, &mode_lib->vba.TInitXFill, &mode_lib->vba.TslvChk); mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = dml_floor( mode_lib->vba.XFCRemoteSurfaceFlipDelay / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1); mode_lib->vba.XFCTransferDelay[k] = dml_ceil( mode_lib->vba.XFCBusTransportTime / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1); mode_lib->vba.XFCPrechargeDelay[k] = dml_ceil( (mode_lib->vba.XFCBusTransportTime + mode_lib->vba.TInitXFill + mode_lib->vba.TslvChk) / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1); mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance * mode_lib->vba.SrcActiveDrainRate; mode_lib->vba.FinalFillMargin = (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]) * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k] * mode_lib->vba.SrcActiveDrainRate + mode_lib->vba.XFCFillConstant; mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay * mode_lib->vba.SrcActiveDrainRate + mode_lib->vba.FinalFillMargin; mode_lib->vba.RemainingFillLevel = dml_max( 0.0, mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel); mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel / (mode_lib->vba.SrcActiveDrainRate * mode_lib->vba.XFCFillBWOverhead / 100); mode_lib->vba.XFCPrefetchMargin[k] = mode_lib->vba.XFCRemoteSurfaceFlipDelay + mode_lib->vba.TFinalxFill + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]) * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]; } else { mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0; mode_lib->vba.XFCSlaveVupdateWidth[k] = 0; mode_lib->vba.XFCSlaveVReadyOffset[k] = 0; mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0; mode_lib->vba.XFCPrechargeDelay[k] = 0; mode_lib->vba.XFCTransferDelay[k] = 0; mode_lib->vba.XFCPrefetchMargin[k] = 0; } } { unsigned int VStartupMargin = 0; bool FirstMainPlane = true; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (mode_lib->vba.BlendingAndTiming[k] == k) { unsigned int Margin = (mode_lib->vba.MaxVStartupLines[k] - mode_lib->vba.VStartup[k]) * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]; if 
(FirstMainPlane) { VStartupMargin = Margin; FirstMainPlane = false; } else VStartupMargin = dml_min(VStartupMargin, Margin); } if (mode_lib->vba.UseMaximumVStartup) { if (mode_lib->vba.VTotal_Max[k] == mode_lib->vba.VTotal[k]) { //only use max vstart if it is not drr or lateflip. mode_lib->vba.VStartup[k] = mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]; } } } } } static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) { double BytePerPixDETY; double BytePerPixDETC; double Read256BytesBlockHeightY; double Read256BytesBlockHeightC; double Read256BytesBlockWidthY; double Read256BytesBlockWidthC; double MaximumSwathHeightY; double MaximumSwathHeightC; double MinimumSwathHeightY; double MinimumSwathHeightC; double SwathWidth; double SwathWidthGranularityY; double SwathWidthGranularityC; double RoundedUpMaxSwathSizeBytesY; double RoundedUpMaxSwathSizeBytesC; unsigned int j, k; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { bool MainPlaneDoesODMCombine = false; if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) { BytePerPixDETY = 8; BytePerPixDETC = 0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) { BytePerPixDETY = 4; BytePerPixDETC = 0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) { BytePerPixDETY = 2; BytePerPixDETC = 0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) { BytePerPixDETY = 1; BytePerPixDETC = 0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) { BytePerPixDETY = 1; BytePerPixDETC = 2; } else { BytePerPixDETY = 4.0 / 3.0; BytePerPixDETC = 8.0 / 3.0; } if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] == dm_444_32 || mode_lib->vba.SourcePixelFormat[k] == dm_444_16 || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) { if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { Read256BytesBlockHeightY = 1; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) { Read256BytesBlockHeightY = 4; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32 || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) { Read256BytesBlockHeightY = 8; } else { Read256BytesBlockHeightY = 16; } Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1) / Read256BytesBlockHeightY; Read256BytesBlockHeightC = 0; Read256BytesBlockWidthC = 0; } else { if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { Read256BytesBlockHeightY = 1; Read256BytesBlockHeightC = 1; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) { Read256BytesBlockHeightY = 16; Read256BytesBlockHeightC = 8; } else { Read256BytesBlockHeightY = 8; Read256BytesBlockHeightC = 8; } Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1) / Read256BytesBlockHeightY; Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2) / Read256BytesBlockHeightC; } if (mode_lib->vba.SourceScan[k] == dm_horz) { MaximumSwathHeightY = Read256BytesBlockHeightY; MaximumSwathHeightC = Read256BytesBlockHeightC; } else { MaximumSwathHeightY = Read256BytesBlockWidthY; MaximumSwathHeightC = Read256BytesBlockWidthC; } if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] == dm_444_32 || mode_lib->vba.SourcePixelFormat[k] == dm_444_16 || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) { if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64 && (mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s_x || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s || mode_lib->vba.SurfaceTiling[k] == 
dm_sw_64kb_s_t || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_x || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_s || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_s_x) && mode_lib->vba.SourceScan[k] == dm_horz)) { MinimumSwathHeightY = MaximumSwathHeightY; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8 && mode_lib->vba.SourceScan[k] != dm_horz) { MinimumSwathHeightY = MaximumSwathHeightY; } else { MinimumSwathHeightY = MaximumSwathHeightY / 2.0; } MinimumSwathHeightC = MaximumSwathHeightC; } else { if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { MinimumSwathHeightY = MaximumSwathHeightY; MinimumSwathHeightC = MaximumSwathHeightC; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8 && mode_lib->vba.SourceScan[k] == dm_horz) { MinimumSwathHeightY = MaximumSwathHeightY / 2.0; MinimumSwathHeightC = MaximumSwathHeightC; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10 && mode_lib->vba.SourceScan[k] == dm_horz) { MinimumSwathHeightC = MaximumSwathHeightC / 2.0; MinimumSwathHeightY = MaximumSwathHeightY; } else { MinimumSwathHeightY = MaximumSwathHeightY; MinimumSwathHeightC = MaximumSwathHeightC; } } if (mode_lib->vba.SourceScan[k] == dm_horz) { SwathWidth = mode_lib->vba.ViewportWidth[k]; } else { SwathWidth = mode_lib->vba.ViewportHeight[k]; } if (mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { if (mode_lib->vba.BlendingAndTiming[k] == j && mode_lib->vba.ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) { MainPlaneDoesODMCombine = true; } } if (MainPlaneDoesODMCombine == true) { SwathWidth = dml_min( SwathWidth, mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]); } else { if (mode_lib->vba.DPPPerPlane[k] == 0) SwathWidth = 0; else SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k]; } SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY; RoundedUpMaxSwathSizeBytesY = (dml_ceil( (double) (SwathWidth - 1), SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY * MaximumSwathHeightY; if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) { RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256) + 256; } if (MaximumSwathHeightC > 0) { SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2) / MaximumSwathHeightC; RoundedUpMaxSwathSizeBytesC = (dml_ceil( (double) (SwathWidth / 2.0 - 1), SwathWidthGranularityC) + SwathWidthGranularityC) * BytePerPixDETC * MaximumSwathHeightC; if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) { RoundedUpMaxSwathSizeBytesC = dml_ceil( RoundedUpMaxSwathSizeBytesC, 256) + 256; } } else RoundedUpMaxSwathSizeBytesC = 0.0; if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC <= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) { mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY; mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC; } else { mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY; mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC; } if (mode_lib->vba.SwathHeightC[k] == 0) { mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024; mode_lib->vba.DETBufferSizeC[k] = 0; } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) { mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2; mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2; } else { mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 * 2 / 3; 
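			/*
			 * Reader note (added comment): when the luma swath is
			 * taller than the chroma swath, the DET is split 2/3
			 * luma (assigned above) and 1/3 chroma (assigned just
			 * below); the other branches give luma the whole DET
			 * when there is no chroma plane, or a 50/50 split
			 * otherwise.
			 */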
mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 3; } } } static double CalculateTWait( unsigned int PrefetchMode, double DRAMClockChangeLatency, double UrgentLatencyPixelDataOnly, double SREnterPlusExitTime) { if (PrefetchMode == 0) { return dml_max( DRAMClockChangeLatency + UrgentLatencyPixelDataOnly, dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly)); } else if (PrefetchMode == 1) { return dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly); } else { return UrgentLatencyPixelDataOnly; } } static double CalculateRemoteSurfaceFlipDelay( struct display_mode_lib *mode_lib, double VRatio, double SwathWidth, double Bpp, double LineTime, double XFCTSlvVupdateOffset, double XFCTSlvVupdateWidth, double XFCTSlvVreadyOffset, double XFCXBUFLatencyTolerance, double XFCFillBWOverhead, double XFCSlvChunkSize, double XFCBusTransportTime, double TCalc, double TWait, double *SrcActiveDrainRate, double *TInitXFill, double *TslvChk) { double TSlvSetup, AvgfillRate, result; *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime; TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset; *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100); AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100); *TslvChk = XFCSlvChunkSize / AvgfillRate; dml_print( "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n", *SrcActiveDrainRate); dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup); dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill); dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate); dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk); result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result); return result; } static double CalculateWriteBackDelay( enum source_format_class WritebackPixelFormat, double WritebackHRatio, double WritebackVRatio, unsigned int WritebackLumaHTaps, unsigned int WritebackLumaVTaps, unsigned int WritebackChromaHTaps, unsigned int WritebackChromaVTaps, unsigned int WritebackDestinationWidth) { double CalculateWriteBackDelay = dml_max( dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio, WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1) * dml_ceil( WritebackDestinationWidth / 4.0, 1) + dml_ceil(1.0 / WritebackVRatio, 1) * (dml_ceil( WritebackLumaVTaps / 4.0, 1) + 4)); if (WritebackPixelFormat != dm_444_32) { CalculateWriteBackDelay = dml_max( CalculateWriteBackDelay, dml_max( dml_ceil( WritebackChromaHTaps / 2.0, 1) / (2 * WritebackHRatio), WritebackChromaVTaps * dml_ceil( 1 / (2 * WritebackVRatio), 1) * dml_ceil( WritebackDestinationWidth / 2.0 / 2.0, 1) + dml_ceil( 1 / (2 * WritebackVRatio), 1) * (dml_ceil( WritebackChromaVTaps / 4.0, 1) + 4))); } return CalculateWriteBackDelay; } static void CalculateActiveRowBandwidth( bool GPUVMEnable, enum source_format_class SourcePixelFormat, double VRatio, bool DCCEnable, double LineTime, unsigned int MetaRowByteLuma, unsigned int MetaRowByteChroma, unsigned int meta_row_height_luma, unsigned int meta_row_height_chroma, unsigned int PixelPTEBytesPerRowLuma, unsigned int PixelPTEBytesPerRowChroma, unsigned int dpte_row_height_luma, unsigned int dpte_row_height_chroma, double *meta_row_bw, double *dpte_row_bw, double *qual_row_bw) { if (DCCEnable != true) { 
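		/*
		 * Added comment (descriptive only): with DCC disabled there
		 * are no compression metadata rows to fetch, so the meta row
		 * bandwidth is zero.  The 4:2:0 branch below adds the chroma
		 * meta rows at half the luma vertical ratio.
		 */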
*meta_row_bw = 0; } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) { *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime) + VRatio / 2 * MetaRowByteChroma / (meta_row_height_chroma * LineTime); } else { *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime); } if (GPUVMEnable != true) { *dpte_row_bw = 0; } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) { *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime) + VRatio / 2 * PixelPTEBytesPerRowChroma / (dpte_row_height_chroma * LineTime); } else { *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime); } if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) { *qual_row_bw = *meta_row_bw + *dpte_row_bw; } else { *qual_row_bw = 0; } } static void CalculateFlipSchedule( struct display_mode_lib *mode_lib, double UrgentExtraLatency, double UrgentLatencyPixelDataOnly, unsigned int GPUVMMaxPageTableLevels, bool GPUVMEnable, double BandwidthAvailableForImmediateFlip, unsigned int TotImmediateFlipBytes, enum source_format_class SourcePixelFormat, unsigned int ImmediateFlipBytes, double LineTime, double VRatio, double Tno_bw, double PDEAndMetaPTEBytesFrame, unsigned int MetaRowByte, unsigned int PixelPTEBytesPerRow, bool DCCEnable, unsigned int dpte_row_height, unsigned int meta_row_height, double qual_row_bw, double *DestinationLinesToRequestVMInImmediateFlip, double *DestinationLinesToRequestRowInImmediateFlip, double *final_flip_bw, bool *ImmediateFlipSupportedForPipe) { double min_row_time = 0.0; if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) { *DestinationLinesToRequestVMInImmediateFlip = 0.0; *DestinationLinesToRequestRowInImmediateFlip = 0.0; *final_flip_bw = qual_row_bw; *ImmediateFlipSupportedForPipe = true; } else { double TimeForFetchingMetaPTEImmediateFlip; double TimeForFetchingRowInVBlankImmediateFlip; if (GPUVMEnable == true) { mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip * ImmediateFlipBytes / TotImmediateFlipBytes; TimeForFetchingMetaPTEImmediateFlip = dml_max( Tno_bw + PDEAndMetaPTEBytesFrame / mode_lib->vba.ImmediateFlipBW[0], dml_max( UrgentExtraLatency + UrgentLatencyPixelDataOnly * (GPUVMMaxPageTableLevels - 1), LineTime / 4.0)); } else { TimeForFetchingMetaPTEImmediateFlip = 0; } *DestinationLinesToRequestVMInImmediateFlip = dml_floor( 4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125), 1) / 4.0; if ((GPUVMEnable == true || DCCEnable == true)) { mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip * ImmediateFlipBytes / TotImmediateFlipBytes; TimeForFetchingRowInVBlankImmediateFlip = dml_max( (MetaRowByte + PixelPTEBytesPerRow) / mode_lib->vba.ImmediateFlipBW[0], dml_max(UrgentLatencyPixelDataOnly, LineTime / 4.0)); } else { TimeForFetchingRowInVBlankImmediateFlip = 0; } *DestinationLinesToRequestRowInImmediateFlip = dml_floor( 4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125), 1) / 4.0; if (GPUVMEnable == true) { *final_flip_bw = dml_max( PDEAndMetaPTEBytesFrame / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowByte + PixelPTEBytesPerRow) / (TimeForFetchingRowInVBlankImmediateFlip * LineTime)); } else if (MetaRowByte + PixelPTEBytesPerRow > 0) { *final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow) / (TimeForFetchingRowInVBlankImmediateFlip * LineTime); } else { *final_flip_bw = 0; } if (GPUVMEnable && !DCCEnable) min_row_time = dpte_row_height * 
LineTime / VRatio; else if (!GPUVMEnable && DCCEnable) min_row_time = meta_row_height * LineTime / VRatio; else min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime / VRatio; if (*DestinationLinesToRequestVMInImmediateFlip >= 8 || *DestinationLinesToRequestRowInImmediateFlip >= 16 || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) *ImmediateFlipSupportedForPipe = false; else *ImmediateFlipSupportedForPipe = true; } } static unsigned int TruncToValidBPP( double DecimalBPP, double DesiredBPP, bool DSCEnabled, enum output_encoder_class Output, enum output_format_class Format, unsigned int DSCInputBitPerComponent) { if (Output == dm_hdmi) { if (Format == dm_420) { if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18)) return 18; else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15)) return 15; else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12)) return 12; else return BPP_INVALID; } else if (Format == dm_444) { if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36)) return 36; else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30)) return 30; else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24)) return 24; else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18)) return 18; else return BPP_INVALID; } else { if (DecimalBPP / 1.5 >= 24 && (DesiredBPP == 0 || DesiredBPP == 24)) return 24; else if (DecimalBPP / 1.5 >= 20 && (DesiredBPP == 0 || DesiredBPP == 20)) return 20; else if (DecimalBPP / 1.5 >= 16 && (DesiredBPP == 0 || DesiredBPP == 16)) return 16; else return BPP_INVALID; } } else { if (DSCEnabled) { if (Format == dm_420) { if (DesiredBPP == 0) { if (DecimalBPP < 6) return BPP_INVALID; else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16.0) return 1.5 * DSCInputBitPerComponent - 1.0 / 16.0; else return dml_floor(16 * DecimalBPP, 1) / 16.0; } else { if (DecimalBPP < 6 || DesiredBPP < 6 || DesiredBPP > 1.5 * DSCInputBitPerComponent - 1.0 / 16.0 || DecimalBPP < DesiredBPP) { return BPP_INVALID; } else { return DesiredBPP; } } } else if (Format == dm_n422) { if (DesiredBPP == 0) { if (DecimalBPP < 7) return BPP_INVALID; else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16.0) return 2 * DSCInputBitPerComponent - 1.0 / 16.0; else return dml_floor(16 * DecimalBPP, 1) / 16.0; } else { if (DecimalBPP < 7 || DesiredBPP < 7 || DesiredBPP > 2 * DSCInputBitPerComponent - 1.0 / 16.0 || DecimalBPP < DesiredBPP) { return BPP_INVALID; } else { return DesiredBPP; } } } else { if (DesiredBPP == 0) { if (DecimalBPP < 8) return BPP_INVALID; else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16.0) return 3 * DSCInputBitPerComponent - 1.0 / 16.0; else return dml_floor(16 * DecimalBPP, 1) / 16.0; } else { if (DecimalBPP < 8 || DesiredBPP < 8 || DesiredBPP > 3 * DSCInputBitPerComponent - 1.0 / 16.0 || DecimalBPP < DesiredBPP) { return BPP_INVALID; } else { return DesiredBPP; } } } } else if (Format == dm_420) { if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18)) return 18; else if (DecimalBPP >= 15 && (DesiredBPP == 0 || DesiredBPP == 15)) return 15; else if (DecimalBPP >= 12 && (DesiredBPP == 0 || DesiredBPP == 12)) return 12; else return BPP_INVALID; } else if (Format == dm_s422 || Format == dm_n422) { if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24)) return 24; else if (DecimalBPP >= 20 && (DesiredBPP == 0 || DesiredBPP == 20)) return 20; else if (DecimalBPP >= 16 && (DesiredBPP == 0 || DesiredBPP == 16)) return 16; 
else return BPP_INVALID; } else { if (DecimalBPP >= 36 && (DesiredBPP == 0 || DesiredBPP == 36)) return 36; else if (DecimalBPP >= 30 && (DesiredBPP == 0 || DesiredBPP == 30)) return 30; else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24)) return 24; else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18)) return 18; else return BPP_INVALID; } } } void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) { struct vba_vars_st *locals = &mode_lib->vba; int i; unsigned int j, k, m; /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ /*Scale Ratio, taps Support Check*/ mode_lib->vba.ScaleRatioAndTapsSupport = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.ScalerEnabled[k] == false && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) || mode_lib->vba.HRatio[k] != 1.0 || mode_lib->vba.htaps[k] != 1.0 || mode_lib->vba.VRatio[k] != 1.0 || mode_lib->vba.vtaps[k] != 1.0)) { mode_lib->vba.ScaleRatioAndTapsSupport = false; } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0 || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0 || (mode_lib->vba.htaps[k] > 1.0 && (mode_lib->vba.htaps[k] % 2) == 1) || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k] || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k] || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8 && (mode_lib->vba.HRatio[k] / 2.0 > mode_lib->vba.HTAPsChroma[k] || mode_lib->vba.VRatio[k] / 2.0 > mode_lib->vba.VTAPsChroma[k]))) { mode_lib->vba.ScaleRatioAndTapsSupport = false; } } /*Source Format, Pixel Format and Scan Support Check*/ mode_lib->vba.SourceFormatPixelAndScanSupport = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear && mode_lib->vba.SourceScan[k] != dm_horz) || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x) && mode_lib->vba.SourcePixelFormat[k] != dm_444_64) || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8 || mode_lib->vba.SourcePixelFormat[k] == dm_420_8 || mode_lib->vba.SourcePixelFormat[k] == dm_420_10)) || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl || mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_l_vp) && !((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] == dm_444_32) && mode_lib->vba.SourceScan[k] == dm_horz && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp == true && mode_lib->vba.DCCEnable[k] == false)) || (mode_lib->vba.DCCEnable[k] == true && (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear || mode_lib->vba.SourcePixelFormat[k] == dm_420_8 || mode_lib->vba.SourcePixelFormat[k] == dm_420_10)))) 
{ mode_lib->vba.SourceFormatPixelAndScanSupport = false; } } /*Bandwidth Support Check*/ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) { locals->BytePerPixelInDETY[k] = 8.0; locals->BytePerPixelInDETC[k] = 0.0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) { locals->BytePerPixelInDETY[k] = 4.0; locals->BytePerPixelInDETC[k] = 0.0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) { locals->BytePerPixelInDETY[k] = 2.0; locals->BytePerPixelInDETC[k] = 0.0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) { locals->BytePerPixelInDETY[k] = 1.0; locals->BytePerPixelInDETC[k] = 0.0; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) { locals->BytePerPixelInDETY[k] = 1.0; locals->BytePerPixelInDETC[k] = 2.0; } else { locals->BytePerPixelInDETY[k] = 4.0 / 3; locals->BytePerPixelInDETC[k] = 8.0 / 3; } if (mode_lib->vba.SourceScan[k] == dm_horz) { locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k]; } else { locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k]; } } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { locals->ReadBandwidthLuma[k] = locals->SwathWidthYSingleDPP[k] * dml_ceil(locals->BytePerPixelInDETY[k], 1.0) / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k]; locals->ReadBandwidthChroma[k] = locals->SwathWidthYSingleDPP[k] / 2 * dml_ceil(locals->BytePerPixelInDETC[k], 2.0) / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k] / 2.0; locals->ReadBandwidth[k] = locals->ReadBandwidthLuma[k] + locals->ReadBandwidthChroma[k]; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.WritebackEnable[k] == true && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) { locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k] * mode_lib->vba.WritebackDestinationHeight[k] / (mode_lib->vba.WritebackSourceHeight[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * 4.0; } else if (mode_lib->vba.WritebackEnable[k] == true && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) { locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k] * mode_lib->vba.WritebackDestinationHeight[k] / (mode_lib->vba.WritebackSourceHeight[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * 3.0; } else if (mode_lib->vba.WritebackEnable[k] == true) { locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k] * mode_lib->vba.WritebackDestinationHeight[k] / (mode_lib->vba.WritebackSourceHeight[k] * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * 1.5; } else { locals->WriteBandwidth[k] = 0.0; } } mode_lib->vba.DCCEnabledInAnyPlane = false; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.DCCEnable[k] == true) { mode_lib->vba.DCCEnabledInAnyPlane = true; } } mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly; for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->FabricAndDRAMBandwidthPerState[i] = dml_min( mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels * mode_lib->vba.DRAMChannelWidth, mode_lib->vba.FabricClockPerState[i] * mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000; locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth * locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000) * 
locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; locals->ReturnBWPerState[i][0] = locals->ReturnBWToDCNPerState; if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) + locals->UrgentLatency))); } locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2)); } locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth * locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000); if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) + locals->UrgentLatency))); } locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { locals->ReturnBWPerState[i][0] = dml_min(locals->ReturnBWPerState[i][0], 4 * locals->ReturnBWToDCNPerState * (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2)); } } /*Writeback Latency support check*/ mode_lib->vba.WritebackLatencySupport = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.WritebackEnable[k] == true) { if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) { if (locals->WriteBandwidth[k] > (mode_lib->vba.WritebackInterfaceLumaBufferSize + mode_lib->vba.WritebackInterfaceChromaBufferSize) / mode_lib->vba.WritebackLatency) { mode_lib->vba.WritebackLatencySupport = false; } } else { if (locals->WriteBandwidth[k] > 1.5 * dml_min( mode_lib->vba.WritebackInterfaceLumaBufferSize, 2.0 * mode_lib->vba.WritebackInterfaceChromaBufferSize) / mode_lib->vba.WritebackLatency) { mode_lib->vba.WritebackLatencySupport = false; } } } } /*Re-ordering Buffer Support Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] = 
(mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i] + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i][0]; if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i][0] > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { locals->ROBSupport[i][0] = true; } else { locals->ROBSupport[i][0] = false; } } /*Writeback Mode Support Check*/ mode_lib->vba.TotalNumberOfActiveWriteback = 0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.WritebackEnable[k] == true) { if (mode_lib->vba.ActiveWritebacksPerPlane[k] == 0) mode_lib->vba.ActiveWritebacksPerPlane[k] = 1; mode_lib->vba.TotalNumberOfActiveWriteback = mode_lib->vba.TotalNumberOfActiveWriteback + mode_lib->vba.ActiveWritebacksPerPlane[k]; } } mode_lib->vba.WritebackModeSupport = true; if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) { mode_lib->vba.WritebackModeSupport = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.WritebackEnable[k] == true && mode_lib->vba.Writeback10bpc420Supported != true && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) { mode_lib->vba.WritebackModeSupport = false; } } /*Writeback Scale Ratio and Taps Support Check*/ mode_lib->vba.WritebackScaleRatioAndTapsSupport = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.WritebackEnable[k] == true) { if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false && (mode_lib->vba.WritebackHRatio[k] != 1.0 || mode_lib->vba.WritebackVRatio[k] != 1.0)) { mode_lib->vba.WritebackScaleRatioAndTapsSupport = false; } if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio || mode_lib->vba.WritebackVRatio[k] > mode_lib->vba.WritebackMaxVSCLRatio || mode_lib->vba.WritebackHRatio[k] < mode_lib->vba.WritebackMinHSCLRatio || mode_lib->vba.WritebackVRatio[k] < mode_lib->vba.WritebackMinVSCLRatio || mode_lib->vba.WritebackLumaHTaps[k] > mode_lib->vba.WritebackMaxHSCLTaps || mode_lib->vba.WritebackLumaVTaps[k] > mode_lib->vba.WritebackMaxVSCLTaps || mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackLumaHTaps[k] || mode_lib->vba.WritebackVRatio[k] > mode_lib->vba.WritebackLumaVTaps[k] || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0 && ((mode_lib->vba.WritebackLumaHTaps[k] % 2) == 1)) || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32 && (mode_lib->vba.WritebackChromaHTaps[k] > mode_lib->vba.WritebackMaxHSCLTaps || mode_lib->vba.WritebackChromaVTaps[k] > mode_lib->vba.WritebackMaxVSCLTaps || 2.0 * mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackChromaHTaps[k] || 2.0 * mode_lib->vba.WritebackVRatio[k] > mode_lib->vba.WritebackChromaVTaps[k] || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0 && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) { mode_lib->vba.WritebackScaleRatioAndTapsSupport = false; } if (mode_lib->vba.WritebackVRatio[k] < 1.0) { mode_lib->vba.WritebackLumaVExtra = dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0); } else { mode_lib->vba.WritebackLumaVExtra = -1; } if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32 && mode_lib->vba.WritebackLumaVTaps[k] > (mode_lib->vba.WritebackLineBufferLumaBufferSize + mode_lib->vba.WritebackLineBufferChromaBufferSize) / 3.0 / mode_lib->vba.WritebackDestinationWidth[k] - mode_lib->vba.WritebackLumaVExtra) || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8 && 
mode_lib->vba.WritebackLumaVTaps[k] > mode_lib->vba.WritebackLineBufferLumaBufferSize * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k] - mode_lib->vba.WritebackLumaVExtra) || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10 && mode_lib->vba.WritebackLumaVTaps[k] > mode_lib->vba.WritebackLineBufferLumaBufferSize * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k] - mode_lib->vba.WritebackLumaVExtra)) { mode_lib->vba.WritebackScaleRatioAndTapsSupport = false; } if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) { mode_lib->vba.WritebackChromaVExtra = 0.0; } else { mode_lib->vba.WritebackChromaVExtra = -1; } if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8 && mode_lib->vba.WritebackChromaVTaps[k] > mode_lib->vba.WritebackLineBufferChromaBufferSize * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k] - mode_lib->vba.WritebackChromaVExtra) || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10 && mode_lib->vba.WritebackChromaVTaps[k] > mode_lib->vba.WritebackLineBufferChromaBufferSize * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k] - mode_lib->vba.WritebackChromaVExtra)) { mode_lib->vba.WritebackScaleRatioAndTapsSupport = false; } } } /*Maximum DISPCLK/DPPCLK Support check*/ mode_lib->vba.WritebackRequiredDISPCLK = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.WritebackEnable[k] == true) { mode_lib->vba.WritebackRequiredDISPCLK = dml_max( mode_lib->vba.WritebackRequiredDISPCLK, CalculateWriteBackDISPCLK( mode_lib->vba.WritebackPixelFormat[k], mode_lib->vba.PixelClock[k], mode_lib->vba.WritebackHRatio[k], mode_lib->vba.WritebackVRatio[k], mode_lib->vba.WritebackLumaHTaps[k], mode_lib->vba.WritebackLumaVTaps[k], mode_lib->vba.WritebackChromaHTaps[k], mode_lib->vba.WritebackChromaVTaps[k], mode_lib->vba.WritebackDestinationWidth[k], mode_lib->vba.HTotal[k], mode_lib->vba.WritebackChromaLineBufferWidth)); } } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.HRatio[k] > 1.0) { locals->PSCL_FACTOR[k] = dml_min( mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput * mode_lib->vba.HRatio[k] / dml_ceil( mode_lib->vba.htaps[k] / 6.0, 1.0)); } else { locals->PSCL_FACTOR[k] = dml_min( mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput); } if (locals->BytePerPixelInDETC[k] == 0.0) { locals->PSCL_FACTOR_CHROMA[k] = 0.0; locals->MinDPPCLKUsingSingleDPP[k] = mode_lib->vba.PixelClock[k] * dml_max3( mode_lib->vba.vtaps[k] / 6.0 * dml_min( 1.0, mode_lib->vba.HRatio[k]), mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / locals->PSCL_FACTOR[k], 1.0); if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0) && locals->MinDPPCLKUsingSingleDPP[k] < 2.0 * mode_lib->vba.PixelClock[k]) { locals->MinDPPCLKUsingSingleDPP[k] = 2.0 * mode_lib->vba.PixelClock[k]; } } else { if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) { locals->PSCL_FACTOR_CHROMA[k] = dml_min( mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput * mode_lib->vba.HRatio[k] / 2.0 / dml_ceil( mode_lib->vba.HTAPsChroma[k] / 6.0, 1.0)); } else { locals->PSCL_FACTOR_CHROMA[k] = dml_min( mode_lib->vba.MaxDCHUBToPSCLThroughput, mode_lib->vba.MaxPSCLToLBThroughput); } locals->MinDPPCLKUsingSingleDPP[k] = mode_lib->vba.PixelClock[k] * dml_max5( mode_lib->vba.vtaps[k] / 6.0 * dml_min( 1.0, mode_lib->vba.HRatio[k]), mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / locals->PSCL_FACTOR[k], mode_lib->vba.VTAPsChroma[k] / 6.0 * dml_min( 1.0, mode_lib->vba.HRatio[k] / 2.0), 
mode_lib->vba.HRatio[k] * mode_lib->vba.VRatio[k] / 4.0 / locals->PSCL_FACTOR_CHROMA[k], 1.0); if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0 || mode_lib->vba.HTAPsChroma[k] > 6.0 || mode_lib->vba.VTAPsChroma[k] > 6.0) && locals->MinDPPCLKUsingSingleDPP[k] < 2.0 * mode_lib->vba.PixelClock[k]) { locals->MinDPPCLKUsingSingleDPP[k] = 2.0 * mode_lib->vba.PixelClock[k]; } } } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { Calculate256BBlockSizes( mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.SurfaceTiling[k], dml_ceil(locals->BytePerPixelInDETY[k], 1.0), dml_ceil(locals->BytePerPixelInDETC[k], 2.0), &locals->Read256BlockHeightY[k], &locals->Read256BlockHeightC[k], &locals->Read256BlockWidthY[k], &locals->Read256BlockWidthC[k]); if (mode_lib->vba.SourceScan[k] == dm_horz) { locals->MaxSwathHeightY[k] = locals->Read256BlockHeightY[k]; locals->MaxSwathHeightC[k] = locals->Read256BlockHeightC[k]; } else { locals->MaxSwathHeightY[k] = locals->Read256BlockWidthY[k]; locals->MaxSwathHeightC[k] = locals->Read256BlockWidthC[k]; } if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 || mode_lib->vba.SourcePixelFormat[k] == dm_444_32 || mode_lib->vba.SourcePixelFormat[k] == dm_444_16 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16 || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) { if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64 && (mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s_x || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_t || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_x || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_s || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_s_x) && mode_lib->vba.SourceScan[k] == dm_horz)) { locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]; } else { locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k] / 2.0; } locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]; } else { if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]; locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8 && mode_lib->vba.SourceScan[k] == dm_horz) { locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k] / 2.0; locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]; } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10 && mode_lib->vba.SourceScan[k] == dm_horz) { locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k] / 2.0; locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]; } else { locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]; locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]; } } if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { mode_lib->vba.MaximumSwathWidthSupport = 8192.0; } else { mode_lib->vba.MaximumSwathWidthSupport = 5120.0; } mode_lib->vba.MaximumSwathWidthInDETBuffer = dml_min( mode_lib->vba.MaximumSwathWidthSupport, mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0 / (locals->BytePerPixelInDETY[k] * locals->MinSwathHeightY[k] + locals->BytePerPixelInDETC[k] / 2.0 * locals->MinSwathHeightC[k])); if (locals->BytePerPixelInDETC[k] == 0.0) { mode_lib->vba.MaximumSwathWidthInLineBuffer = mode_lib->vba.LineBufferSize * dml_max(mode_lib->vba.HRatio[k], 1.0) / mode_lib->vba.LBBitPerPixel[k] / (mode_lib->vba.vtaps[k] + dml_max( dml_ceil( mode_lib->vba.VRatio[k], 1.0) - 2, 0.0)); } else { mode_lib->vba.MaximumSwathWidthInLineBuffer 
= dml_min( mode_lib->vba.LineBufferSize * dml_max( mode_lib->vba.HRatio[k], 1.0) / mode_lib->vba.LBBitPerPixel[k] / (mode_lib->vba.vtaps[k] + dml_max( dml_ceil( mode_lib->vba.VRatio[k], 1.0) - 2, 0.0)), 2.0 * mode_lib->vba.LineBufferSize * dml_max( mode_lib->vba.HRatio[k] / 2.0, 1.0) / mode_lib->vba.LBBitPerPixel[k] / (mode_lib->vba.VTAPsChroma[k] + dml_max( dml_ceil( mode_lib->vba.VRatio[k] / 2.0, 1.0) - 2, 0.0))); } locals->MaximumSwathWidth[k] = dml_min( mode_lib->vba.MaximumSwathWidthInDETBuffer, mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], mode_lib->vba.DISPCLKDPPCLKVCOSpeed); for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], mode_lib->vba.DISPCLKDPPCLKVCOSpeed); mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDppclk[i], mode_lib->vba.DISPCLKDPPCLKVCOSpeed); locals->RequiredDISPCLK[i][j] = 0.0; locals->DISPCLK_DPPCLK_Support[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = mode_lib->vba.PixelClock[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0); if (mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine >= mode_lib->vba.MaxDispclk[i] && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = mode_lib->vba.PixelClock[k] * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100.0); if (mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine >= mode_lib->vba.MaxDispclk[i] && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; if (mode_lib->vba.ODMCapability) { if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; } } if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 
100.0); } else { locals->NoOfDPP[i][j][k] = 2; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0; } locals->RequiredDISPCLK[i][j] = dml_max( locals->RequiredDISPCLK[i][j], mode_lib->vba.PlaneRequiredDISPCLK); if ((locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity) || (mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) { locals->DISPCLK_DPPCLK_Support[i][j] = false; } } locals->TotalNumberOfActiveDPP[i][j] = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k]; if (j == 1) { while (locals->TotalNumberOfActiveDPP[i][j] < mode_lib->vba.MaxNumDPP && locals->TotalNumberOfActiveDPP[i][j] < 2 * mode_lib->vba.NumberOfActivePlanes) { double BWOfNonSplitPlaneOfMaximumBandwidth; unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth; BWOfNonSplitPlaneOfMaximumBandwidth = 0; NumberOfNonSplitPlaneOfMaximumBandwidth = 0; for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { if (locals->ReadBandwidth[k] > BWOfNonSplitPlaneOfMaximumBandwidth && locals->NoOfDPP[i][j][k] == 1) { BWOfNonSplitPlaneOfMaximumBandwidth = locals->ReadBandwidth[k]; NumberOfNonSplitPlaneOfMaximumBandwidth = k; } } locals->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2; locals->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = locals->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth] * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2; locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + 1; } } if (locals->TotalNumberOfActiveDPP[i][j] > mode_lib->vba.MaxNumDPP) { locals->RequiredDISPCLK[i][j] = 0.0; locals->DISPCLK_DPPCLK_Support[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { locals->NoOfDPP[i][j][k] = 1; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); } else { locals->NoOfDPP[i][j][k] = 2; locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0; } if (i != mode_lib->vba.soc.num_states) { mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PixelClock[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0); } else { mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PixelClock[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); } locals->RequiredDISPCLK[i][j] = dml_max( locals->RequiredDISPCLK[i][j], mode_lib->vba.PlaneRequiredDISPCLK); if (locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity || mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) locals->DISPCLK_DPPCLK_Support[i][j] = false; } locals->TotalNumberOfActiveDPP[i][j] = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k]; } 
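			/*
			 * Added comment (descriptive only): the block above
			 * recomputes the DPP assignment with ODM combine
			 * disabled when the MPC-split pass would exceed
			 * MaxNumDPP, using one DPP per plane where the swath
			 * width fits and two otherwise.  Below, the writeback
			 * DISPCLK requirement is folded into the per-state
			 * required DISPCLK and checked against the
			 * DFS-granularity-rounded maximum.
			 */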
locals->RequiredDISPCLK[i][j] = dml_max( locals->RequiredDISPCLK[i][j], mode_lib->vba.WritebackRequiredDISPCLK); if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity < mode_lib->vba.WritebackRequiredDISPCLK) { locals->DISPCLK_DPPCLK_Support[i][j] = false; } } } /*Viewport Size Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->ViewportSizeSupport[i][0] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])) > locals->MaximumSwathWidth[k]) { locals->ViewportSizeSupport[i][0] = false; } } else { if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { locals->ViewportSizeSupport[i][0] = false; } } } } /*Total Available Pipes Support Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (j = 0; j < 2; j++) { if (locals->TotalNumberOfActiveDPP[i][j] <= mode_lib->vba.MaxNumDPP) locals->TotalAvailablePipesSupport[i][j] = true; else locals->TotalAvailablePipesSupport[i][j] = false; } } /*Total Available OTG Support Check*/ mode_lib->vba.TotalNumberOfActiveOTG = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1.0; } } if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) { mode_lib->vba.NumberOfOTGSupport = true; } else { mode_lib->vba.NumberOfOTGSupport = false; } /*Display IO and DSC Support Check*/ mode_lib->vba.NonsupportedDSCInputBPC = false; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0 || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0 || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) { mode_lib->vba.NonsupportedDSCInputBPC = true; } } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { locals->RequiresDSC[i][k] = 0; locals->RequiresFEC[i][k] = 0; if (mode_lib->vba.BlendingAndTiming[k] == k) { if (mode_lib->vba.Output[k] == dm_hdmi) { locals->RequiresDSC[i][k] = 0; locals->RequiresFEC[i][k] = 0; locals->OutputBppPerState[i][k] = TruncToValidBPP( dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24, mode_lib->vba.ForcedOutputLinkBPP[k], false, mode_lib->vba.Output[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.DSCInputBitPerComponent[k]); } else if (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_edp) { if (mode_lib->vba.Output[k] == dm_edp) { mode_lib->vba.EffectiveFECOverhead = 0.0; } else { mode_lib->vba.EffectiveFECOverhead = mode_lib->vba.FECOverhead; } if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) { mode_lib->vba.Outbpp = TruncToValidBPP( (1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, mode_lib->vba.ForcedOutputLinkBPP[k], false, mode_lib->vba.Output[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.DSCInputBitPerComponent[k]); mode_lib->vba.OutbppDSC = TruncToValidBPP( (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, mode_lib->vba.ForcedOutputLinkBPP[k], true, mode_lib->vba.Output[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.DSCInputBitPerComponent[k]); if 
(mode_lib->vba.DSCEnabled[k] == true) { locals->RequiresDSC[i][k] = true; if (mode_lib->vba.Output[k] == dm_dp) { locals->RequiresFEC[i][k] = true; } else { locals->RequiresFEC[i][k] = false; } mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC; } else { locals->RequiresDSC[i][k] = false; locals->RequiresFEC[i][k] = false; } locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp; } if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 540.0) { mode_lib->vba.Outbpp = TruncToValidBPP( (1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, mode_lib->vba.ForcedOutputLinkBPP[k], false, mode_lib->vba.Output[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.DSCInputBitPerComponent[k]); mode_lib->vba.OutbppDSC = TruncToValidBPP( (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, mode_lib->vba.ForcedOutputLinkBPP[k], true, mode_lib->vba.Output[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.DSCInputBitPerComponent[k]); if (mode_lib->vba.DSCEnabled[k] == true) { locals->RequiresDSC[i][k] = true; if (mode_lib->vba.Output[k] == dm_dp) { locals->RequiresFEC[i][k] = true; } else { locals->RequiresFEC[i][k] = false; } mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC; } else { locals->RequiresDSC[i][k] = false; locals->RequiresFEC[i][k] = false; } locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp; } if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 810.0) { mode_lib->vba.Outbpp = TruncToValidBPP( (1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, mode_lib->vba.ForcedOutputLinkBPP[k], false, mode_lib->vba.Output[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.DSCInputBitPerComponent[k]); mode_lib->vba.OutbppDSC = TruncToValidBPP( (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0 * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, mode_lib->vba.ForcedOutputLinkBPP[k], true, mode_lib->vba.Output[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.DSCInputBitPerComponent[k]); if (mode_lib->vba.DSCEnabled[k] == true || mode_lib->vba.Outbpp == BPP_INVALID) { locals->RequiresDSC[i][k] = true; if (mode_lib->vba.Output[k] == dm_dp) { locals->RequiresFEC[i][k] = true; } else { locals->RequiresFEC[i][k] = false; } mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC; } else { locals->RequiresDSC[i][k] = false; locals->RequiresFEC[i][k] = false; } locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp; } } } else { locals->OutputBppPerState[i][k] = BPP_BLENDED_PIPE; } } } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->DIOSupport[i] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (!mode_lib->vba.skip_dio_check[k] && (locals->OutputBppPerState[i][k] == BPP_INVALID || (mode_lib->vba.OutputFormat[k] == dm_420 && mode_lib->vba.Interlace[k] == true && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true))) { locals->DIOSupport[i] = false; } } } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { locals->DSCCLKRequiredMoreThanSupported[i] = false; if (mode_lib->vba.BlendingAndTiming[k] == k) { if ((mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_edp)) { if 
(mode_lib->vba.OutputFormat[k] == dm_420 || mode_lib->vba.OutputFormat[k] == dm_n422) { mode_lib->vba.DSCFormatFactor = 2; } else { mode_lib->vba.DSCFormatFactor = 1; } if (locals->RequiresDSC[i][k] == true) { if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { locals->DSCCLKRequiredMoreThanSupported[i] = true; } } else { if (mode_lib->vba.PixelClockBackEnd[k] / 3.0 / mode_lib->vba.DSCFormatFactor > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { locals->DSCCLKRequiredMoreThanSupported[i] = true; } } } } } } } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { locals->NotEnoughDSCUnits[i] = false; mode_lib->vba.TotalDSCUnitsRequired = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->RequiresDSC[i][k] == true) { if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) { mode_lib->vba.TotalDSCUnitsRequired = mode_lib->vba.TotalDSCUnitsRequired + 2.0; } else { mode_lib->vba.TotalDSCUnitsRequired = mode_lib->vba.TotalDSCUnitsRequired + 1.0; } } } if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) { locals->NotEnoughDSCUnits[i] = true; } } /*DSC Delay per state*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] != k) { mode_lib->vba.slices = 0; } else if (locals->RequiresDSC[i][k] == 0 || locals->RequiresDSC[i][k] == false) { mode_lib->vba.slices = 0; } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) { mode_lib->vba.slices = dml_ceil( mode_lib->vba.PixelClockBackEnd[k] / 400.0, 4.0); } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) { mode_lib->vba.slices = 8.0; } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) { mode_lib->vba.slices = 4.0; } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) { mode_lib->vba.slices = 2.0; } else { mode_lib->vba.slices = 1.0; } if (locals->OutputBppPerState[i][k] == BPP_BLENDED_PIPE || locals->OutputBppPerState[i][k] == BPP_INVALID) { mode_lib->vba.bpp = 0.0; } else { mode_lib->vba.bpp = locals->OutputBppPerState[i][k]; } if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { locals->DSCDelayPerState[i][k] = dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], mode_lib->vba.bpp, dml_ceil( mode_lib->vba.HActive[k] / mode_lib->vba.slices, 1.0), mode_lib->vba.slices, mode_lib->vba.OutputFormat[k]) + dscComputeDelay( mode_lib->vba.OutputFormat[k]); } else { locals->DSCDelayPerState[i][k] = 2.0 * (dscceComputeDelay( mode_lib->vba.DSCInputBitPerComponent[k], mode_lib->vba.bpp, dml_ceil(mode_lib->vba.HActive[k] / mode_lib->vba.slices, 1.0), mode_lib->vba.slices / 2, mode_lib->vba.OutputFormat[k]) + dscComputeDelay(mode_lib->vba.OutputFormat[k])); } locals->DSCDelayPerState[i][k] = locals->DSCDelayPerState[i][k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.PixelClockBackEnd[k]; } else { locals->DSCDelayPerState[i][k] = 0.0; } } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) { for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) { if (mode_lib->vba.BlendingAndTiming[k] == m && locals->RequiresDSC[i][m] == true) locals->DSCDelayPerState[i][k] = 
locals->DSCDelayPerState[i][m]; } } } } //Prefetch Check for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k])); else locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k]; locals->SwathWidthGranularityY = 256 / dml_ceil(locals->BytePerPixelInDETY[k], 1) / locals->MaxSwathHeightY[k]; locals->RoundedUpMaxSwathSizeBytesY = (dml_ceil(locals->SwathWidthYPerState[i][j][k] - 1, locals->SwathWidthGranularityY) + locals->SwathWidthGranularityY) * locals->BytePerPixelInDETY[k] * locals->MaxSwathHeightY[k]; if (locals->SourcePixelFormat[k] == dm_420_10) { locals->RoundedUpMaxSwathSizeBytesY = dml_ceil(locals->RoundedUpMaxSwathSizeBytesY, 256) + 256; } if (locals->MaxSwathHeightC[k] > 0) { locals->SwathWidthGranularityC = 256 / dml_ceil(locals->BytePerPixelInDETC[k], 2) / locals->MaxSwathHeightC[k]; locals->RoundedUpMaxSwathSizeBytesC = (dml_ceil(locals->SwathWidthYPerState[i][j][k] / 2 - 1, locals->SwathWidthGranularityC) + locals->SwathWidthGranularityC) * locals->BytePerPixelInDETC[k] * locals->MaxSwathHeightC[k]; } if (locals->SourcePixelFormat[k] == dm_420_10) { locals->RoundedUpMaxSwathSizeBytesC = dml_ceil(locals->RoundedUpMaxSwathSizeBytesC, 256) + 256; } else { locals->RoundedUpMaxSwathSizeBytesC = 0; } if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024.0 / 2) { locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k]; locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k]; } else { locals->SwathHeightYPerState[i][j][k] = locals->MinSwathHeightY[k]; locals->SwathHeightCPerState[i][j][k] = locals->MinSwathHeightC[k]; } if (locals->BytePerPixelInDETC[k] == 0) { locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; locals->LinesInDETChroma = 0; } else if (locals->SwathHeightYPerState[i][j][k] <= locals->SwathHeightCPerState[i][j][k]) { locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2); } else { locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2); } locals->EffectiveLBLatencyHidingSourceLinesLuma = dml_min(locals->MaxLineBufferLines, dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k] / (locals->SwathWidthYPerState[i][j][k] / dml_max(locals->HRatio[k], 1)), 1)) - (locals->vtaps[k] - 1); locals->EffectiveLBLatencyHidingSourceLinesChroma = dml_min(locals->MaxLineBufferLines, dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k] / (locals->SwathWidthYPerState[i][j][k] / 2 / dml_max(locals->HRatio[k] / 2, 1)), 1)) - (locals->VTAPsChroma[k] - 1); locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min( locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] * 
locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesLuma), locals->SwathHeightYPerState[i][j][k]); if (locals->BytePerPixelInDETC[k] == 0) { locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]); } else { locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min( locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0], locals->EffectiveLBLatencyHidingSourceLinesChroma), locals->SwathHeightCPerState[i][j][k]); locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min( locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]), locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) - locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 * dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k])); } } } } for (i = 0; i <= locals->soc.num_states; i++) { for (j = 0; j < 2; j++) { locals->UrgentLatencySupport[i][j] = true; for (k = 0; k < locals->NumberOfActivePlanes; k++) { if (locals->UrgentLatencySupportUsPerState[i][j][k] < locals->UrgentLatency) locals->UrgentLatencySupport[i][j] = false; } } } /*Prefetch Check*/ for (i = 0; i <= locals->soc.num_states; i++) { for (j = 0; j < 2; j++) { locals->TotalNumberOfDCCActiveDPP[i][j] = 0; for (k = 0; k < locals->NumberOfActivePlanes; k++) { if (locals->DCCEnable[k] == true) { locals->TotalNumberOfDCCActiveDPP[i][j] = locals->TotalNumberOfDCCActiveDPP[i][j] + locals->NoOfDPP[i][j][k]; } } } } CalculateMinAndMaxPrefetchMode(locals->AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &locals->MinPrefetchMode, &locals->MaxPrefetchMode); locals->MaxTotalVActiveRDBandwidth = 0; for (k = 0; k < locals->NumberOfActivePlanes; k++) { locals->MaxTotalVActiveRDBandwidth = locals->MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k]; } for (i = 0; i <= locals->soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k < locals->NumberOfActivePlanes; k++) { locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k]; locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k]; locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k]; locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k]; locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k]; mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.PixelClock[k] / 16.0); if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) { if (mode_lib->vba.VRatio[k] <= 1.0) { mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], 1.0) / 64.0 * mode_lib->vba.HRatio[k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * 
dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], 1.0) / 64.0 * mode_lib->vba.PSCL_FACTOR[k] * mode_lib->vba.RequiredDPPCLK[i][j][k]); } } else { if (mode_lib->vba.VRatio[k] <= 1.0) { mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], 1.0) / 32.0 * mode_lib->vba.HRatio[k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETY[k], 1.0) / 32.0 * mode_lib->vba.PSCL_FACTOR[k] * mode_lib->vba.RequiredDPPCLK[i][j][k]); } if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) { mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], 2.0) / 32.0 * mode_lib->vba.HRatio[k] / 2.0 * mode_lib->vba.PixelClock[k] / mode_lib->vba.NoOfDPP[i][j][k]); } else { mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0] = dml_max( mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], 1.1 * dml_ceil( mode_lib->vba.BytePerPixelInDETC[k], 2.0) / 32.0 * mode_lib->vba.PSCL_FACTOR_CHROMA[k] * mode_lib->vba.RequiredDPPCLK[i][j][k]); } } } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes( mode_lib, mode_lib->vba.DCCEnable[k], mode_lib->vba.Read256BlockHeightY[k], mode_lib->vba.Read256BlockWidthY[k], mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.SurfaceTiling[k], dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0), mode_lib->vba.SourceScan[k], mode_lib->vba.ViewportWidth[k], mode_lib->vba.ViewportHeight[k], mode_lib->vba.SwathWidthYPerState[i][j][k], mode_lib->vba.GPUVMEnable, mode_lib->vba.VMMPageSize, mode_lib->vba.PTEBufferSizeInRequestsLuma, mode_lib->vba.PDEProcessingBufIn64KBReqs, mode_lib->vba.PitchY[k], mode_lib->vba.DCCMetaPitchY[k], &mode_lib->vba.MacroTileWidthY[k], &mode_lib->vba.MetaRowBytesY, &mode_lib->vba.DPTEBytesPerRowY, &mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k], &mode_lib->vba.dpte_row_height[k], &mode_lib->vba.meta_row_height[k]); mode_lib->vba.PrefetchLinesY[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k], mode_lib->vba.vtaps[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, mode_lib->vba.SwathHeightYPerState[i][j][k], mode_lib->vba.ViewportYStartY[k], &mode_lib->vba.PrefillY[k], &mode_lib->vba.MaxNumSwY[k]); if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) { mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes( mode_lib, mode_lib->vba.DCCEnable[k], mode_lib->vba.Read256BlockHeightY[k], mode_lib->vba.Read256BlockWidthY[k], mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.SurfaceTiling[k], dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0), mode_lib->vba.SourceScan[k], mode_lib->vba.ViewportWidth[k] / 2.0, mode_lib->vba.ViewportHeight[k] / 2.0, mode_lib->vba.SwathWidthYPerState[i][j][k] / 2.0, mode_lib->vba.GPUVMEnable, mode_lib->vba.VMMPageSize, mode_lib->vba.PTEBufferSizeInRequestsLuma, mode_lib->vba.PDEProcessingBufIn64KBReqs, mode_lib->vba.PitchC[k], 0.0, &mode_lib->vba.MacroTileWidthC[k], &mode_lib->vba.MetaRowBytesC, &mode_lib->vba.DPTEBytesPerRowC, 
&mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k], &mode_lib->vba.dpte_row_height_chroma[k], &mode_lib->vba.meta_row_height_chroma[k]); mode_lib->vba.PrefetchLinesC[0][0][k] = CalculatePrefetchSourceLines( mode_lib, mode_lib->vba.VRatio[k] / 2.0, mode_lib->vba.VTAPsChroma[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, mode_lib->vba.SwathHeightCPerState[i][j][k], mode_lib->vba.ViewportYStartC[k], &mode_lib->vba.PrefillC[k], &mode_lib->vba.MaxNumSwC[k]); } else { mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0; mode_lib->vba.MetaRowBytesC = 0.0; mode_lib->vba.DPTEBytesPerRowC = 0.0; locals->PrefetchLinesC[0][0][k] = 0.0; locals->PTEBufferSizeNotExceededC[i][j][k] = true; locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma; } locals->PDEAndMetaPTEBytesPerFrame[0][0][k] = mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; locals->MetaRowBytes[0][0][k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; locals->DPTEBytesPerRow[0][0][k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; CalculateActiveRowBandwidth( mode_lib->vba.GPUVMEnable, mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.VRatio[k], mode_lib->vba.DCCEnable[k], mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], mode_lib->vba.MetaRowBytesY, mode_lib->vba.MetaRowBytesC, mode_lib->vba.meta_row_height[k], mode_lib->vba.meta_row_height_chroma[k], mode_lib->vba.DPTEBytesPerRowY, mode_lib->vba.DPTEBytesPerRowC, mode_lib->vba.dpte_row_height[k], mode_lib->vba.dpte_row_height_chroma[k], &mode_lib->vba.meta_row_bw[k], &mode_lib->vba.dpte_row_bw[k], &mode_lib->vba.qual_row_bw[k]); } mode_lib->vba.ExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i] + (mode_lib->vba.TotalNumberOfActiveDPP[i][j] * mode_lib->vba.PixelChunkSizeInKByte + mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j] * mode_lib->vba.MetaChunkSize) * 1024.0 / mode_lib->vba.ReturnBWPerState[i][0]; if (mode_lib->vba.GPUVMEnable == true) { mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency + mode_lib->vba.TotalNumberOfActiveDPP[i][j] * mode_lib->vba.PTEGroupSize / mode_lib->vba.ReturnBWPerState[i][0]; } mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { if (mode_lib->vba.WritebackEnable[k] == true) { locals->WritebackDelay[i][k] = mode_lib->vba.WritebackLatency + CalculateWriteBackDelay( mode_lib->vba.WritebackPixelFormat[k], mode_lib->vba.WritebackHRatio[k], mode_lib->vba.WritebackVRatio[k], mode_lib->vba.WritebackLumaHTaps[k], mode_lib->vba.WritebackLumaVTaps[k], mode_lib->vba.WritebackChromaHTaps[k], mode_lib->vba.WritebackChromaVTaps[k], mode_lib->vba.WritebackDestinationWidth[k]) / locals->RequiredDISPCLK[i][j]; } else { locals->WritebackDelay[i][k] = 0.0; } for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) { if (mode_lib->vba.BlendingAndTiming[m] == k && mode_lib->vba.WritebackEnable[m] == true) { locals->WritebackDelay[i][k] = dml_max(locals->WritebackDelay[i][k], mode_lib->vba.WritebackLatency + CalculateWriteBackDelay( mode_lib->vba.WritebackPixelFormat[m], mode_lib->vba.WritebackHRatio[m], mode_lib->vba.WritebackVRatio[m], mode_lib->vba.WritebackLumaHTaps[m], mode_lib->vba.WritebackLumaVTaps[m], mode_lib->vba.WritebackChromaHTaps[m], mode_lib->vba.WritebackChromaVTaps[m], mode_lib->vba.WritebackDestinationWidth[m]) / 
locals->RequiredDISPCLK[i][j]); } } } } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) { if (mode_lib->vba.BlendingAndTiming[k] == m) { locals->WritebackDelay[i][k] = locals->WritebackDelay[i][m]; } } } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { for (m = 0; m < locals->NumberOfCursors[k]; m++) locals->cursor_bw[k] = locals->NumberOfCursors[k] * locals->CursorWidth[k][m] * locals->CursorBPP[k][m] / 8 / (locals->HTotal[k] / locals->PixelClock[k]) * locals->VRatio[k]; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { locals->MaximumVStartup[0][0][k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); } mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode; do { mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode; mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1; mode_lib->vba.TWait = CalculateTWait( mode_lib->vba.PrefetchMode[i][j], mode_lib->vba.DRAMClockChangeLatency, mode_lib->vba.UrgentLatency, mode_lib->vba.SREnterPlusExitTime); for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.XFCEnabled[k] == true) { mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay( mode_lib, mode_lib->vba.VRatio[k], locals->SwathWidthYPerState[i][j][k], dml_ceil(locals->BytePerPixelInDETY[k], 1.0), mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], mode_lib->vba.XFCTSlvVupdateOffset, mode_lib->vba.XFCTSlvVupdateWidth, mode_lib->vba.XFCTSlvVreadyOffset, mode_lib->vba.XFCXBUFLatencyTolerance, mode_lib->vba.XFCFillBWOverhead, mode_lib->vba.XFCSlvChunkSize, mode_lib->vba.XFCBusTransportTime, mode_lib->vba.TimeCalc, mode_lib->vba.TWait, &mode_lib->vba.SrcActiveDrainRate, &mode_lib->vba.TInitXFill, &mode_lib->vba.TslvChk); } else { mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0; } CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i][0], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth, mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k], mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k], mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal, mode_lib->vba.SwathWidthYPerState[i][j][k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.HTotal[k], mode_lib->vba.SwathWidthYSingleDPP[k], mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.SwathHeightYThisState[k], mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, &mode_lib->vba.DSTXAfterScaler[k], &mode_lib->vba.DSTYAfterScaler[k]); mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule( mode_lib, mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.NumberOfCursors[k], mode_lib->vba.VTotal[k] - 
mode_lib->vba.VActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.MaxInterDCNTileRepeaters, mode_lib->vba.MaximumVStartup[0][0][k], mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, mode_lib->vba.DynamicMetadataEnable[k], mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], mode_lib->vba.DynamicMetadataTransmittedBytes[k], mode_lib->vba.DCCEnable[k], mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.ExtraLatency, mode_lib->vba.TimeCalc, mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], mode_lib->vba.MetaRowBytes[0][0][k], mode_lib->vba.DPTEBytesPerRow[0][0][k], mode_lib->vba.PrefetchLinesY[0][0][k], mode_lib->vba.SwathWidthYPerState[i][j][k], mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.PrefillY[k], mode_lib->vba.MaxNumSwY[k], mode_lib->vba.PrefetchLinesC[0][0][k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.PrefillC[k], mode_lib->vba.MaxNumSwC[k], mode_lib->vba.SwathHeightYPerState[i][j][k], mode_lib->vba.SwathHeightCPerState[i][j][k], mode_lib->vba.TWait, mode_lib->vba.XFCEnabled[k], mode_lib->vba.XFCRemoteSurfaceFlipDelay, mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, mode_lib->vba.DSTXAfterScaler[k], mode_lib->vba.DSTYAfterScaler[k], &mode_lib->vba.LineTimesForPrefetch[k], &mode_lib->vba.PrefetchBW[k], &mode_lib->vba.LinesForMetaPTE[k], &mode_lib->vba.LinesForMetaAndDPTERow[k], &mode_lib->vba.VRatioPreY[i][j][k], &mode_lib->vba.VRatioPreC[i][j][k], &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k], &mode_lib->vba.Tno_bw[k], &mode_lib->vba.VUpdateOffsetPix[k], &mode_lib->vba.VUpdateWidthPix[k], &mode_lib->vba.VReadyOffsetPix[k]); } mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0; mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0; locals->prefetch_vm_bw_valid = true; locals->prefetch_row_bw_valid = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->PDEAndMetaPTEBytesPerFrame[0][0][k] == 0) locals->prefetch_vm_bw[k] = 0; else if (locals->LinesForMetaPTE[k] > 0) locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[0][0][k] / (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_vm_bw[k] = 0; locals->prefetch_vm_bw_valid = false; } if (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k] == 0) locals->prefetch_row_bw[k] = 0; else if (locals->LinesForMetaAndDPTERow[k] > 0) locals->prefetch_row_bw[k] = (locals->MetaRowBytes[0][0][k] + locals->DPTEBytesPerRow[0][0][k]) / (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]); else { locals->prefetch_row_bw[k] = 0; locals->prefetch_row_bw_valid = false; } mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = mode_lib->vba.MaximumReadBandwidthWithPrefetch + mode_lib->vba.cursor_bw[k] + mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]; mode_lib->vba.MaximumReadBandwidthWithPrefetch = mode_lib->vba.MaximumReadBandwidthWithPrefetch + mode_lib->vba.cursor_bw[k] + dml_max3( mode_lib->vba.prefetch_vm_bw[k], mode_lib->vba.prefetch_row_bw[k], dml_max(mode_lib->vba.ReadBandwidth[k], mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]) + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]); } locals->BandwidthWithoutPrefetchSupported[i][0] = true; if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0]) { locals->BandwidthWithoutPrefetchSupported[i][0] = false; } locals->PrefetchSupported[i][j] = true; if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > 
locals->ReturnBWPerState[i][0]) { locals->PrefetchSupported[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->LineTimesForPrefetch[k] < 2.0 || locals->LinesForMetaPTE[k] >= 8.0 || locals->LinesForMetaAndDPTERow[k] >= 16.0 || mode_lib->vba.IsErrorResult[i][j][k] == true) { locals->PrefetchSupported[i][j] = false; } } locals->VRatioInPrefetchSupported[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->VRatioPreY[i][j][k] > 4.0 || locals->VRatioPreC[i][j][k] > 4.0 || mode_lib->vba.IsErrorResult[i][j][k] == true) { locals->VRatioInPrefetchSupported[i][j] = false; } } } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true) && mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode); if (mode_lib->vba.PrefetchSupported[i][j] == true && mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBWPerState[i][0]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.BandwidthAvailableForImmediateFlip - mode_lib->vba.cursor_bw[k] - dml_max( mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.qual_row_bw[k], mode_lib->vba.PrefetchBW[k]); } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.ImmediateFlipBytes[k] = 0.0; if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { mode_lib->vba.ImmediateFlipBytes[k] = mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k] + mode_lib->vba.MetaRowBytes[0][0][k] + mode_lib->vba.DPTEBytesPerRow[0][0][k]; } } mode_lib->vba.TotImmediateFlipBytes = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { mode_lib->vba.TotImmediateFlipBytes = mode_lib->vba.TotImmediateFlipBytes + mode_lib->vba.ImmediateFlipBytes[k]; } } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { CalculateFlipSchedule( mode_lib, mode_lib->vba.ExtraLatency, mode_lib->vba.UrgentLatencyPixelDataOnly, mode_lib->vba.GPUVMMaxPageTableLevels, mode_lib->vba.GPUVMEnable, mode_lib->vba.BandwidthAvailableForImmediateFlip, mode_lib->vba.TotImmediateFlipBytes, mode_lib->vba.SourcePixelFormat[k], mode_lib->vba.ImmediateFlipBytes[k], mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], mode_lib->vba.VRatio[k], mode_lib->vba.Tno_bw[k], mode_lib->vba.PDEAndMetaPTEBytesPerFrame[0][0][k], mode_lib->vba.MetaRowBytes[0][0][k], mode_lib->vba.DPTEBytesPerRow[0][0][k], mode_lib->vba.DCCEnable[k], mode_lib->vba.dpte_row_height[k], mode_lib->vba.meta_row_height[k], mode_lib->vba.qual_row_bw[k], &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k], &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k], &mode_lib->vba.final_flip_bw[k], &mode_lib->vba.ImmediateFlipSupportedForPipe[k]); } mode_lib->vba.total_dcn_read_bw_with_flip = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.total_dcn_read_bw_with_flip = mode_lib->vba.total_dcn_read_bw_with_flip + mode_lib->vba.cursor_bw[k] + dml_max3( mode_lib->vba.prefetch_vm_bw[k], mode_lib->vba.prefetch_row_bw[k], mode_lib->vba.final_flip_bw[k] + dml_max( mode_lib->vba.ReadBandwidth[k], mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])); } mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true; if (mode_lib->vba.total_dcn_read_bw_with_flip > 
mode_lib->vba.ReturnBWPerState[i][0]) { mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; } for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) { mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; } } } else { mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; } } } /*Vertical Active BW support*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0] = dml_min(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) * mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100; if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][0]) mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = true; else mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] = false; } /*PTE Buffer Size Check*/ for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { for (j = 0; j < 2; j++) { locals->PTEBufferSizeNotExceeded[i][j] = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (locals->PTEBufferSizeNotExceededY[i][j][k] == false || locals->PTEBufferSizeNotExceededC[i][j][k] == false) { locals->PTEBufferSizeNotExceeded[i][j] = false; } } } } /*Cursor Support Check*/ mode_lib->vba.CursorSupport = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { for (j = 0; j < 2; j++) { if (mode_lib->vba.CursorWidth[k][j] > 0.0) { if (dml_floor( dml_floor( mode_lib->vba.CursorBufferSize - mode_lib->vba.CursorChunkSize, mode_lib->vba.CursorChunkSize) * 1024.0 / (mode_lib->vba.CursorWidth[k][j] * mode_lib->vba.CursorBPP[k][j] / 8.0), 1.0) * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) / mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatencyPixelDataOnly || (mode_lib->vba.CursorBPP[k][j] == 64.0 && mode_lib->vba.Cursor64BppSupport == false)) { mode_lib->vba.CursorSupport = false; } } } } /*Valid Pitch Check*/ mode_lib->vba.PitchSupport = true; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { locals->AlignedYPitch[k] = dml_ceil( dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]), locals->MacroTileWidthY[k]); if (locals->AlignedYPitch[k] > mode_lib->vba.PitchY[k]) { mode_lib->vba.PitchSupport = false; } if (mode_lib->vba.DCCEnable[k] == true) { locals->AlignedDCCMetaPitch[k] = dml_ceil( dml_max( mode_lib->vba.DCCMetaPitchY[k], mode_lib->vba.ViewportWidth[k]), 64.0 * locals->Read256BlockWidthY[k]); } else { locals->AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k]; } if (locals->AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) { mode_lib->vba.PitchSupport = false; } if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64 && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16 && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) { locals->AlignedCPitch[k] = dml_ceil( dml_max( mode_lib->vba.PitchC[k], mode_lib->vba.ViewportWidth[k] / 2.0), locals->MacroTileWidthC[k]); } else { locals->AlignedCPitch[k] = mode_lib->vba.PitchC[k]; } if (locals->AlignedCPitch[k] > mode_lib->vba.PitchC[k]) { mode_lib->vba.PitchSupport = false; } } /*Mode Support, Voltage State and SOC Configuration*/ for (i = mode_lib->vba.soc.num_states; i >= 0; i--) { for (j = 0; j < 2; j++) { enum dm_validation_status status = DML_VALIDATION_OK; if 
(mode_lib->vba.ScaleRatioAndTapsSupport != true) { status = DML_FAIL_SCALE_RATIO_TAP; } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) { status = DML_FAIL_SOURCE_PIXEL_FORMAT; } else if (locals->ViewportSizeSupport[i][0] != true) { status = DML_FAIL_VIEWPORT_SIZE; } else if (locals->DIOSupport[i] != true) { status = DML_FAIL_DIO_SUPPORT; } else if (locals->NotEnoughDSCUnits[i] != false) { status = DML_FAIL_NOT_ENOUGH_DSC; } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) { status = DML_FAIL_DSC_CLK_REQUIRED; } else if (locals->UrgentLatencySupport[i][j] != true) { status = DML_FAIL_URGENT_LATENCY; } else if (locals->ROBSupport[i][0] != true) { status = DML_FAIL_REORDERING_BUFFER; } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) { status = DML_FAIL_DISPCLK_DPPCLK; } else if (locals->TotalAvailablePipesSupport[i][j] != true) { status = DML_FAIL_TOTAL_AVAILABLE_PIPES; } else if (mode_lib->vba.NumberOfOTGSupport != true) { status = DML_FAIL_NUM_OTG; } else if (mode_lib->vba.WritebackModeSupport != true) { status = DML_FAIL_WRITEBACK_MODE; } else if (mode_lib->vba.WritebackLatencySupport != true) { status = DML_FAIL_WRITEBACK_LATENCY; } else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) { status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP; } else if (mode_lib->vba.CursorSupport != true) { status = DML_FAIL_CURSOR_SUPPORT; } else if (mode_lib->vba.PitchSupport != true) { status = DML_FAIL_PITCH_SUPPORT; } else if (locals->PrefetchSupported[i][j] != true) { status = DML_FAIL_PREFETCH_SUPPORT; } else if (locals->TotalVerticalActiveBandwidthSupport[i][0] != true) { status = DML_FAIL_TOTAL_V_ACTIVE_BW; } else if (locals->VRatioInPrefetchSupported[i][j] != true) { status = DML_FAIL_V_RATIO_PREFETCH; } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) { status = DML_FAIL_PTE_BUFFER_SIZE; } else if (mode_lib->vba.NonsupportedDSCInputBPC != false) { status = DML_FAIL_DSC_INPUT_BPC; } if (status == DML_VALIDATION_OK) { locals->ModeSupport[i][j] = true; } else { locals->ModeSupport[i][j] = false; } locals->ValidationStatus[i] = status; } } { unsigned int MaximumMPCCombine = 0; mode_lib->vba.VoltageLevel = mode_lib->vba.soc.num_states + 1; for (i = mode_lib->vba.VoltageOverrideLevel; i <= mode_lib->vba.soc.num_states; i++) { if (locals->ModeSupport[i][0] == true || locals->ModeSupport[i][1] == true) { mode_lib->vba.VoltageLevel = i; if (locals->ModeSupport[i][1] == true && (locals->ModeSupport[i][0] == false || mode_lib->vba.WhenToDoMPCCombine == dm_mpc_always_when_possible)) { MaximumMPCCombine = 1; } else { MaximumMPCCombine = 0; } break; } } mode_lib->vba.ImmediateFlipSupport = locals->ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel][MaximumMPCCombine]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { mode_lib->vba.DPPPerPlane[k] = locals->NoOfDPP[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k]; locals->DPPCLK[k] = locals->RequiredDPPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k]; } mode_lib->vba.DISPCLK = locals->RequiredDISPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine]; mode_lib->vba.maxMpcComb = MaximumMPCCombine; } mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; mode_lib->vba.ReturnBW = 
locals->ReturnBWPerState[mode_lib->vba.VoltageLevel][0]; mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel]; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { if (mode_lib->vba.BlendingAndTiming[k] == k) { mode_lib->vba.ODMCombineEnabled[k] = locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k]; } else { mode_lib->vba.ODMCombineEnabled[k] = 0; } mode_lib->vba.DSCEnabled[k] = locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; mode_lib->vba.OutputBpp[k] = locals->OutputBppPerState[mode_lib->vba.VoltageLevel][k]; } }
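/*
 * Illustrative sketch only (not part of the mode-support code above): the DP
 * branches above size the available link bpp as roughly
 *   (1 - Downspreading/100) * link_symbol_clock_MHz * lanes * 8 / PixelClockBackEnd_MHz
 * before handing it to TruncToValidBPP(), which clamps it to a valid value or
 * returns BPP_INVALID. The helper and the numbers below are made up for
 * illustration only: e.g. 4-lane HBR (270 MHz), 0.5% downspread and a 594 MHz
 * pixel clock give about 14.5 bpp, so such a mode would lean on the
 * DSC/OutbppDSC path computed above.
 */
static inline double example_dp_link_bpp(double downspread_pct,
					 double link_symbol_clk_mhz,
					 double lanes, double pixel_clk_mhz)
{
	/* Same shape as the 270/540/810 MHz branches above, factored out. */
	return (1.0 - downspread_pct / 100.0) * link_symbol_clk_mhz * lanes
			/ pixel_clk_mhz * 8.0;
}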
// SPDX-License-Identifier: GPL-2.0 /* * The MT7629 driver based on Linux generic pinctrl binding. * * Copyright (C) 2018 MediaTek Inc. * Author: Ryder Lee <[email protected]> */ #include "pinctrl-moore.h" #define MT7629_PIN(_number, _name, _eint_n) \ MTK_PIN(_number, _name, 0, _eint_n, DRV_GRP1) static const struct mtk_pin_field_calc mt7629_pin_mode_range[] = { PIN_FIELD(0, 78, 0x300, 0x10, 0, 4), }; static const struct mtk_pin_field_calc mt7629_pin_dir_range[] = { PIN_FIELD(0, 78, 0x0, 0x10, 0, 1), }; static const struct mtk_pin_field_calc mt7629_pin_di_range[] = { PIN_FIELD(0, 78, 0x200, 0x10, 0, 1), }; static const struct mtk_pin_field_calc mt7629_pin_do_range[] = { PIN_FIELD(0, 78, 0x100, 0x10, 0, 1), }; static const struct mtk_pin_field_calc mt7629_pin_ies_range[] = { PIN_FIELD(0, 10, 0x1000, 0x10, 0, 1), PIN_FIELD(11, 18, 0x2000, 0x10, 0, 1), PIN_FIELD(19, 32, 0x3000, 0x10, 0, 1), PIN_FIELD(33, 48, 0x4000, 0x10, 0, 1), PIN_FIELD(49, 50, 0x5000, 0x10, 0, 1), PIN_FIELD(51, 69, 0x6000, 0x10, 0, 1), PIN_FIELD(70, 78, 0x7000, 0x10, 0, 1), }; static const struct mtk_pin_field_calc mt7629_pin_smt_range[] = { PIN_FIELD(0, 10, 0x1100, 0x10, 0, 1), PIN_FIELD(11, 18, 0x2100, 0x10, 0, 1), PIN_FIELD(19, 32, 0x3100, 0x10, 0, 1), PIN_FIELD(33, 48, 0x4100, 0x10, 0, 1), PIN_FIELD(49, 50, 0x5100, 0x10, 0, 1), PIN_FIELD(51, 69, 0x6100, 0x10, 0, 1), PIN_FIELD(70, 78, 0x7100, 0x10, 0, 1), }; static const struct mtk_pin_field_calc mt7629_pin_pullen_range[] = { PIN_FIELD(0, 10, 0x1400, 0x10, 0, 1), PIN_FIELD(11, 18, 0x2400, 0x10, 0, 1), PIN_FIELD(19, 32, 0x3400, 0x10, 0, 1), PIN_FIELD(33, 48, 0x4400, 0x10, 0, 1), PIN_FIELD(49, 50, 0x5400, 0x10, 0, 1), PIN_FIELD(51, 69, 0x6400, 0x10, 0, 1), PIN_FIELD(70, 78, 0x7400, 0x10, 0, 1), }; static const struct mtk_pin_field_calc mt7629_pin_pullsel_range[] = { PIN_FIELD(0, 10, 0x1500, 0x10, 0, 1), PIN_FIELD(11, 18, 0x2500, 0x10, 0, 1), PIN_FIELD(19, 32, 0x3500, 0x10, 0, 1), PIN_FIELD(33, 48, 0x4500, 0x10, 0, 1), PIN_FIELD(49, 50, 0x5500, 0x10, 0, 1), PIN_FIELD(51, 69, 0x6500, 0x10, 0, 1), PIN_FIELD(70, 78, 0x7500, 0x10, 0, 1), }; static const struct mtk_pin_field_calc mt7629_pin_drv_range[] = { PIN_FIELD(0, 10, 0x1600, 0x10, 0, 4), PIN_FIELD(11, 18, 0x2600, 0x10, 0, 4), PIN_FIELD(19, 32, 0x3600, 0x10, 0, 4), PIN_FIELD(33, 48, 0x4600, 0x10, 0, 4), PIN_FIELD(49, 50, 0x5600, 0x10, 0, 4), PIN_FIELD(51, 69, 0x6600, 0x10, 0, 4), PIN_FIELD(70, 78, 0x7600, 0x10, 0, 4), }; static const struct mtk_pin_field_calc mt7629_pin_tdsel_range[] = { PIN_FIELD(0, 10, 0x1200, 0x10, 0, 4), PIN_FIELD(11, 18, 0x2200, 0x10, 0, 4), PIN_FIELD(19, 32, 0x3200, 0x10, 0, 4), PIN_FIELD(33, 48, 0x4200, 0x10, 0, 4), PIN_FIELD(49, 50, 0x5200, 0x10, 0, 4), PIN_FIELD(51, 69, 0x6200, 0x10, 0, 4), PIN_FIELD(70, 78, 0x7200, 0x10, 0, 4), }; static const struct mtk_pin_field_calc mt7629_pin_rdsel_range[] = { PIN_FIELD(0, 10, 0x1300, 0x10, 0, 4), PIN_FIELD(11, 18, 0x2300, 0x10, 0, 4), PIN_FIELD(19, 32, 0x3300, 0x10, 0, 4), PIN_FIELD(33, 48, 0x4300, 0x10, 0, 4), PIN_FIELD(49, 50, 0x5300, 0x10, 0, 4), PIN_FIELD(51, 69, 0x6300, 0x10, 0, 4), PIN_FIELD(70, 78, 0x7300, 0x10, 0, 4), }; static const struct mtk_pin_reg_calc mt7629_reg_cals[] = { [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt7629_pin_mode_range), [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt7629_pin_dir_range), [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt7629_pin_di_range), [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt7629_pin_do_range), [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt7629_pin_ies_range), [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt7629_pin_smt_range), [PINCTRL_PIN_REG_PULLSEL] = 
MTK_RANGE(mt7629_pin_pullsel_range), [PINCTRL_PIN_REG_PULLEN] = MTK_RANGE(mt7629_pin_pullen_range), [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt7629_pin_drv_range), [PINCTRL_PIN_REG_TDSEL] = MTK_RANGE(mt7629_pin_tdsel_range), [PINCTRL_PIN_REG_RDSEL] = MTK_RANGE(mt7629_pin_rdsel_range), }; static const struct mtk_pin_desc mt7629_pins[] = { MT7629_PIN(0, "TOP_5G_CLK", 53), MT7629_PIN(1, "TOP_5G_DATA", 54), MT7629_PIN(2, "WF0_5G_HB0", 55), MT7629_PIN(3, "WF0_5G_HB1", 56), MT7629_PIN(4, "WF0_5G_HB2", 57), MT7629_PIN(5, "WF0_5G_HB3", 58), MT7629_PIN(6, "WF0_5G_HB4", 59), MT7629_PIN(7, "WF0_5G_HB5", 60), MT7629_PIN(8, "WF0_5G_HB6", 61), MT7629_PIN(9, "XO_REQ", 9), MT7629_PIN(10, "TOP_RST_N", 10), MT7629_PIN(11, "SYS_WATCHDOG", 11), MT7629_PIN(12, "EPHY_LED0_N_JTDO", 12), MT7629_PIN(13, "EPHY_LED1_N_JTDI", 13), MT7629_PIN(14, "EPHY_LED2_N_JTMS", 14), MT7629_PIN(15, "EPHY_LED3_N_JTCLK", 15), MT7629_PIN(16, "EPHY_LED4_N_JTRST_N", 16), MT7629_PIN(17, "WF2G_LED_N", 17), MT7629_PIN(18, "WF5G_LED_N", 18), MT7629_PIN(19, "I2C_SDA", 19), MT7629_PIN(20, "I2C_SCL", 20), MT7629_PIN(21, "GPIO_9", 21), MT7629_PIN(22, "GPIO_10", 22), MT7629_PIN(23, "GPIO_11", 23), MT7629_PIN(24, "GPIO_12", 24), MT7629_PIN(25, "UART1_TXD", 25), MT7629_PIN(26, "UART1_RXD", 26), MT7629_PIN(27, "UART1_CTS", 27), MT7629_PIN(28, "UART1_RTS", 28), MT7629_PIN(29, "UART2_TXD", 29), MT7629_PIN(30, "UART2_RXD", 30), MT7629_PIN(31, "UART2_CTS", 31), MT7629_PIN(32, "UART2_RTS", 32), MT7629_PIN(33, "MDI_TP_P1", 33), MT7629_PIN(34, "MDI_TN_P1", 34), MT7629_PIN(35, "MDI_RP_P1", 35), MT7629_PIN(36, "MDI_RN_P1", 36), MT7629_PIN(37, "MDI_RP_P2", 37), MT7629_PIN(38, "MDI_RN_P2", 38), MT7629_PIN(39, "MDI_TP_P2", 39), MT7629_PIN(40, "MDI_TN_P2", 40), MT7629_PIN(41, "MDI_TP_P3", 41), MT7629_PIN(42, "MDI_TN_P3", 42), MT7629_PIN(43, "MDI_RP_P3", 43), MT7629_PIN(44, "MDI_RN_P3", 44), MT7629_PIN(45, "MDI_RP_P4", 45), MT7629_PIN(46, "MDI_RN_P4", 46), MT7629_PIN(47, "MDI_TP_P4", 47), MT7629_PIN(48, "MDI_TN_P4", 48), MT7629_PIN(49, "SMI_MDC", 49), MT7629_PIN(50, "SMI_MDIO", 50), MT7629_PIN(51, "PCIE_PERESET_N", 51), MT7629_PIN(52, "PWM_0", 52), MT7629_PIN(53, "GPIO_0", 0), MT7629_PIN(54, "GPIO_1", 1), MT7629_PIN(55, "GPIO_2", 2), MT7629_PIN(56, "GPIO_3", 3), MT7629_PIN(57, "GPIO_4", 4), MT7629_PIN(58, "GPIO_5", 5), MT7629_PIN(59, "GPIO_6", 6), MT7629_PIN(60, "GPIO_7", 7), MT7629_PIN(61, "GPIO_8", 8), MT7629_PIN(62, "SPI_CLK", 62), MT7629_PIN(63, "SPI_CS", 63), MT7629_PIN(64, "SPI_MOSI", 64), MT7629_PIN(65, "SPI_MISO", 65), MT7629_PIN(66, "SPI_WP", 66), MT7629_PIN(67, "SPI_HOLD", 67), MT7629_PIN(68, "UART0_TXD", 68), MT7629_PIN(69, "UART0_RXD", 69), MT7629_PIN(70, "TOP_2G_CLK", 70), MT7629_PIN(71, "TOP_2G_DATA", 71), MT7629_PIN(72, "WF0_2G_HB0", 72), MT7629_PIN(73, "WF0_2G_HB1", 73), MT7629_PIN(74, "WF0_2G_HB2", 74), MT7629_PIN(75, "WF0_2G_HB3", 75), MT7629_PIN(76, "WF0_2G_HB4", 76), MT7629_PIN(77, "WF0_2G_HB5", 77), MT7629_PIN(78, "WF0_2G_HB6", 78), }; /* List all groups consisting of these pins dedicated to the enablement of * certain hardware block and the corresponding mode for all of the pins. * The hardware probably has multiple combinations of these pinouts. 
*/ /* LED for EPHY */ static int mt7629_ephy_leds_pins[] = { 12, 13, 14, 15, 16, 17, 18, }; static int mt7629_ephy_leds_funcs[] = { 1, 1, 1, 1, 1, 1, 1, }; static int mt7629_ephy_led0_pins[] = { 12, }; static int mt7629_ephy_led0_funcs[] = { 1, }; static int mt7629_ephy_led1_pins[] = { 13, }; static int mt7629_ephy_led1_funcs[] = { 1, }; static int mt7629_ephy_led2_pins[] = { 14, }; static int mt7629_ephy_led2_funcs[] = { 1, }; static int mt7629_ephy_led3_pins[] = { 15, }; static int mt7629_ephy_led3_funcs[] = { 1, }; static int mt7629_ephy_led4_pins[] = { 16, }; static int mt7629_ephy_led4_funcs[] = { 1, }; static int mt7629_wf2g_led_pins[] = { 17, }; static int mt7629_wf2g_led_funcs[] = { 1, }; static int mt7629_wf5g_led_pins[] = { 18, }; static int mt7629_wf5g_led_funcs[] = { 1, }; /* Watchdog */ static int mt7629_watchdog_pins[] = { 11, }; static int mt7629_watchdog_funcs[] = { 1, }; /* LED for GPHY */ static int mt7629_gphy_leds_0_pins[] = { 21, 22, 23, }; static int mt7629_gphy_leds_0_funcs[] = { 2, 2, 2, }; static int mt7629_gphy_led1_0_pins[] = { 21, }; static int mt7629_gphy_led1_0_funcs[] = { 2, }; static int mt7629_gphy_led2_0_pins[] = { 22, }; static int mt7629_gphy_led2_0_funcs[] = { 2, }; static int mt7629_gphy_led3_0_pins[] = { 23, }; static int mt7629_gphy_led3_0_funcs[] = { 2, }; static int mt7629_gphy_leds_1_pins[] = { 57, 58, 59, }; static int mt7629_gphy_leds_1_funcs[] = { 1, 1, 1, }; static int mt7629_gphy_led1_1_pins[] = { 57, }; static int mt7629_gphy_led1_1_funcs[] = { 1, }; static int mt7629_gphy_led2_1_pins[] = { 58, }; static int mt7629_gphy_led2_1_funcs[] = { 1, }; static int mt7629_gphy_led3_1_pins[] = { 59, }; static int mt7629_gphy_led3_1_funcs[] = { 1, }; /* I2C */ static int mt7629_i2c_0_pins[] = { 19, 20, }; static int mt7629_i2c_0_funcs[] = { 1, 1, }; static int mt7629_i2c_1_pins[] = { 53, 54, }; static int mt7629_i2c_1_funcs[] = { 1, 1, }; /* SPI */ static int mt7629_spi_0_pins[] = { 21, 22, 23, 24, }; static int mt7629_spi_0_funcs[] = { 1, 1, 1, 1, }; static int mt7629_spi_1_pins[] = { 62, 63, 64, 65, }; static int mt7629_spi_1_funcs[] = { 1, 1, 1, 1, }; static int mt7629_spi_wp_pins[] = { 66, }; static int mt7629_spi_wp_funcs[] = { 1, }; static int mt7629_spi_hold_pins[] = { 67, }; static int mt7629_spi_hold_funcs[] = { 1, }; /* UART */ static int mt7629_uart1_0_txd_rxd_pins[] = { 25, 26, }; static int mt7629_uart1_0_txd_rxd_funcs[] = { 1, 1, }; static int mt7629_uart1_1_txd_rxd_pins[] = { 53, 54, }; static int mt7629_uart1_1_txd_rxd_funcs[] = { 2, 2, }; static int mt7629_uart2_0_txd_rxd_pins[] = { 29, 30, }; static int mt7629_uart2_0_txd_rxd_funcs[] = { 1, 1, }; static int mt7629_uart2_1_txd_rxd_pins[] = { 57, 58, }; static int mt7629_uart2_1_txd_rxd_funcs[] = { 2, 2, }; static int mt7629_uart1_0_cts_rts_pins[] = { 27, 28, }; static int mt7629_uart1_0_cts_rts_funcs[] = { 1, 1, }; static int mt7629_uart1_1_cts_rts_pins[] = { 55, 56, }; static int mt7629_uart1_1_cts_rts_funcs[] = { 2, 2, }; static int mt7629_uart2_0_cts_rts_pins[] = { 31, 32, }; static int mt7629_uart2_0_cts_rts_funcs[] = { 1, 1, }; static int mt7629_uart2_1_cts_rts_pins[] = { 59, 60, }; static int mt7629_uart2_1_cts_rts_funcs[] = { 2, 2, }; static int mt7629_uart0_txd_rxd_pins[] = { 68, 69, }; static int mt7629_uart0_txd_rxd_funcs[] = { 1, 1, }; /* MDC/MDIO */ static int mt7629_mdc_mdio_pins[] = { 49, 50, }; static int mt7629_mdc_mdio_funcs[] = { 1, 1, }; /* PCIE */ static int mt7629_pcie_pereset_pins[] = { 51, }; static int mt7629_pcie_pereset_funcs[] = { 1, }; static int 
mt7629_pcie_wake_pins[] = { 55, }; static int mt7629_pcie_wake_funcs[] = { 1, }; static int mt7629_pcie_clkreq_pins[] = { 56, }; static int mt7629_pcie_clkreq_funcs[] = { 1, }; /* PWM */ static int mt7629_pwm_0_pins[] = { 52, }; static int mt7629_pwm_0_funcs[] = { 1, }; static int mt7629_pwm_1_pins[] = { 61, }; static int mt7629_pwm_1_funcs[] = { 2, }; /* WF 2G */ static int mt7629_wf0_2g_pins[] = { 70, 71, 72, 73, 74, 75, 76, 77, 78, }; static int mt7629_wf0_2g_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, }; /* WF 5G */ static int mt7629_wf0_5g_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, }; static int mt7629_wf0_5g_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; /* SNFI */ static int mt7629_snfi_pins[] = { 62, 63, 64, 65, 66, 67 }; static int mt7629_snfi_funcs[] = { 2, 2, 2, 2, 2, 2 }; /* SPI NOR */ static int mt7629_snor_pins[] = { 62, 63, 64, 65, 66, 67 }; static int mt7629_snor_funcs[] = { 1, 1, 1, 1, 1, 1 }; static const struct group_desc mt7629_groups[] = { PINCTRL_PIN_GROUP("ephy_leds", mt7629_ephy_leds), PINCTRL_PIN_GROUP("ephy_led0", mt7629_ephy_led0), PINCTRL_PIN_GROUP("ephy_led1", mt7629_ephy_led1), PINCTRL_PIN_GROUP("ephy_led2", mt7629_ephy_led2), PINCTRL_PIN_GROUP("ephy_led3", mt7629_ephy_led3), PINCTRL_PIN_GROUP("ephy_led4", mt7629_ephy_led4), PINCTRL_PIN_GROUP("wf2g_led", mt7629_wf2g_led), PINCTRL_PIN_GROUP("wf5g_led", mt7629_wf5g_led), PINCTRL_PIN_GROUP("watchdog", mt7629_watchdog), PINCTRL_PIN_GROUP("gphy_leds_0", mt7629_gphy_leds_0), PINCTRL_PIN_GROUP("gphy_led1_0", mt7629_gphy_led1_0), PINCTRL_PIN_GROUP("gphy_led2_0", mt7629_gphy_led2_0), PINCTRL_PIN_GROUP("gphy_led3_0", mt7629_gphy_led3_0), PINCTRL_PIN_GROUP("gphy_leds_1", mt7629_gphy_leds_1), PINCTRL_PIN_GROUP("gphy_led1_1", mt7629_gphy_led1_1), PINCTRL_PIN_GROUP("gphy_led2_1", mt7629_gphy_led2_1), PINCTRL_PIN_GROUP("gphy_led3_1", mt7629_gphy_led3_1), PINCTRL_PIN_GROUP("i2c_0", mt7629_i2c_0), PINCTRL_PIN_GROUP("i2c_1", mt7629_i2c_1), PINCTRL_PIN_GROUP("spi_0", mt7629_spi_0), PINCTRL_PIN_GROUP("spi_1", mt7629_spi_1), PINCTRL_PIN_GROUP("spi_wp", mt7629_spi_wp), PINCTRL_PIN_GROUP("spi_hold", mt7629_spi_hold), PINCTRL_PIN_GROUP("uart1_0_txd_rxd", mt7629_uart1_0_txd_rxd), PINCTRL_PIN_GROUP("uart1_1_txd_rxd", mt7629_uart1_1_txd_rxd), PINCTRL_PIN_GROUP("uart2_0_txd_rxd", mt7629_uart2_0_txd_rxd), PINCTRL_PIN_GROUP("uart2_1_txd_rxd", mt7629_uart2_1_txd_rxd), PINCTRL_PIN_GROUP("uart1_0_cts_rts", mt7629_uart1_0_cts_rts), PINCTRL_PIN_GROUP("uart1_1_cts_rts", mt7629_uart1_1_cts_rts), PINCTRL_PIN_GROUP("uart2_0_cts_rts", mt7629_uart2_0_cts_rts), PINCTRL_PIN_GROUP("uart2_1_cts_rts", mt7629_uart2_1_cts_rts), PINCTRL_PIN_GROUP("uart0_txd_rxd", mt7629_uart0_txd_rxd), PINCTRL_PIN_GROUP("mdc_mdio", mt7629_mdc_mdio), PINCTRL_PIN_GROUP("pcie_pereset", mt7629_pcie_pereset), PINCTRL_PIN_GROUP("pcie_wake", mt7629_pcie_wake), PINCTRL_PIN_GROUP("pcie_clkreq", mt7629_pcie_clkreq), PINCTRL_PIN_GROUP("pwm_0", mt7629_pwm_0), PINCTRL_PIN_GROUP("pwm_1", mt7629_pwm_1), PINCTRL_PIN_GROUP("wf0_5g", mt7629_wf0_5g), PINCTRL_PIN_GROUP("wf0_2g", mt7629_wf0_2g), PINCTRL_PIN_GROUP("snfi", mt7629_snfi), PINCTRL_PIN_GROUP("spi_nor", mt7629_snor), }; /* Joint those groups owning the same capability in user point of view which * allows that people tend to use through the device tree. 
*/ static const char *mt7629_ethernet_groups[] = { "mdc_mdio", }; static const char *mt7629_i2c_groups[] = { "i2c_0", "i2c_1", }; static const char *mt7629_led_groups[] = { "ephy_leds", "ephy_led0", "ephy_led1", "ephy_led2", "ephy_led3", "ephy_led4", "wf2g_led", "wf5g_led", "gphy_leds_0", "gphy_led1_0", "gphy_led2_0", "gphy_led3_0", "gphy_leds_1", "gphy_led1_1", "gphy_led2_1", "gphy_led3_1",}; static const char *mt7629_pcie_groups[] = { "pcie_pereset", "pcie_wake", "pcie_clkreq", }; static const char *mt7629_pwm_groups[] = { "pwm_0", "pwm_1", }; static const char *mt7629_spi_groups[] = { "spi_0", "spi_1", "spi_wp", "spi_hold", }; static const char *mt7629_uart_groups[] = { "uart1_0_txd_rxd", "uart1_1_txd_rxd", "uart2_0_txd_rxd", "uart2_1_txd_rxd", "uart1_0_cts_rts", "uart1_1_cts_rts", "uart2_0_cts_rts", "uart2_1_cts_rts", "uart0_txd_rxd", }; static const char *mt7629_wdt_groups[] = { "watchdog", }; static const char *mt7629_wifi_groups[] = { "wf0_5g", "wf0_2g", }; static const char *mt7629_flash_groups[] = { "snfi", "spi_nor" }; static const struct function_desc mt7629_functions[] = { PINCTRL_PIN_FUNCTION("eth", mt7629_ethernet), PINCTRL_PIN_FUNCTION("i2c", mt7629_i2c), PINCTRL_PIN_FUNCTION("led", mt7629_led), PINCTRL_PIN_FUNCTION("pcie", mt7629_pcie), PINCTRL_PIN_FUNCTION("pwm", mt7629_pwm), PINCTRL_PIN_FUNCTION("spi", mt7629_spi), PINCTRL_PIN_FUNCTION("uart", mt7629_uart), PINCTRL_PIN_FUNCTION("watchdog", mt7629_wdt), PINCTRL_PIN_FUNCTION("wifi", mt7629_wifi), PINCTRL_PIN_FUNCTION("flash", mt7629_flash), }; static const struct mtk_eint_hw mt7629_eint_hw = { .port_mask = 7, .ports = 7, .ap_num = ARRAY_SIZE(mt7629_pins), .db_cnt = 16, .db_time = debounce_time_mt2701, }; static struct mtk_pin_soc mt7629_data = { .reg_cal = mt7629_reg_cals, .pins = mt7629_pins, .npins = ARRAY_SIZE(mt7629_pins), .grps = mt7629_groups, .ngrps = ARRAY_SIZE(mt7629_groups), .funcs = mt7629_functions, .nfuncs = ARRAY_SIZE(mt7629_functions), .eint_hw = &mt7629_eint_hw, .gpio_m = 0, .ies_present = true, .base_names = mtk_default_register_base_names, .nbase_names = ARRAY_SIZE(mtk_default_register_base_names), .bias_disable_set = mtk_pinconf_bias_disable_set_rev1, .bias_disable_get = mtk_pinconf_bias_disable_get_rev1, .bias_set = mtk_pinconf_bias_set_rev1, .bias_get = mtk_pinconf_bias_get_rev1, .drive_set = mtk_pinconf_drive_set_rev1, .drive_get = mtk_pinconf_drive_get_rev1, }; static const struct of_device_id mt7629_pinctrl_of_match[] = { { .compatible = "mediatek,mt7629-pinctrl", }, {} }; static int mt7629_pinctrl_probe(struct platform_device *pdev) { return mtk_moore_pinctrl_probe(pdev, &mt7629_data); } static struct platform_driver mt7629_pinctrl_driver = { .driver = { .name = "mt7629-pinctrl", .of_match_table = mt7629_pinctrl_of_match, }, .probe = mt7629_pinctrl_probe, }; static int __init mt7629_pinctrl_init(void) { return platform_driver_register(&mt7629_pinctrl_driver); } arch_initcall(mt7629_pinctrl_init);
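/*
 * Illustrative sketch only, not part of this driver: a typical consumer of the
 * groups/functions defined above gets its pin state from its own device-tree
 * node and applies it as below. The "default" state name and the helper name
 * are conventions assumed for illustration; the device tree is what actually
 * binds e.g. group "uart0_txd_rxd" to function "uart" (pins 68/69, mode 1).
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int example_claim_pins(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(dev);		/* parse this device's pinctrl-* properties */
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, "default");	/* usual state name, assumed here */
	if (IS_ERR(s))
		return PTR_ERR(s);

	return pinctrl_select_state(p, s);	/* programs the selected groups/modes */
}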
/* * linux/drivers/video/pxafb.c * * Copyright (C) 1999 Eric A. Thomas. * Copyright (C) 2004 Jean-Frederic Clere. * Copyright (C) 2004 Ian Campbell. * Copyright (C) 2004 Jeff Lackey. * Based on sa1100fb.c Copyright (C) 1999 Eric A. Thomas * which in turn is * Based on acornfb.c Copyright (C) Russell King. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Intel PXA250/210 LCD Controller Frame Buffer Driver * * Please direct your questions and comments on this driver to the following * email address: * * [email protected] * * Add support for overlay1 and overlay2 based on pxafb_overlay.c: * * Copyright (C) 2004, Intel Corporation * * 2003/08/27: <[email protected]> * 2004/03/10: <[email protected]> * 2004/10/28: <[email protected]> * * Copyright (C) 2006-2008 Marvell International Ltd. * All Rights Reserved */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/cpufreq.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/console.h> #include <linux/of_graph.h> #include <linux/regulator/consumer.h> #include <linux/soc/pxa/cpu.h> #include <video/of_display_timing.h> #include <video/videomode.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/div64.h> #include <linux/platform_data/video-pxafb.h> /* * Complain if VAR is out of range. */ #define DEBUG_VAR 1 #include "pxafb.h" #include "pxa3xx-regs.h" /* Bits which should not be set in machine configuration structures */ #define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM | LCCR0_BM | LCCR0_QDM |\ LCCR0_DIS | LCCR0_EFM | LCCR0_IUM |\ LCCR0_SFM | LCCR0_LDM | LCCR0_ENB) #define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP | LCCR3_VSP |\ LCCR3_PCD | LCCR3_BPP(0xf)) static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *); static void set_ctrlr_state(struct pxafb_info *fbi, u_int state); static void setup_base_frame(struct pxafb_info *fbi, struct fb_var_screeninfo *var, int branch); static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal, unsigned long offset, size_t size); static unsigned long video_mem_size = 0; static inline unsigned long lcd_readl(struct pxafb_info *fbi, unsigned int off) { return __raw_readl(fbi->mmio_base + off); } static inline void lcd_writel(struct pxafb_info *fbi, unsigned int off, unsigned long val) { __raw_writel(val, fbi->mmio_base + off); } static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state) { unsigned long flags; local_irq_save(flags); /* * We need to handle two requests being made at the same time. * There are two important cases: * 1. When we are changing VT (C_REENABLE) while unblanking * (C_ENABLE) We must perform the unblanking, which will * do our REENABLE for us. * 2. When we are blanking, but immediately unblank before * we have blanked. We do the "REENABLE" thing here as * well, just to be sure. 
*/ if (fbi->task_state == C_ENABLE && state == C_REENABLE) state = (u_int) -1; if (fbi->task_state == C_DISABLE && state == C_ENABLE) state = C_REENABLE; if (state != (u_int)-1) { fbi->task_state = state; schedule_work(&fbi->task); } local_irq_restore(flags); } static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } static int pxafb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info) { struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); u_int val; if (regno >= fbi->palette_size) return 1; if (fbi->fb.var.grayscale) { fbi->palette_cpu[regno] = ((blue >> 8) & 0x00ff); return 0; } switch (fbi->lccr4 & LCCR4_PAL_FOR_MASK) { case LCCR4_PAL_FOR_0: val = ((red >> 0) & 0xf800); val |= ((green >> 5) & 0x07e0); val |= ((blue >> 11) & 0x001f); fbi->palette_cpu[regno] = val; break; case LCCR4_PAL_FOR_1: val = ((red << 8) & 0x00f80000); val |= ((green >> 0) & 0x0000fc00); val |= ((blue >> 8) & 0x000000f8); ((u32 *)(fbi->palette_cpu))[regno] = val; break; case LCCR4_PAL_FOR_2: val = ((red << 8) & 0x00fc0000); val |= ((green >> 0) & 0x0000fc00); val |= ((blue >> 8) & 0x000000fc); ((u32 *)(fbi->palette_cpu))[regno] = val; break; case LCCR4_PAL_FOR_3: val = ((red << 8) & 0x00ff0000); val |= ((green >> 0) & 0x0000ff00); val |= ((blue >> 8) & 0x000000ff); ((u32 *)(fbi->palette_cpu))[regno] = val; break; } return 0; } static int pxafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info) { struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); unsigned int val; int ret = 1; /* * If inverse mode was selected, invert all the colours * rather than the register number. The register number * is what you poke into the framebuffer to produce the * colour you requested. */ if (fbi->cmap_inverse) { red = 0xffff - red; green = 0xffff - green; blue = 0xffff - blue; } /* * If greyscale is true, then we convert the RGB value * to greyscale no matter what visual we are using. */ if (fbi->fb.var.grayscale) red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; switch (fbi->fb.fix.visual) { case FB_VISUAL_TRUECOLOR: /* * 16-bit True Colour. We encode the RGB value * according to the RGB bitfield information. 
*/ if (regno < 16) { u32 *pal = fbi->fb.pseudo_palette; val = chan_to_field(red, &fbi->fb.var.red); val |= chan_to_field(green, &fbi->fb.var.green); val |= chan_to_field(blue, &fbi->fb.var.blue); pal[regno] = val; ret = 0; } break; case FB_VISUAL_STATIC_PSEUDOCOLOR: case FB_VISUAL_PSEUDOCOLOR: ret = pxafb_setpalettereg(regno, red, green, blue, trans, info); break; } return ret; } /* calculate pixel depth, transparency bit included, >=16bpp formats _only_ */ static inline int var_to_depth(struct fb_var_screeninfo *var) { return var->red.length + var->green.length + var->blue.length + var->transp.length; } /* calculate 4-bit BPP value for LCCR3 and OVLxC1 */ static int pxafb_var_to_bpp(struct fb_var_screeninfo *var) { int bpp = -EINVAL; switch (var->bits_per_pixel) { case 1: bpp = 0; break; case 2: bpp = 1; break; case 4: bpp = 2; break; case 8: bpp = 3; break; case 16: bpp = 4; break; case 24: switch (var_to_depth(var)) { case 18: bpp = 6; break; /* 18-bits/pixel packed */ case 19: bpp = 8; break; /* 19-bits/pixel packed */ case 24: bpp = 9; break; } break; case 32: switch (var_to_depth(var)) { case 18: bpp = 5; break; /* 18-bits/pixel unpacked */ case 19: bpp = 7; break; /* 19-bits/pixel unpacked */ case 25: bpp = 10; break; } break; } return bpp; } /* * pxafb_var_to_lccr3(): * Convert a bits per pixel value to the correct bit pattern for LCCR3 * * NOTE: for PXA27x with overlays support, the LCCR3_PDFOR_x bits have an * implication of the acutal use of transparency bit, which we handle it * here separatedly. See PXA27x Developer's Manual, Section <<7.4.6 Pixel * Formats>> for the valid combination of PDFOR, PAL_FOR for various BPP. * * Transparency for palette pixel formats is not supported at the moment. */ static uint32_t pxafb_var_to_lccr3(struct fb_var_screeninfo *var) { int bpp = pxafb_var_to_bpp(var); uint32_t lccr3; if (bpp < 0) return 0; lccr3 = LCCR3_BPP(bpp); switch (var_to_depth(var)) { case 16: lccr3 |= var->transp.length ? LCCR3_PDFOR_3 : 0; break; case 18: lccr3 |= LCCR3_PDFOR_3; break; case 24: lccr3 |= var->transp.length ? LCCR3_PDFOR_2 : LCCR3_PDFOR_3; break; case 19: case 25: lccr3 |= LCCR3_PDFOR_0; break; } return lccr3; } #define SET_PIXFMT(v, r, g, b, t) \ ({ \ (v)->transp.offset = (t) ? (r) + (g) + (b) : 0; \ (v)->transp.length = (t) ? (t) : 0; \ (v)->blue.length = (b); (v)->blue.offset = 0; \ (v)->green.length = (g); (v)->green.offset = (b); \ (v)->red.length = (r); (v)->red.offset = (b) + (g); \ }) /* set the RGBT bitfields of fb_var_screeninf according to * var->bits_per_pixel and given depth */ static void pxafb_set_pixfmt(struct fb_var_screeninfo *var, int depth) { if (depth == 0) depth = var->bits_per_pixel; if (var->bits_per_pixel < 16) { /* indexed pixel formats */ var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 8; } switch (depth) { case 16: var->transp.length ? SET_PIXFMT(var, 5, 5, 5, 1) : /* RGBT555 */ SET_PIXFMT(var, 5, 6, 5, 0); break; /* RGB565 */ case 18: SET_PIXFMT(var, 6, 6, 6, 0); break; /* RGB666 */ case 19: SET_PIXFMT(var, 6, 6, 6, 1); break; /* RGBT666 */ case 24: var->transp.length ? SET_PIXFMT(var, 8, 8, 7, 1) : /* RGBT887 */ SET_PIXFMT(var, 8, 8, 8, 0); break; /* RGB888 */ case 25: SET_PIXFMT(var, 8, 8, 8, 1); break; /* RGBT888 */ } } #ifdef CONFIG_CPU_FREQ /* * pxafb_display_dma_period() * Calculate the minimum period (in picoseconds) between two DMA * requests for the LCD controller. 
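 *
 * (Worked example with an assumed pixclock of 96153 ps at 16 bits per
 * pixel: 96153 * 8 * 16 / 16 = 769224 ps, i.e. roughly 769 ns between
 * DMA requests.)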
If we hit this, it means we're * doing nothing but LCD DMA. */ static unsigned int pxafb_display_dma_period(struct fb_var_screeninfo *var) { /* * Period = pixclock * bits_per_byte * bytes_per_transfer * / memory_bits_per_pixel; */ return var->pixclock * 8 * 16 / var->bits_per_pixel; } #endif /* * Select the smallest mode that allows the desired resolution to be * displayed. If desired parameters can be rounded up. */ static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, struct fb_var_screeninfo *var) { struct pxafb_mode_info *mode = NULL; struct pxafb_mode_info *modelist = mach->modes; unsigned int best_x = 0xffffffff, best_y = 0xffffffff; unsigned int i; for (i = 0; i < mach->num_modes; i++) { if (modelist[i].xres >= var->xres && modelist[i].yres >= var->yres && modelist[i].xres < best_x && modelist[i].yres < best_y && modelist[i].bpp >= var->bits_per_pixel) { best_x = modelist[i].xres; best_y = modelist[i].yres; mode = &modelist[i]; } } return mode; } static void pxafb_setmode(struct fb_var_screeninfo *var, struct pxafb_mode_info *mode) { var->xres = mode->xres; var->yres = mode->yres; var->bits_per_pixel = mode->bpp; var->pixclock = mode->pixclock; var->hsync_len = mode->hsync_len; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->vsync_len = mode->vsync_len; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->sync = mode->sync; var->grayscale = mode->cmap_greyscale; var->transp.length = mode->transparency; /* set the initial RGBA bitfields */ pxafb_set_pixfmt(var, mode->depth); } static int pxafb_adjust_timing(struct pxafb_info *fbi, struct fb_var_screeninfo *var) { int line_length; var->xres = max_t(int, var->xres, MIN_XRES); var->yres = max_t(int, var->yres, MIN_YRES); if (!(fbi->lccr0 & LCCR0_LCDT)) { clamp_val(var->hsync_len, 1, 64); clamp_val(var->vsync_len, 1, 64); clamp_val(var->left_margin, 1, 255); clamp_val(var->right_margin, 1, 255); clamp_val(var->upper_margin, 1, 255); clamp_val(var->lower_margin, 1, 255); } /* make sure each line is aligned on word boundary */ line_length = var->xres * var->bits_per_pixel / 8; line_length = ALIGN(line_length, 4); var->xres = line_length * 8 / var->bits_per_pixel; /* we don't support xpan, force xres_virtual to be equal to xres */ var->xres_virtual = var->xres; if (var->accel_flags & FB_ACCELF_TEXT) var->yres_virtual = fbi->fb.fix.smem_len / line_length; else var->yres_virtual = max(var->yres_virtual, var->yres); /* check for limits */ if (var->xres > MAX_XRES || var->yres > MAX_YRES) return -EINVAL; if (var->yres > var->yres_virtual) return -EINVAL; return 0; } /* * pxafb_check_var(): * Get the video params out of 'var'. If a value doesn't fit, round it up, * if it's too big, return -EINVAL. * * Round up in the following order: bits_per_pixel, xres, * yres, xres_virtual, yres_virtual, xoffset, yoffset, grayscale, * bitfields, horizontal timing, vertical timing. 
*/ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); struct pxafb_mach_info *inf = fbi->inf; int err; if (inf->fixed_modes) { struct pxafb_mode_info *mode; mode = pxafb_getmode(inf, var); if (!mode) return -EINVAL; pxafb_setmode(var, mode); } /* do a test conversion to BPP fields to check the color formats */ err = pxafb_var_to_bpp(var); if (err < 0) return err; pxafb_set_pixfmt(var, var_to_depth(var)); err = pxafb_adjust_timing(fbi, var); if (err) return err; #ifdef CONFIG_CPU_FREQ pr_debug("pxafb: dma period = %d ps\n", pxafb_display_dma_period(var)); #endif return 0; } /* * pxafb_set_par(): * Set the user defined part of the display for the specified console */ static int pxafb_set_par(struct fb_info *info) { struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); struct fb_var_screeninfo *var = &info->var; if (var->bits_per_pixel >= 16) fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; else if (!fbi->cmap_static) fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; else { /* * Some people have weird ideas about wanting static * pseudocolor maps. I suspect their user space * applications are broken. */ fbi->fb.fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR; } fbi->fb.fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; if (var->bits_per_pixel >= 16) fbi->palette_size = 0; else fbi->palette_size = var->bits_per_pixel == 1 ? 4 : 1 << var->bits_per_pixel; fbi->palette_cpu = (u16 *)&fbi->dma_buff->palette[0]; if (fbi->fb.var.bits_per_pixel >= 16) fb_dealloc_cmap(&fbi->fb.cmap); else fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0); pxafb_activate_var(var, fbi); return 0; } static int pxafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); struct fb_var_screeninfo newvar; int dma = DMA_MAX + DMA_BASE; if (fbi->state != C_ENABLE) return 0; /* Only take .xoffset, .yoffset and .vmode & FB_VMODE_YWRAP from what * was passed in and copy the rest from the old screeninfo. */ memcpy(&newvar, &fbi->fb.var, sizeof(newvar)); newvar.xoffset = var->xoffset; newvar.yoffset = var->yoffset; newvar.vmode &= ~FB_VMODE_YWRAP; newvar.vmode |= var->vmode & FB_VMODE_YWRAP; setup_base_frame(fbi, &newvar, 1); if (fbi->lccr0 & LCCR0_SDS) lcd_writel(fbi, FBR1, fbi->fdadr[dma + 1] | 0x1); lcd_writel(fbi, FBR0, fbi->fdadr[dma] | 0x1); return 0; } /* * pxafb_blank(): * Blank the display by setting all palette values to zero. Note, the * 16 bpp mode does not really use the palette, so this will not * blank the display in all modes. 
*/ static int pxafb_blank(int blank, struct fb_info *info) { struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); int i; switch (blank) { case FB_BLANK_POWERDOWN: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_NORMAL: if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) for (i = 0; i < fbi->palette_size; i++) pxafb_setpalettereg(i, 0, 0, 0, 0, info); pxafb_schedule_work(fbi, C_DISABLE); /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */ break; case FB_BLANK_UNBLANK: /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */ if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) fb_set_cmap(&fbi->fb.cmap, info); pxafb_schedule_work(fbi, C_ENABLE); } return 0; } static const struct fb_ops pxafb_ops = { .owner = THIS_MODULE, FB_DEFAULT_IOMEM_OPS, .fb_check_var = pxafb_check_var, .fb_set_par = pxafb_set_par, .fb_pan_display = pxafb_pan_display, .fb_setcolreg = pxafb_setcolreg, .fb_blank = pxafb_blank, }; #ifdef CONFIG_FB_PXA_OVERLAY static void overlay1fb_setup(struct pxafb_layer *ofb) { int size = ofb->fb.fix.line_length * ofb->fb.var.yres_virtual; unsigned long start = ofb->video_mem_phys; setup_frame_dma(ofb->fbi, DMA_OV1, PAL_NONE, start, size); } /* Depending on the enable status of overlay1/2, the DMA should be * updated from FDADRx (when disabled) or FBRx (when enabled). */ static void overlay1fb_enable(struct pxafb_layer *ofb) { int enabled = lcd_readl(ofb->fbi, OVL1C1) & OVLxC1_OEN; uint32_t fdadr1 = ofb->fbi->fdadr[DMA_OV1] | (enabled ? 0x1 : 0); lcd_writel(ofb->fbi, enabled ? FBR1 : FDADR1, fdadr1); lcd_writel(ofb->fbi, OVL1C2, ofb->control[1]); lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] | OVLxC1_OEN); } static void overlay1fb_disable(struct pxafb_layer *ofb) { uint32_t lccr5; if (!(lcd_readl(ofb->fbi, OVL1C1) & OVLxC1_OEN)) return; lccr5 = lcd_readl(ofb->fbi, LCCR5); lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] & ~OVLxC1_OEN); lcd_writel(ofb->fbi, LCSR1, LCSR1_BS(1)); lcd_writel(ofb->fbi, LCCR5, lccr5 & ~LCSR1_BS(1)); lcd_writel(ofb->fbi, FBR1, ofb->fbi->fdadr[DMA_OV1] | 0x3); if (wait_for_completion_timeout(&ofb->branch_done, 1 * HZ) == 0) pr_warn("%s: timeout disabling overlay1\n", __func__); lcd_writel(ofb->fbi, LCCR5, lccr5); } static void overlay2fb_setup(struct pxafb_layer *ofb) { int size, div = 1, pfor = NONSTD_TO_PFOR(ofb->fb.var.nonstd); unsigned long start[3] = { ofb->video_mem_phys, 0, 0 }; if (pfor == OVERLAY_FORMAT_RGB || pfor == OVERLAY_FORMAT_YUV444_PACKED) { size = ofb->fb.fix.line_length * ofb->fb.var.yres_virtual; setup_frame_dma(ofb->fbi, DMA_OV2_Y, -1, start[0], size); } else { size = ofb->fb.var.xres_virtual * ofb->fb.var.yres_virtual; switch (pfor) { case OVERLAY_FORMAT_YUV444_PLANAR: div = 1; break; case OVERLAY_FORMAT_YUV422_PLANAR: div = 2; break; case OVERLAY_FORMAT_YUV420_PLANAR: div = 4; break; } start[1] = start[0] + size; start[2] = start[1] + size / div; setup_frame_dma(ofb->fbi, DMA_OV2_Y, -1, start[0], size); setup_frame_dma(ofb->fbi, DMA_OV2_Cb, -1, start[1], size / div); setup_frame_dma(ofb->fbi, DMA_OV2_Cr, -1, start[2], size / div); } } static void overlay2fb_enable(struct pxafb_layer *ofb) { int pfor = NONSTD_TO_PFOR(ofb->fb.var.nonstd); int enabled = lcd_readl(ofb->fbi, OVL2C1) & OVLxC1_OEN; uint32_t fdadr2 = ofb->fbi->fdadr[DMA_OV2_Y] | (enabled ? 0x1 : 0); uint32_t fdadr3 = ofb->fbi->fdadr[DMA_OV2_Cb] | (enabled ? 0x1 : 0); uint32_t fdadr4 = ofb->fbi->fdadr[DMA_OV2_Cr] | (enabled ? 
0x1 : 0); if (pfor == OVERLAY_FORMAT_RGB || pfor == OVERLAY_FORMAT_YUV444_PACKED) lcd_writel(ofb->fbi, enabled ? FBR2 : FDADR2, fdadr2); else { lcd_writel(ofb->fbi, enabled ? FBR2 : FDADR2, fdadr2); lcd_writel(ofb->fbi, enabled ? FBR3 : FDADR3, fdadr3); lcd_writel(ofb->fbi, enabled ? FBR4 : FDADR4, fdadr4); } lcd_writel(ofb->fbi, OVL2C2, ofb->control[1]); lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] | OVLxC1_OEN); } static void overlay2fb_disable(struct pxafb_layer *ofb) { uint32_t lccr5; if (!(lcd_readl(ofb->fbi, OVL2C1) & OVLxC1_OEN)) return; lccr5 = lcd_readl(ofb->fbi, LCCR5); lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] & ~OVLxC1_OEN); lcd_writel(ofb->fbi, LCSR1, LCSR1_BS(2)); lcd_writel(ofb->fbi, LCCR5, lccr5 & ~LCSR1_BS(2)); lcd_writel(ofb->fbi, FBR2, ofb->fbi->fdadr[DMA_OV2_Y] | 0x3); lcd_writel(ofb->fbi, FBR3, ofb->fbi->fdadr[DMA_OV2_Cb] | 0x3); lcd_writel(ofb->fbi, FBR4, ofb->fbi->fdadr[DMA_OV2_Cr] | 0x3); if (wait_for_completion_timeout(&ofb->branch_done, 1 * HZ) == 0) pr_warn("%s: timeout disabling overlay2\n", __func__); } static struct pxafb_layer_ops ofb_ops[] = { [0] = { .enable = overlay1fb_enable, .disable = overlay1fb_disable, .setup = overlay1fb_setup, }, [1] = { .enable = overlay2fb_enable, .disable = overlay2fb_disable, .setup = overlay2fb_setup, }, }; static int overlayfb_open(struct fb_info *info, int user) { struct pxafb_layer *ofb = container_of(info, struct pxafb_layer, fb); /* no support for framebuffer console on overlay */ if (user == 0) return -ENODEV; if (ofb->usage++ == 0) { /* unblank the base framebuffer */ console_lock(); fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK); console_unlock(); } return 0; } static int overlayfb_release(struct fb_info *info, int user) { struct pxafb_layer *ofb = container_of(info, struct pxafb_layer, fb); if (ofb->usage == 1) { ofb->ops->disable(ofb); ofb->fb.var.height = -1; ofb->fb.var.width = -1; ofb->fb.var.xres = ofb->fb.var.xres_virtual = 0; ofb->fb.var.yres = ofb->fb.var.yres_virtual = 0; ofb->usage--; } return 0; } static int overlayfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct pxafb_layer *ofb = container_of(info, struct pxafb_layer, fb); struct fb_var_screeninfo *base_var = &ofb->fbi->fb.var; int xpos, ypos, pfor, bpp; xpos = NONSTD_TO_XPOS(var->nonstd); ypos = NONSTD_TO_YPOS(var->nonstd); pfor = NONSTD_TO_PFOR(var->nonstd); bpp = pxafb_var_to_bpp(var); if (bpp < 0) return -EINVAL; /* no support for YUV format on overlay1 */ if (ofb->id == OVERLAY1 && pfor != 0) return -EINVAL; /* for YUV packed formats, bpp = 'minimum bpp of YUV components' */ switch (pfor) { case OVERLAY_FORMAT_RGB: bpp = pxafb_var_to_bpp(var); if (bpp < 0) return -EINVAL; pxafb_set_pixfmt(var, var_to_depth(var)); break; case OVERLAY_FORMAT_YUV444_PACKED: bpp = 24; break; case OVERLAY_FORMAT_YUV444_PLANAR: bpp = 8; break; case OVERLAY_FORMAT_YUV422_PLANAR: bpp = 4; break; case OVERLAY_FORMAT_YUV420_PLANAR: bpp = 2; break; default: return -EINVAL; } /* each line must start at a 32-bit word boundary */ if ((xpos * bpp) % 32) return -EINVAL; /* xres must align on 32-bit word boundary */ var->xres = roundup(var->xres * bpp, 32) / bpp; if ((xpos + var->xres > base_var->xres) || (ypos + var->yres > base_var->yres)) return -EINVAL; var->xres_virtual = var->xres; var->yres_virtual = max(var->yres, var->yres_virtual); return 0; } static int overlayfb_check_video_memory(struct pxafb_layer *ofb) { struct fb_var_screeninfo *var = &ofb->fb.var; int pfor = NONSTD_TO_PFOR(var->nonstd); int size, bpp = 0; switch (pfor) { case 
OVERLAY_FORMAT_RGB: bpp = var->bits_per_pixel; break; case OVERLAY_FORMAT_YUV444_PACKED: bpp = 24; break; case OVERLAY_FORMAT_YUV444_PLANAR: bpp = 24; break; case OVERLAY_FORMAT_YUV422_PLANAR: bpp = 16; break; case OVERLAY_FORMAT_YUV420_PLANAR: bpp = 12; break; } ofb->fb.fix.line_length = var->xres_virtual * bpp / 8; size = PAGE_ALIGN(ofb->fb.fix.line_length * var->yres_virtual); if (ofb->video_mem) { if (ofb->video_mem_size >= size) return 0; } return -EINVAL; } static int overlayfb_set_par(struct fb_info *info) { struct pxafb_layer *ofb = container_of(info, struct pxafb_layer, fb); struct fb_var_screeninfo *var = &info->var; int xpos, ypos, pfor, bpp, ret; ret = overlayfb_check_video_memory(ofb); if (ret) return ret; bpp = pxafb_var_to_bpp(var); xpos = NONSTD_TO_XPOS(var->nonstd); ypos = NONSTD_TO_YPOS(var->nonstd); pfor = NONSTD_TO_PFOR(var->nonstd); ofb->control[0] = OVLxC1_PPL(var->xres) | OVLxC1_LPO(var->yres) | OVLxC1_BPP(bpp); ofb->control[1] = OVLxC2_XPOS(xpos) | OVLxC2_YPOS(ypos); if (ofb->id == OVERLAY2) ofb->control[1] |= OVL2C2_PFOR(pfor); ofb->ops->setup(ofb); ofb->ops->enable(ofb); return 0; } static const struct fb_ops overlay_fb_ops = { .owner = THIS_MODULE, .fb_open = overlayfb_open, .fb_release = overlayfb_release, .fb_check_var = overlayfb_check_var, .fb_set_par = overlayfb_set_par, }; static void init_pxafb_overlay(struct pxafb_info *fbi, struct pxafb_layer *ofb, int id) { sprintf(ofb->fb.fix.id, "overlay%d", id + 1); ofb->fb.fix.type = FB_TYPE_PACKED_PIXELS; ofb->fb.fix.xpanstep = 0; ofb->fb.fix.ypanstep = 1; ofb->fb.var.activate = FB_ACTIVATE_NOW; ofb->fb.var.height = -1; ofb->fb.var.width = -1; ofb->fb.var.vmode = FB_VMODE_NONINTERLACED; ofb->fb.fbops = &overlay_fb_ops; ofb->fb.node = -1; ofb->fb.pseudo_palette = NULL; ofb->id = id; ofb->ops = &ofb_ops[id]; ofb->usage = 0; ofb->fbi = fbi; init_completion(&ofb->branch_done); } static inline int pxafb_overlay_supported(void) { if (cpu_is_pxa27x() || cpu_is_pxa3xx()) return 1; return 0; } static int pxafb_overlay_map_video_memory(struct pxafb_info *pxafb, struct pxafb_layer *ofb) { /* We assume that user will use at most video_mem_size for overlay fb, * anyway, it's useless to use 16bpp main plane and 24bpp overlay */ ofb->video_mem = alloc_pages_exact(PAGE_ALIGN(pxafb->video_mem_size), GFP_KERNEL | __GFP_ZERO); if (ofb->video_mem == NULL) return -ENOMEM; ofb->video_mem_phys = virt_to_phys(ofb->video_mem); ofb->video_mem_size = PAGE_ALIGN(pxafb->video_mem_size); mutex_lock(&ofb->fb.mm_lock); ofb->fb.fix.smem_start = ofb->video_mem_phys; ofb->fb.fix.smem_len = pxafb->video_mem_size; mutex_unlock(&ofb->fb.mm_lock); ofb->fb.screen_base = ofb->video_mem; return 0; } static void pxafb_overlay_init(struct pxafb_info *fbi) { int i, ret; if (!pxafb_overlay_supported()) return; for (i = 0; i < 2; i++) { struct pxafb_layer *ofb = &fbi->overlay[i]; init_pxafb_overlay(fbi, ofb, i); ret = register_framebuffer(&ofb->fb); if (ret) { dev_err(fbi->dev, "failed to register overlay %d\n", i); continue; } ret = pxafb_overlay_map_video_memory(fbi, ofb); if (ret) { dev_err(fbi->dev, "failed to map video memory for overlay %d\n", i); unregister_framebuffer(&ofb->fb); continue; } ofb->registered = 1; } /* mask all IU/BS/EOF/SOF interrupts */ lcd_writel(fbi, LCCR5, ~0); pr_info("PXA Overlay driver loaded successfully!\n"); } static void pxafb_overlay_exit(struct pxafb_info *fbi) { int i; if (!pxafb_overlay_supported()) return; for (i = 0; i < 2; i++) { struct pxafb_layer *ofb = &fbi->overlay[i]; if (ofb->registered) { if (ofb->video_mem) 
free_pages_exact(ofb->video_mem, ofb->video_mem_size); unregister_framebuffer(&ofb->fb); } } } #else static inline void pxafb_overlay_init(struct pxafb_info *fbi) {} static inline void pxafb_overlay_exit(struct pxafb_info *fbi) {} #endif /* CONFIG_FB_PXA_OVERLAY */ /* * Calculate the PCD value from the clock rate (in picoseconds). * We take account of the PPCR clock setting. * From PXA Developer's Manual: * * PixelClock = LCLK * ------------- * 2 ( PCD + 1 ) * * PCD = LCLK * ------------- - 1 * 2(PixelClock) * * Where: * LCLK = LCD/Memory Clock * PCD = LCCR3[7:0] * * PixelClock here is in Hz while the pixclock argument given is the * period in picoseconds. Hence PixelClock = 1 / ( pixclock * 10^-12 ) * * The function get_lclk_frequency_10khz returns LCLK in units of * 10khz. Calling the result of this function lclk gives us the * following * * PCD = (lclk * 10^4 ) * ( pixclock * 10^-12 ) * -------------------------------------- - 1 * 2 * * Factoring the 10^4 and 10^-12 out gives 10^-8 == 1 / 100000000 as used below. */ static inline unsigned int get_pcd(struct pxafb_info *fbi, unsigned int pixclock) { unsigned long long pcd; /* FIXME: Need to take into account Double Pixel Clock mode * (DPC) bit? or perhaps set it based on the various clock * speeds */ pcd = (unsigned long long)(clk_get_rate(fbi->clk) / 10000); pcd *= pixclock; do_div(pcd, 100000000 * 2); /* no need for this, since we should subtract 1 anyway. they cancel */ /* pcd += 1; */ /* make up for integer math truncations */ return (unsigned int)pcd; } /* * Some touchscreens need hsync information from the video driver to * function correctly. We export it here. Note that 'hsync_time' and * the value returned from pxafb_get_hsync_time() is the *reciprocal* * of the hsync period in seconds. */ static inline void set_hsync_time(struct pxafb_info *fbi, unsigned int pcd) { unsigned long htime; if ((pcd == 0) || (fbi->fb.var.hsync_len == 0)) { fbi->hsync_time = 0; return; } htime = clk_get_rate(fbi->clk) / (pcd * fbi->fb.var.hsync_len); fbi->hsync_time = htime; } unsigned long pxafb_get_hsync_time(struct device *dev) { struct pxafb_info *fbi = dev_get_drvdata(dev); /* If display is blanked/suspended, hsync isn't active */ if (!fbi || (fbi->state != C_ENABLE)) return 0; return fbi->hsync_time; } EXPORT_SYMBOL(pxafb_get_hsync_time); static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal, unsigned long start, size_t size) { struct pxafb_dma_descriptor *dma_desc, *pal_desc; unsigned int dma_desc_off, pal_desc_off; if (dma < 0 || dma >= DMA_MAX * 2) return -EINVAL; dma_desc = &fbi->dma_buff->dma_desc[dma]; dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[dma]); dma_desc->fsadr = start; dma_desc->fidr = 0; dma_desc->ldcmd = size; if (pal < 0 || pal >= PAL_MAX * 2) { dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off; } else { pal_desc = &fbi->dma_buff->pal_desc[pal]; pal_desc_off = offsetof(struct pxafb_dma_buff, pal_desc[pal]); pal_desc->fsadr = fbi->dma_buff_phys + pal * PALETTE_SIZE; pal_desc->fidr = 0; if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0) pal_desc->ldcmd = fbi->palette_size * sizeof(u16); else pal_desc->ldcmd = fbi->palette_size * sizeof(u32); pal_desc->ldcmd |= LDCMD_PAL; /* flip back and forth between palette and frame buffer */ pal_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; dma_desc->fdadr = fbi->dma_buff_phys + pal_desc_off; fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off; } return 0; } static void setup_base_frame(struct 
pxafb_info *fbi, struct fb_var_screeninfo *var, int branch) { struct fb_fix_screeninfo *fix = &fbi->fb.fix; int nbytes, dma, pal, bpp = var->bits_per_pixel; unsigned long offset; dma = DMA_BASE + (branch ? DMA_MAX : 0); pal = (bpp >= 16) ? PAL_NONE : PAL_BASE + (branch ? PAL_MAX : 0); nbytes = fix->line_length * var->yres; offset = fix->line_length * var->yoffset + fbi->video_mem_phys; if (fbi->lccr0 & LCCR0_SDS) { nbytes = nbytes / 2; setup_frame_dma(fbi, dma + 1, PAL_NONE, offset + nbytes, nbytes); } setup_frame_dma(fbi, dma, pal, offset, nbytes); } #ifdef CONFIG_FB_PXA_SMARTPANEL static int setup_smart_dma(struct pxafb_info *fbi) { struct pxafb_dma_descriptor *dma_desc; unsigned long dma_desc_off, cmd_buff_off; dma_desc = &fbi->dma_buff->dma_desc[DMA_CMD]; dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[DMA_CMD]); cmd_buff_off = offsetof(struct pxafb_dma_buff, cmd_buff); dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; dma_desc->fsadr = fbi->dma_buff_phys + cmd_buff_off; dma_desc->fidr = 0; dma_desc->ldcmd = fbi->n_smart_cmds * sizeof(uint16_t); fbi->fdadr[DMA_CMD] = dma_desc->fdadr; return 0; } int pxafb_smart_flush(struct fb_info *info) { struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); uint32_t prsr; int ret = 0; /* disable controller until all registers are set up */ lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); /* 1. make it an even number of commands to align on 32-bit boundary * 2. add the interrupt command to the end of the chain so we can * keep track of the end of the transfer */ while (fbi->n_smart_cmds & 1) fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_NOOP; fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_INTERRUPT; fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_WAIT_FOR_VSYNC; setup_smart_dma(fbi); /* continue to execute next command */ prsr = lcd_readl(fbi, PRSR) | PRSR_ST_OK | PRSR_CON_NT; lcd_writel(fbi, PRSR, prsr); /* stop the processor in case it executed "wait for sync" cmd */ lcd_writel(fbi, CMDCR, 0x0001); /* don't send interrupts for fifo underruns on channel 6 */ lcd_writel(fbi, LCCR5, LCCR5_IUM(6)); lcd_writel(fbi, LCCR1, fbi->reg_lccr1); lcd_writel(fbi, LCCR2, fbi->reg_lccr2); lcd_writel(fbi, LCCR3, fbi->reg_lccr3); lcd_writel(fbi, LCCR4, fbi->reg_lccr4); lcd_writel(fbi, FDADR0, fbi->fdadr[0]); lcd_writel(fbi, FDADR6, fbi->fdadr[6]); /* begin sending */ lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); if (wait_for_completion_timeout(&fbi->command_done, HZ/2) == 0) { pr_warn("%s: timeout waiting for command done\n", __func__); ret = -ETIMEDOUT; } /* quick disable */ prsr = lcd_readl(fbi, PRSR) & ~(PRSR_ST_OK | PRSR_CON_NT); lcd_writel(fbi, PRSR, prsr); lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); lcd_writel(fbi, FDADR6, 0); fbi->n_smart_cmds = 0; return ret; } int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds) { int i; struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); for (i = 0; i < n_cmds; i++, cmds++) { /* if it is a software delay, flush and delay */ if ((*cmds & 0xff00) == SMART_CMD_DELAY) { pxafb_smart_flush(info); mdelay(*cmds & 0xff); continue; } /* leave 2 commands for INTERRUPT and WAIT_FOR_SYNC */ if (fbi->n_smart_cmds == CMD_BUFF_SIZE - 8) pxafb_smart_flush(info); fbi->smart_cmds[fbi->n_smart_cmds++] = *cmds; } return 0; } static unsigned int __smart_timing(unsigned time_ns, unsigned long lcd_clk) { unsigned int t = (time_ns * (lcd_clk / 1000000) / 1000); return (t == 0) ? 
1 : t; } static void setup_smart_timing(struct pxafb_info *fbi, struct fb_var_screeninfo *var) { struct pxafb_mach_info *inf = fbi->inf; struct pxafb_mode_info *mode = &inf->modes[0]; unsigned long lclk = clk_get_rate(fbi->clk); unsigned t1, t2, t3, t4; t1 = max(mode->a0csrd_set_hld, mode->a0cswr_set_hld); t2 = max(mode->rd_pulse_width, mode->wr_pulse_width); t3 = mode->op_hold_time; t4 = mode->cmd_inh_time; fbi->reg_lccr1 = LCCR1_DisWdth(var->xres) | LCCR1_BegLnDel(__smart_timing(t1, lclk)) | LCCR1_EndLnDel(__smart_timing(t2, lclk)) | LCCR1_HorSnchWdth(__smart_timing(t3, lclk)); fbi->reg_lccr2 = LCCR2_DisHght(var->yres); fbi->reg_lccr3 = fbi->lccr3 | LCCR3_PixClkDiv(__smart_timing(t4, lclk)); fbi->reg_lccr3 |= (var->sync & FB_SYNC_HOR_HIGH_ACT) ? LCCR3_HSP : 0; fbi->reg_lccr3 |= (var->sync & FB_SYNC_VERT_HIGH_ACT) ? LCCR3_VSP : 0; /* FIXME: make this configurable */ fbi->reg_cmdcr = 1; } static int pxafb_smart_thread(void *arg) { struct pxafb_info *fbi = arg; struct pxafb_mach_info *inf = fbi->inf; if (!inf->smart_update) { pr_err("%s: not properly initialized, thread terminated\n", __func__); return -EINVAL; } pr_debug("%s(): task starting\n", __func__); set_freezable(); while (!kthread_should_stop()) { if (try_to_freeze()) continue; mutex_lock(&fbi->ctrlr_lock); if (fbi->state == C_ENABLE) { inf->smart_update(&fbi->fb); complete(&fbi->refresh_done); } mutex_unlock(&fbi->ctrlr_lock); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(30)); } pr_debug("%s(): task ending\n", __func__); return 0; } static int pxafb_smart_init(struct pxafb_info *fbi) { if (!(fbi->lccr0 & LCCR0_LCDT)) return 0; fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff; fbi->n_smart_cmds = 0; init_completion(&fbi->command_done); init_completion(&fbi->refresh_done); fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi, "lcd_refresh"); if (IS_ERR(fbi->smart_thread)) { pr_err("%s: unable to create kernel thread\n", __func__); return PTR_ERR(fbi->smart_thread); } return 0; } #else static inline int pxafb_smart_init(struct pxafb_info *fbi) { return 0; } #endif /* CONFIG_FB_PXA_SMARTPANEL */ static void setup_parallel_timing(struct pxafb_info *fbi, struct fb_var_screeninfo *var) { unsigned int lines_per_panel, pcd = get_pcd(fbi, var->pixclock); fbi->reg_lccr1 = LCCR1_DisWdth(var->xres) + LCCR1_HorSnchWdth(var->hsync_len) + LCCR1_BegLnDel(var->left_margin) + LCCR1_EndLnDel(var->right_margin); /* * If we have a dual scan LCD, we need to halve * the YRES parameter. */ lines_per_panel = var->yres; if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) lines_per_panel /= 2; fbi->reg_lccr2 = LCCR2_DisHght(lines_per_panel) + LCCR2_VrtSnchWdth(var->vsync_len) + LCCR2_BegFrmDel(var->upper_margin) + LCCR2_EndFrmDel(var->lower_margin); fbi->reg_lccr3 = fbi->lccr3 | (var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) | (var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL); if (pcd) { fbi->reg_lccr3 |= LCCR3_PixClkDiv(pcd); set_hsync_time(fbi, pcd); } } /* * pxafb_activate_var(): * Configures LCD Controller based on entries in var parameter. * Settings are only written to the controller if changes were made. 
*/ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *fbi) { u_long flags; /* Update shadow copy atomically */ local_irq_save(flags); #ifdef CONFIG_FB_PXA_SMARTPANEL if (fbi->lccr0 & LCCR0_LCDT) setup_smart_timing(fbi, var); else #endif setup_parallel_timing(fbi, var); setup_base_frame(fbi, var, 0); fbi->reg_lccr0 = fbi->lccr0 | (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | LCCR0_QDM | LCCR0_BM | LCCR0_OUM); fbi->reg_lccr3 |= pxafb_var_to_lccr3(var); fbi->reg_lccr4 = lcd_readl(fbi, LCCR4) & ~LCCR4_PAL_FOR_MASK; fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK); local_irq_restore(flags); /* * Only update the registers if the controller is enabled * and something has changed. */ if ((lcd_readl(fbi, LCCR0) != fbi->reg_lccr0) || (lcd_readl(fbi, LCCR1) != fbi->reg_lccr1) || (lcd_readl(fbi, LCCR2) != fbi->reg_lccr2) || (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) || (lcd_readl(fbi, LCCR4) != fbi->reg_lccr4) || (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) || ((fbi->lccr0 & LCCR0_SDS) && (lcd_readl(fbi, FDADR1) != fbi->fdadr[1]))) pxafb_schedule_work(fbi, C_REENABLE); return 0; } /* * NOTE! The following functions are purely helpers for set_ctrlr_state. * Do not call them directly; set_ctrlr_state does the correct serialisation * to ensure that things happen in the right way 100% of time time. * -- rmk */ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on) { pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff"); if (fbi->backlight_power) fbi->backlight_power(on); } static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) { pr_debug("pxafb: LCD power o%s\n", on ? "n" : "ff"); if (fbi->lcd_power) fbi->lcd_power(on, &fbi->fb.var); if (fbi->lcd_supply && fbi->lcd_supply_enabled != on) { int ret; if (on) ret = regulator_enable(fbi->lcd_supply); else ret = regulator_disable(fbi->lcd_supply); if (ret < 0) pr_warn("Unable to %s LCD supply regulator: %d\n", on ? 
"enable" : "disable", ret); else fbi->lcd_supply_enabled = on; } } static void pxafb_enable_controller(struct pxafb_info *fbi) { pr_debug("pxafb: Enabling LCD controller\n"); pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr[0]); pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr[1]); pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); pr_debug("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3); /* enable LCD controller clock */ if (clk_prepare_enable(fbi->clk)) { pr_err("%s: Failed to prepare clock\n", __func__); return; } if (fbi->lccr0 & LCCR0_LCDT) return; /* Sequence from 11.7.10 */ lcd_writel(fbi, LCCR4, fbi->reg_lccr4); lcd_writel(fbi, LCCR3, fbi->reg_lccr3); lcd_writel(fbi, LCCR2, fbi->reg_lccr2); lcd_writel(fbi, LCCR1, fbi->reg_lccr1); lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); lcd_writel(fbi, FDADR0, fbi->fdadr[0]); if (fbi->lccr0 & LCCR0_SDS) lcd_writel(fbi, FDADR1, fbi->fdadr[1]); lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); } static void pxafb_disable_controller(struct pxafb_info *fbi) { uint32_t lccr0; #ifdef CONFIG_FB_PXA_SMARTPANEL if (fbi->lccr0 & LCCR0_LCDT) { wait_for_completion_timeout(&fbi->refresh_done, msecs_to_jiffies(200)); return; } #endif /* Clear LCD Status Register */ lcd_writel(fbi, LCSR, 0xffffffff); lccr0 = lcd_readl(fbi, LCCR0) & ~LCCR0_LDM; lcd_writel(fbi, LCCR0, lccr0); lcd_writel(fbi, LCCR0, lccr0 | LCCR0_DIS); wait_for_completion_timeout(&fbi->disable_done, msecs_to_jiffies(200)); /* disable LCD controller clock */ clk_disable_unprepare(fbi->clk); } /* * pxafb_handle_irq: Handle 'LCD DONE' interrupts. */ static irqreturn_t pxafb_handle_irq(int irq, void *dev_id) { struct pxafb_info *fbi = dev_id; unsigned int lccr0, lcsr; lcsr = lcd_readl(fbi, LCSR); if (lcsr & LCSR_LDD) { lccr0 = lcd_readl(fbi, LCCR0); lcd_writel(fbi, LCCR0, lccr0 | LCCR0_LDM); complete(&fbi->disable_done); } #ifdef CONFIG_FB_PXA_SMARTPANEL if (lcsr & LCSR_CMD_INT) complete(&fbi->command_done); #endif lcd_writel(fbi, LCSR, lcsr); #ifdef CONFIG_FB_PXA_OVERLAY { unsigned int lcsr1 = lcd_readl(fbi, LCSR1); if (lcsr1 & LCSR1_BS(1)) complete(&fbi->overlay[0].branch_done); if (lcsr1 & LCSR1_BS(2)) complete(&fbi->overlay[1].branch_done); lcd_writel(fbi, LCSR1, lcsr1); } #endif return IRQ_HANDLED; } /* * This function must be called from task context only, since it will * sleep when disabling the LCD controller, or if we get two contending * processes trying to alter state. */ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state) { u_int old_state; mutex_lock(&fbi->ctrlr_lock); old_state = fbi->state; /* * Hack around fbcon initialisation. */ if (old_state == C_STARTUP && state == C_REENABLE) state = C_ENABLE; switch (state) { case C_DISABLE_CLKCHANGE: /* * Disable controller for clock change. If the * controller is already disabled, then do nothing. */ if (old_state != C_DISABLE && old_state != C_DISABLE_PM) { fbi->state = state; /* TODO __pxafb_lcd_power(fbi, 0); */ pxafb_disable_controller(fbi); } break; case C_DISABLE_PM: case C_DISABLE: /* * Disable controller */ if (old_state != C_DISABLE) { fbi->state = state; __pxafb_backlight_power(fbi, 0); __pxafb_lcd_power(fbi, 0); if (old_state != C_DISABLE_CLKCHANGE) pxafb_disable_controller(fbi); } break; case C_ENABLE_CLKCHANGE: /* * Enable the controller after clock change. Only * do this if we were disabled for the clock change. 
*/ if (old_state == C_DISABLE_CLKCHANGE) { fbi->state = C_ENABLE; pxafb_enable_controller(fbi); /* TODO __pxafb_lcd_power(fbi, 1); */ } break; case C_REENABLE: /* * Re-enable the controller only if it was already * enabled. This is so we reprogram the control * registers. */ if (old_state == C_ENABLE) { __pxafb_lcd_power(fbi, 0); pxafb_disable_controller(fbi); pxafb_enable_controller(fbi); __pxafb_lcd_power(fbi, 1); } break; case C_ENABLE_PM: /* * Re-enable the controller after PM. This is not * perfect - think about the case where we were doing * a clock change, and we suspended half-way through. */ if (old_state != C_DISABLE_PM) break; fallthrough; case C_ENABLE: /* * Power up the LCD screen, enable controller, and * turn on the backlight. */ if (old_state != C_ENABLE) { fbi->state = C_ENABLE; pxafb_enable_controller(fbi); __pxafb_lcd_power(fbi, 1); __pxafb_backlight_power(fbi, 1); } break; } mutex_unlock(&fbi->ctrlr_lock); } /* * Our LCD controller task (which is called when we blank or unblank) * via keventd. */ static void pxafb_task(struct work_struct *work) { struct pxafb_info *fbi = container_of(work, struct pxafb_info, task); u_int state = xchg(&fbi->task_state, -1); set_ctrlr_state(fbi, state); } #ifdef CONFIG_CPU_FREQ /* * CPU clock speed change handler. We need to adjust the LCD timing * parameters when the CPU clock is adjusted by the power management * subsystem. * * TODO: Determine why f->new != 10*get_lclk_frequency_10khz() */ static int pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct pxafb_info *fbi = TO_INF(nb, freq_transition); /* TODO struct cpufreq_freqs *f = data; */ u_int pcd; switch (val) { case CPUFREQ_PRECHANGE: #ifdef CONFIG_FB_PXA_OVERLAY if (!(fbi->overlay[0].usage || fbi->overlay[1].usage)) #endif set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); break; case CPUFREQ_POSTCHANGE: pcd = get_pcd(fbi, fbi->fb.var.pixclock); set_hsync_time(fbi, pcd); fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd); set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE); break; } return 0; } #endif #ifdef CONFIG_PM /* * Power management hooks. Note that we won't be called from IRQ context, * unlike the blank functions above, so we may sleep. */ static int pxafb_suspend(struct device *dev) { struct pxafb_info *fbi = dev_get_drvdata(dev); set_ctrlr_state(fbi, C_DISABLE_PM); return 0; } static int pxafb_resume(struct device *dev) { struct pxafb_info *fbi = dev_get_drvdata(dev); set_ctrlr_state(fbi, C_ENABLE_PM); return 0; } static const struct dev_pm_ops pxafb_pm_ops = { .suspend = pxafb_suspend, .resume = pxafb_resume, }; #endif static int pxafb_init_video_memory(struct pxafb_info *fbi) { int size = PAGE_ALIGN(fbi->video_mem_size); fbi->video_mem = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); if (fbi->video_mem == NULL) return -ENOMEM; fbi->video_mem_phys = virt_to_phys(fbi->video_mem); fbi->video_mem_size = size; fbi->fb.fix.smem_start = fbi->video_mem_phys; fbi->fb.fix.smem_len = fbi->video_mem_size; fbi->fb.screen_base = fbi->video_mem; return fbi->video_mem ? 
0 : -ENOMEM; } static void pxafb_decode_mach_info(struct pxafb_info *fbi, struct pxafb_mach_info *inf) { unsigned int lcd_conn = inf->lcd_conn; struct pxafb_mode_info *m; int i; fbi->cmap_inverse = inf->cmap_inverse; fbi->cmap_static = inf->cmap_static; fbi->lccr4 = inf->lccr4; switch (lcd_conn & LCD_TYPE_MASK) { case LCD_TYPE_MONO_STN: fbi->lccr0 = LCCR0_CMS; break; case LCD_TYPE_MONO_DSTN: fbi->lccr0 = LCCR0_CMS | LCCR0_SDS; break; case LCD_TYPE_COLOR_STN: fbi->lccr0 = 0; break; case LCD_TYPE_COLOR_DSTN: fbi->lccr0 = LCCR0_SDS; break; case LCD_TYPE_COLOR_TFT: fbi->lccr0 = LCCR0_PAS; break; case LCD_TYPE_SMART_PANEL: fbi->lccr0 = LCCR0_LCDT | LCCR0_PAS; break; default: /* fall back to backward compatibility way */ fbi->lccr0 = inf->lccr0; fbi->lccr3 = inf->lccr3; goto decode_mode; } if (lcd_conn == LCD_MONO_STN_8BPP) fbi->lccr0 |= LCCR0_DPD; fbi->lccr0 |= (lcd_conn & LCD_ALTERNATE_MAPPING) ? LCCR0_LDDALT : 0; fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff); fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0; fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0; decode_mode: pxafb_setmode(&fbi->fb.var, &inf->modes[0]); /* decide video memory size as follows: * 1. default to mode of maximum resolution * 2. allow platform to override * 3. allow module parameter to override */ for (i = 0, m = &inf->modes[0]; i < inf->num_modes; i++, m++) fbi->video_mem_size = max_t(size_t, fbi->video_mem_size, m->xres * m->yres * m->bpp / 8); if (inf->video_mem_size > fbi->video_mem_size) fbi->video_mem_size = inf->video_mem_size; if (video_mem_size > fbi->video_mem_size) fbi->video_mem_size = video_mem_size; } static struct pxafb_info *pxafb_init_fbinfo(struct device *dev, struct pxafb_mach_info *inf) { struct pxafb_info *fbi; void *addr; /* Alloc the pxafb_info and pseudo_palette in one step */ fbi = devm_kzalloc(dev, sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL); if (!fbi) return ERR_PTR(-ENOMEM); fbi->dev = dev; fbi->inf = inf; fbi->clk = devm_clk_get(dev, NULL); if (IS_ERR(fbi->clk)) return ERR_CAST(fbi->clk); strcpy(fbi->fb.fix.id, PXA_NAME); fbi->fb.fix.type = FB_TYPE_PACKED_PIXELS; fbi->fb.fix.type_aux = 0; fbi->fb.fix.xpanstep = 0; fbi->fb.fix.ypanstep = 1; fbi->fb.fix.ywrapstep = 0; fbi->fb.fix.accel = FB_ACCEL_NONE; fbi->fb.var.nonstd = 0; fbi->fb.var.activate = FB_ACTIVATE_NOW; fbi->fb.var.height = -1; fbi->fb.var.width = -1; fbi->fb.var.accel_flags = FB_ACCELF_TEXT; fbi->fb.var.vmode = FB_VMODE_NONINTERLACED; fbi->fb.fbops = &pxafb_ops; fbi->fb.node = -1; addr = fbi; addr = addr + sizeof(struct pxafb_info); fbi->fb.pseudo_palette = addr; fbi->state = C_STARTUP; fbi->task_state = (u_char)-1; pxafb_decode_mach_info(fbi, inf); #ifdef CONFIG_FB_PXA_OVERLAY /* place overlay(s) on top of base */ if (pxafb_overlay_supported()) fbi->lccr0 |= LCCR0_OUC; #endif init_waitqueue_head(&fbi->ctrlr_wait); INIT_WORK(&fbi->task, pxafb_task); mutex_init(&fbi->ctrlr_lock); init_completion(&fbi->disable_done); return fbi; } #ifdef CONFIG_FB_PXA_PARAMETERS static int parse_opt_mode(struct device *dev, const char *this_opt, struct pxafb_mach_info *inf) { const char *name = this_opt+5; unsigned int namelen = strlen(name); int res_specified = 0, bpp_specified = 0; unsigned int xres = 0, yres = 0, bpp = 0; int yres_specified = 0; int i; for (i = namelen-1; i >= 0; i--) { switch (name[i]) { case '-': namelen = i; if (!bpp_specified && !yres_specified) { bpp = simple_strtoul(&name[i+1], NULL, 0); bpp_specified = 1; } else goto done; break; case 'x': if (!yres_specified) { yres = 
simple_strtoul(&name[i+1], NULL, 0); yres_specified = 1; } else goto done; break; case '0' ... '9': break; default: goto done; } } if (i < 0 && yres_specified) { xres = simple_strtoul(name, NULL, 0); res_specified = 1; } done: if (res_specified) { dev_info(dev, "overriding resolution: %dx%d\n", xres, yres); inf->modes[0].xres = xres; inf->modes[0].yres = yres; } if (bpp_specified) switch (bpp) { case 1: case 2: case 4: case 8: case 16: inf->modes[0].bpp = bpp; dev_info(dev, "overriding bit depth: %d\n", bpp); break; default: dev_err(dev, "Depth %d is not valid\n", bpp); return -EINVAL; } return 0; } static int parse_opt(struct device *dev, char *this_opt, struct pxafb_mach_info *inf) { struct pxafb_mode_info *mode = &inf->modes[0]; char s[64]; s[0] = '\0'; if (!strncmp(this_opt, "vmem:", 5)) { video_mem_size = memparse(this_opt + 5, NULL); } else if (!strncmp(this_opt, "mode:", 5)) { return parse_opt_mode(dev, this_opt, inf); } else if (!strncmp(this_opt, "pixclock:", 9)) { mode->pixclock = simple_strtoul(this_opt+9, NULL, 0); sprintf(s, "pixclock: %ld\n", mode->pixclock); } else if (!strncmp(this_opt, "left:", 5)) { mode->left_margin = simple_strtoul(this_opt+5, NULL, 0); sprintf(s, "left: %u\n", mode->left_margin); } else if (!strncmp(this_opt, "right:", 6)) { mode->right_margin = simple_strtoul(this_opt+6, NULL, 0); sprintf(s, "right: %u\n", mode->right_margin); } else if (!strncmp(this_opt, "upper:", 6)) { mode->upper_margin = simple_strtoul(this_opt+6, NULL, 0); sprintf(s, "upper: %u\n", mode->upper_margin); } else if (!strncmp(this_opt, "lower:", 6)) { mode->lower_margin = simple_strtoul(this_opt+6, NULL, 0); sprintf(s, "lower: %u\n", mode->lower_margin); } else if (!strncmp(this_opt, "hsynclen:", 9)) { mode->hsync_len = simple_strtoul(this_opt+9, NULL, 0); sprintf(s, "hsynclen: %u\n", mode->hsync_len); } else if (!strncmp(this_opt, "vsynclen:", 9)) { mode->vsync_len = simple_strtoul(this_opt+9, NULL, 0); sprintf(s, "vsynclen: %u\n", mode->vsync_len); } else if (!strncmp(this_opt, "hsync:", 6)) { if (simple_strtoul(this_opt+6, NULL, 0) == 0) { sprintf(s, "hsync: Active Low\n"); mode->sync &= ~FB_SYNC_HOR_HIGH_ACT; } else { sprintf(s, "hsync: Active High\n"); mode->sync |= FB_SYNC_HOR_HIGH_ACT; } } else if (!strncmp(this_opt, "vsync:", 6)) { if (simple_strtoul(this_opt+6, NULL, 0) == 0) { sprintf(s, "vsync: Active Low\n"); mode->sync &= ~FB_SYNC_VERT_HIGH_ACT; } else { sprintf(s, "vsync: Active High\n"); mode->sync |= FB_SYNC_VERT_HIGH_ACT; } } else if (!strncmp(this_opt, "dpc:", 4)) { if (simple_strtoul(this_opt+4, NULL, 0) == 0) { sprintf(s, "double pixel clock: false\n"); inf->lccr3 &= ~LCCR3_DPC; } else { sprintf(s, "double pixel clock: true\n"); inf->lccr3 |= LCCR3_DPC; } } else if (!strncmp(this_opt, "outputen:", 9)) { if (simple_strtoul(this_opt+9, NULL, 0) == 0) { sprintf(s, "output enable: active low\n"); inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnL; } else { sprintf(s, "output enable: active high\n"); inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnH; } } else if (!strncmp(this_opt, "pixclockpol:", 12)) { if (simple_strtoul(this_opt+12, NULL, 0) == 0) { sprintf(s, "pixel clock polarity: falling edge\n"); inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixFlEdg; } else { sprintf(s, "pixel clock polarity: rising edge\n"); inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixRsEdg; } } else if (!strncmp(this_opt, "color", 5)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Color; } else if (!strncmp(this_opt, "mono", 4)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | 
LCCR0_Mono; } else if (!strncmp(this_opt, "active", 6)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Act; } else if (!strncmp(this_opt, "passive", 7)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Pas; } else if (!strncmp(this_opt, "single", 6)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Sngl; } else if (!strncmp(this_opt, "dual", 4)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Dual; } else if (!strncmp(this_opt, "4pix", 4)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_4PixMono; } else if (!strncmp(this_opt, "8pix", 4)) { inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_8PixMono; } else { dev_err(dev, "unknown option: %s\n", this_opt); return -EINVAL; } if (s[0] != '\0') dev_info(dev, "override %s", s); return 0; } static int pxafb_parse_options(struct device *dev, char *options, struct pxafb_mach_info *inf) { char *this_opt; int ret; if (!options || !*options) return 0; dev_dbg(dev, "options are \"%s\"\n", options ? options : "null"); /* could be made table driven or similar?... */ while ((this_opt = strsep(&options, ",")) != NULL) { ret = parse_opt(dev, this_opt, inf); if (ret) return ret; } return 0; } static char g_options[256] = ""; #ifndef MODULE static int __init pxafb_setup_options(void) { char *options = NULL; if (fb_get_options("pxafb", &options)) return -ENODEV; if (options) strscpy(g_options, options, sizeof(g_options)); return 0; } #else #define pxafb_setup_options() (0) module_param_string(options, g_options, sizeof(g_options), 0); MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.rst)"); #endif #else #define pxafb_parse_options(...) (0) #define pxafb_setup_options() (0) #endif #ifdef DEBUG_VAR /* Check for various illegal bit-combinations. Currently only * a warning is given. */ static void pxafb_check_options(struct device *dev, struct pxafb_mach_info *inf) { if (inf->lcd_conn) return; if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK) dev_warn(dev, "machine LCCR0 setting contains " "illegal bits: %08x\n", inf->lccr0 & LCCR0_INVALID_CONFIG_MASK); if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK) dev_warn(dev, "machine LCCR3 setting contains " "illegal bits: %08x\n", inf->lccr3 & LCCR3_INVALID_CONFIG_MASK); if (inf->lccr0 & LCCR0_DPD && ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas || (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl || (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono)) dev_warn(dev, "Double Pixel Data (DPD) mode is " "only valid in passive mono" " single panel mode\n"); if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act && (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual) dev_warn(dev, "Dual panel only valid in passive mode\n"); if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas && (inf->modes->upper_margin || inf->modes->lower_margin)) dev_warn(dev, "Upper and lower margins must be 0 in " "passive mode\n"); } #else #define pxafb_check_options(...) 
do {} while (0) #endif #if defined(CONFIG_OF) static const char * const lcd_types[] = { "unknown", "mono-stn", "mono-dstn", "color-stn", "color-dstn", "color-tft", "smart-panel", NULL }; static int of_get_pxafb_display(struct device *dev, struct device_node *disp, struct pxafb_mach_info *info, u32 bus_width) { struct display_timings *timings; struct videomode vm; int i, ret = -EINVAL; const char *s; ret = of_property_read_string(disp, "lcd-type", &s); if (ret) s = "color-tft"; i = match_string(lcd_types, -1, s); if (i < 0) { dev_err(dev, "lcd-type %s is unknown\n", s); return i; } info->lcd_conn |= LCD_CONN_TYPE(i); info->lcd_conn |= LCD_CONN_WIDTH(bus_width); timings = of_get_display_timings(disp); if (!timings) return -EINVAL; ret = -ENOMEM; info->modes = devm_kcalloc(dev, timings->num_timings, sizeof(info->modes[0]), GFP_KERNEL); if (!info->modes) goto out; info->num_modes = timings->num_timings; for (i = 0; i < timings->num_timings; i++) { ret = videomode_from_timings(timings, &vm, i); if (ret) { dev_err(dev, "videomode_from_timings %d failed: %d\n", i, ret); goto out; } if (vm.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE) info->lcd_conn |= LCD_PCLK_EDGE_RISE; if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) info->lcd_conn |= LCD_PCLK_EDGE_FALL; if (vm.flags & DISPLAY_FLAGS_DE_HIGH) info->lcd_conn |= LCD_BIAS_ACTIVE_HIGH; if (vm.flags & DISPLAY_FLAGS_DE_LOW) info->lcd_conn |= LCD_BIAS_ACTIVE_LOW; if (vm.flags & DISPLAY_FLAGS_HSYNC_HIGH) info->modes[i].sync |= FB_SYNC_HOR_HIGH_ACT; if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH) info->modes[i].sync |= FB_SYNC_VERT_HIGH_ACT; info->modes[i].pixclock = 1000000000UL / (vm.pixelclock / 1000); info->modes[i].xres = vm.hactive; info->modes[i].yres = vm.vactive; info->modes[i].hsync_len = vm.hsync_len; info->modes[i].left_margin = vm.hback_porch; info->modes[i].right_margin = vm.hfront_porch; info->modes[i].vsync_len = vm.vsync_len; info->modes[i].upper_margin = vm.vback_porch; info->modes[i].lower_margin = vm.vfront_porch; } ret = 0; out: display_timings_release(timings); return ret; } static int of_get_pxafb_mode_info(struct device *dev, struct pxafb_mach_info *info) { struct device_node *display, *np; u32 bus_width; int ret, i; np = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); if (!np) { dev_err(dev, "could not find endpoint\n"); return -EINVAL; } ret = of_property_read_u32(np, "bus-width", &bus_width); if (ret) { dev_err(dev, "no bus-width specified: %d\n", ret); of_node_put(np); return ret; } display = of_graph_get_remote_port_parent(np); of_node_put(np); if (!display) { dev_err(dev, "no display defined\n"); return -EINVAL; } ret = of_get_pxafb_display(dev, display, info, bus_width); of_node_put(display); if (ret) return ret; for (i = 0; i < info->num_modes; i++) info->modes[i].bpp = bus_width; return 0; } static struct pxafb_mach_info *of_pxafb_of_mach_info(struct device *dev) { int ret; struct pxafb_mach_info *info; if (!dev->of_node) return NULL; info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); ret = of_get_pxafb_mode_info(dev, info); if (ret) return ERR_PTR(ret); /* * On purpose, neither lccrX registers nor video memory size can be * specified through device-tree, they are considered more a debug hack * available through command line. 
*/ return info; } #else static struct pxafb_mach_info *of_pxafb_of_mach_info(struct device *dev) { return NULL; } #endif static int pxafb_probe(struct platform_device *dev) { struct pxafb_info *fbi; struct pxafb_mach_info *inf, *pdata; int i, irq, ret; dev_dbg(&dev->dev, "pxafb_probe\n"); ret = -ENOMEM; pdata = dev_get_platdata(&dev->dev); inf = devm_kmalloc(&dev->dev, sizeof(*inf), GFP_KERNEL); if (!inf) goto failed; if (pdata) { *inf = *pdata; inf->modes = devm_kmalloc_array(&dev->dev, pdata->num_modes, sizeof(inf->modes[0]), GFP_KERNEL); if (!inf->modes) goto failed; for (i = 0; i < inf->num_modes; i++) inf->modes[i] = pdata->modes[i]; } else { inf = of_pxafb_of_mach_info(&dev->dev); } if (IS_ERR_OR_NULL(inf)) goto failed; ret = pxafb_parse_options(&dev->dev, g_options, inf); if (ret < 0) goto failed; pxafb_check_options(&dev->dev, inf); dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n", inf->modes->xres, inf->modes->yres, inf->modes->bpp); if (inf->modes->xres == 0 || inf->modes->yres == 0 || inf->modes->bpp == 0) { dev_err(&dev->dev, "Invalid resolution or bit depth\n"); ret = -EINVAL; goto failed; } fbi = pxafb_init_fbinfo(&dev->dev, inf); if (IS_ERR(fbi)) { dev_err(&dev->dev, "Failed to initialize framebuffer device\n"); ret = PTR_ERR(fbi); goto failed; } if (cpu_is_pxa3xx() && inf->acceleration_enabled) fbi->fb.fix.accel = FB_ACCEL_PXA3XX; fbi->backlight_power = inf->pxafb_backlight_power; fbi->lcd_power = inf->pxafb_lcd_power; fbi->lcd_supply = devm_regulator_get_optional(&dev->dev, "lcd"); if (IS_ERR(fbi->lcd_supply)) { if (PTR_ERR(fbi->lcd_supply) == -EPROBE_DEFER) return -EPROBE_DEFER; fbi->lcd_supply = NULL; } fbi->mmio_base = devm_platform_ioremap_resource(dev, 0); if (IS_ERR(fbi->mmio_base)) { dev_err(&dev->dev, "failed to get I/O memory\n"); ret = PTR_ERR(fbi->mmio_base); goto failed; } fbi->dma_buff_size = PAGE_ALIGN(sizeof(struct pxafb_dma_buff)); fbi->dma_buff = dma_alloc_coherent(fbi->dev, fbi->dma_buff_size, &fbi->dma_buff_phys, GFP_KERNEL); if (fbi->dma_buff == NULL) { dev_err(&dev->dev, "failed to allocate memory for DMA\n"); ret = -ENOMEM; goto failed; } ret = pxafb_init_video_memory(fbi); if (ret) { dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret); ret = -ENOMEM; goto failed_free_dma; } irq = platform_get_irq(dev, 0); if (irq < 0) { ret = -ENODEV; goto failed_free_mem; } ret = devm_request_irq(&dev->dev, irq, pxafb_handle_irq, 0, "LCD", fbi); if (ret) { dev_err(&dev->dev, "request_irq failed: %d\n", ret); ret = -EBUSY; goto failed_free_mem; } ret = pxafb_smart_init(fbi); if (ret) { dev_err(&dev->dev, "failed to initialize smartpanel\n"); goto failed_free_mem; } /* * This makes sure that our colour bitfield * descriptors are correctly initialised. 
*/ ret = pxafb_check_var(&fbi->fb.var, &fbi->fb); if (ret) { dev_err(&dev->dev, "failed to get suitable mode\n"); goto failed_free_mem; } ret = pxafb_set_par(&fbi->fb); if (ret) { dev_err(&dev->dev, "Failed to set parameters\n"); goto failed_free_mem; } platform_set_drvdata(dev, fbi); ret = register_framebuffer(&fbi->fb); if (ret < 0) { dev_err(&dev->dev, "Failed to register framebuffer device: %d\n", ret); goto failed_free_cmap; } pxafb_overlay_init(fbi); #ifdef CONFIG_CPU_FREQ fbi->freq_transition.notifier_call = pxafb_freq_transition; cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); #endif /* * Ok, now enable the LCD controller */ set_ctrlr_state(fbi, C_ENABLE); return 0; failed_free_cmap: if (fbi->fb.cmap.len) fb_dealloc_cmap(&fbi->fb.cmap); failed_free_mem: free_pages_exact(fbi->video_mem, fbi->video_mem_size); failed_free_dma: dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, fbi->dma_buff_phys); failed: return ret; } static void pxafb_remove(struct platform_device *dev) { struct pxafb_info *fbi = platform_get_drvdata(dev); struct fb_info *info; if (!fbi) return; info = &fbi->fb; pxafb_overlay_exit(fbi); cancel_work_sync(&fbi->task); unregister_framebuffer(info); pxafb_disable_controller(fbi); if (fbi->fb.cmap.len) fb_dealloc_cmap(&fbi->fb.cmap); free_pages_exact(fbi->video_mem, fbi->video_mem_size); dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, fbi->dma_buff_phys); } static const struct of_device_id pxafb_of_dev_id[] = { { .compatible = "marvell,pxa270-lcdc", }, { .compatible = "marvell,pxa300-lcdc", }, { .compatible = "marvell,pxa2xx-lcdc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, pxafb_of_dev_id); static struct platform_driver pxafb_driver = { .probe = pxafb_probe, .remove = pxafb_remove, .driver = { .name = "pxa2xx-fb", .of_match_table = pxafb_of_dev_id, #ifdef CONFIG_PM .pm = &pxafb_pm_ops, #endif }, }; static int __init pxafb_init(void) { if (pxafb_setup_options()) return -EINVAL; return platform_driver_register(&pxafb_driver); } static void __exit pxafb_exit(void) { platform_driver_unregister(&pxafb_driver); } module_init(pxafb_init); module_exit(pxafb_exit); MODULE_DESCRIPTION("loadable framebuffer driver for PXA"); MODULE_LICENSE("GPL");
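/*
 * Worked example for the PCD derivation in the comment above get_pcd()
 * (illustrative only, not part of the driver; the clock figures below are
 * assumptions, not taken from any particular board):
 *
 *   LCLK          = 104 MHz   (example LCD/memory clock)
 *   var->pixclock = 96153 ps  (about a 10.4 MHz dot clock)
 *
 *   pcd = (104000000 / 10000) * 96153 / (2 * 100000000)
 *       = 10400 * 96153 / 200000000
 *       = 999991200 / 200000000
 *       = 4   (after integer truncation)
 *
 * which matches PCD = LCLK / (2 * PixelClock) - 1 = 104 / (2 * 10.4) - 1 = 4,
 * confirming that the "+1 to make up for truncation" and the "-1 from the
 * formula" cancel, as the code comment claims.
 */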
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020-2021 Intel Corporation. */ #include <linux/wwan.h> #include "iosm_ipc_trace.h" /* sub buffer size and number of sub buffer */ #define IOSM_TRC_SUB_BUFF_SIZE 131072 #define IOSM_TRC_N_SUB_BUFF 32 #define IOSM_TRC_FILE_PERM 0600 #define IOSM_TRC_DEBUGFS_TRACE "trace" #define IOSM_TRC_DEBUGFS_TRACE_CTRL "trace_ctrl" /** * ipc_trace_port_rx - Receive trace packet from cp and write to relay buffer * @ipc_imem: Pointer to iosm_imem structure * @skb: Pointer to struct sk_buff */ void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb) { struct iosm_trace *ipc_trace = ipc_imem->trace; if (ipc_trace->ipc_rchan) relay_write(ipc_trace->ipc_rchan, skb->data, skb->len); dev_kfree_skb(skb); } /* Creates relay file in debugfs. */ static struct dentry * ipc_trace_create_buf_file_handler(const char *filename, struct dentry *parent, umode_t mode, struct rchan_buf *buf, int *is_global) { *is_global = 1; return debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); } /* Removes relay file from debugfs. */ static int ipc_trace_remove_buf_file_handler(struct dentry *dentry) { debugfs_remove(dentry); return 0; } static int ipc_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { if (relay_buf_full(buf)) { pr_err_ratelimited("Relay_buf full dropping traces"); return 0; } return 1; } /* Relay interface callbacks */ static struct rchan_callbacks relay_callbacks = { .subbuf_start = ipc_trace_subbuf_start_handler, .create_buf_file = ipc_trace_create_buf_file_handler, .remove_buf_file = ipc_trace_remove_buf_file_handler, }; /* Copy the trace control mode to user buffer */ static ssize_t ipc_trace_ctrl_file_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct iosm_trace *ipc_trace = filp->private_data; char buf[16]; int len; mutex_lock(&ipc_trace->trc_mutex); len = snprintf(buf, sizeof(buf), "%d\n", ipc_trace->mode); mutex_unlock(&ipc_trace->trc_mutex); return simple_read_from_buffer(buffer, count, ppos, buf, len); } /* Open and close the trace channel depending on user input */ static ssize_t ipc_trace_ctrl_file_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { struct iosm_trace *ipc_trace = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(buffer, count, 10, &val); if (ret) return ret; mutex_lock(&ipc_trace->trc_mutex); if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) { ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem, ipc_trace->chl_id, IPC_HP_CDEV_OPEN); if (!ipc_trace->channel) { ret = -EIO; goto unlock; } ipc_trace->mode = TRACE_ENABLE; } else if (val == TRACE_DISABLE && ipc_trace->mode != TRACE_DISABLE) { ipc_trace->mode = TRACE_DISABLE; /* close trace channel */ ipc_imem_sys_port_close(ipc_trace->ipc_imem, ipc_trace->channel); relay_flush(ipc_trace->ipc_rchan); } ret = count; unlock: mutex_unlock(&ipc_trace->trc_mutex); return ret; } static const struct file_operations ipc_trace_fops = { .open = simple_open, .write = ipc_trace_ctrl_file_write, .read = ipc_trace_ctrl_file_read, }; /** * ipc_trace_init - Create trace interface & debugfs entries * @ipc_imem: Pointer to iosm_imem structure * * Returns: Pointer to trace instance on success else NULL */ struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem) { struct ipc_chnl_cfg chnl_cfg = { 0 }; struct iosm_trace *ipc_trace; ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3); 
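	/* Register the configuration fetched above as a control-type channel
	 * with the shared memory (imem) layer, with IRQ moderation disabled.
	 */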
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg, IRQ_MOD_OFF); ipc_trace = kzalloc(sizeof(*ipc_trace), GFP_KERNEL); if (!ipc_trace) return NULL; ipc_trace->mode = TRACE_DISABLE; ipc_trace->dev = ipc_imem->dev; ipc_trace->ipc_imem = ipc_imem; ipc_trace->chl_id = IPC_MEM_CTRL_CHL_ID_3; mutex_init(&ipc_trace->trc_mutex); ipc_trace->ctrl_file = debugfs_create_file(IOSM_TRC_DEBUGFS_TRACE_CTRL, IOSM_TRC_FILE_PERM, ipc_imem->debugfs_dir, ipc_trace, &ipc_trace_fops); ipc_trace->ipc_rchan = relay_open(IOSM_TRC_DEBUGFS_TRACE, ipc_imem->debugfs_dir, IOSM_TRC_SUB_BUFF_SIZE, IOSM_TRC_N_SUB_BUFF, &relay_callbacks, NULL); return ipc_trace; } /** * ipc_trace_deinit - Closing relayfs, removing debugfs entries * @ipc_trace: Pointer to the iosm_trace data struct */ void ipc_trace_deinit(struct iosm_trace *ipc_trace) { if (!ipc_trace) return; debugfs_remove(ipc_trace->ctrl_file); relay_close(ipc_trace->ipc_rchan); mutex_destroy(&ipc_trace->trc_mutex); kfree(ipc_trace); }
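/*
 * Illustrative sketch (not iosm code): the trace port above streams data
 * to user space through relayfs.  The generic skeleton below shows the
 * minimum a relay client needs -- a buf-file create/remove pair backed by
 * debugfs and a subbuf_start hook that drops data instead of blocking when
 * the channel is full.  All demo_* names are hypothetical.
 */
#include <linux/debugfs.h>
#include <linux/relay.h>

static struct dentry *demo_create_buf_file(const char *filename,
					   struct dentry *parent, umode_t mode,
					   struct rchan_buf *buf,
					   int *is_global)
{
	/* one global buffer instead of one buffer per CPU */
	*is_global = 1;
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int demo_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static int demo_subbuf_start(struct rchan_buf *buf, void *subbuf,
			     void *prev_subbuf, size_t prev_padding)
{
	/* returning 0 drops incoming data when all sub-buffers are full */
	return !relay_buf_full(buf);
}

static struct rchan_callbacks demo_relay_cbs = {
	.subbuf_start	 = demo_subbuf_start,
	.create_buf_file = demo_create_buf_file,
	.remove_buf_file = demo_remove_buf_file,
};

/*
 * Typical usage (assumed, not taken from this driver): open once at init,
 * write from the data path, close at teardown:
 *
 *	chan = relay_open("trace", parent_dentry, 4096, 8,
 *			  &demo_relay_cbs, NULL);
 *	relay_write(chan, data, len);
 *	relay_close(chan);
 */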
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) STMicroelectronics SA 2014 * Authors: Fabien Dessenne <[email protected]> for STMicroelectronics. */ struct bdisp_node { /* 0 - General */ u32 nip; u32 cic; u32 ins; u32 ack; /* 1 - Target */ u32 tba; u32 tty; u32 txy; u32 tsz; /* 2 - Color Fill */ u32 s1cf; u32 s2cf; /* 3 - Source 1 */ u32 s1ba; u32 s1ty; u32 s1xy; u32 s1sz_tsz; /* 4 - Source 2 */ u32 s2ba; u32 s2ty; u32 s2xy; u32 s2sz; /* 5 - Source 3 */ u32 s3ba; u32 s3ty; u32 s3xy; u32 s3sz; /* 6 - Clipping */ u32 cwo; u32 cws; /* 7 - CLUT */ u32 cco; u32 cml; /* 8 - Filter & Mask */ u32 fctl; u32 pmk; /* 9 - Chroma Filter */ u32 rsf; u32 rzi; u32 hfp; u32 vfp; /* 10 - Luma Filter */ u32 y_rsf; u32 y_rzi; u32 y_hfp; u32 y_vfp; /* 11 - Flicker */ u32 ff0; u32 ff1; u32 ff2; u32 ff3; /* 12 - Color Key */ u32 key1; u32 key2; /* 14 - Static Address & User */ u32 sar; u32 usr; /* 15 - Input Versatile Matrix */ u32 ivmx0; u32 ivmx1; u32 ivmx2; u32 ivmx3; /* 16 - Output Versatile Matrix */ u32 ovmx0; u32 ovmx1; u32 ovmx2; u32 ovmx3; /* 17 - Pace */ u32 pace; /* 18 - VC1R & DEI */ u32 vc1r; u32 dei; /* 19 - Gradient Fill */ u32 hgf; u32 vgf; }; /* HW registers : static */ #define BLT_CTL 0x0A00 #define BLT_ITS 0x0A04 #define BLT_STA1 0x0A08 #define BLT_AQ1_CTL 0x0A60 #define BLT_AQ1_IP 0x0A64 #define BLT_AQ1_LNA 0x0A68 #define BLT_AQ1_STA 0x0A6C #define BLT_ITM0 0x0AD0 /* HW registers : plugs */ #define BLT_PLUGS1_OP2 0x0B04 #define BLT_PLUGS1_CHZ 0x0B08 #define BLT_PLUGS1_MSZ 0x0B0C #define BLT_PLUGS1_PGZ 0x0B10 #define BLT_PLUGS2_OP2 0x0B24 #define BLT_PLUGS2_CHZ 0x0B28 #define BLT_PLUGS2_MSZ 0x0B2C #define BLT_PLUGS2_PGZ 0x0B30 #define BLT_PLUGS3_OP2 0x0B44 #define BLT_PLUGS3_CHZ 0x0B48 #define BLT_PLUGS3_MSZ 0x0B4C #define BLT_PLUGS3_PGZ 0x0B50 #define BLT_PLUGT_OP2 0x0B84 #define BLT_PLUGT_CHZ 0x0B88 #define BLT_PLUGT_MSZ 0x0B8C #define BLT_PLUGT_PGZ 0x0B90 /* HW registers : node */ #define BLT_NIP 0x0C00 #define BLT_CIC 0x0C04 #define BLT_INS 0x0C08 #define BLT_ACK 0x0C0C #define BLT_TBA 0x0C10 #define BLT_TTY 0x0C14 #define BLT_TXY 0x0C18 #define BLT_TSZ 0x0C1C #define BLT_S1BA 0x0C28 #define BLT_S1TY 0x0C2C #define BLT_S1XY 0x0C30 #define BLT_S2BA 0x0C38 #define BLT_S2TY 0x0C3C #define BLT_S2XY 0x0C40 #define BLT_S2SZ 0x0C44 #define BLT_S3BA 0x0C48 #define BLT_S3TY 0x0C4C #define BLT_S3XY 0x0C50 #define BLT_S3SZ 0x0C54 #define BLT_FCTL 0x0C68 #define BLT_RSF 0x0C70 #define BLT_RZI 0x0C74 #define BLT_HFP 0x0C78 #define BLT_VFP 0x0C7C #define BLT_Y_RSF 0x0C80 #define BLT_Y_RZI 0x0C84 #define BLT_Y_HFP 0x0C88 #define BLT_Y_VFP 0x0C8C #define BLT_IVMX0 0x0CC0 #define BLT_IVMX1 0x0CC4 #define BLT_IVMX2 0x0CC8 #define BLT_IVMX3 0x0CCC #define BLT_OVMX0 0x0CD0 #define BLT_OVMX1 0x0CD4 #define BLT_OVMX2 0x0CD8 #define BLT_OVMX3 0x0CDC #define BLT_DEI 0x0CEC /* HW registers : filters */ #define BLT_HFC_N 0x0D00 #define BLT_VFC_N 0x0D90 #define BLT_Y_HFC_N 0x0E00 #define BLT_Y_VFC_N 0x0E90 #define BLT_NB_H_COEF 16 #define BLT_NB_V_COEF 10 /* Registers values */ #define BLT_CTL_RESET BIT(31) /* Global soft reset */ #define BLT_ITS_AQ1_LNA BIT(12) /* AQ1 LNA reached */ #define BLT_STA1_IDLE BIT(0) /* BDISP idle */ #define BLT_AQ1_CTL_CFG 0x80400003 /* Enable, P3, LNA reached */ #define BLT_INS_S1_MASK (BIT(0) | BIT(1) | BIT(2)) #define BLT_INS_S1_OFF 0x00000000 /* src1 disabled */ #define BLT_INS_S1_MEM 0x00000001 /* src1 fetched from memory */ #define BLT_INS_S1_CF 0x00000003 /* src1 color fill */ #define BLT_INS_S1_COPY 0x00000004 /* src1 direct copy */ #define BLT_INS_S1_FILL 0x00000007 
/* src1 firect fill */ #define BLT_INS_S2_MASK (BIT(3) | BIT(4)) #define BLT_INS_S2_OFF 0x00000000 /* src2 disabled */ #define BLT_INS_S2_MEM 0x00000008 /* src2 fetched from memory */ #define BLT_INS_S2_CF 0x00000018 /* src2 color fill */ #define BLT_INS_S3_MASK BIT(5) #define BLT_INS_S3_OFF 0x00000000 /* src3 disabled */ #define BLT_INS_S3_MEM 0x00000020 /* src3 fetched from memory */ #define BLT_INS_IVMX BIT(6) /* Input versatile matrix */ #define BLT_INS_CLUT BIT(7) /* Color Look Up Table */ #define BLT_INS_SCALE BIT(8) /* Scaling */ #define BLT_INS_FLICK BIT(9) /* Flicker filter */ #define BLT_INS_CLIP BIT(10) /* Clipping */ #define BLT_INS_CKEY BIT(11) /* Color key */ #define BLT_INS_OVMX BIT(12) /* Output versatile matrix */ #define BLT_INS_DEI BIT(13) /* Deinterlace */ #define BLT_INS_PMASK BIT(14) /* Plane mask */ #define BLT_INS_VC1R BIT(17) /* VC1 Range mapping */ #define BLT_INS_ROTATE BIT(18) /* Rotation */ #define BLT_INS_GRAD BIT(19) /* Gradient fill */ #define BLT_INS_AQLOCK BIT(29) /* AQ lock */ #define BLT_INS_PACE BIT(30) /* Pace down */ #define BLT_INS_IRQ BIT(31) /* Raise IRQ when node done */ #define BLT_CIC_ALL_GRP 0x000FDFFC /* all valid groups present */ #define BLT_ACK_BYPASS_S2S3 0x00000007 /* Bypass src2 and src3 */ #define BLT_TTY_COL_SHIFT 16 /* Color format */ #define BLT_TTY_COL_MASK 0x001F0000 /* Color format mask */ #define BLT_TTY_ALPHA_R BIT(21) /* Alpha range */ #define BLT_TTY_CR_NOT_CB BIT(22) /* CR not Cb */ #define BLT_TTY_MB BIT(23) /* MB frame / field*/ #define BLT_TTY_HSO BIT(24) /* H scan order */ #define BLT_TTY_VSO BIT(25) /* V scan order */ #define BLT_TTY_DITHER BIT(26) /* Dithering */ #define BLT_TTY_CHROMA BIT(27) /* Write chroma / luma */ #define BLT_TTY_BIG_END BIT(30) /* Big endianness */ #define BLT_S1TY_A1_SUBSET BIT(22) /* A1 subset */ #define BLT_S1TY_CHROMA_EXT BIT(26) /* Chroma Extended */ #define BTL_S1TY_SUBBYTE BIT(28) /* Sub-byte fmt, pixel order */ #define BLT_S1TY_RGB_EXP BIT(29) /* RGB expansion mode */ #define BLT_S2TY_A1_SUBSET BIT(22) /* A1 subset */ #define BLT_S2TY_CHROMA_EXT BIT(26) /* Chroma Extended */ #define BTL_S2TY_SUBBYTE BIT(28) /* Sub-byte fmt, pixel order */ #define BLT_S2TY_RGB_EXP BIT(29) /* RGB expansion mode */ #define BLT_S3TY_BLANK_ACC BIT(26) /* Blank access */ #define BLT_FCTL_HV_SCALE 0x00000055 /* H/V resize + color filter */ #define BLT_FCTL_Y_HV_SCALE 0x33000000 /* Luma version */ #define BLT_FCTL_HV_SAMPLE 0x00000044 /* H/V resize */ #define BLT_FCTL_Y_HV_SAMPLE 0x22000000 /* Luma version */ #define BLT_RZI_DEFAULT 0x20003000 /* H/VNB_repeat = 3/2 */ /* Color format */ #define BDISP_RGB565 0x00 /* RGB565 */ #define BDISP_RGB888 0x01 /* RGB888 */ #define BDISP_XRGB8888 0x02 /* RGB888_32 */ #define BDISP_ARGB8888 0x05 /* ARGB888 */ #define BDISP_NV12 0x16 /* YCbCr42x R2B */ #define BDISP_YUV_3B 0x1E /* YUV (3 buffer) */
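/*
 * Illustrative sketch (not from the bdisp driver sources): the defines
 * above are meant to be OR'ed together when a bdisp_node is built.  The
 * hypothetical helper below shows one plausible composition -- a
 * memory-to-memory blit of src2 with rescaling and an end-of-node
 * interrupt -- using only macros from this header.
 */
static inline void demo_bdisp_fill_node_ins(struct bdisp_node *node)
{
	u32 ins = 0;

	ins |= BLT_INS_S1_OFF;		/* src1 not used */
	ins |= BLT_INS_S2_MEM;		/* src2 fetched from memory */
	ins |= BLT_INS_SCALE;		/* enable the rescaler */
	ins |= BLT_INS_IRQ;		/* raise an IRQ when the node completes */

	node->ins  = ins;
	node->fctl = BLT_FCTL_HV_SCALE;	/* H/V resize + color filter */
	node->rzi  = BLT_RZI_DEFAULT;
}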
// SPDX-License-Identifier: GPL-2.0-only /* * An rtc driver for the Dallas DS1553 * * Copyright (C) 2006 Atsushi Nemoto <[email protected]> */ #include <linux/bcd.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/interrupt.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #define RTC_REG_SIZE 0x2000 #define RTC_OFFSET 0x1ff0 #define RTC_FLAGS (RTC_OFFSET + 0) #define RTC_SECONDS_ALARM (RTC_OFFSET + 2) #define RTC_MINUTES_ALARM (RTC_OFFSET + 3) #define RTC_HOURS_ALARM (RTC_OFFSET + 4) #define RTC_DATE_ALARM (RTC_OFFSET + 5) #define RTC_INTERRUPTS (RTC_OFFSET + 6) #define RTC_WATCHDOG (RTC_OFFSET + 7) #define RTC_CONTROL (RTC_OFFSET + 8) #define RTC_CENTURY (RTC_OFFSET + 8) #define RTC_SECONDS (RTC_OFFSET + 9) #define RTC_MINUTES (RTC_OFFSET + 10) #define RTC_HOURS (RTC_OFFSET + 11) #define RTC_DAY (RTC_OFFSET + 12) #define RTC_DATE (RTC_OFFSET + 13) #define RTC_MONTH (RTC_OFFSET + 14) #define RTC_YEAR (RTC_OFFSET + 15) #define RTC_CENTURY_MASK 0x3f #define RTC_SECONDS_MASK 0x7f #define RTC_DAY_MASK 0x07 /* Bits in the Control/Century register */ #define RTC_WRITE 0x80 #define RTC_READ 0x40 /* Bits in the Seconds register */ #define RTC_STOP 0x80 /* Bits in the Flags register */ #define RTC_FLAGS_AF 0x40 #define RTC_FLAGS_BLF 0x10 /* Bits in the Interrupts register */ #define RTC_INTS_AE 0x80 struct rtc_plat_data { struct rtc_device *rtc; void __iomem *ioaddr; unsigned long last_jiffies; int irq; unsigned int irqen; int alrm_sec; int alrm_min; int alrm_hour; int alrm_mday; spinlock_t lock; }; static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct rtc_plat_data *pdata = dev_get_drvdata(dev); void __iomem *ioaddr = pdata->ioaddr; u8 century; century = bin2bcd((tm->tm_year + 1900) / 100); writeb(RTC_WRITE, pdata->ioaddr + RTC_CONTROL); writeb(bin2bcd(tm->tm_year % 100), ioaddr + RTC_YEAR); writeb(bin2bcd(tm->tm_mon + 1), ioaddr + RTC_MONTH); writeb(bin2bcd(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY); writeb(bin2bcd(tm->tm_mday), ioaddr + RTC_DATE); writeb(bin2bcd(tm->tm_hour), ioaddr + RTC_HOURS); writeb(bin2bcd(tm->tm_min), ioaddr + RTC_MINUTES); writeb(bin2bcd(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS); /* RTC_CENTURY and RTC_CONTROL share same register */ writeb(RTC_WRITE | (century & RTC_CENTURY_MASK), ioaddr + RTC_CENTURY); writeb(century & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL); return 0; } static int ds1553_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct rtc_plat_data *pdata = dev_get_drvdata(dev); void __iomem *ioaddr = pdata->ioaddr; unsigned int year, month, day, hour, minute, second, week; unsigned int century; /* give enough time to update RTC in case of continuous read */ if (pdata->last_jiffies == jiffies) msleep(1); pdata->last_jiffies = jiffies; writeb(RTC_READ, ioaddr + RTC_CONTROL); second = readb(ioaddr + RTC_SECONDS) & RTC_SECONDS_MASK; minute = readb(ioaddr + RTC_MINUTES); hour = readb(ioaddr + RTC_HOURS); day = readb(ioaddr + RTC_DATE); week = readb(ioaddr + RTC_DAY) & RTC_DAY_MASK; month = readb(ioaddr + RTC_MONTH); year = readb(ioaddr + RTC_YEAR); century = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK; writeb(0, ioaddr + RTC_CONTROL); tm->tm_sec = bcd2bin(second); tm->tm_min = bcd2bin(minute); tm->tm_hour = bcd2bin(hour); tm->tm_mday = bcd2bin(day); tm->tm_wday = bcd2bin(week); tm->tm_mon = bcd2bin(month) - 1; /* year is 1900 + tm->tm_year */ tm->tm_year = 
bcd2bin(year) + bcd2bin(century) * 100 - 1900; return 0; } static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata) { void __iomem *ioaddr = pdata->ioaddr; unsigned long flags; spin_lock_irqsave(&pdata->lock, flags); writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ? 0x80 : bin2bcd(pdata->alrm_mday), ioaddr + RTC_DATE_ALARM); writeb(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ? 0x80 : bin2bcd(pdata->alrm_hour), ioaddr + RTC_HOURS_ALARM); writeb(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ? 0x80 : bin2bcd(pdata->alrm_min), ioaddr + RTC_MINUTES_ALARM); writeb(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ? 0x80 : bin2bcd(pdata->alrm_sec), ioaddr + RTC_SECONDS_ALARM); writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS); readb(ioaddr + RTC_FLAGS); /* clear interrupts */ spin_unlock_irqrestore(&pdata->lock, flags); } static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_plat_data *pdata = dev_get_drvdata(dev); if (pdata->irq <= 0) return -EINVAL; pdata->alrm_mday = alrm->time.tm_mday; pdata->alrm_hour = alrm->time.tm_hour; pdata->alrm_min = alrm->time.tm_min; pdata->alrm_sec = alrm->time.tm_sec; if (alrm->enabled) pdata->irqen |= RTC_AF; ds1553_rtc_update_alarm(pdata); return 0; } static int ds1553_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_plat_data *pdata = dev_get_drvdata(dev); if (pdata->irq <= 0) return -EINVAL; alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday; alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour; alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min; alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec; alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0; return 0; } static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; unsigned long events = 0; spin_lock(&pdata->lock); /* read and clear interrupt */ if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) { events = RTC_IRQF; if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80) events |= RTC_UF; else events |= RTC_AF; rtc_update_irq(pdata->rtc, 1, events); } spin_unlock(&pdata->lock); return events ? 
IRQ_HANDLED : IRQ_NONE; } static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct rtc_plat_data *pdata = dev_get_drvdata(dev); if (pdata->irq <= 0) return -EINVAL; if (enabled) pdata->irqen |= RTC_AF; else pdata->irqen &= ~RTC_AF; ds1553_rtc_update_alarm(pdata); return 0; } static const struct rtc_class_ops ds1553_rtc_ops = { .read_time = ds1553_rtc_read_time, .set_time = ds1553_rtc_set_time, .read_alarm = ds1553_rtc_read_alarm, .set_alarm = ds1553_rtc_set_alarm, .alarm_irq_enable = ds1553_rtc_alarm_irq_enable, }; static int ds1553_nvram_read(void *priv, unsigned int pos, void *val, size_t bytes) { struct platform_device *pdev = priv; struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; u8 *buf = val; for (; bytes; bytes--) *buf++ = readb(ioaddr + pos++); return 0; } static int ds1553_nvram_write(void *priv, unsigned int pos, void *val, size_t bytes) { struct platform_device *pdev = priv; struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; u8 *buf = val; for (; bytes; bytes--) writeb(*buf++, ioaddr + pos++); return 0; } static int ds1553_rtc_probe(struct platform_device *pdev) { unsigned int cen, sec; struct rtc_plat_data *pdata; void __iomem *ioaddr; int ret = 0; struct nvmem_config nvmem_cfg = { .name = "ds1553_nvram", .word_size = 1, .stride = 1, .size = RTC_OFFSET, .reg_read = ds1553_nvram_read, .reg_write = ds1553_nvram_write, .priv = pdev, }; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ioaddr)) return PTR_ERR(ioaddr); pdata->ioaddr = ioaddr; pdata->irq = platform_get_irq(pdev, 0); /* turn RTC on if it was not on */ sec = readb(ioaddr + RTC_SECONDS); if (sec & RTC_STOP) { sec &= RTC_SECONDS_MASK; cen = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK; writeb(RTC_WRITE, ioaddr + RTC_CONTROL); writeb(sec, ioaddr + RTC_SECONDS); writeb(cen & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL); } if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF) dev_warn(&pdev->dev, "voltage-low detected.\n"); spin_lock_init(&pdata->lock); pdata->last_jiffies = jiffies; platform_set_drvdata(pdev, pdata); pdata->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(pdata->rtc)) return PTR_ERR(pdata->rtc); pdata->rtc->ops = &ds1553_rtc_ops; ret = devm_rtc_register_device(pdata->rtc); if (ret) return ret; if (pdata->irq > 0) { writeb(0, ioaddr + RTC_INTERRUPTS); if (devm_request_irq(&pdev->dev, pdata->irq, ds1553_rtc_interrupt, 0, pdev->name, pdev) < 0) { dev_warn(&pdev->dev, "interrupt not available.\n"); pdata->irq = 0; } } devm_rtc_nvmem_register(pdata->rtc, &nvmem_cfg); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:rtc-ds1553"); static struct platform_driver ds1553_rtc_driver = { .probe = ds1553_rtc_probe, .driver = { .name = "rtc-ds1553", }, }; module_platform_driver(ds1553_rtc_driver); MODULE_AUTHOR("Atsushi Nemoto <[email protected]>"); MODULE_DESCRIPTION("Dallas DS1553 RTC driver"); MODULE_LICENSE("GPL");
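/*
 * Illustrative sketch (not part of the ds1553 driver): the DS1553 stores
 * each calendar field as packed BCD and splits the four-digit year into a
 * century register and a year register.  The hypothetical demo_* helpers
 * below restate, in isolation, the conversions done by
 * ds1553_rtc_set_time() and ds1553_rtc_read_time().
 */
#include <linux/bcd.h>
#include <linux/types.h>

/* e.g. 2024 -> century 0x20, year 0x24 */
static void demo_year_to_ds1553(unsigned int year, u8 *century_bcd,
				u8 *year_bcd)
{
	*century_bcd = bin2bcd(year / 100);
	*year_bcd = bin2bcd(year % 100);
}

/* e.g. century 0x20, year 0x24 -> tm_year 124 (years since 1900) */
static int demo_ds1553_to_tm_year(u8 century_bcd, u8 year_bcd)
{
	return bcd2bin(year_bcd) + bcd2bin(century_bcd) * 100 - 1900;
}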
/* SPDX-License-Identifier: GPL-2.0 */

int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
		  unsigned int key_len);
int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
		     unsigned int key_len);
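/*
 * Illustrative sketch (not taken from the CE glue code itself): a setkey
 * wrapper for the declarations above would typically just validate the key
 * length and delegate to the expandkey helper.  It assumes the usual
 * convention that ce_aes_expandkey() returns 0 on success and a negative
 * errno on failure; demo_* names are hypothetical.
 */
#include <crypto/aes.h>
#include <linux/crypto.h>

static int demo_ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	/* only 128/192/256-bit keys are valid for AES */
	if (aes_check_keylen(key_len))
		return -EINVAL;

	return ce_aes_expandkey(ctx, in_key, key_len);
}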
// SPDX-License-Identifier: GPL-2.0-only /* * Linux network driver for QLogic BR-series Converged Network Adapter. */ /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved * www.qlogic.com */ #include <linux/firmware.h> #include "bnad.h" #include "bfi.h" #include "cna.h" const struct firmware *bfi_fw; static u32 *bfi_image_ct_cna, *bfi_image_ct2_cna; static u32 bfi_image_ct_cna_size, bfi_image_ct2_cna_size; static u32 * cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name) { const struct firmware *fw; u32 n; if (request_firmware(&fw, fw_name, &pdev->dev)) { dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name); goto error; } *bfi_image = (u32 *)fw->data; *bfi_image_size = fw->size/sizeof(u32); bfi_fw = fw; /* Convert loaded firmware to host order as it is stored in file * as sequence of LE32 integers. */ for (n = 0; n < *bfi_image_size; n++) le32_to_cpus(*bfi_image + n); return *bfi_image; error: return NULL; } u32 * cna_get_firmware_buf(struct pci_dev *pdev) { if (pdev->device == BFA_PCI_DEVICE_ID_CT2) { if (bfi_image_ct2_cna_size == 0) cna_read_firmware(pdev, &bfi_image_ct2_cna, &bfi_image_ct2_cna_size, CNA_FW_FILE_CT2); return bfi_image_ct2_cna; } else if (bfa_asic_id_ct(pdev->device)) { if (bfi_image_ct_cna_size == 0) cna_read_firmware(pdev, &bfi_image_ct_cna, &bfi_image_ct_cna_size, CNA_FW_FILE_CT); return bfi_image_ct_cna; } return NULL; } u32 * bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off) { switch (asic_gen) { case BFI_ASIC_GEN_CT: return (bfi_image_ct_cna + off); case BFI_ASIC_GEN_CT2: return (bfi_image_ct2_cna + off); default: return NULL; } } u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen) { switch (asic_gen) { case BFI_ASIC_GEN_CT: return bfi_image_ct_cna_size; case BFI_ASIC_GEN_CT2: return bfi_image_ct2_cna_size; default: return 0; } }
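/*
 * Illustrative sketch (not part of the bna driver): firmware files are
 * stored as a stream of little-endian 32-bit words, so the loader above
 * byte-swaps them in place before use.  The standalone hypothetical helper
 * below shows the same in-place conversion for an arbitrary word buffer.
 */
#include <asm/byteorder.h>
#include <linux/types.h>

static void demo_fw_words_to_cpu(u32 *words, size_t nwords)
{
	size_t i;

	/* le32_to_cpus() is a no-op on little-endian CPUs */
	for (i = 0; i < nwords; i++)
		le32_to_cpus(&words[i]);
}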
/*
 * videobuf2-vmalloc.h - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#ifndef _MEDIA_VIDEOBUF2_VMALLOC_H
#define _MEDIA_VIDEOBUF2_VMALLOC_H

#include <media/videobuf2-v4l2.h>

extern const struct vb2_mem_ops vb2_vmalloc_memops;

#endif
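/*
 * Illustrative sketch (generic, not from a specific driver): a driver opts
 * into this allocator by pointing its vb2_queue at vb2_vmalloc_memops
 * before calling vb2_queue_init().  The demo_* symbols are hypothetical.
 */
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

static int demo_init_vb2_queue(struct vb2_queue *q, void *drv_priv,
			       const struct vb2_ops *drv_vb2_ops,
			       struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ;
	q->drv_priv = drv_priv;
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	q->ops = drv_vb2_ops;
	q->mem_ops = &vb2_vmalloc_memops;	/* buffers come from vmalloc() */
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = lock;

	return vb2_queue_init(q);
}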
// SPDX-License-Identifier: GPL-2.0-only /******************************************************************************* This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. ST Ethernet IPs are built around a Synopsys IP Core. Copyright(C) 2007-2011 STMicroelectronics Ltd Author: Giuseppe Cavallaro <[email protected]> Documentation available at: http://www.stlinux.com Support available at: https://bugzilla.stlinux.com/ *******************************************************************************/ #include <linux/clk.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/if_ether.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/prefetch.h> #include <linux/pinctrl/consumer.h> #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> #include <linux/seq_file.h> #endif /* CONFIG_DEBUG_FS */ #include <linux/net_tstamp.h> #include <linux/phylink.h> #include <linux/udp.h> #include <linux/bpf_trace.h> #include <net/page_pool/helpers.h> #include <net/pkt_cls.h> #include <net/xdp_sock_drv.h> #include "stmmac_ptp.h" #include "stmmac_fpe.h" #include "stmmac.h" #include "stmmac_xdp.h" #include <linux/reset.h> #include <linux/of_mdio.h> #include "dwmac1000.h" #include "dwxgmac2.h" #include "hwif.h" /* As long as the interface is active, we keep the timestamping counter enabled * with fine resolution and binary rollover. This avoid non-monotonic behavior * (clock jumps) when changing timestamping settings at runtime. */ #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ PTP_TCR_TSCTRLSSR) #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ #define TX_TIMEO 5000 static int watchdog = TX_TIMEO; module_param(watchdog, int, 0644); MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); static int phyaddr = -1; module_param(phyaddr, int, 0444); MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) /* Limit to make sure XDP TX and slow path can coexist */ #define STMMAC_XSK_TX_BUDGET_MAX 256 #define STMMAC_TX_XSK_AVAIL 16 #define STMMAC_RX_FILL_BATCH 16 #define STMMAC_XDP_PASS 0 #define STMMAC_XDP_CONSUMED BIT(0) #define STMMAC_XDP_TX BIT(1) #define STMMAC_XDP_REDIRECT BIT(2) static int flow_ctrl = FLOW_AUTO; module_param(flow_ctrl, int, 0644); MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); static int pause = PAUSE_TIME; module_param(pause, int, 0644); MODULE_PARM_DESC(pause, "Flow Control Pause Time"); #define TC_DEFAULT 64 static int tc = TC_DEFAULT; module_param(tc, int, 0644); MODULE_PARM_DESC(tc, "DMA threshold control value"); #define DEFAULT_BUFSIZE 1536 static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, 0644); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); #define STMMAC_RX_COPYBREAK 256 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); #define STMMAC_DEFAULT_LPI_TIMER 1000 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; module_param(eee_timer, int, 
0644); MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) /* By default the driver will use the ring mode to manage tx and rx descriptors, * but allow user to force to use the chain instead of the ring */ static unsigned int chain_mode; module_param(chain_mode, int, 0444); MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); /* For MSI interrupts handling */ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); static void stmmac_reset_queues_param(struct stmmac_priv *priv); static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, u32 rxmode, u32 chan); #ifdef CONFIG_DEBUG_FS static const struct net_device_ops stmmac_netdev_ops; static void stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(struct net_device *dev); #endif #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) { int ret = 0; if (enabled) { ret = clk_prepare_enable(priv->plat->stmmac_clk); if (ret) return ret; ret = clk_prepare_enable(priv->plat->pclk); if (ret) { clk_disable_unprepare(priv->plat->stmmac_clk); return ret; } if (priv->plat->clks_config) { ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); if (ret) { clk_disable_unprepare(priv->plat->stmmac_clk); clk_disable_unprepare(priv->plat->pclk); return ret; } } } else { clk_disable_unprepare(priv->plat->stmmac_clk); clk_disable_unprepare(priv->plat->pclk); if (priv->plat->clks_config) priv->plat->clks_config(priv->plat->bsp_priv, enabled); } return ret; } EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); /** * stmmac_verify_args - verify the driver parameters. * Description: it checks the driver parameters and set a default in case of * errors. 
*/ static void stmmac_verify_args(void) { if (unlikely(watchdog < 0)) watchdog = TX_TIMEO; if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) buf_sz = DEFAULT_BUFSIZE; if (unlikely(flow_ctrl > 1)) flow_ctrl = FLOW_AUTO; else if (likely(flow_ctrl < 0)) flow_ctrl = FLOW_OFF; if (unlikely((pause < 0) || (pause > 0xffff))) pause = PAUSE_TIME; if (eee_timer < 0) eee_timer = STMMAC_DEFAULT_LPI_TIMER; } static void __stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; u32 tx_queues_cnt = priv->plat->tx_queues_to_use; u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue; for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; if (stmmac_xdp_is_enabled(priv) && test_bit(queue, priv->af_xdp_zc_qps)) { napi_disable(&ch->rxtx_napi); continue; } if (queue < rx_queues_cnt) napi_disable(&ch->rx_napi); if (queue < tx_queues_cnt) napi_disable(&ch->tx_napi); } } /** * stmmac_disable_all_queues - Disable all queues * @priv: driver private structure */ static void stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; struct stmmac_rx_queue *rx_q; u32 queue; /* synchronize_rcu() needed for pending XDP buffers to drain */ for (queue = 0; queue < rx_queues_cnt; queue++) { rx_q = &priv->dma_conf.rx_queue[queue]; if (rx_q->xsk_pool) { synchronize_rcu(); break; } } __stmmac_disable_all_queues(priv); } /** * stmmac_enable_all_queues - Enable all queues * @priv: driver private structure */ static void stmmac_enable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; u32 tx_queues_cnt = priv->plat->tx_queues_to_use; u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue; for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; if (stmmac_xdp_is_enabled(priv) && test_bit(queue, priv->af_xdp_zc_qps)) { napi_enable(&ch->rxtx_napi); continue; } if (queue < rx_queues_cnt) napi_enable(&ch->rx_napi); if (queue < tx_queues_cnt) napi_enable(&ch->tx_napi); } } static void stmmac_service_event_schedule(struct stmmac_priv *priv) { if (!test_bit(STMMAC_DOWN, &priv->state) && !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) queue_work(priv->wq, &priv->service_task); } static void stmmac_global_err(struct stmmac_priv *priv) { netif_carrier_off(priv->dev); set_bit(STMMAC_RESET_REQUESTED, &priv->state); stmmac_service_event_schedule(priv); } /** * stmmac_clk_csr_set - dynamically set the MDC clock * @priv: driver private structure * Description: this is to dynamically set the MDC clock according to the csr * clock input. * Note: * If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be * changed at run-time and it is fixed (as reported in the driver * documentation). Viceversa the driver will try to set the MDC * clock dynamically according to the actual clock input. */ static void stmmac_clk_csr_set(struct stmmac_priv *priv) { u32 clk_rate; clk_rate = clk_get_rate(priv->plat->stmmac_clk); /* Platform provided default clk_csr would be assumed valid * for all other cases except for the below mentioned ones. * For values higher than the IEEE 802.3 specified frequency * we can not estimate the proper divider as it is not known * the frequency of clk_csr_i. So we do not change the default * divider. 
*/ if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { if (clk_rate < CSR_F_35M) priv->clk_csr = STMMAC_CSR_20_35M; else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) priv->clk_csr = STMMAC_CSR_35_60M; else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) priv->clk_csr = STMMAC_CSR_60_100M; else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) priv->clk_csr = STMMAC_CSR_100_150M; else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) priv->clk_csr = STMMAC_CSR_150_250M; else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { if (clk_rate > 160000000) priv->clk_csr = 0x03; else if (clk_rate > 80000000) priv->clk_csr = 0x02; else if (clk_rate > 40000000) priv->clk_csr = 0x01; else priv->clk_csr = 0; } if (priv->plat->has_xgmac) { if (clk_rate > 400000000) priv->clk_csr = 0x5; else if (clk_rate > 350000000) priv->clk_csr = 0x4; else if (clk_rate > 300000000) priv->clk_csr = 0x3; else if (clk_rate > 250000000) priv->clk_csr = 0x2; else if (clk_rate > 150000000) priv->clk_csr = 0x1; else priv->clk_csr = 0x0; } } static void print_pkt(unsigned char *buf, int len) { pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); } static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; u32 avail; if (tx_q->dirty_tx > tx_q->cur_tx) avail = tx_q->dirty_tx - tx_q->cur_tx - 1; else avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; return avail; } /** * stmmac_rx_dirty - Get RX queue dirty * @priv: driver private structure * @queue: RX queue index */ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; u32 dirty; if (rx_q->dirty_rx <= rx_q->cur_rx) dirty = rx_q->cur_rx - rx_q->dirty_rx; else dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; return dirty; } static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) { int tx_lpi_timer; /* Clear/set the SW EEE timer flag based on LPI ET enablement */ priv->eee_sw_timer_en = en ? 0 : 1; tx_lpi_timer = en ? priv->tx_lpi_timer : 0; stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); } /** * stmmac_enable_eee_mode - check and enter in LPI mode * @priv: driver private structure * Description: this function is to verify and enter in LPI mode in case of * EEE. */ static int stmmac_enable_eee_mode(struct stmmac_priv *priv) { u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queue; /* check if all TX queues have the work finished */ for (queue = 0; queue < tx_cnt; queue++) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; if (tx_q->dirty_tx != tx_q->cur_tx) return -EBUSY; /* still unfinished work */ } /* Check and enter in LPI mode */ if (!priv->tx_path_in_lpi_mode) stmmac_set_eee_mode(priv, priv->hw, priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); return 0; } /** * stmmac_disable_eee_mode - disable and exit from LPI mode * @priv: driver private structure * Description: this function is to exit and disable EEE in case of * LPI state is true. This is called by the xmit. 
*/ void stmmac_disable_eee_mode(struct stmmac_priv *priv) { if (!priv->eee_sw_timer_en) { stmmac_lpi_entry_timer_config(priv, 0); return; } stmmac_reset_eee_mode(priv, priv->hw); del_timer_sync(&priv->eee_ctrl_timer); priv->tx_path_in_lpi_mode = false; } /** * stmmac_eee_ctrl_timer - EEE TX SW timer. * @t: timer_list struct containing private info * Description: * if there is no data transfer and if we are not in LPI state, * then MAC Transmitter can be moved to LPI state. */ static void stmmac_eee_ctrl_timer(struct timer_list *t) { struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); if (stmmac_enable_eee_mode(priv)) mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); } /** * stmmac_eee_init - init EEE * @priv: driver private structure * Description: * if the GMAC supports the EEE (from the HW cap reg) and the phy device * can also manage EEE, this function enable the LPI state and start related * timer. */ bool stmmac_eee_init(struct stmmac_priv *priv) { int eee_tw_timer = priv->eee_tw_timer; /* Check if MAC core supports the EEE feature. */ if (!priv->dma_cap.eee) return false; mutex_lock(&priv->lock); /* Check if it needs to be deactivated */ if (!priv->eee_active) { if (priv->eee_enabled) { netdev_dbg(priv->dev, "disable EEE\n"); stmmac_lpi_entry_timer_config(priv, 0); del_timer_sync(&priv->eee_ctrl_timer); stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); if (priv->hw->xpcs) xpcs_config_eee(priv->hw->xpcs, priv->plat->mult_fact_100ns, false); } mutex_unlock(&priv->lock); return false; } if (priv->eee_active && !priv->eee_enabled) { timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, eee_tw_timer); if (priv->hw->xpcs) xpcs_config_eee(priv->hw->xpcs, priv->plat->mult_fact_100ns, true); } if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { del_timer_sync(&priv->eee_ctrl_timer); priv->tx_path_in_lpi_mode = false; stmmac_lpi_entry_timer_config(priv, 1); } else { stmmac_lpi_entry_timer_config(priv, 0); mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); } mutex_unlock(&priv->lock); netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); return true; } /* stmmac_get_tx_hwtstamp - get HW TX timestamps * @priv: driver private structure * @p : descriptor pointer * @skb : the socket buffer * Description : * This function will read timestamp from the descriptor & pass it to stack. * and also perform some sanity checks. 
*/ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, struct sk_buff *skb) { struct skb_shared_hwtstamps shhwtstamp; bool found = false; u64 ns = 0; if (!priv->hwts_tx_en) return; /* exit if skb doesn't support hw tstamp */ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) return; /* check tx tstamp status */ if (stmmac_get_tx_timestamp_status(priv, p)) { stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); found = true; } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { found = true; } if (found) { ns -= priv->plat->cdc_error_adj; memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); /* pass tstamp to stack */ skb_tstamp_tx(skb, &shhwtstamp); } } /* stmmac_get_rx_hwtstamp - get HW RX timestamps * @priv: driver private structure * @p : descriptor pointer * @np : next descriptor pointer * @skb : the socket buffer * Description : * This function will read received packet's timestamp from the descriptor * and pass it to stack. It also perform some sanity checks. */ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, struct dma_desc *np, struct sk_buff *skb) { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; u64 ns = 0; if (!priv->hwts_rx_en) return; /* For GMAC4, the valid timestamp is from CTX next desc. */ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) desc = np; /* Check if timestamp is available */ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); ns -= priv->plat->cdc_error_adj; netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp->hwtstamp = ns_to_ktime(ns); } else { netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); } } /** * stmmac_hwtstamp_set - control hardware timestamping. * @dev: device pointer. * @ifr: An IOCTL specific structure, that can contain a pointer to * a proprietary structure used to pass information to the driver. * Description: * This function configures the MAC to enable/disable both outgoing(TX) * and incoming(RX) packets time stamping based on user input. * Return Value: * 0 on success and an appropriate -ve integer on failure. 
*/ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct stmmac_priv *priv = netdev_priv(dev); struct hwtstamp_config config; u32 ptp_v2 = 0; u32 tstamp_all = 0; u32 ptp_over_ipv4_udp = 0; u32 ptp_over_ipv6_udp = 0; u32 ptp_over_ethernet = 0; u32 snap_type_sel = 0; u32 ts_master_en = 0; u32 ts_event_en = 0; if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { netdev_alert(priv->dev, "No support for HW time stamping\n"); priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; return -EOPNOTSUPP; } if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", __func__, config.flags, config.tx_type, config.rx_filter); if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; if (priv->adv_ts) { switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: /* time stamp no incoming packet at all */ config.rx_filter = HWTSTAMP_FILTER_NONE; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* 'xmac' hardware can support Sync, Pdelay_Req and * Pdelay_resp by setting bit14 and bits17/16 to 01 * This leaves Delay_Req timestamps out. * Enable all events *and* general purpose message * timestamping */ snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: /* PTP v1, UDP, Sync packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; /* take time stamp for SYNC messages only */ ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: /* PTP v1, UDP, Delay_req packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; /* take time stamp for Delay_Req messages only */ ts_master_en = PTP_TCR_TSMSTRENA; ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: /* PTP v2, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; break; case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: /* PTP v2, UDP, Sync packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for SYNC messages only */ ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; break; case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: /* PTP v2, UDP, Delay_req packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for Delay_Req messages only */ ts_master_en = PTP_TCR_TSMSTRENA; ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: /* PTP v2/802.AS1 any layer, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; snap_type_sel = PTP_TCR_SNAPTYPSEL_1; if (priv->synopsys_id < DWMAC_CORE_4_10) ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ethernet = PTP_TCR_TSIPENA; break; case HWTSTAMP_FILTER_PTP_V2_SYNC: /* PTP v2/802.AS1, any layer, Sync 
packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for SYNC messages only */ ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ethernet = PTP_TCR_TSIPENA; break; case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* PTP v2/802.AS1, any layer, Delay_req packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for Delay_Req messages only */ ts_master_en = PTP_TCR_TSMSTRENA; ts_event_en = PTP_TCR_TSEVNTENA; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ethernet = PTP_TCR_TSIPENA; break; case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: /* time stamp any incoming packet */ config.rx_filter = HWTSTAMP_FILTER_ALL; tstamp_all = PTP_TCR_TSENALL; break; default: return -ERANGE; } } else { switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: config.rx_filter = HWTSTAMP_FILTER_NONE; break; default: /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; } } priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; priv->systime_flags = STMMAC_HWTS_ACTIVE; if (priv->hwts_tx_en || priv->hwts_rx_en) { priv->systime_flags |= tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | ts_master_en | snap_type_sel; } stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); memcpy(&priv->tstamp_config, &config, sizeof(config)); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } /** * stmmac_hwtstamp_get - read hardware timestamping. * @dev: device pointer. * @ifr: An IOCTL specific structure, that can contain a pointer to * a proprietary structure used to pass information to the driver. * Description: * This function obtain the current hardware timestamping settings * as requested. */ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) { struct stmmac_priv *priv = netdev_priv(dev); struct hwtstamp_config *config = &priv->tstamp_config; if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; } /** * stmmac_init_tstamp_counter - init hardware timestamping counter * @priv: driver private structure * @systime_flags: timestamping flags * Description: * Initialize hardware counter for packet timestamping. * This is valid as long as the interface is open and not suspended. * Will be rerun after resuming from suspend, case in which the timestamping * flags updated by stmmac_hwtstamp_set() also need to be restored. 
*/ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) { bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; struct timespec64 now; u32 sec_inc = 0; u64 temp = 0; if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); priv->systime_flags = systime_flags; /* program Sub Second Increment reg */ stmmac_config_sub_second_increment(priv, priv->ptpaddr, priv->plat->clk_ptp_rate, xmac, &sec_inc); temp = div_u64(1000000000ULL, sec_inc); /* Store sub second increment for later use */ priv->sub_second_inc = sec_inc; /* calculate default added value: * formula is : * addend = (2^32)/freq_div_ratio; * where, freq_div_ratio = 1e9ns/sec_inc */ temp = (u64)(temp << 32); priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); /* initialize system time */ ktime_get_real_ts64(&now); /* lower 32 bits of tv_sec are safe until y2106 */ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); return 0; } EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); /** * stmmac_init_ptp - init PTP * @priv: driver private structure * Description: this is to verify if the HW supports the PTPv1 or PTPv2. * This is done by looking at the HW cap. register. * This function also registers the ptp driver. */ static int stmmac_init_ptp(struct stmmac_priv *priv) { bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; int ret; if (priv->plat->ptp_clk_freq_config) priv->plat->ptp_clk_freq_config(priv); ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); if (ret) return ret; priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ if (xmac && priv->dma_cap.atime_stamp) priv->adv_ts = 1; /* Dwmac 3.x core with extend_desc can support adv_ts */ else if (priv->extend_desc && priv->dma_cap.atime_stamp) priv->adv_ts = 1; if (priv->dma_cap.time_stamp) netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); if (priv->adv_ts) netdev_info(priv->dev, "IEEE 1588-2008 Advanced Timestamp supported\n"); priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) stmmac_hwtstamp_correct_latency(priv, priv); return 0; } static void stmmac_release_ptp(struct stmmac_priv *priv) { clk_disable_unprepare(priv->plat->clk_ptp_ref); stmmac_ptp_unregister(priv); } /** * stmmac_mac_flow_ctrl - Configure flow control in all queues * @priv: driver private structure * @duplex: duplex passed to the next function * Description: It is used for configuring the flow control in all queues */ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) { u32 tx_cnt = priv->plat->tx_queues_to_use; stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, priv->pause, tx_cnt); } static unsigned long stmmac_mac_get_caps(struct phylink_config *config, phy_interface_t interface) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); /* Refresh the MAC-specific capabilities */ stmmac_mac_update_caps(priv); config->mac_capabilities = priv->hw->link.caps; if (priv->plat->max_speed) phylink_limit_mac_speed(config, priv->plat->max_speed); return config->mac_capabilities; } static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); struct phylink_pcs *pcs; if (priv->plat->select_pcs) { pcs = priv->plat->select_pcs(priv, interface); if (!IS_ERR(pcs)) 
return pcs; } return NULL; } static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { /* Nothing to do, xpcs_config() handles everything */ } static void stmmac_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); stmmac_mac_set(priv, priv->ioaddr, false); priv->eee_active = false; priv->tx_lpi_enabled = false; priv->eee_enabled = stmmac_eee_init(priv); stmmac_set_eee_pls(priv, priv->hw, false); if (stmmac_fpe_supported(priv)) stmmac_fpe_link_state_handle(priv, false); } static void stmmac_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); u32 old_ctrl, ctrl; if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && priv->plat->serdes_powerup) priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); ctrl = old_ctrl & ~priv->hw->link.speed_mask; if (interface == PHY_INTERFACE_MODE_USXGMII) { switch (speed) { case SPEED_10000: ctrl |= priv->hw->link.xgmii.speed10000; break; case SPEED_5000: ctrl |= priv->hw->link.xgmii.speed5000; break; case SPEED_2500: ctrl |= priv->hw->link.xgmii.speed2500; break; default: return; } } else if (interface == PHY_INTERFACE_MODE_XLGMII) { switch (speed) { case SPEED_100000: ctrl |= priv->hw->link.xlgmii.speed100000; break; case SPEED_50000: ctrl |= priv->hw->link.xlgmii.speed50000; break; case SPEED_40000: ctrl |= priv->hw->link.xlgmii.speed40000; break; case SPEED_25000: ctrl |= priv->hw->link.xlgmii.speed25000; break; case SPEED_10000: ctrl |= priv->hw->link.xgmii.speed10000; break; case SPEED_2500: ctrl |= priv->hw->link.speed2500; break; case SPEED_1000: ctrl |= priv->hw->link.speed1000; break; default: return; } } else { switch (speed) { case SPEED_2500: ctrl |= priv->hw->link.speed2500; break; case SPEED_1000: ctrl |= priv->hw->link.speed1000; break; case SPEED_100: ctrl |= priv->hw->link.speed100; break; case SPEED_10: ctrl |= priv->hw->link.speed10; break; default: return; } } priv->speed = speed; if (priv->plat->fix_mac_speed) priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); if (!duplex) ctrl &= ~priv->hw->link.duplex; else ctrl |= priv->hw->link.duplex; /* Flow Control operation */ if (rx_pause && tx_pause) priv->flow_ctrl = FLOW_AUTO; else if (rx_pause && !tx_pause) priv->flow_ctrl = FLOW_RX; else if (!rx_pause && tx_pause) priv->flow_ctrl = FLOW_TX; else priv->flow_ctrl = FLOW_OFF; stmmac_mac_flow_ctrl(priv, duplex); if (ctrl != old_ctrl) writel(ctrl, priv->ioaddr + MAC_CTRL_REG); stmmac_mac_set(priv, priv->ioaddr, true); if (phy && priv->dma_cap.eee) { priv->eee_active = phy_init_eee(phy, !(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0; priv->eee_enabled = stmmac_eee_init(priv); priv->tx_lpi_enabled = priv->eee_enabled; stmmac_set_eee_pls(priv, priv->hw, true); } if (stmmac_fpe_supported(priv)) stmmac_fpe_link_state_handle(priv, true); if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) stmmac_hwtstamp_correct_latency(priv, priv); } static const struct phylink_mac_ops stmmac_phylink_mac_ops = { .mac_get_caps = stmmac_mac_get_caps, .mac_select_pcs = stmmac_mac_select_pcs, .mac_config = stmmac_mac_config, .mac_link_down = stmmac_mac_link_down, .mac_link_up = stmmac_mac_link_up, }; /** * stmmac_check_pcs_mode - 
verify if RGMII/SGMII is supported * @priv: driver private structure * Description: this is to verify if the HW supports the PCS. * Physical Coding Sublayer (PCS) interface that can be used when the MAC is * configured for the TBI, RTBI, or SGMII PHY interface. */ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) { int interface = priv->plat->mac_interface; if (priv->dma_cap.pcs) { if ((interface == PHY_INTERFACE_MODE_RGMII) || (interface == PHY_INTERFACE_MODE_RGMII_ID) || (interface == PHY_INTERFACE_MODE_RGMII_RXID) || (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); priv->hw->pcs = STMMAC_PCS_RGMII; } else if (interface == PHY_INTERFACE_MODE_SGMII) { netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); priv->hw->pcs = STMMAC_PCS_SGMII; } } } /** * stmmac_init_phy - PHY initialization * @dev: net device structure * Description: it initializes the driver's PHY state, and attaches the PHY * to the mac driver. * Return value: * 0 on success */ static int stmmac_init_phy(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); struct fwnode_handle *phy_fwnode; struct fwnode_handle *fwnode; int ret; if (!phylink_expects_phy(priv->phylink)) return 0; fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); if (fwnode) phy_fwnode = fwnode_get_phy_node(fwnode); else phy_fwnode = NULL; /* Some DT bindings do not set-up the PHY handle. Let's try to * manually parse it */ if (!phy_fwnode || IS_ERR(phy_fwnode)) { int addr = priv->plat->phy_addr; struct phy_device *phydev; if (addr < 0) { netdev_err(priv->dev, "no phy found\n"); return -ENODEV; } phydev = mdiobus_get_phy(priv->mii, addr); if (!phydev) { netdev_err(priv->dev, "no phy at addr %d\n", addr); return -ENODEV; } if (priv->dma_cap.eee) phy_support_eee(phydev); ret = phylink_connect_phy(priv->phylink, phydev); } else { fwnode_handle_put(phy_fwnode); ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); } if (!priv->plat->pmt) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; phylink_ethtool_get_wol(priv->phylink, &wol); device_set_wakeup_capable(priv->device, !!wol.supported); device_set_wakeup_enable(priv->device, !!wol.wolopts); } return ret; } static int stmmac_phy_setup(struct stmmac_priv *priv) { struct stmmac_mdio_bus_data *mdio_bus_data; int mode = priv->plat->phy_interface; struct fwnode_handle *fwnode; struct phylink *phylink; priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; priv->phylink_config.mac_managed_pm = true; /* Stmmac always requires an RX clock for hardware initialization */ priv->phylink_config.mac_requires_rxc = true; mdio_bus_data = priv->plat->mdio_bus_data; if (mdio_bus_data) priv->phylink_config.default_an_inband = mdio_bus_data->default_an_inband; /* Set the platform/firmware specified interface mode. Note, phylink * deals with the PHY interface mode, not the MAC interface mode. */ __set_bit(mode, priv->phylink_config.supported_interfaces); /* If we have an xpcs, it defines which PHY interfaces are supported. 
*/ if (priv->hw->xpcs) xpcs_get_interfaces(priv->hw->xpcs, priv->phylink_config.supported_interfaces); fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); phylink = phylink_create(&priv->phylink_config, fwnode, mode, &stmmac_phylink_mac_ops); if (IS_ERR(phylink)) return PTR_ERR(phylink); priv->phylink = phylink; return 0; } static void stmmac_display_rx_rings(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { u32 rx_cnt = priv->plat->rx_queues_to_use; unsigned int desc_size; void *head_rx; u32 queue; /* Display RX rings */ for (queue = 0; queue < rx_cnt; queue++) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; pr_info("\tRX Queue %u rings\n", queue); if (priv->extend_desc) { head_rx = (void *)rx_q->dma_erx; desc_size = sizeof(struct dma_extended_desc); } else { head_rx = (void *)rx_q->dma_rx; desc_size = sizeof(struct dma_desc); } /* Display RX ring */ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, rx_q->dma_rx_phy, desc_size); } } static void stmmac_display_tx_rings(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { u32 tx_cnt = priv->plat->tx_queues_to_use; unsigned int desc_size; void *head_tx; u32 queue; /* Display TX rings */ for (queue = 0; queue < tx_cnt; queue++) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; pr_info("\tTX Queue %d rings\n", queue); if (priv->extend_desc) { head_tx = (void *)tx_q->dma_etx; desc_size = sizeof(struct dma_extended_desc); } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { head_tx = (void *)tx_q->dma_entx; desc_size = sizeof(struct dma_edesc); } else { head_tx = (void *)tx_q->dma_tx; desc_size = sizeof(struct dma_desc); } stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, tx_q->dma_tx_phy, desc_size); } } static void stmmac_display_rings(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { /* Display RX ring */ stmmac_display_rx_rings(priv, dma_conf); /* Display TX ring */ stmmac_display_tx_rings(priv, dma_conf); } static int stmmac_set_bfsize(int mtu, int bufsize) { int ret = bufsize; if (mtu >= BUF_SIZE_8KiB) ret = BUF_SIZE_16KiB; else if (mtu >= BUF_SIZE_4KiB) ret = BUF_SIZE_8KiB; else if (mtu >= BUF_SIZE_2KiB) ret = BUF_SIZE_4KiB; else if (mtu > DEFAULT_BUFSIZE) ret = BUF_SIZE_2KiB; else ret = DEFAULT_BUFSIZE; return ret; } /** * stmmac_clear_rx_descriptors - clear RX descriptors * @priv: driver private structure * @dma_conf: structure to take the dma data * @queue: RX queue index * Description: this function is called to clear the RX descriptors * in case of both basic and extended descriptors are used. */ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; /* Clear the RX descriptors */ for (i = 0; i < dma_conf->dma_rx_size; i++) if (priv->extend_desc) stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, priv->use_riwt, priv->mode, (i == dma_conf->dma_rx_size - 1), dma_conf->dma_buf_sz); else stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], priv->use_riwt, priv->mode, (i == dma_conf->dma_rx_size - 1), dma_conf->dma_buf_sz); } /** * stmmac_clear_tx_descriptors - clear tx descriptors * @priv: driver private structure * @dma_conf: structure to take the dma data * @queue: TX queue index. * Description: this function is called to clear the TX descriptors * in case of both basic and extended descriptors are used. 
*/ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; int i; /* Clear the TX descriptors */ for (i = 0; i < dma_conf->dma_tx_size; i++) { int last = (i == (dma_conf->dma_tx_size - 1)); struct dma_desc *p; if (priv->extend_desc) p = &tx_q->dma_etx[i].basic; else if (tx_q->tbs & STMMAC_TBS_AVAIL) p = &tx_q->dma_entx[i].basic; else p = &tx_q->dma_tx[i]; stmmac_init_tx_desc(priv, p, priv->mode, last); } } /** * stmmac_clear_descriptors - clear descriptors * @priv: driver private structure * @dma_conf: structure to take the dma data * Description: this function is called to clear the TX and RX descriptors * in case of both basic and extended descriptors are used. */ static void stmmac_clear_descriptors(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { u32 rx_queue_cnt = priv->plat->rx_queues_to_use; u32 tx_queue_cnt = priv->plat->tx_queues_to_use; u32 queue; /* Clear the RX descriptors */ for (queue = 0; queue < rx_queue_cnt; queue++) stmmac_clear_rx_descriptors(priv, dma_conf, queue); /* Clear the TX descriptors */ for (queue = 0; queue < tx_queue_cnt; queue++) stmmac_clear_tx_descriptors(priv, dma_conf, queue); } /** * stmmac_init_rx_buffers - init the RX descriptor buffer. * @priv: driver private structure * @dma_conf: structure to take the dma data * @p: descriptor pointer * @i: descriptor index * @flags: gfp flag * @queue: RX queue index * Description: this function is called to allocate a receive buffer, perform * the DMA mapping and init the descriptor. */ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, struct dma_desc *p, int i, gfp_t flags, u32 queue) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); if (priv->dma_cap.host_dma_width <= 32) gfp |= GFP_DMA32; if (!buf->page) { buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->page) return -ENOMEM; buf->page_offset = stmmac_rx_offset(priv); } if (priv->sph && !buf->sec_page) { buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->sec_page) return -ENOMEM; buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); } else { buf->sec_page = NULL; stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); } buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) stmmac_init_desc3(priv, p); return 0; } /** * stmmac_free_rx_buffer - free RX dma buffers * @priv: private structure * @rx_q: RX queue * @i: buffer index. */ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, struct stmmac_rx_queue *rx_q, int i) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; if (buf->page) page_pool_put_full_page(rx_q->page_pool, buf->page, false); buf->page = NULL; if (buf->sec_page) page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); buf->sec_page = NULL; } /** * stmmac_free_tx_buffer - free RX dma buffers * @priv: private structure * @dma_conf: structure to take the dma data * @queue: RX queue index * @i: buffer index. 
*/ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue, int i) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; if (tx_q->tx_skbuff_dma[i].buf && tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { if (tx_q->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[i].buf, tx_q->tx_skbuff_dma[i].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, tx_q->tx_skbuff_dma[i].buf, tx_q->tx_skbuff_dma[i].len, DMA_TO_DEVICE); } if (tx_q->xdpf[i] && (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { xdp_return_frame(tx_q->xdpf[i]); tx_q->xdpf[i] = NULL; } if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) tx_q->xsk_frames_done++; if (tx_q->tx_skbuff[i] && tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { dev_kfree_skb_any(tx_q->tx_skbuff[i]); tx_q->tx_skbuff[i] = NULL; } tx_q->tx_skbuff_dma[i].buf = 0; tx_q->tx_skbuff_dma[i].map_as_page = false; } /** * dma_free_rx_skbufs - free RX dma buffers * @priv: private structure * @dma_conf: structure to take the dma data * @queue: RX queue index */ static void dma_free_rx_skbufs(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; for (i = 0; i < dma_conf->dma_rx_size; i++) stmmac_free_rx_buffer(priv, rx_q, i); } static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue, gfp_t flags) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; for (i = 0; i < dma_conf->dma_rx_size; i++) { struct dma_desc *p; int ret; if (priv->extend_desc) p = &((rx_q->dma_erx + i)->basic); else p = rx_q->dma_rx + i; ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, queue); if (ret) return ret; rx_q->buf_alloc_num++; } return 0; } /** * dma_free_rx_xskbufs - free RX dma buffers from XSK pool * @priv: private structure * @dma_conf: structure to take the dma data * @queue: RX queue index */ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; for (i = 0; i < dma_conf->dma_rx_size; i++) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; if (!buf->xdp) continue; xsk_buff_free(buf->xdp); buf->xdp = NULL; } } static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes) * in struct xdp_buff_xsk to stash driver specific information. Thus, * use this macro to make sure no size violations. 
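 * (As used here the check is purely build-time: a struct stmmac_xdp_buff
 * that outgrows the cb[] scratch area should fail the compile rather than
 * corrupt memory at runtime.)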
*/ XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff); for (i = 0; i < dma_conf->dma_rx_size; i++) { struct stmmac_rx_buffer *buf; dma_addr_t dma_addr; struct dma_desc *p; if (priv->extend_desc) p = (struct dma_desc *)(rx_q->dma_erx + i); else p = rx_q->dma_rx + i; buf = &rx_q->buf_pool[i]; buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); if (!buf->xdp) return -ENOMEM; dma_addr = xsk_buff_xdp_get_dma(buf->xdp); stmmac_set_desc_addr(priv, p, dma_addr); rx_q->buf_alloc_num++; } return 0; } static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) { if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) return NULL; return xsk_get_pool_from_qid(priv->dev, queue); } /** * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) * @priv: driver private structure * @dma_conf: structure to take the dma data * @queue: RX queue index * @flags: gfp flag. * Description: this function initializes the DMA RX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. */ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue, gfp_t flags) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int ret; netif_dbg(priv, probe, priv->dev, "(%s) dma_rx_phy=0x%08x\n", __func__, (u32)rx_q->dma_rx_phy); stmmac_clear_rx_descriptors(priv, dma_conf, queue); xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); if (rx_q->xsk_pool) { WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL)); netdev_info(priv->dev, "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", rx_q->queue_index); xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); } else { WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, MEM_TYPE_PAGE_POOL, rx_q->page_pool)); netdev_info(priv->dev, "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", rx_q->queue_index); } if (rx_q->xsk_pool) { /* RX XDP ZC buffer pool may not be populated, e.g. * xdpsock TX-only. */ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); } else { ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); if (ret < 0) return -ENOMEM; } /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) stmmac_mode_init(priv, rx_q->dma_erx, rx_q->dma_rx_phy, dma_conf->dma_rx_size, 1); else stmmac_mode_init(priv, rx_q->dma_rx, rx_q->dma_rx_phy, dma_conf->dma_rx_size, 0); } return 0; } static int init_dma_rx_desc_rings(struct net_device *dev, struct stmmac_dma_conf *dma_conf, gfp_t flags) { struct stmmac_priv *priv = netdev_priv(dev); u32 rx_count = priv->plat->rx_queues_to_use; int queue; int ret; /* RX INITIALIZATION */ netif_dbg(priv, probe, priv->dev, "SKB addresses:\nskb\t\tskb data\tdma data\n"); for (queue = 0; queue < rx_count; queue++) { ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); if (ret) goto err_init_rx_buffers; } return 0; err_init_rx_buffers: while (queue >= 0) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; if (rx_q->xsk_pool) dma_free_rx_xskbufs(priv, dma_conf, queue); else dma_free_rx_skbufs(priv, dma_conf, queue); rx_q->buf_alloc_num = 0; rx_q->xsk_pool = NULL; queue--; } return ret; } /** * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) * @priv: driver private structure * @dma_conf: structure to take the dma data * @queue: TX queue index * Description: this function initializes the DMA TX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. 
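 * Note: when a queue has TBS descriptors available, the chain-mode linking
 * below is skipped for that queue and it keeps the plain ring layout.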
*/ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; int i; netif_dbg(priv, probe, priv->dev, "(%s) dma_tx_phy=0x%08x\n", __func__, (u32)tx_q->dma_tx_phy); /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) stmmac_mode_init(priv, tx_q->dma_etx, tx_q->dma_tx_phy, dma_conf->dma_tx_size, 1); else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) stmmac_mode_init(priv, tx_q->dma_tx, tx_q->dma_tx_phy, dma_conf->dma_tx_size, 0); } tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); for (i = 0; i < dma_conf->dma_tx_size; i++) { struct dma_desc *p; if (priv->extend_desc) p = &((tx_q->dma_etx + i)->basic); else if (tx_q->tbs & STMMAC_TBS_AVAIL) p = &((tx_q->dma_entx + i)->basic); else p = tx_q->dma_tx + i; stmmac_clear_desc(priv, p); tx_q->tx_skbuff_dma[i].buf = 0; tx_q->tx_skbuff_dma[i].map_as_page = false; tx_q->tx_skbuff_dma[i].len = 0; tx_q->tx_skbuff_dma[i].last_segment = false; tx_q->tx_skbuff[i] = NULL; } return 0; } static int init_dma_tx_desc_rings(struct net_device *dev, struct stmmac_dma_conf *dma_conf) { struct stmmac_priv *priv = netdev_priv(dev); u32 tx_queue_cnt; u32 queue; tx_queue_cnt = priv->plat->tx_queues_to_use; for (queue = 0; queue < tx_queue_cnt; queue++) __init_dma_tx_desc_rings(priv, dma_conf, queue); return 0; } /** * init_dma_desc_rings - init the RX/TX descriptor rings * @dev: net device structure * @dma_conf: structure to take the dma data * @flags: gfp flag. * Description: this function initializes the DMA RX/TX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. */ static int init_dma_desc_rings(struct net_device *dev, struct stmmac_dma_conf *dma_conf, gfp_t flags) { struct stmmac_priv *priv = netdev_priv(dev); int ret; ret = init_dma_rx_desc_rings(dev, dma_conf, flags); if (ret) return ret; ret = init_dma_tx_desc_rings(dev, dma_conf); stmmac_clear_descriptors(priv, dma_conf); if (netif_msg_hw(priv)) stmmac_display_rings(priv, dma_conf); return ret; } /** * dma_free_tx_skbufs - free TX dma buffers * @priv: private structure * @dma_conf: structure to take the dma data * @queue: TX queue index */ static void dma_free_tx_skbufs(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; int i; tx_q->xsk_frames_done = 0; for (i = 0; i < dma_conf->dma_tx_size; i++) stmmac_free_tx_buffer(priv, dma_conf, queue, i); if (tx_q->xsk_pool && tx_q->xsk_frames_done) { xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); tx_q->xsk_frames_done = 0; tx_q->xsk_pool = NULL; } } /** * stmmac_free_tx_skbufs - free TX skb buffers * @priv: private structure */ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) { u32 tx_queue_cnt = priv->plat->tx_queues_to_use; u32 queue; for (queue = 0; queue < tx_queue_cnt; queue++) dma_free_tx_skbufs(priv, &priv->dma_conf, queue); } /** * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) * @priv: private structure * @dma_conf: structure to take the dma data * @queue: RX queue index */ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; /* Release the DMA RX socket buffers */ if (rx_q->xsk_pool) dma_free_rx_xskbufs(priv, dma_conf, queue); else dma_free_rx_skbufs(priv, dma_conf, queue); rx_q->buf_alloc_num = 0; rx_q->xsk_pool = NULL; /* 
Free DMA regions of consistent memory previously allocated */ if (!priv->extend_desc) dma_free_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_desc), rx_q->dma_rx, rx_q->dma_rx_phy); else dma_free_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_extended_desc), rx_q->dma_erx, rx_q->dma_rx_phy); if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) xdp_rxq_info_unreg(&rx_q->xdp_rxq); kfree(rx_q->buf_pool); if (rx_q->page_pool) page_pool_destroy(rx_q->page_pool); } static void free_dma_rx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { u32 rx_count = priv->plat->rx_queues_to_use; u32 queue; /* Free RX queue resources */ for (queue = 0; queue < rx_count; queue++) __free_dma_rx_desc_resources(priv, dma_conf, queue); } /** * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) * @priv: private structure * @dma_conf: structure to take the dma data * @queue: TX queue index */ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; size_t size; void *addr; /* Release the DMA TX socket buffers */ dma_free_tx_skbufs(priv, dma_conf, queue); if (priv->extend_desc) { size = sizeof(struct dma_extended_desc); addr = tx_q->dma_etx; } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { size = sizeof(struct dma_edesc); addr = tx_q->dma_entx; } else { size = sizeof(struct dma_desc); addr = tx_q->dma_tx; } size *= dma_conf->dma_tx_size; dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); kfree(tx_q->tx_skbuff_dma); kfree(tx_q->tx_skbuff); } static void free_dma_tx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { u32 tx_count = priv->plat->tx_queues_to_use; u32 queue; /* Free TX queue resources */ for (queue = 0; queue < tx_count; queue++) __free_dma_tx_desc_resources(priv, dma_conf, queue); } /** * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). * @priv: private structure * @dma_conf: structure to take the dma data * @queue: RX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. */ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; bool xdp_prog = stmmac_xdp_is_enabled(priv); struct page_pool_params pp_params = { 0 }; unsigned int num_pages; unsigned int napi_id; int ret; rx_q->queue_index = queue; rx_q->priv_data = priv; pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.pool_size = dma_conf->dma_rx_size; num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); pp_params.order = ilog2(num_pages); pp_params.nid = dev_to_node(priv->device); pp_params.dev = priv->device; pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; pp_params.offset = stmmac_rx_offset(priv); pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); rx_q->page_pool = page_pool_create(&pp_params); if (IS_ERR(rx_q->page_pool)) { ret = PTR_ERR(rx_q->page_pool); rx_q->page_pool = NULL; return ret; } rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, sizeof(*rx_q->buf_pool), GFP_KERNEL); if (!rx_q->buf_pool) return -ENOMEM; if (priv->extend_desc) { rx_q->dma_erx = dma_alloc_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_extended_desc), &rx_q->dma_rx_phy, GFP_KERNEL); if (!rx_q->dma_erx) return -ENOMEM; } else { rx_q->dma_rx = dma_alloc_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_desc), &rx_q->dma_rx_phy, GFP_KERNEL); if (!rx_q->dma_rx) return -ENOMEM; } if (stmmac_xdp_is_enabled(priv) && test_bit(queue, priv->af_xdp_zc_qps)) napi_id = ch->rxtx_napi.napi_id; else napi_id = ch->rx_napi.napi_id; ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, rx_q->queue_index, napi_id); if (ret) { netdev_err(priv->dev, "Failed to register xdp rxq info\n"); return -EINVAL; } return 0; } static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { u32 rx_count = priv->plat->rx_queues_to_use; u32 queue; int ret; /* RX queues buffers and DMA */ for (queue = 0; queue < rx_count; queue++) { ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); if (ret) goto err_dma; } return 0; err_dma: free_dma_rx_desc_resources(priv, dma_conf); return ret; } /** * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). * @priv: private structure * @dma_conf: structure to take the dma data * @queue: TX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. */ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; size_t size; void *addr; tx_q->queue_index = queue; tx_q->priv_data = priv; tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, sizeof(*tx_q->tx_skbuff_dma), GFP_KERNEL); if (!tx_q->tx_skbuff_dma) return -ENOMEM; tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, sizeof(struct sk_buff *), GFP_KERNEL); if (!tx_q->tx_skbuff) return -ENOMEM; if (priv->extend_desc) size = sizeof(struct dma_extended_desc); else if (tx_q->tbs & STMMAC_TBS_AVAIL) size = sizeof(struct dma_edesc); else size = sizeof(struct dma_desc); size *= dma_conf->dma_tx_size; addr = dma_alloc_coherent(priv->device, size, &tx_q->dma_tx_phy, GFP_KERNEL); if (!addr) return -ENOMEM; if (priv->extend_desc) tx_q->dma_etx = addr; else if (tx_q->tbs & STMMAC_TBS_AVAIL) tx_q->dma_entx = addr; else tx_q->dma_tx = addr; return 0; } static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { u32 tx_count = priv->plat->tx_queues_to_use; u32 queue; int ret; /* TX queues buffers and DMA */ for (queue = 0; queue < tx_count; queue++) { ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); if (ret) goto err_dma; } return 0; err_dma: free_dma_tx_desc_resources(priv, dma_conf); return ret; } /** * alloc_dma_desc_resources - alloc TX/RX resources. 
* @priv: private structure * @dma_conf: structure to take the dma data * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. */ static int alloc_dma_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { /* RX Allocation */ int ret = alloc_dma_rx_desc_resources(priv, dma_conf); if (ret) return ret; ret = alloc_dma_tx_desc_resources(priv, dma_conf); return ret; } /** * free_dma_desc_resources - free dma desc resources * @priv: private structure * @dma_conf: structure to take the dma data */ static void free_dma_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf) { /* Release the DMA TX socket buffers */ free_dma_tx_desc_resources(priv, dma_conf); /* Release the DMA RX socket buffers later * to ensure all pending XDP_TX buffers are returned. */ free_dma_rx_desc_resources(priv, dma_conf); } /** * stmmac_mac_enable_rx_queues - Enable MAC rx queues * @priv: driver private structure * Description: It is used for enabling the rx queues in the MAC */ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) { u32 rx_queues_count = priv->plat->rx_queues_to_use; int queue; u8 mode; for (queue = 0; queue < rx_queues_count; queue++) { mode = priv->plat->rx_queues_cfg[queue].mode_to_use; stmmac_rx_queue_enable(priv, priv->hw, mode, queue); } } /** * stmmac_start_rx_dma - start RX DMA channel * @priv: driver private structure * @chan: RX channel index * Description: * This starts a RX DMA channel */ static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); stmmac_start_rx(priv, priv->ioaddr, chan); } /** * stmmac_start_tx_dma - start TX DMA channel * @priv: driver private structure * @chan: TX channel index * Description: * This starts a TX DMA channel */ static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); stmmac_start_tx(priv, priv->ioaddr, chan); } /** * stmmac_stop_rx_dma - stop RX DMA channel * @priv: driver private structure * @chan: RX channel index * Description: * This stops a RX DMA channel */ static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); stmmac_stop_rx(priv, priv->ioaddr, chan); } /** * stmmac_stop_tx_dma - stop TX DMA channel * @priv: driver private structure * @chan: TX channel index * Description: * This stops a TX DMA channel */ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); stmmac_stop_tx(priv, priv->ioaddr, chan); } static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) { u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); u32 chan; for (chan = 0; chan < dma_csr_ch; chan++) { struct stmmac_channel *ch = &priv->channel[chan]; unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); spin_unlock_irqrestore(&ch->lock, flags); } } /** * stmmac_start_all_dma - start all RX and TX DMA channels * @priv: driver private structure * Description: * This starts all the RX and TX DMA channels */ static void 
stmmac_start_all_dma(struct stmmac_priv *priv) { u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; u32 chan = 0; for (chan = 0; chan < rx_channels_count; chan++) stmmac_start_rx_dma(priv, chan); for (chan = 0; chan < tx_channels_count; chan++) stmmac_start_tx_dma(priv, chan); } /** * stmmac_stop_all_dma - stop all RX and TX DMA channels * @priv: driver private structure * Description: * This stops the RX and TX DMA channels */ static void stmmac_stop_all_dma(struct stmmac_priv *priv) { u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; u32 chan = 0; for (chan = 0; chan < rx_channels_count; chan++) stmmac_stop_rx_dma(priv, chan); for (chan = 0; chan < tx_channels_count; chan++) stmmac_stop_tx_dma(priv, chan); } /** * stmmac_dma_operation_mode - HW DMA operation mode * @priv: driver private structure * Description: it is used for configuring the DMA operation mode register in * order to program the tx/rx DMA thresholds or Store-And-Forward mode. */ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) { u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; int rxfifosz = priv->plat->rx_fifo_size; int txfifosz = priv->plat->tx_fifo_size; u32 txmode = 0; u32 rxmode = 0; u32 chan = 0; u8 qmode = 0; if (rxfifosz == 0) rxfifosz = priv->dma_cap.rx_fifo_size; if (txfifosz == 0) txfifosz = priv->dma_cap.tx_fifo_size; /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { rxfifosz /= rx_channels_count; txfifosz /= tx_channels_count; } if (priv->plat->force_thresh_dma_mode) { txmode = tc; rxmode = tc; } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { /* * In case of GMAC, SF mode can be enabled * to perform the TX COE in HW. This depends on: * 1) TX COE if actually supported * 2) There is no bugged Jumbo frame support * that needs to not insert csum in the TDES. 
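 * In the remaining case the TX path falls back to the configured
 * threshold 'tc' while the RX path stays in store-and-forward mode.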
*/ txmode = SF_DMA_MODE; rxmode = SF_DMA_MODE; priv->xstats.threshold = SF_DMA_MODE; } else { txmode = tc; rxmode = SF_DMA_MODE; } /* configure all channels */ for (chan = 0; chan < rx_channels_count; chan++) { struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; u32 buf_size; qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, qmode); if (rx_q->xsk_pool) { buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); stmmac_set_dma_bfsize(priv, priv->ioaddr, buf_size, chan); } else { stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_conf.dma_buf_sz, chan); } } for (chan = 0; chan < tx_channels_count; chan++) { qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, qmode); } } static void stmmac_xsk_request_timestamp(void *_priv) { struct stmmac_metadata_request *meta_req = _priv; stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc); *meta_req->set_ic = true; } static u64 stmmac_xsk_fill_timestamp(void *_priv) { struct stmmac_xsk_tx_complete *tx_compl = _priv; struct stmmac_priv *priv = tx_compl->priv; struct dma_desc *desc = tx_compl->desc; bool found = false; u64 ns = 0; if (!priv->hwts_tx_en) return 0; /* check tx tstamp status */ if (stmmac_get_tx_timestamp_status(priv, desc)) { stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); found = true; } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { found = true; } if (found) { ns -= priv->plat->cdc_error_adj; return ns_to_ktime(ns); } return 0; } static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = { .tmo_request_timestamp = stmmac_xsk_request_timestamp, .tmo_fill_timestamp = stmmac_xsk_fill_timestamp, }; static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) { struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct xsk_buff_pool *pool = tx_q->xsk_pool; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc = NULL; struct xdp_desc xdp_desc; bool work_done = true; u32 tx_set_ic_bit = 0; /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq); budget = min(budget, stmmac_tx_avail(priv, queue)); while (budget-- > 0) { struct stmmac_metadata_request meta_req; struct xsk_tx_metadata *meta = NULL; dma_addr_t dma_addr; bool set_ic; /* We are sharing with slow path and stop XSK TX desc submission when * available TX ring is less than threshold. */ if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || !netif_carrier_ok(priv->dev)) { work_done = false; break; } if (!xsk_tx_peek_desc(pool, &xdp_desc)) break; if (priv->est && priv->est->enable && priv->est->max_sdu[queue] && xdp_desc.len > priv->est->max_sdu[queue]) { priv->xstats.max_sdu_txq_drop[queue]++; continue; } if (likely(priv->extend_desc)) tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) tx_desc = &tx_q->dma_entx[entry].basic; else tx_desc = tx_q->dma_tx + entry; dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); meta = xsk_buff_get_metadata(pool, xdp_desc.addr); xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; /* To return XDP buffer to XSK pool, we simple call * xsk_tx_completed(), so we don't need to fill up * 'buf' and 'xdpf'. 
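 * Completion is batched: stmmac_tx_clean() later counts these slots in
 * xsk_frames_done and reports them in one go via xsk_tx_completed().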
*/ tx_q->tx_skbuff_dma[entry].buf = 0; tx_q->xdpf[entry] = NULL; tx_q->tx_skbuff_dma[entry].map_as_page = false; tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; tx_q->tx_skbuff_dma[entry].last_segment = true; tx_q->tx_skbuff_dma[entry].is_jumbo = false; stmmac_set_desc_addr(priv, tx_desc, dma_addr); tx_q->tx_count_frames++; if (!priv->tx_coal_frames[queue]) set_ic = false; else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) set_ic = true; else set_ic = false; meta_req.priv = priv; meta_req.tx_desc = tx_desc; meta_req.set_ic = &set_ic; xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops, &meta_req); if (set_ic) { tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); tx_set_ic_bit++; } stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, true, priv->mode, true, true, xdp_desc.len); stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); xsk_tx_metadata_to_compl(meta, &tx_q->tx_skbuff_dma[entry].xsk_meta); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; } u64_stats_update_begin(&txq_stats->napi_syncp); u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); u64_stats_update_end(&txq_stats->napi_syncp); if (tx_desc) { stmmac_flush_tx_descriptors(priv, queue); xsk_tx_release(pool); } /* Return true if all of the 3 conditions are met * a) TX Budget is still available * b) work_done = true when XSK TX desc peek is empty (no more * pending XSK TX for transmission) */ return !!budget && work_done; } static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) { if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { tc += 64; if (priv->plat->force_thresh_dma_mode) stmmac_set_dma_operation_mode(priv, tc, tc, chan); else stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, chan); priv->xstats.threshold = tc; } } /** * stmmac_tx_clean - to manage the transmission completion * @priv: driver private structure * @budget: napi budget limiting this functions packet handling * @queue: TX queue index * @pending_packets: signal to arm the TX coal timer * Description: it reclaims the transmit resources after transmission completes. * If some packets still needs to be handled, due to TX coalesce, set * pending_packets to true to make NAPI arm the TX coal timer. 
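 * Return value: the larger of the reclaimed descriptor count and the XSK
 * zero-copy transmit result, so a single NAPI budget covers both kinds of
 * work for this queue.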
*/ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, bool *pending_packets) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry, xmits = 0, count = 0; u32 tx_packets = 0, tx_errors = 0; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); tx_q->xsk_frames_done = 0; entry = tx_q->dirty_tx; /* Try to clean all TX complete frame in 1 shot */ while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { struct xdp_frame *xdpf; struct sk_buff *skb; struct dma_desc *p; int status; if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { xdpf = tx_q->xdpf[entry]; skb = NULL; } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { xdpf = NULL; skb = tx_q->tx_skbuff[entry]; } else { xdpf = NULL; skb = NULL; } if (priv->extend_desc) p = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) p = &tx_q->dma_entx[entry].basic; else p = tx_q->dma_tx + entry; status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); /* Check if the descriptor is owned by the DMA */ if (unlikely(status & tx_dma_own)) break; count++; /* Make sure descriptor fields are read after reading * the own bit. */ dma_rmb(); /* Just consider the last segment and ...*/ if (likely(!(status & tx_not_ls))) { /* ... verify the status error condition */ if (unlikely(status & tx_err)) { tx_errors++; if (unlikely(status & tx_err_bump_tc)) stmmac_bump_dma_threshold(priv, queue); } else { tx_packets++; } if (skb) { stmmac_get_tx_hwtstamp(priv, p, skb); } else if (tx_q->xsk_pool && xp_tx_metadata_enabled(tx_q->xsk_pool)) { struct stmmac_xsk_tx_complete tx_compl = { .priv = priv, .desc = p, }; xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta, &stmmac_xsk_tx_metadata_ops, &tx_compl); } } if (likely(tx_q->tx_skbuff_dma[entry].buf && tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { if (tx_q->tx_skbuff_dma[entry].map_as_page) dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[entry].buf, tx_q->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, tx_q->tx_skbuff_dma[entry].buf, tx_q->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); tx_q->tx_skbuff_dma[entry].buf = 0; tx_q->tx_skbuff_dma[entry].len = 0; tx_q->tx_skbuff_dma[entry].map_as_page = false; } stmmac_clean_desc3(priv, tx_q, p); tx_q->tx_skbuff_dma[entry].last_segment = false; tx_q->tx_skbuff_dma[entry].is_jumbo = false; if (xdpf && tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { xdp_return_frame_rx_napi(xdpf); tx_q->xdpf[entry] = NULL; } if (xdpf && tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { xdp_return_frame(xdpf); tx_q->xdpf[entry] = NULL; } if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) tx_q->xsk_frames_done++; if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { if (likely(skb)) { pkts_compl++; bytes_compl += skb->len; dev_consume_skb_any(skb); tx_q->tx_skbuff[entry] = NULL; } } stmmac_release_tx_desc(priv, p, priv->mode); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); } tx_q->dirty_tx = entry; netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), pkts_compl, bytes_compl); if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, queue))) && stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { netif_dbg(priv, tx_done, priv->dev, 
"%s: restart transmit\n", __func__); netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); } if (tx_q->xsk_pool) { bool work_done; if (tx_q->xsk_frames_done) xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); if (xsk_uses_need_wakeup(tx_q->xsk_pool)) xsk_set_tx_need_wakeup(tx_q->xsk_pool); /* For XSK TX, we try to send as many as possible. * If XSK work done (XSK TX desc empty and budget still * available), return "budget - 1" to reenable TX IRQ. * Else, return "budget" to make NAPI continue polling. */ work_done = stmmac_xdp_xmit_zc(priv, queue, STMMAC_XSK_TX_BUDGET_MAX); if (work_done) xmits = budget - 1; else xmits = budget; } if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) { if (stmmac_enable_eee_mode(priv)) mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); } /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) *pending_packets = true; u64_stats_update_begin(&txq_stats->napi_syncp); u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); u64_stats_inc(&txq_stats->napi.tx_clean); u64_stats_update_end(&txq_stats->napi_syncp); priv->xstats.tx_errors += tx_errors; __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); /* Combine decisions from TX clean and XSK TX */ return max(count, xmits); } /** * stmmac_tx_err - to manage the tx error * @priv: driver private structure * @chan: channel index * Description: it cleans the descriptors and restarts the transmission * in case of transmission errors. */ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); stmmac_stop_tx_dma(priv, chan); dma_free_tx_skbufs(priv, &priv->dma_conf, chan); stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); stmmac_reset_tx_queue(priv, chan); stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan); stmmac_start_tx_dma(priv, chan); priv->xstats.tx_errors++; netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); } /** * stmmac_set_dma_operation_mode - Set DMA operation mode by channel * @priv: driver private structure * @txmode: TX operating mode * @rxmode: RX operating mode * @chan: channel index * Description: it is used for configuring of the DMA operation mode in * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward * mode. 
*/ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, u32 rxmode, u32 chan) { u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; int rxfifosz = priv->plat->rx_fifo_size; int txfifosz = priv->plat->tx_fifo_size; if (rxfifosz == 0) rxfifosz = priv->dma_cap.rx_fifo_size; if (txfifosz == 0) txfifosz = priv->dma_cap.tx_fifo_size; /* Adjust for real per queue fifo size */ rxfifosz /= rx_channels_count; txfifosz /= tx_channels_count; stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); } static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) { int ret; ret = stmmac_safety_feat_irq_status(priv, priv->dev, priv->ioaddr, priv->dma_cap.asp, &priv->sstats); if (ret && (ret != -EINVAL)) { stmmac_global_err(priv); return true; } return false; } static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) { int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, &priv->xstats, chan, dir); struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; struct stmmac_channel *ch = &priv->channel[chan]; struct napi_struct *rx_napi; struct napi_struct *tx_napi; unsigned long flags; rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { if (napi_schedule_prep(rx_napi)) { spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); spin_unlock_irqrestore(&ch->lock, flags); __napi_schedule(rx_napi); } } if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { if (napi_schedule_prep(tx_napi)) { spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); __napi_schedule(tx_napi); } } return status; } /** * stmmac_dma_interrupt - DMA ISR * @priv: driver private structure * Description: this is the DMA ISR. It is called by the main ISR. * It calls the dwmac dma routine and schedule poll method in case of some * work can be done. */ static void stmmac_dma_interrupt(struct stmmac_priv *priv) { u32 tx_channel_count = priv->plat->tx_queues_to_use; u32 rx_channel_count = priv->plat->rx_queues_to_use; u32 channels_to_check = tx_channel_count > rx_channel_count ? tx_channel_count : rx_channel_count; u32 chan; int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; /* Make sure we never check beyond our status buffer. */ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) channels_to_check = ARRAY_SIZE(status); for (chan = 0; chan < channels_to_check; chan++) status[chan] = stmmac_napi_check(priv, chan, DMA_DIR_RXTX); for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { /* Try to bump up the dma threshold on this failure */ stmmac_bump_dma_threshold(priv, chan); } else if (unlikely(status[chan] == tx_hard_error)) { stmmac_tx_err(priv, chan); } } } /** * stmmac_mmc_setup: setup the Mac Management Counters (MMC) * @priv: driver private structure * Description: this masks the MMC irq, in fact, the counters are managed in SW. 
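 * When the RMON capability is present the hardware counters are also
 * reset and set to reset-on-read and the software mirror in priv->mmc is
 * zeroed; otherwise only an informational message is printed.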
*/ static void stmmac_mmc_setup(struct stmmac_priv *priv) { unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); if (priv->dma_cap.rmon) { stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); } else netdev_info(priv->dev, "No MAC Management Counters available\n"); } /** * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. * @priv: driver private structure * Description: * new GMAC chip generations have a new register to indicate the * presence of the optional feature/functions. * This can be also used to override the value passed through the * platform and necessary for old MAC10/100 and GMAC chips. */ static int stmmac_get_hw_features(struct stmmac_priv *priv) { return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; } /** * stmmac_check_ether_addr - check if the MAC addr is valid * @priv: driver private structure * Description: * it is to verify if the MAC address is valid, in case of failures it * generates a random MAC address */ static void stmmac_check_ether_addr(struct stmmac_priv *priv) { u8 addr[ETH_ALEN]; if (!is_valid_ether_addr(priv->dev->dev_addr)) { stmmac_get_umac_addr(priv, priv->hw, addr, 0); if (is_valid_ether_addr(addr)) eth_hw_addr_set(priv->dev, addr); else eth_hw_addr_random(priv->dev); dev_info(priv->device, "device MAC address %pM\n", priv->dev->dev_addr); } } /** * stmmac_init_dma_engine - DMA init. * @priv: driver private structure * Description: * It inits the DMA invoking the specific MAC/GMAC callback. * Some DMA parameters can be passed from the platform; * in case of these are not passed a default is kept for the MAC or GMAC. 
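 * The sequence below is: soft-reset the DMA, program the common and
 * (optional) AXI configuration, then set up the CSR, RX and TX side of
 * every channel, with per-channel interrupts left disabled and the tail
 * pointers at their initial values.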
*/ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; u32 chan = 0; int ret = 0; if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { dev_err(priv->device, "Invalid DMA configuration\n"); return -EINVAL; } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) priv->plat->dma_cfg->atds = 1; ret = stmmac_reset(priv, priv->ioaddr); if (ret) { dev_err(priv->device, "Failed to reset the dma\n"); return ret; } /* DMA Configuration */ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); if (priv->plat->axi) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); /* DMA CSR Channel configuration */ for (chan = 0; chan < dma_csr_ch; chan++) { stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); } /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_channels_count; chan++) { rx_q = &priv->dma_conf.rx_queue[chan]; stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, rx_q->dma_rx_phy, chan); rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, chan); } /* DMA TX Channel Configuration */ for (chan = 0; chan < tx_channels_count; chan++) { tx_q = &priv->dma_conf.tx_queue[chan]; stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan); tx_q->tx_tail_addr = tx_q->dma_tx_phy; stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, chan); } return ret; } static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; u32 tx_coal_timer = priv->tx_coal_timer[queue]; struct stmmac_channel *ch; struct napi_struct *napi; if (!tx_coal_timer) return; ch = &priv->channel[tx_q->queue_index]; napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; /* Arm timer only if napi is not already scheduled. * Try to cancel any timer if napi is scheduled, timer will be armed * again in the next scheduled napi. */ if (unlikely(!napi_is_scheduled(napi))) hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(tx_coal_timer), HRTIMER_MODE_REL); else hrtimer_try_to_cancel(&tx_q->txtimer); } /** * stmmac_tx_timer - mitigation sw timer for tx. * @t: data pointer * Description: * This is the timer handler to directly invoke the stmmac_tx_clean. */ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) { struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); struct stmmac_priv *priv = tx_q->priv_data; struct stmmac_channel *ch; struct napi_struct *napi; ch = &priv->channel[tx_q->queue_index]; napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; if (likely(napi_schedule_prep(napi))) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); __napi_schedule(napi); } return HRTIMER_NORESTART; } /** * stmmac_init_coalesce - init mitigation options. * @priv: driver private structure * Description: * This inits the coalesce parameters: i.e. timer rate, * timer handler and default threshold used for enabling the * interrupt on completion bit. 
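 * Defaults are STMMAC_TX_FRAMES frames / STMMAC_COAL_TX_TIMER per TX
 * channel, backed by a per-queue hrtimer handled by stmmac_tx_timer(),
 * and STMMAC_RX_FRAMES frames per RX channel.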
*/ static void stmmac_init_coalesce(struct stmmac_priv *priv) { u32 tx_channel_count = priv->plat->tx_queues_to_use; u32 rx_channel_count = priv->plat->rx_queues_to_use; u32 chan; for (chan = 0; chan < tx_channel_count; chan++) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); tx_q->txtimer.function = stmmac_tx_timer; } for (chan = 0; chan < rx_channel_count; chan++) priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; } static void stmmac_set_rings_length(struct stmmac_priv *priv) { u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; u32 chan; /* set TX ring length */ for (chan = 0; chan < tx_channels_count; chan++) stmmac_set_tx_ring_len(priv, priv->ioaddr, (priv->dma_conf.dma_tx_size - 1), chan); /* set RX ring length */ for (chan = 0; chan < rx_channels_count; chan++) stmmac_set_rx_ring_len(priv, priv->ioaddr, (priv->dma_conf.dma_rx_size - 1), chan); } /** * stmmac_set_tx_queue_weight - Set TX queue weight * @priv: driver private structure * Description: It is used for setting TX queues weight */ static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) { u32 tx_queues_count = priv->plat->tx_queues_to_use; u32 weight; u32 queue; for (queue = 0; queue < tx_queues_count; queue++) { weight = priv->plat->tx_queues_cfg[queue].weight; stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); } } /** * stmmac_configure_cbs - Configure CBS in TX queue * @priv: driver private structure * Description: It is used for configuring CBS in AVB TX queues */ static void stmmac_configure_cbs(struct stmmac_priv *priv) { u32 tx_queues_count = priv->plat->tx_queues_to_use; u32 mode_to_use; u32 queue; /* queue 0 is reserved for legacy traffic */ for (queue = 1; queue < tx_queues_count; queue++) { mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; if (mode_to_use == MTL_QUEUE_DCB) continue; stmmac_config_cbs(priv, priv->hw, priv->plat->tx_queues_cfg[queue].send_slope, priv->plat->tx_queues_cfg[queue].idle_slope, priv->plat->tx_queues_cfg[queue].high_credit, priv->plat->tx_queues_cfg[queue].low_credit, queue); } } /** * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel * @priv: driver private structure * Description: It is used for mapping RX queues to RX dma channels */ static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) { u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 queue; u32 chan; for (queue = 0; queue < rx_queues_count; queue++) { chan = priv->plat->rx_queues_cfg[queue].chan; stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); } } /** * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority * @priv: driver private structure * Description: It is used for configuring the RX Queue Priority */ static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) { u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 queue; u32 prio; for (queue = 0; queue < rx_queues_count; queue++) { if (!priv->plat->rx_queues_cfg[queue].use_prio) continue; prio = priv->plat->rx_queues_cfg[queue].prio; stmmac_rx_queue_prio(priv, priv->hw, prio, queue); } } /** * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority * @priv: driver private structure * Description: It is used for configuring the TX Queue Priority */ static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) { u32 tx_queues_count = 
priv->plat->tx_queues_to_use; u32 queue; u32 prio; for (queue = 0; queue < tx_queues_count; queue++) { if (!priv->plat->tx_queues_cfg[queue].use_prio) continue; prio = priv->plat->tx_queues_cfg[queue].prio; stmmac_tx_queue_prio(priv, priv->hw, prio, queue); } } /** * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing * @priv: driver private structure * Description: It is used for configuring the RX queue routing */ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) { u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 queue; u8 packet; for (queue = 0; queue < rx_queues_count; queue++) { /* no specific packet type routing specified for the queue */ if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) continue; packet = priv->plat->rx_queues_cfg[queue].pkt_route; stmmac_rx_queue_routing(priv, priv->hw, packet, queue); } } static void stmmac_mac_config_rss(struct stmmac_priv *priv) { if (!priv->dma_cap.rssen || !priv->plat->rss_en) { priv->rss.enable = false; return; } if (priv->dev->features & NETIF_F_RXHASH) priv->rss.enable = true; else priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } /** * stmmac_mtl_configuration - Configure MTL * @priv: driver private structure * Description: It is used for configurring MTL */ static void stmmac_mtl_configuration(struct stmmac_priv *priv) { u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; if (tx_queues_count > 1) stmmac_set_tx_queue_weight(priv); /* Configure MTL RX algorithms */ if (rx_queues_count > 1) stmmac_prog_mtl_rx_algorithms(priv, priv->hw, priv->plat->rx_sched_algorithm); /* Configure MTL TX algorithms */ if (tx_queues_count > 1) stmmac_prog_mtl_tx_algorithms(priv, priv->hw, priv->plat->tx_sched_algorithm); /* Configure CBS in AVB TX queues */ if (tx_queues_count > 1) stmmac_configure_cbs(priv); /* Map RX MTL to DMA channels */ stmmac_rx_queue_dma_chan_map(priv); /* Enable MAC RX Queues */ stmmac_mac_enable_rx_queues(priv); /* Set RX priorities */ if (rx_queues_count > 1) stmmac_mac_config_rx_queues_prio(priv); /* Set TX priorities */ if (tx_queues_count > 1) stmmac_mac_config_tx_queues_prio(priv); /* Set RX routing */ if (rx_queues_count > 1) stmmac_mac_config_rx_queues_routing(priv); /* Receive Side Scaling */ if (rx_queues_count > 1) stmmac_mac_config_rss(priv); } static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) { if (priv->dma_cap.asp) { netdev_info(priv->dev, "Enabling Safety Features\n"); stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, priv->plat->safety_feat_cfg); } else { netdev_info(priv->dev, "No Safety Features support found\n"); } } /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. * @ptp_register: register PTP if set * Description: * this is the main function to setup the HW in a usable state because the * dma engine is reset, the core registers are configured (e.g. AXI, * Checksum features, timers). The DMA is ready to start receiving and * transmitting. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. 
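 * Note: besides the DMA engine this also programs the MAC address, the
 * MTL and safety features, the optional PTP clock and RX watchdog, and
 * enables TSO, split header, VLAN insertion and TBS where available.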
*/ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) { struct stmmac_priv *priv = netdev_priv(dev); u32 rx_cnt = priv->plat->rx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use; bool sph_en; u32 chan; int ret; /* Make sure RX clock is enabled */ if (priv->hw->phylink_pcs) phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs); /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { netdev_err(priv->dev, "%s: DMA engine initialization failed\n", __func__); return ret; } /* Copy the MAC addr into the HW */ stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); /* PS and related bits will be programmed according to the speed */ if (priv->hw->pcs) { int speed = priv->plat->mac_port_sel_speed; if ((speed == SPEED_10) || (speed == SPEED_100) || (speed == SPEED_1000)) { priv->hw->ps = speed; } else { dev_warn(priv->device, "invalid port speed\n"); priv->hw->ps = 0; } } /* Initialize the MAC Core */ stmmac_core_init(priv, priv->hw, dev); /* Initialize MTL*/ stmmac_mtl_configuration(priv); /* Initialize Safety Features */ stmmac_safety_feat_configuration(priv); ret = stmmac_rx_ipc(priv, priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); priv->plat->rx_coe = STMMAC_RX_COE_NONE; priv->hw->rx_csum = 0; } /* Enable the MAC Rx/Tx */ stmmac_mac_set(priv, priv->ioaddr, true); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); stmmac_mmc_setup(priv); if (ptp_register) { ret = clk_prepare_enable(priv->plat->clk_ptp_ref); if (ret < 0) netdev_warn(priv->dev, "failed to enable PTP reference clock: %pe\n", ERR_PTR(ret)); } ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_info(priv->dev, "PTP not supported by HW\n"); else if (ret) netdev_warn(priv->dev, "PTP init failed\n"); else if (ptp_register) stmmac_ptp_register(priv); priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; /* Convert the timer from msec to usec */ if (!priv->tx_lpi_timer) priv->tx_lpi_timer = eee_timer * 1000; if (priv->use_riwt) { u32 queue; for (queue = 0; queue < rx_cnt; queue++) { if (!priv->rx_riwt[queue]) priv->rx_riwt[queue] = DEF_DMA_RIWT; stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt[queue], queue); } } if (priv->hw->pcs) stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); /* set TX and RX rings length */ stmmac_set_rings_length(priv); /* Enable TSO */ if (priv->tso) { for (chan = 0; chan < tx_cnt; chan++) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; /* TSO and TBS cannot co-exist */ if (tx_q->tbs & STMMAC_TBS_AVAIL) continue; stmmac_enable_tso(priv, priv->ioaddr, 1, chan); } } /* Enable Split Header */ sph_en = (priv->hw->rx_csum > 0) && priv->sph; for (chan = 0; chan < rx_cnt; chan++) stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); /* VLAN Tag Insertion */ if (priv->dma_cap.vlins) stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); /* TBS */ for (chan = 0; chan < tx_cnt; chan++) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; int enable = tx_q->tbs & STMMAC_TBS_AVAIL; stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); } /* Configure real RX and TX queues */ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); /* Start the ball rolling... 
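 * i.e. kick every RX and TX DMA channel and apply the HW VLAN mode; the
 * per-channel DMA interrupts are expected to be enabled afterwards by the
 * caller (see stmmac_enable_all_dma_irq() in __stmmac_open()).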
*/ stmmac_start_all_dma(priv); stmmac_set_hw_vlan_mode(priv, priv->hw); return 0; } static void stmmac_hw_teardown(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); clk_disable_unprepare(priv->plat->clk_ptp_ref); } static void stmmac_free_irq(struct net_device *dev, enum request_irq_err irq_err, int irq_idx) { struct stmmac_priv *priv = netdev_priv(dev); int j; switch (irq_err) { case REQ_IRQ_ERR_ALL: irq_idx = priv->plat->tx_queues_to_use; fallthrough; case REQ_IRQ_ERR_TX: for (j = irq_idx - 1; j >= 0; j--) { if (priv->tx_irq[j] > 0) { irq_set_affinity_hint(priv->tx_irq[j], NULL); free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); } } irq_idx = priv->plat->rx_queues_to_use; fallthrough; case REQ_IRQ_ERR_RX: for (j = irq_idx - 1; j >= 0; j--) { if (priv->rx_irq[j] > 0) { irq_set_affinity_hint(priv->rx_irq[j], NULL); free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); } } if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) free_irq(priv->sfty_ue_irq, dev); fallthrough; case REQ_IRQ_ERR_SFTY_UE: if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) free_irq(priv->sfty_ce_irq, dev); fallthrough; case REQ_IRQ_ERR_SFTY_CE: if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) free_irq(priv->lpi_irq, dev); fallthrough; case REQ_IRQ_ERR_LPI: if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) free_irq(priv->wol_irq, dev); fallthrough; case REQ_IRQ_ERR_SFTY: if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) free_irq(priv->sfty_irq, dev); fallthrough; case REQ_IRQ_ERR_WOL: free_irq(dev->irq, dev); fallthrough; case REQ_IRQ_ERR_MAC: case REQ_IRQ_ERR_NO: /* If MAC IRQ request error, no more IRQ to free */ break; } } static int stmmac_request_irq_multi_msi(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); enum request_irq_err irq_err; cpumask_t cpu_mask; int irq_idx = 0; char *int_name; int ret; int i; /* For common interrupt */ int_name = priv->int_name_mac; sprintf(int_name, "%s:%s", dev->name, "mac"); ret = request_irq(dev->irq, stmmac_mac_interrupt, 0, int_name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc mac MSI %d (error: %d)\n", __func__, dev->irq, ret); irq_err = REQ_IRQ_ERR_MAC; goto irq_error; } /* Request the Wake IRQ in case of another line * is used for WoL */ priv->wol_irq_disabled = true; if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { int_name = priv->int_name_wol; sprintf(int_name, "%s:%s", dev->name, "wol"); ret = request_irq(priv->wol_irq, stmmac_mac_interrupt, 0, int_name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc wol MSI %d (error: %d)\n", __func__, priv->wol_irq, ret); irq_err = REQ_IRQ_ERR_WOL; goto irq_error; } } /* Request the LPI IRQ in case of another line * is used for LPI */ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { int_name = priv->int_name_lpi; sprintf(int_name, "%s:%s", dev->name, "lpi"); ret = request_irq(priv->lpi_irq, stmmac_mac_interrupt, 0, int_name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc lpi MSI %d (error: %d)\n", __func__, priv->lpi_irq, ret); irq_err = REQ_IRQ_ERR_LPI; goto irq_error; } } /* Request the common Safety Feature Correctible/Uncorrectible * Error line in case of another line is used */ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { int_name = priv->int_name_sfty; sprintf(int_name, "%s:%s", dev->name, "safety"); ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, 0, int_name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc sfty MSI %d (error: %d)\n", __func__, 
priv->sfty_irq, ret); irq_err = REQ_IRQ_ERR_SFTY; goto irq_error; } } /* Request the Safety Feature Correctible Error line in * case of another line is used */ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { int_name = priv->int_name_sfty_ce; sprintf(int_name, "%s:%s", dev->name, "safety-ce"); ret = request_irq(priv->sfty_ce_irq, stmmac_safety_interrupt, 0, int_name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc sfty ce MSI %d (error: %d)\n", __func__, priv->sfty_ce_irq, ret); irq_err = REQ_IRQ_ERR_SFTY_CE; goto irq_error; } } /* Request the Safety Feature Uncorrectible Error line in * case of another line is used */ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { int_name = priv->int_name_sfty_ue; sprintf(int_name, "%s:%s", dev->name, "safety-ue"); ret = request_irq(priv->sfty_ue_irq, stmmac_safety_interrupt, 0, int_name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc sfty ue MSI %d (error: %d)\n", __func__, priv->sfty_ue_irq, ret); irq_err = REQ_IRQ_ERR_SFTY_UE; goto irq_error; } } /* Request Rx MSI irq */ for (i = 0; i < priv->plat->rx_queues_to_use; i++) { if (i >= MTL_MAX_RX_QUEUES) break; if (priv->rx_irq[i] == 0) continue; int_name = priv->int_name_rx_irq[i]; sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); ret = request_irq(priv->rx_irq[i], stmmac_msi_intr_rx, 0, int_name, &priv->dma_conf.rx_queue[i]); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc rx-%d MSI %d (error: %d)\n", __func__, i, priv->rx_irq[i], ret); irq_err = REQ_IRQ_ERR_RX; irq_idx = i; goto irq_error; } cpumask_clear(&cpu_mask); cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); } /* Request Tx MSI irq */ for (i = 0; i < priv->plat->tx_queues_to_use; i++) { if (i >= MTL_MAX_TX_QUEUES) break; if (priv->tx_irq[i] == 0) continue; int_name = priv->int_name_tx_irq[i]; sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); ret = request_irq(priv->tx_irq[i], stmmac_msi_intr_tx, 0, int_name, &priv->dma_conf.tx_queue[i]); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc tx-%d MSI %d (error: %d)\n", __func__, i, priv->tx_irq[i], ret); irq_err = REQ_IRQ_ERR_TX; irq_idx = i; goto irq_error; } cpumask_clear(&cpu_mask); cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); } return 0; irq_error: stmmac_free_irq(dev, irq_err, irq_idx); return ret; } static int stmmac_request_irq_single(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); enum request_irq_err irq_err; int ret; ret = request_irq(dev->irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n", __func__, dev->irq, ret); irq_err = REQ_IRQ_ERR_MAC; goto irq_error; } /* Request the Wake IRQ in case of another line * is used for WoL */ priv->wol_irq_disabled = true; if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { ret = request_irq(priv->wol_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: ERROR: allocating the WoL IRQ %d (%d)\n", __func__, priv->wol_irq, ret); irq_err = REQ_IRQ_ERR_WOL; goto irq_error; } } /* Request the IRQ lines */ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n", __func__, priv->lpi_irq, ret); irq_err = REQ_IRQ_ERR_LPI; goto 
irq_error; } } /* Request the common Safety Feature Correctible/Uncorrectible * Error line in case of another line is used */ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: ERROR: allocating the sfty IRQ %d (%d)\n", __func__, priv->sfty_irq, ret); irq_err = REQ_IRQ_ERR_SFTY; goto irq_error; } } return 0; irq_error: stmmac_free_irq(dev, irq_err, 0); return ret; } static int stmmac_request_irq(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); int ret; /* Request the IRQ lines */ if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) ret = stmmac_request_irq_multi_msi(dev); else ret = stmmac_request_irq_single(dev); return ret; } /** * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue * @priv: driver private structure * @mtu: MTU to setup the dma queue and buf with * Description: Allocate and generate a dma_conf based on the provided MTU. * Allocate the Tx/Rx DMA queue and init them. * Return value: * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. */ static struct stmmac_dma_conf * stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) { struct stmmac_dma_conf *dma_conf; int chan, bfsize, ret; dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); if (!dma_conf) { netdev_err(priv->dev, "%s: DMA conf allocation failed\n", __func__); return ERR_PTR(-ENOMEM); } bfsize = stmmac_set_16kib_bfsize(priv, mtu); if (bfsize < 0) bfsize = 0; if (bfsize < BUF_SIZE_16KiB) bfsize = stmmac_set_bfsize(mtu, 0); dma_conf->dma_buf_sz = bfsize; /* Chose the tx/rx size from the already defined one in the * priv struct. (if defined) */ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; if (!dma_conf->dma_tx_size) dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; if (!dma_conf->dma_rx_size) dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; /* Earlier check for TBS */ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; /* Setup per-TXQ tbs flag before TX descriptor alloc */ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; } ret = alloc_dma_desc_resources(priv, dma_conf); if (ret < 0) { netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", __func__); goto alloc_error; } ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); if (ret < 0) { netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", __func__); goto init_error; } return dma_conf; init_error: free_dma_desc_resources(priv, dma_conf); alloc_error: kfree(dma_conf); return ERR_PTR(ret); } /** * __stmmac_open - open entry point of the driver * @dev : pointer to the device structure. * @dma_conf : structure to take the dma data * Description: * This function is the open entry point of the driver. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. 
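 * The caller must pass a dma_conf previously created by stmmac_setup_dma_desc();
 * its contents are copied into priv->dma_conf before the hardware is configured,
 * so the caller stays responsible for freeing the dma_conf container itself.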
*/ static int __stmmac_open(struct net_device *dev, struct stmmac_dma_conf *dma_conf) { struct stmmac_priv *priv = netdev_priv(dev); int mode = priv->plat->phy_interface; u32 chan; int ret; ret = pm_runtime_resume_and_get(priv->device); if (ret < 0) return ret; if ((!priv->hw->xpcs || xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { ret = stmmac_init_phy(dev); if (ret) { netdev_err(priv->dev, "%s: Cannot attach to PHY (error: %d)\n", __func__, ret); goto init_phy_error; } } priv->rx_copybreak = STMMAC_RX_COPYBREAK; buf_sz = dma_conf->dma_buf_sz; for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); stmmac_reset_queues_param(priv); if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && priv->plat->serdes_powerup) { ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); if (ret < 0) { netdev_err(priv->dev, "%s: Serdes powerup failed\n", __func__); goto init_error; } } ret = stmmac_hw_setup(dev, true); if (ret < 0) { netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); goto init_error; } stmmac_init_coalesce(priv); phylink_start(priv->phylink); /* We may have called phylink_speed_down before */ phylink_speed_up(priv->phylink); ret = stmmac_request_irq(dev); if (ret) goto irq_error; stmmac_enable_all_queues(priv); netif_tx_start_all_queues(priv->dev); stmmac_enable_all_dma_irq(priv); return 0; irq_error: phylink_stop(priv->phylink); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); stmmac_hw_teardown(dev); init_error: phylink_disconnect_phy(priv->phylink); init_phy_error: pm_runtime_put(priv->device); return ret; } static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_dma_conf *dma_conf; int ret; dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); if (IS_ERR(dma_conf)) return PTR_ERR(dma_conf); ret = __stmmac_open(dev, dma_conf); if (ret) free_dma_desc_resources(priv, dma_conf); kfree(dma_conf); return ret; } /** * stmmac_release - close entry point of the driver * @dev : device pointer. * Description: * This is the stop entry point of the driver. 
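 * It stops the PHY, disables NAPI and the TX/RX DMA engines, frees the IRQ
 * lines and releases the descriptor resources allocated at open time.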
*/ static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 chan; if (device_may_wakeup(priv->device)) phylink_speed_down(priv->phylink, false); /* Stop and disconnect the PHY */ phylink_stop(priv->phylink); phylink_disconnect_phy(priv->phylink); stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); netif_tx_disable(dev); /* Free the IRQ lines */ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); if (priv->eee_enabled) { priv->tx_path_in_lpi_mode = false; del_timer_sync(&priv->eee_ctrl_timer); } /* Stop TX/RX DMA and clear the descriptors */ stmmac_stop_all_dma(priv); /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv, &priv->dma_conf); /* Disable the MAC Rx/Tx */ stmmac_mac_set(priv, priv->ioaddr, false); /* Power down the SerDes if present */ if (priv->plat->serdes_powerdown) priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); stmmac_release_ptp(priv); if (stmmac_fpe_supported(priv)) timer_shutdown_sync(&priv->fpe_cfg.verify_timer); pm_runtime_put(priv->device); return 0; }
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, struct stmmac_tx_queue *tx_q) { u16 tag = 0x0, inner_tag = 0x0; u32 inner_type = 0x0; struct dma_desc *p; if (!priv->dma_cap.vlins) return false; if (!skb_vlan_tag_present(skb)) return false; if (skb->vlan_proto == htons(ETH_P_8021AD)) { inner_tag = skb_vlan_tag_get(skb); inner_type = STMMAC_VLAN_INSERT; } tag = skb_vlan_tag_get(skb); if (tx_q->tbs & STMMAC_TBS_AVAIL) p = &tx_q->dma_entx[tx_q->cur_tx].basic; else p = &tx_q->dma_tx[tx_q->cur_tx]; if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) return false; stmmac_set_tx_owner(priv, p); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); return true; }
/** * stmmac_tso_allocator - fill TSO descriptors for a DMA buffer * @priv: driver private structure * @des: buffer start address * @total_len: total length to fill in descriptors * @last_segment: condition for the last descriptor * @queue: TX queue index * Description: * This function fills descriptors and requests new descriptors according to * the buffer length to fill */ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, int total_len, bool last_segment, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct dma_desc *desc; u32 buff_size; int tmp_len; tmp_len = total_len; while (tmp_len > 0) { dma_addr_t curr_addr; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); if (tx_q->tbs & STMMAC_TBS_AVAIL) desc = &tx_q->dma_entx[tx_q->cur_tx].basic; else desc = &tx_q->dma_tx[tx_q->cur_tx]; curr_addr = des + (total_len - tmp_len); if (priv->dma_cap.addr64 <= 32) desc->des0 = cpu_to_le32(curr_addr); else stmmac_set_desc_addr(priv, desc, curr_addr); buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len; stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 0, 1, (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 0, 0); tmp_len -= TSO_MAX_BUFF_SIZE; } } static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; int desc_size; if (likely(priv->extend_desc)) desc_size = sizeof(struct dma_extended_desc); else if (tx_q->tbs & STMMAC_TBS_AVAIL) desc_size = sizeof(struct dma_edesc); else desc_size = sizeof(struct dma_desc); /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that * all is coherent before granting the DMA engine. */ wmb(); tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); } /** * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) * @skb : the socket buffer * @dev : device pointer * Description: this is the transmit function that is called on TSO frames * (support available on GMAC4 and newer chips). * Diagram below show the ring programming in case of TSO frames: * * First Descriptor * -------- * | DES0 |---> buffer1 = L2/L3/L4 header * | DES1 |---> TCP Payload (can continue on next descr...) * | DES2 |---> buffer 1 and 2 len * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] * -------- * | * ... * | * -------- * | DES0 | --| Split TCP Payload on Buffers 1 and 2 * | DES1 | --| * | DES2 | --> buffer 1 and 2 len * | DES3 | * -------- * * mss is fixed when enable tso, so w/o programming the TDES3 ctx field. */ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { struct dma_desc *desc, *first, *mss_desc = NULL; struct stmmac_priv *priv = netdev_priv(dev); int tmp_pay_len = 0, first_tx, nfrags; unsigned int first_entry, tx_packets; struct stmmac_txq_stats *txq_stats; struct stmmac_tx_queue *tx_q; u32 pay_len, mss, queue; dma_addr_t tso_des, des; u8 proto_hdr_len, hdr; bool set_ic; int i; /* Always insert VLAN tag to SKB payload for TSO frames. * * Never insert VLAN tag by HW, since segments splited by * TSO engine will be un-tagged by mistake. */ if (skb_vlan_tag_present(skb)) { skb = __vlan_hwaccel_push_inside(skb); if (unlikely(!skb)) { priv->xstats.tx_dropped++; return NETDEV_TX_OK; } } nfrags = skb_shinfo(skb)->nr_frags; queue = skb_get_queue_mapping(skb); tx_q = &priv->dma_conf.tx_queue[queue]; txq_stats = &priv->xstats.txq_stats[queue]; first_tx = tx_q->cur_tx; /* Compute header lengths */ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); hdr = sizeof(struct udphdr); } else { proto_hdr_len = skb_tcp_all_headers(skb); hdr = tcp_hdrlen(skb); } /* Desc availability based on threshold should be enough safe */ if (unlikely(stmmac_tx_avail(priv, queue) < (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); /* This is a hard error, log it. 
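 * Running out of descriptors while the queue is still awake normally points
 * at a descriptor accounting problem rather than ordinary backpressure.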
*/ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", __func__); } return NETDEV_TX_BUSY; } pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ mss = skb_shinfo(skb)->gso_size; /* set new MSS value if needed */ if (mss != tx_q->mss) { if (tx_q->tbs & STMMAC_TBS_AVAIL) mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; else mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; stmmac_set_mss(priv, mss_desc, mss); tx_q->mss = mss; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); } if (netif_msg_tx_queued(priv)) { pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", __func__, hdr, proto_hdr_len, pay_len, mss); pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, skb->data_len); } first_entry = tx_q->cur_tx; WARN_ON(tx_q->tx_skbuff[first_entry]); if (tx_q->tbs & STMMAC_TBS_AVAIL) desc = &tx_q->dma_entx[first_entry].basic; else desc = &tx_q->dma_tx[first_entry]; first = desc; /* first descriptor: fill Headers on Buf1 */ des = dma_map_single(priv->device, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (dma_mapping_error(priv->device, des)) goto dma_map_err; if (priv->dma_cap.addr64 <= 32) { first->des0 = cpu_to_le32(des); /* Fill start of payload in buff2 of first descriptor */ if (pay_len) first->des1 = cpu_to_le32(des + proto_hdr_len); /* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; tso_des = des; } else { stmmac_set_desc_addr(priv, first, des); tmp_pay_len = pay_len; tso_des = des + proto_hdr_len; pay_len = 0; } stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue); /* In case two or more DMA transmit descriptors are allocated for this * non-paged SKB data, the DMA buffer address should be saved to * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor, * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee * that stmmac_tx_clean() does not unmap the entire DMA buffer too early * since the tail areas of the DMA buffer can be accessed by DMA engine * sooner or later. * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf * corresponding to the last descriptor, stmmac_tx_clean() will unmap * this DMA buffer right after the DMA engine completely finishes the * full buffer transmission. */ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb); tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false; tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; /* Prepare fragments */ for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; des = skb_frag_dma_map(priv->device, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(priv->device, des)) goto dma_map_err; stmmac_tso_allocator(priv, des, skb_frag_size(frag), (i == nfrags - 1), queue); tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; } tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; /* Only the last descriptor gets to point to the skb. 
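 * This way stmmac_tx_clean() frees the skb exactly once, after the DMA has
 * handed back every segment of the frame.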
*/ tx_q->tx_skbuff[tx_q->cur_tx] = skb; tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; /* Manage tx mitigation */ tx_packets = (tx_q->cur_tx + 1) - first_tx; tx_q->tx_count_frames += tx_packets; if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) set_ic = true; else if (!priv->tx_coal_frames[queue]) set_ic = false; else if (tx_packets > priv->tx_coal_frames[queue]) set_ic = true; else if ((tx_q->tx_count_frames % priv->tx_coal_frames[queue]) < tx_packets) set_ic = true; else set_ic = false; if (set_ic) { if (tx_q->tbs & STMMAC_TBS_AVAIL) desc = &tx_q->dma_entx[tx_q->cur_tx].basic; else desc = &tx_q->dma_tx[tx_q->cur_tx]; tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); } /* We've used all descriptors we need for this skb, however, * advance cur_tx so that it references a fresh descriptor. * ndo_start_xmit will fill this descriptor the next time it's * called and stmmac_tx_clean may clean up to this descriptor. */ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", __func__); netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } u64_stats_update_begin(&txq_stats->q_syncp); u64_stats_add(&txq_stats->q.tx_bytes, skb->len); u64_stats_inc(&txq_stats->q.tx_tso_frames); u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); if (set_ic) u64_stats_inc(&txq_stats->q.tx_set_ic_bit); u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); skb_tx_timestamp(skb); if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { /* declare that device is doing timestamping */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; stmmac_enable_tx_timestamp(priv, first); } /* Complete the first descriptor before granting the DMA */ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, pay_len, 1, tx_q->tx_skbuff_dma[first_entry].last_segment, hdr / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ if (mss_desc) { /* Make sure that first descriptor has been completely * written, including its own bit. This is because MSS is * actually before first descriptor, so we need to make * sure that MSS's own bit is the last thing written. */ dma_wmb(); stmmac_set_tx_owner(priv, mss_desc); } if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, tx_q->cur_tx, first, nfrags); pr_info(">>> frame to be transmitted: "); print_pkt(skb->data, skb_headlen(skb)); } netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; dma_map_err: dev_err(priv->device, "Tx dma map failed\n"); dev_kfree_skb(skb); priv->xstats.tx_dropped++; return NETDEV_TX_OK; } /** * stmmac_has_ip_ethertype() - Check if packet has IP ethertype * @skb: socket buffer to check * * Check if a packet has an ethertype that will trigger the IP header checks * and IP/TCP checksum engine of the stmmac core. 
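 * Used on both the transmit and the receive paths to decide whether the
 * hardware checksum result can be trusted for a given frame.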
* * Return: true if the ethertype can trigger the checksum engine, false * otherwise */ static bool stmmac_has_ip_ethertype(struct sk_buff *skb) { int depth = 0; __be16 proto; proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb), &depth); return (depth <= ETH_HLEN) && (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6)); } /** * stmmac_xmit - Tx entry point of the driver * @skb : the socket buffer * @dev : device pointer * Description : this is the tx entry point of the driver. * It programs the chain or the ring and supports oversized frames * and SG feature. */ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int first_entry, tx_packets, enh_desc; struct stmmac_priv *priv = netdev_priv(dev); unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; u32 queue = skb_get_queue_mapping(skb); int nfrags = skb_shinfo(skb)->nr_frags; int gso = skb_shinfo(skb)->gso_type; struct stmmac_txq_stats *txq_stats; struct dma_edesc *tbs_desc = NULL; struct dma_desc *desc, *first; struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; int entry, first_tx; dma_addr_t des; tx_q = &priv->dma_conf.tx_queue[queue]; txq_stats = &priv->xstats.txq_stats[queue]; first_tx = tx_q->cur_tx; if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) stmmac_disable_eee_mode(priv); /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) return stmmac_tso_xmit(skb, dev); if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) return stmmac_tso_xmit(skb, dev); } if (priv->est && priv->est->enable && priv->est->max_sdu[queue] && skb->len > priv->est->max_sdu[queue]){ priv->xstats.max_sdu_txq_drop[queue]++; goto max_sdu_err; } if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); /* This is a hard error, log it. */ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", __func__); } return NETDEV_TX_BUSY; } /* Check if VLAN can be inserted by HW */ has_vlan = stmmac_vlan_insert(priv, skb, tx_q); entry = tx_q->cur_tx; first_entry = entry; WARN_ON(tx_q->tx_skbuff[first_entry]); csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); /* DWMAC IPs can be synthesized to support tx coe only for a few tx * queues. In that case, checksum offloading for those queues that don't * support tx coe needs to fallback to software checksum calculation. * * Packets that won't trigger the COE e.g. most DSA-tagged packets will * also have to be checksummed in software. 
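 * In both cases skb_checksum_help() computes the checksum on the CPU and the
 * hardware checksum-insertion flag is cleared for this frame.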
*/ if (csum_insertion && (priv->plat->tx_queues_cfg[queue].coe_unsupported || !stmmac_has_ip_ethertype(skb))) { if (unlikely(skb_checksum_help(skb))) goto dma_map_err; csum_insertion = !csum_insertion; } if (likely(priv->extend_desc)) desc = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) desc = &tx_q->dma_entx[entry].basic; else desc = tx_q->dma_tx + entry; first = desc; if (has_vlan) stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); if (unlikely(is_jumbo)) { entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); if (unlikely(entry < 0) && (entry != -EINVAL)) goto dma_map_err; } for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int len = skb_frag_size(frag); bool last_segment = (i == (nfrags - 1)); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); WARN_ON(tx_q->tx_skbuff[entry]); if (likely(priv->extend_desc)) desc = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) desc = &tx_q->dma_entx[entry].basic; else desc = tx_q->dma_tx + entry; des = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, des)) goto dma_map_err; /* should reuse desc w/o issues */ tx_q->tx_skbuff_dma[entry].buf = des; stmmac_set_desc_addr(priv, desc, des); tx_q->tx_skbuff_dma[entry].map_as_page = true; tx_q->tx_skbuff_dma[entry].len = len; tx_q->tx_skbuff_dma[entry].last_segment = last_segment; tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; /* Prepare the descriptor and set the own bit too */ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, priv->mode, 1, last_segment, skb->len); } /* Only the last descriptor gets to point to the skb. */ tx_q->tx_skbuff[entry] = skb; tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; /* According to the coalesce parameter the IC bit for the latest * segment is reset and the timer re-started to clean the tx status. * This approach takes care about the fragments: desc is the first * element in case of no SG. */ tx_packets = (entry + 1) - first_tx; tx_q->tx_count_frames += tx_packets; if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) set_ic = true; else if (!priv->tx_coal_frames[queue]) set_ic = false; else if (tx_packets > priv->tx_coal_frames[queue]) set_ic = true; else if ((tx_q->tx_count_frames % priv->tx_coal_frames[queue]) < tx_packets) set_ic = true; else set_ic = false; if (set_ic) { if (likely(priv->extend_desc)) desc = &tx_q->dma_etx[entry].basic; else if (tx_q->tbs & STMMAC_TBS_AVAIL) desc = &tx_q->dma_entx[entry].basic; else desc = &tx_q->dma_tx[entry]; tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); } /* We've used all descriptors we need for this skb, however, * advance cur_tx so that it references a fresh descriptor. * ndo_start_xmit will fill this descriptor the next time it's * called and stmmac_tx_clean may clean up to this descriptor. 
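 * The availability check below then stops the queue if fewer than
 * MAX_SKB_FRAGS + 1 free descriptors remain for the next frame.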
*/ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); tx_q->cur_tx = entry; if (netif_msg_pktdata(priv)) { netdev_dbg(priv->dev, "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, entry, first, nfrags); netdev_dbg(priv->dev, ">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); } if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", __func__); netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } u64_stats_update_begin(&txq_stats->q_syncp); u64_stats_add(&txq_stats->q.tx_bytes, skb->len); if (set_ic) u64_stats_inc(&txq_stats->q.tx_set_ic_bit); u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); skb_tx_timestamp(skb); /* Ready to fill the first descriptor and set the OWN bit w/o any * problems because all the descriptors are actually ready to be * passed to the DMA engine. */ if (likely(!is_jumbo)) { bool last_segment = (nfrags == 0); des = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, des)) goto dma_map_err; tx_q->tx_skbuff_dma[first_entry].buf = des; tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; tx_q->tx_skbuff_dma[first_entry].map_as_page = false; stmmac_set_desc_addr(priv, first, des); tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { /* declare that device is doing timestamping */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; stmmac_enable_tx_timestamp(priv, first); } /* Prepare the first descriptor setting the OWN bit too */ stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, csum_insertion, priv->mode, 0, last_segment, skb->len); } if (tx_q->tbs & STMMAC_TBS_EN) { struct timespec64 ts = ns_to_timespec64(skb->tstamp); tbs_desc = &tx_q->dma_entx[first_entry]; stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); } stmmac_set_tx_owner(priv, first); netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; dma_map_err: netdev_err(priv->dev, "Tx DMA map failed\n"); max_sdu_err: dev_kfree_skb(skb); priv->xstats.tx_dropped++; return NETDEV_TX_OK; } static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) { struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); __be16 vlan_proto = veth->h_vlan_proto; u16 vlanid; if ((vlan_proto == htons(ETH_P_8021Q) && dev->features & NETIF_F_HW_VLAN_CTAG_RX) || (vlan_proto == htons(ETH_P_8021AD) && dev->features & NETIF_F_HW_VLAN_STAG_RX)) { /* pop the vlan tag */ vlanid = ntohs(veth->h_vlan_TCI); memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); skb_pull(skb, VLAN_HLEN); __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); } } /** * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure * @queue: RX queue index * Description : this is to reallocate the skb for the reception process * that is based on zero-copy. 
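 * New buffers come from the per-queue page_pool; ownership of each refilled
 * descriptor is handed back to the DMA, with interrupt moderation driven by
 * the RX coalescing settings.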
*/ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; int dirty = stmmac_rx_dirty(priv, queue); unsigned int entry = rx_q->dirty_rx; gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); if (priv->dma_cap.host_dma_width <= 32) gfp |= GFP_DMA32; while (dirty-- > 0) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; struct dma_desc *p; bool use_rx_wd; if (priv->extend_desc) p = (struct dma_desc *)(rx_q->dma_erx + entry); else p = rx_q->dma_rx + entry; if (!buf->page) { buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->page) break; } if (priv->sph && !buf->sec_page) { buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->sec_page) break; buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); } buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); if (priv->sph) stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); else stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); stmmac_refill_desc3(priv, rx_q, p); rx_q->rx_count_frames++; rx_q->rx_count_frames += priv->rx_coal_frames[queue]; if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) rx_q->rx_count_frames = 0; use_rx_wd = !priv->rx_coal_frames[queue]; use_rx_wd |= rx_q->rx_count_frames > 0; if (!priv->use_riwt) use_rx_wd = false; dma_wmb(); stmmac_set_rx_owner(priv, p, use_rx_wd); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); } rx_q->dirty_rx = entry; rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->dirty_rx * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); } static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, struct dma_desc *p, int status, unsigned int len) { unsigned int plen = 0, hlen = 0; int coe = priv->hw->rx_csum; /* Not first descriptor, buffer is always zero */ if (priv->sph && len) return 0; /* First descriptor, get split header length */ stmmac_get_rx_header_len(priv, p, &hlen); if (priv->sph && hlen) { priv->xstats.rx_split_hdr_pkt_n++; return hlen; } /* First descriptor, not last descriptor and not split header */ if (status & rx_not_ls) return priv->dma_conf.dma_buf_sz; plen = stmmac_get_rx_frame_len(priv, p, coe); /* First descriptor and last descriptor and not split header */ return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); } static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, struct dma_desc *p, int status, unsigned int len) { int coe = priv->hw->rx_csum; unsigned int plen = 0; /* Not split header, buffer is not available */ if (!priv->sph) return 0; /* Not last descriptor */ if (status & rx_not_ls) return priv->dma_conf.dma_buf_sz; plen = stmmac_get_rx_frame_len(priv, p, coe); /* Last descriptor */ return plen - len; } static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, struct xdp_frame *xdpf, bool dma_map) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc; dma_addr_t dma_addr; bool set_ic; if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) return STMMAC_XDP_CONSUMED; if (priv->est && priv->est->enable && priv->est->max_sdu[queue] && xdpf->len > priv->est->max_sdu[queue]) { priv->xstats.max_sdu_txq_drop[queue]++; return STMMAC_XDP_CONSUMED; } if (likely(priv->extend_desc)) tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) tx_desc = 
&tx_q->dma_entx[entry].basic; else tx_desc = tx_q->dma_tx + entry; if (dma_map) { dma_addr = dma_map_single(priv->device, xdpf->data, xdpf->len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, dma_addr)) return STMMAC_XDP_CONSUMED; tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; } else { struct page *page = virt_to_page(xdpf->data); dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + xdpf->headroom; dma_sync_single_for_device(priv->device, dma_addr, xdpf->len, DMA_BIDIRECTIONAL); tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; } tx_q->tx_skbuff_dma[entry].buf = dma_addr; tx_q->tx_skbuff_dma[entry].map_as_page = false; tx_q->tx_skbuff_dma[entry].len = xdpf->len; tx_q->tx_skbuff_dma[entry].last_segment = true; tx_q->tx_skbuff_dma[entry].is_jumbo = false; tx_q->xdpf[entry] = xdpf; stmmac_set_desc_addr(priv, tx_desc, dma_addr); stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, true, priv->mode, true, true, xdpf->len); tx_q->tx_count_frames++; if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) set_ic = true; else set_ic = false; if (set_ic) { tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); u64_stats_update_begin(&txq_stats->q_syncp); u64_stats_inc(&txq_stats->q.tx_set_ic_bit); u64_stats_update_end(&txq_stats->q_syncp); } stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); tx_q->cur_tx = entry; return STMMAC_XDP_TX; } static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, int cpu) { int index = cpu; if (unlikely(index < 0)) index = 0; while (index >= priv->plat->tx_queues_to_use) index -= priv->plat->tx_queues_to_use; return index; } static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, struct xdp_buff *xdp) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); int cpu = smp_processor_id(); struct netdev_queue *nq; int queue; int res; if (unlikely(!xdpf)) return STMMAC_XDP_CONSUMED; queue = stmmac_xdp_get_tx_queue(priv, cpu); nq = netdev_get_tx_queue(priv->dev, queue); __netif_tx_lock(nq, cpu); /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq); res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); if (res == STMMAC_XDP_TX) stmmac_flush_tx_descriptors(priv, queue); __netif_tx_unlock(nq); return res; } static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, struct bpf_prog *prog, struct xdp_buff *xdp) { u32 act; int res; act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_PASS: res = STMMAC_XDP_PASS; break; case XDP_TX: res = stmmac_xdp_xmit_back(priv, xdp); break; case XDP_REDIRECT: if (xdp_do_redirect(priv->dev, xdp, prog) < 0) res = STMMAC_XDP_CONSUMED; else res = STMMAC_XDP_REDIRECT; break; default: bpf_warn_invalid_xdp_action(priv->dev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->dev, prog, act); fallthrough; case XDP_DROP: res = STMMAC_XDP_CONSUMED; break; } return res; } static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, struct xdp_buff *xdp) { struct bpf_prog *prog; int res; prog = READ_ONCE(priv->xdp_prog); if (!prog) { res = STMMAC_XDP_PASS; goto out; } res = __stmmac_xdp_run_prog(priv, prog, xdp); out: return ERR_PTR(-res); } static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, int xdp_status) { int cpu = smp_processor_id(); int queue; queue = stmmac_xdp_get_tx_queue(priv, cpu); if (xdp_status & STMMAC_XDP_TX) stmmac_tx_timer_arm(priv, queue); if (xdp_status & STMMAC_XDP_REDIRECT) xdp_do_flush(); } static struct sk_buff *stmmac_construct_skb_zc(struct 
stmmac_channel *ch, struct xdp_buff *xdp) { unsigned int metasize = xdp->data - xdp->data_meta; unsigned int datasize = xdp->data_end - xdp->data; struct sk_buff *skb; skb = napi_alloc_skb(&ch->rxtx_napi, xdp->data_end - xdp->data_hard_start); if (unlikely(!skb)) return NULL; skb_reserve(skb, xdp->data - xdp->data_hard_start); memcpy(__skb_put(skb, datasize), xdp->data, datasize); if (metasize) skb_metadata_set(skb, metasize); return skb; } static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, struct dma_desc *p, struct dma_desc *np, struct xdp_buff *xdp) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned int len = xdp->data_end - xdp->data; enum pkt_hash_types hash_type; int coe = priv->hw->rx_csum; struct sk_buff *skb; u32 hash; skb = stmmac_construct_skb_zc(ch, xdp); if (!skb) { priv->xstats.rx_dropped++; return; } stmmac_get_rx_hwtstamp(priv, p, np, skb); if (priv->hw->hw_vlan_en) /* MAC level stripping. */ stmmac_rx_hw_vlan(priv, priv->hw, p, skb); else /* Driver level stripping. */ stmmac_rx_vlan(priv->dev, skb); skb->protocol = eth_type_trans(skb, priv->dev); if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb)) skb_checksum_none_assert(skb); else skb->ip_summed = CHECKSUM_UNNECESSARY; if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) skb_set_hash(skb, hash, hash_type); skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rxtx_napi, skb); u64_stats_update_begin(&rxq_stats->napi_syncp); u64_stats_inc(&rxq_stats->napi.rx_pkt_n); u64_stats_add(&rxq_stats->napi.rx_bytes, len); u64_stats_update_end(&rxq_stats->napi_syncp); } static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) { struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; unsigned int entry = rx_q->dirty_rx; struct dma_desc *rx_desc = NULL; bool ret = true; budget = min(budget, stmmac_rx_dirty(priv, queue)); while (budget-- > 0 && entry != rx_q->cur_rx) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; dma_addr_t dma_addr; bool use_rx_wd; if (!buf->xdp) { buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); if (!buf->xdp) { ret = false; break; } } if (priv->extend_desc) rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); else rx_desc = rx_q->dma_rx + entry; dma_addr = xsk_buff_xdp_get_dma(buf->xdp); stmmac_set_desc_addr(priv, rx_desc, dma_addr); stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); stmmac_refill_desc3(priv, rx_q, rx_desc); rx_q->rx_count_frames++; rx_q->rx_count_frames += priv->rx_coal_frames[queue]; if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) rx_q->rx_count_frames = 0; use_rx_wd = !priv->rx_coal_frames[queue]; use_rx_wd |= rx_q->rx_count_frames > 0; if (!priv->use_riwt) use_rx_wd = false; dma_wmb(); stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); } if (rx_desc) { rx_q->dirty_rx = entry; rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->dirty_rx * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); } return ret; } static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp) { /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used * to represent incoming packet, whereas cb field in the same structure * is used to store driver specific info. Thus, struct stmmac_xdp_buff * is laid on top of xdp and cb fields of struct xdp_buff_xsk. 
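 * The cast below therefore allocates nothing: the driver-private fields of
 * struct stmmac_xdp_buff simply occupy the cb[] space that struct xdp_buff_xsk
 * reserves for drivers.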
*/ return (struct stmmac_xdp_buff *)xdp; } static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; unsigned int count = 0, error = 0, len = 0; int dirty = stmmac_rx_dirty(priv, queue); unsigned int next_entry = rx_q->cur_rx; u32 rx_errors = 0, rx_dropped = 0; unsigned int desc_size; struct bpf_prog *prog; bool failure = false; int xdp_status = 0; int status = 0; if (netif_msg_rx_status(priv)) { void *rx_head; netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); if (priv->extend_desc) { rx_head = (void *)rx_q->dma_erx; desc_size = sizeof(struct dma_extended_desc); } else { rx_head = (void *)rx_q->dma_rx; desc_size = sizeof(struct dma_desc); } stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, rx_q->dma_rx_phy, desc_size); } while (count < limit) { struct stmmac_rx_buffer *buf; struct stmmac_xdp_buff *ctx; unsigned int buf1_len = 0; struct dma_desc *np, *p; int entry; int res; if (!count && rx_q->state_saved) { error = rx_q->state.error; len = rx_q->state.len; } else { rx_q->state_saved = false; error = 0; len = 0; } if (count >= limit) break; read_again: buf1_len = 0; entry = next_entry; buf = &rx_q->buf_pool[entry]; if (dirty >= STMMAC_RX_FILL_BATCH) { failure = failure || !stmmac_rx_refill_zc(priv, queue, dirty); dirty = 0; } if (priv->extend_desc) p = (struct dma_desc *)(rx_q->dma_erx + entry); else p = rx_q->dma_rx + entry; /* read the status of the incoming frame */ status = stmmac_rx_status(priv, &priv->xstats, p); /* check if managed by the DMA otherwise go ahead */ if (unlikely(status & dma_own)) break; /* Prefetch the next RX descriptor */ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, priv->dma_conf.dma_rx_size); next_entry = rx_q->cur_rx; if (priv->extend_desc) np = (struct dma_desc *)(rx_q->dma_erx + next_entry); else np = rx_q->dma_rx + next_entry; prefetch(np); /* Ensure a valid XSK buffer before proceed */ if (!buf->xdp) break; if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { xsk_buff_free(buf->xdp); buf->xdp = NULL; dirty++; error = 1; if (!priv->hwts_rx_en) rx_errors++; } if (unlikely(error && (status & rx_not_ls))) goto read_again; if (unlikely(error)) { count++; continue; } /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ if (likely(status & rx_not_ls)) { xsk_buff_free(buf->xdp); buf->xdp = NULL; dirty++; count++; goto read_again; } ctx = xsk_buff_to_stmmac_ctx(buf->xdp); ctx->priv = priv; ctx->desc = p; ctx->ndesc = np; /* XDP ZC Frame only support primary buffers for now */ buf1_len = stmmac_rx_buf1_len(priv, p, status, len); len += buf1_len; /* ACS is disabled; strip manually. 
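 * The FCS is subtracted from the reported length on the last descriptor of
 * the frame.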
*/ if (likely(!(status & rx_not_ls))) { buf1_len -= ETH_FCS_LEN; len -= ETH_FCS_LEN; } /* RX buffer is good and fits into an XSK pool buffer */ buf->xdp->data_end = buf->xdp->data + buf1_len; xsk_buff_dma_sync_for_cpu(buf->xdp); prog = READ_ONCE(priv->xdp_prog); res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); switch (res) { case STMMAC_XDP_PASS: stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); xsk_buff_free(buf->xdp); break; case STMMAC_XDP_CONSUMED: xsk_buff_free(buf->xdp); rx_dropped++; break; case STMMAC_XDP_TX: case STMMAC_XDP_REDIRECT: xdp_status |= res; break; } buf->xdp = NULL; dirty++; count++; } if (status & rx_not_ls) { rx_q->state_saved = true; rx_q->state.error = error; rx_q->state.len = len; } stmmac_finalize_xdp_rx(priv, xdp_status); u64_stats_update_begin(&rxq_stats->napi_syncp); u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { if (failure || stmmac_rx_dirty(priv, queue) > 0) xsk_set_rx_need_wakeup(rx_q->xsk_pool); else xsk_clear_rx_need_wakeup(rx_q->xsk_pool); return (int)count; } return failure ? limit : (int)count; }
/** * stmmac_rx - manage the receive process * @priv: driver private structure * @limit: napi budget * @queue: RX queue index. * Description : this is the function called by the napi poll method. * It gets all the frames inside the ring. */ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0; struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned int count = 0, error = 0, len = 0; int status = 0, coe = priv->hw->rx_csum; unsigned int next_entry = rx_q->cur_rx; enum dma_data_direction dma_dir; unsigned int desc_size; struct sk_buff *skb = NULL; struct stmmac_xdp_buff ctx; int xdp_status = 0; int buf_sz; dma_dir = page_pool_get_dma_dir(rx_q->page_pool); buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); if (netif_msg_rx_status(priv)) { void *rx_head; netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); if (priv->extend_desc) { rx_head = (void *)rx_q->dma_erx; desc_size = sizeof(struct dma_extended_desc); } else { rx_head = (void *)rx_q->dma_rx; desc_size = sizeof(struct dma_desc); } stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, rx_q->dma_rx_phy, desc_size); } while (count < limit) { unsigned int buf1_len = 0, buf2_len = 0; enum pkt_hash_types hash_type; struct stmmac_rx_buffer *buf; struct dma_desc *np, *p; int entry; u32 hash; if (!count && rx_q->state_saved) { skb = rx_q->state.skb; error = rx_q->state.error; len = rx_q->state.len; } else { rx_q->state_saved = false; skb = NULL; error = 0; len = 0; } read_again: if (count >= limit) break; buf1_len = 0; buf2_len = 0; entry = next_entry; buf = &rx_q->buf_pool[entry]; if (priv->extend_desc) p = (struct dma_desc *)(rx_q->dma_erx + entry); else p = rx_q->dma_rx + entry; /* read the status of the incoming frame */ status = stmmac_rx_status(priv, &priv->xstats, p); /* check if managed by the DMA, otherwise go ahead */ if (unlikely(status & dma_own)) break; rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, priv->dma_conf.dma_rx_size); next_entry = rx_q->cur_rx; if (priv->extend_desc) np = (struct dma_desc *)(rx_q->dma_erx +
next_entry); else np = rx_q->dma_rx + next_entry; prefetch(np); if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { page_pool_recycle_direct(rx_q->page_pool, buf->page); buf->page = NULL; error = 1; if (!priv->hwts_rx_en) rx_errors++; } if (unlikely(error && (status & rx_not_ls))) goto read_again; if (unlikely(error)) { dev_kfree_skb(skb); skb = NULL; count++; continue; } /* Buffer is good. Go on. */ prefetch(page_address(buf->page) + buf->page_offset); if (buf->sec_page) prefetch(page_address(buf->sec_page)); buf1_len = stmmac_rx_buf1_len(priv, p, status, len); len += buf1_len; buf2_len = stmmac_rx_buf2_len(priv, p, status, len); len += buf2_len; /* ACS is disabled; strip manually. */ if (likely(!(status & rx_not_ls))) { if (buf2_len) { buf2_len -= ETH_FCS_LEN; len -= ETH_FCS_LEN; } else if (buf1_len) { buf1_len -= ETH_FCS_LEN; len -= ETH_FCS_LEN; } } if (!skb) { unsigned int pre_len, sync_len; dma_sync_single_for_cpu(priv->device, buf->addr, buf1_len, dma_dir); xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); xdp_prepare_buff(&ctx.xdp, page_address(buf->page), buf->page_offset, buf1_len, true); pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - buf->page_offset; ctx.priv = priv; ctx.desc = p; ctx.ndesc = np; skb = stmmac_xdp_run_prog(priv, &ctx.xdp); /* Due xdp_adjust_tail: DMA sync for_device * cover max len CPU touch */ sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - buf->page_offset; sync_len = max(sync_len, pre_len); /* For Not XDP_PASS verdict */ if (IS_ERR(skb)) { unsigned int xdp_res = -PTR_ERR(skb); if (xdp_res & STMMAC_XDP_CONSUMED) { page_pool_put_page(rx_q->page_pool, virt_to_head_page(ctx.xdp.data), sync_len, true); buf->page = NULL; rx_dropped++; /* Clear skb as it was set as * status by XDP program. */ skb = NULL; if (unlikely((status & rx_not_ls))) goto read_again; count++; continue; } else if (xdp_res & (STMMAC_XDP_TX | STMMAC_XDP_REDIRECT)) { xdp_status |= xdp_res; buf->page = NULL; skb = NULL; count++; continue; } } } if (!skb) { /* XDP program may expand or reduce tail */ buf1_len = ctx.xdp.data_end - ctx.xdp.data; skb = napi_alloc_skb(&ch->rx_napi, buf1_len); if (!skb) { rx_dropped++; count++; goto drain_data; } /* XDP program may adjust header */ skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len); skb_put(skb, buf1_len); /* Data payload copied into SKB, page ready for recycle */ page_pool_recycle_direct(rx_q->page_pool, buf->page); buf->page = NULL; } else if (buf1_len) { dma_sync_single_for_cpu(priv->device, buf->addr, buf1_len, dma_dir); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->page, buf->page_offset, buf1_len, priv->dma_conf.dma_buf_sz); /* Data payload appended into SKB */ skb_mark_for_recycle(skb); buf->page = NULL; } if (buf2_len) { dma_sync_single_for_cpu(priv->device, buf->sec_addr, buf2_len, dma_dir); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->sec_page, 0, buf2_len, priv->dma_conf.dma_buf_sz); /* Data payload appended into SKB */ skb_mark_for_recycle(skb); buf->sec_page = NULL; } drain_data: if (likely(status & rx_not_ls)) goto read_again; if (!skb) continue; /* Got entire packet into SKB. Finish it. */ stmmac_get_rx_hwtstamp(priv, p, np, skb); if (priv->hw->hw_vlan_en) /* MAC level stripping. */ stmmac_rx_hw_vlan(priv, priv->hw, p, skb); else /* Driver level stripping. 
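 * The tag is popped in software and handed to the stack via the hwaccel helper.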
*/ stmmac_rx_vlan(priv->dev, skb); skb->protocol = eth_type_trans(skb, priv->dev); if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb)) skb_checksum_none_assert(skb); else skb->ip_summed = CHECKSUM_UNNECESSARY; if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) skb_set_hash(skb, hash, hash_type); skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rx_napi, skb); skb = NULL; rx_packets++; rx_bytes += len; count++; } if (status & rx_not_ls || skb) { rx_q->state_saved = true; rx_q->state.skb = skb; rx_q->state.error = error; rx_q->state.len = len; } stmmac_finalize_xdp_rx(priv, xdp_status); stmmac_rx_refill(priv, queue); u64_stats_update_begin(&rxq_stats->napi_syncp); u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; return count; } static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) { struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, rx_napi); struct stmmac_priv *priv = ch->priv_data; struct stmmac_rxq_stats *rxq_stats; u32 chan = ch->index; int work_done; rxq_stats = &priv->xstats.rxq_stats[chan]; u64_stats_update_begin(&rxq_stats->napi_syncp); u64_stats_inc(&rxq_stats->napi.poll); u64_stats_update_end(&rxq_stats->napi_syncp); work_done = stmmac_rx(priv, budget, chan); if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); spin_unlock_irqrestore(&ch->lock, flags); } return work_done; } static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) { struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, tx_napi); struct stmmac_priv *priv = ch->priv_data; struct stmmac_txq_stats *txq_stats; bool pending_packets = false; u32 chan = ch->index; int work_done; txq_stats = &priv->xstats.txq_stats[chan]; u64_stats_update_begin(&txq_stats->napi_syncp); u64_stats_inc(&txq_stats->napi.poll); u64_stats_update_end(&txq_stats->napi_syncp); work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets); work_done = min(work_done, budget); if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); } /* TX still have packet to handle, check if we need to arm tx timer */ if (pending_packets) stmmac_tx_timer_arm(priv, chan); return work_done; } static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) { struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, rxtx_napi); struct stmmac_priv *priv = ch->priv_data; bool tx_pending_packets = false; int rx_done, tx_done, rxtx_done; struct stmmac_rxq_stats *rxq_stats; struct stmmac_txq_stats *txq_stats; u32 chan = ch->index; rxq_stats = &priv->xstats.rxq_stats[chan]; u64_stats_update_begin(&rxq_stats->napi_syncp); u64_stats_inc(&rxq_stats->napi.poll); u64_stats_update_end(&rxq_stats->napi_syncp); txq_stats = &priv->xstats.txq_stats[chan]; u64_stats_update_begin(&txq_stats->napi_syncp); u64_stats_inc(&txq_stats->napi.poll); u64_stats_update_end(&txq_stats->napi_syncp); tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets); tx_done = min(tx_done, budget); rx_done = stmmac_rx_zc(priv, budget, chan); rxtx_done = max(tx_done, rx_done); /* If either TX or RX work 
is not complete, return budget * and keep polling */ if (rxtx_done >= budget) return budget; /* all work done, exit the polling mode */ if (napi_complete_done(napi, rxtx_done)) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); /* Both RX and TX work are complete, * so enable both RX & TX IRQs. */ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); spin_unlock_irqrestore(&ch->lock, flags); } /* TX still has packets to handle, check if we need to arm the tx timer */ if (tx_pending_packets) stmmac_tx_timer_arm(priv, chan); return min(rxtx_done, budget - 1); }
/** * stmmac_tx_timeout - handle a transmit timeout * @dev : Pointer to net device structure * @txqueue: the index of the hanging transmit queue * Description: this function is called when a packet transmission fails to * complete within a reasonable time. The driver will mark the error in the * netdev structure and arrange for the device to be reset to a sane state * in order to transmit a new packet. */ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct stmmac_priv *priv = netdev_priv(dev); stmmac_global_err(priv); }
/** * stmmac_set_rx_mode - entry point for multicast addressing * @dev : pointer to the device structure * Description: * This function is a driver entry point which gets called by the kernel * whenever multicast addresses must be enabled/disabled. * Return value: * void. */ static void stmmac_set_rx_mode(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); stmmac_set_filter(priv, priv->hw, dev); }
/** * stmmac_change_mtu - entry point to change MTU size for the device. * @dev : device pointer. * @new_mtu : the new MTU size for the device. * Description: the Maximum Transfer Unit (MTU) is used by the network layer * to drive packet transmission. Ethernet has an MTU of 1500 octets * (ETH_DATA_LEN). This value can be changed with ifconfig. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure.
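 * If the interface is running, the DMA rings are re-allocated for the new size
 * and the interface is restarted; with an XDP program attached, the MTU is
 * limited to standard (non-jumbo) frames.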
*/ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); int txfifosz = priv->plat->tx_fifo_size; struct stmmac_dma_conf *dma_conf; const int mtu = new_mtu; int ret; if (txfifosz == 0) txfifosz = priv->dma_cap.tx_fifo_size; txfifosz /= priv->plat->tx_queues_to_use; if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); return -EINVAL; } new_mtu = STMMAC_ALIGN(new_mtu); /* If condition true, FIFO is too small or MTU too large */ if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) return -EINVAL; if (netif_running(dev)) { netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); /* Try to allocate the new DMA conf with the new mtu */ dma_conf = stmmac_setup_dma_desc(priv, mtu); if (IS_ERR(dma_conf)) { netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", mtu); return PTR_ERR(dma_conf); } stmmac_release(dev); ret = __stmmac_open(dev, dma_conf); if (ret) { free_dma_desc_resources(priv, dma_conf); kfree(dma_conf); netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); return ret; } kfree(dma_conf); stmmac_set_rx_mode(dev); } WRITE_ONCE(dev->mtu, mtu); netdev_update_features(dev); return 0; } static netdev_features_t stmmac_fix_features(struct net_device *dev, netdev_features_t features) { struct stmmac_priv *priv = netdev_priv(dev); if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) features &= ~NETIF_F_RXCSUM; if (!priv->plat->tx_coe) features &= ~NETIF_F_CSUM_MASK; /* Some GMAC devices have a bugged Jumbo frame support that * needs to have the Tx COE disabled for oversized frames * (due to limited buffer sizes). In this case we disable * the TX csum insertion in the TDES and not use SF. */ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_CSUM_MASK; /* Disable tso if asked by ethtool */ if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { if (features & NETIF_F_TSO) priv->tso = true; else priv->tso = false; } return features; } static int stmmac_set_features(struct net_device *netdev, netdev_features_t features) { struct stmmac_priv *priv = netdev_priv(netdev); /* Keep the COE Type in case of csum is supporting */ if (features & NETIF_F_RXCSUM) priv->hw->rx_csum = priv->plat->rx_coe; else priv->hw->rx_csum = 0; /* No check needed because rx_coe has been set before and it will be * fixed in case of issue. */ stmmac_rx_ipc(priv, priv->hw); if (priv->sph_cap) { bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; u32 chan; for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); } if (features & NETIF_F_HW_VLAN_CTAG_RX) priv->hw->hw_vlan_en = true; else priv->hw->hw_vlan_en = false; stmmac_set_hw_vlan_mode(priv, priv->hw); return 0; } static void stmmac_common_interrupt(struct stmmac_priv *priv) { u32 rx_cnt = priv->plat->rx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queues_count; u32 queue; bool xmac; xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; queues_count = (rx_cnt > tx_cnt) ? 
rx_cnt : tx_cnt; if (priv->irq_wake) pm_wakeup_event(priv->device, 0); if (priv->dma_cap.estsel) stmmac_est_irq_status(priv, priv, priv->dev, &priv->xstats, tx_cnt); if (stmmac_fpe_supported(priv)) stmmac_fpe_irq_status(priv); /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || xmac) { int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); if (unlikely(status)) { /* For LPI we need to save the tx status */ if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) priv->tx_path_in_lpi_mode = true; if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) priv->tx_path_in_lpi_mode = false; } for (queue = 0; queue < queues_count; queue++) stmmac_host_mtl_irq_status(priv, priv->hw, queue); /* PCS link status */ if (priv->hw->pcs && !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { if (priv->xstats.pcs_link) netif_carrier_on(priv->dev); else netif_carrier_off(priv->dev); } stmmac_timestamp_interrupt(priv, priv); } } /** * stmmac_interrupt - main ISR * @irq: interrupt number. * @dev_id: to pass the net device pointer. * Description: this is the main driver interrupt service routine. * It can call: * o DMA service routine (to manage incoming frame reception and transmission * status) * o Core interrupts to manage: remote wake-up, management counter, LPI * interrupts. */ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; /* Check ASP error if it isn't delivered via an individual IRQ */ if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv)) return IRQ_HANDLED; /* To handle Common interrupts */ stmmac_common_interrupt(priv); /* To handle DMA interrupts */ stmmac_dma_interrupt(priv); return IRQ_HANDLED; } static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; /* To handle Common interrupts */ stmmac_common_interrupt(priv); return IRQ_HANDLED; } static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; /* Check if a fatal error happened */ stmmac_safety_feat_interrupt(priv); return IRQ_HANDLED; } static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; struct stmmac_dma_conf *dma_conf; int chan = tx_q->queue_index; struct stmmac_priv *priv; int status; dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); priv = container_of(dma_conf, struct stmmac_priv, dma_conf); /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; status = stmmac_napi_check(priv, chan, DMA_DIR_TX); if (unlikely(status & tx_hard_error_bump_tc)) { /* Try to bump up the dma threshold on this failure */ stmmac_bump_dma_threshold(priv, chan); } else if (unlikely(status == tx_hard_error)) { stmmac_tx_err(priv, chan); } return IRQ_HANDLED; } static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) { struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; struct stmmac_dma_conf *dma_conf; int chan = rx_q->queue_index; struct stmmac_priv *priv; dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); 
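/* The RX queue is embedded in the DMA configuration, which is itself
 * embedded in the driver private data, so a second container_of() step
 * recovers priv.
 */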
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	stmmac_napi_check(priv, chan, DMA_DIR_RX);

	return IRQ_HANDLED;
}

/**
 * stmmac_ioctl - Entry point for the IOCTL
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, which can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	__stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);

static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_QUERY_CAPS:
		return stmmac_tc_query_caps(priv, priv, type_data);
	case TC_SETUP_QDISC_MQPRIO:
		return stmmac_tc_setup_mqprio(priv, priv, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's always use Queue 0 because if
		 * TSO/USO is supported then at least this one will be
		 * capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	unsigned int desc_size;
	dma_addr_t dma_addr;
	int i;

	desc_size = extend_desc ?
sizeof(*ep) : sizeof(*p); for (i = 0; i < size; i++) { dma_addr = dma_phy_addr + i * desc_size; seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", i, &dma_addr, le32_to_cpu(p->des0), le32_to_cpu(p->des1), le32_to_cpu(p->des2), le32_to_cpu(p->des3)); if (extend_desc) p = &(++ep)->basic; else p++; } } static int stmmac_rings_status_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); u32 rx_count = priv->plat->rx_queues_to_use; u32 tx_count = priv->plat->tx_queues_to_use; u32 queue; if ((dev->flags & IFF_UP) == 0) return 0; for (queue = 0; queue < rx_count; queue++) { struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; seq_printf(seq, "RX Queue %d:\n", queue); if (priv->extend_desc) { seq_printf(seq, "Extended descriptor ring:\n"); sysfs_display_ring((void *)rx_q->dma_erx, priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); } else { seq_printf(seq, "Descriptor ring:\n"); sysfs_display_ring((void *)rx_q->dma_rx, priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); } } for (queue = 0; queue < tx_count; queue++) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; seq_printf(seq, "TX Queue %d:\n", queue); if (priv->extend_desc) { seq_printf(seq, "Extended descriptor ring:\n"); sysfs_display_ring((void *)tx_q->dma_etx, priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { seq_printf(seq, "Descriptor ring:\n"); sysfs_display_ring((void *)tx_q->dma_tx, priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); } } return 0; } DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); static int stmmac_dma_cap_show(struct seq_file *seq, void *v) { static const char * const dwxgmac_timestamp_source[] = { "None", "Internal", "External", "Both", }; static const char * const dwxgmac_safety_feature_desc[] = { "No", "All Safety Features with ECC and Parity", "All Safety Features without ECC or Parity", "All Safety Features with Parity Only", "ECC Only", "UNDEFINED", "UNDEFINED", "UNDEFINED", }; struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); if (!priv->hw_cap_support) { seq_printf(seq, "DMA HW features not supported\n"); return 0; } seq_printf(seq, "==============================\n"); seq_printf(seq, "\tDMA HW features\n"); seq_printf(seq, "==============================\n"); seq_printf(seq, "\t10/100 Mbps: %s\n", (priv->dma_cap.mbps_10_100) ? "Y" : "N"); seq_printf(seq, "\t1000 Mbps: %s\n", (priv->dma_cap.mbps_1000) ? "Y" : "N"); seq_printf(seq, "\tHalf duplex: %s\n", (priv->dma_cap.half_duplex) ? "Y" : "N"); if (priv->plat->has_xgmac) { seq_printf(seq, "\tNumber of Additional MAC address registers: %d\n", priv->dma_cap.multi_addr); } else { seq_printf(seq, "\tHash Filter: %s\n", (priv->dma_cap.hash_filter) ? "Y" : "N"); seq_printf(seq, "\tMultiple MAC address registers: %s\n", (priv->dma_cap.multi_addr) ? "Y" : "N"); } seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", (priv->dma_cap.pcs) ? "Y" : "N"); seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", (priv->dma_cap.sma_mdio) ? "Y" : "N"); seq_printf(seq, "\tPMT Remote wake up: %s\n", (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); seq_printf(seq, "\tPMT Magic Frame: %s\n", (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); seq_printf(seq, "\tRMON module: %s\n", (priv->dma_cap.rmon) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", (priv->dma_cap.time_stamp) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", (priv->dma_cap.atime_stamp) ? 
"Y" : "N"); if (priv->plat->has_xgmac) seq_printf(seq, "\tTimestamp System Time Source: %s\n", dwxgmac_timestamp_source[priv->dma_cap.tssrc]); seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", (priv->dma_cap.eee) ? "Y" : "N"); seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); seq_printf(seq, "\tChecksum Offload in TX: %s\n", (priv->dma_cap.tx_coe) ? "Y" : "N"); if (priv->synopsys_id >= DWMAC_CORE_4_00 || priv->plat->has_xgmac) { seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", (priv->dma_cap.rx_coe) ? "Y" : "N"); } else { seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); } seq_printf(seq, "\tNumber of Additional RX channel: %d\n", priv->dma_cap.number_rx_channel); seq_printf(seq, "\tNumber of Additional TX channel: %d\n", priv->dma_cap.number_tx_channel); seq_printf(seq, "\tNumber of Additional RX queues: %d\n", priv->dma_cap.number_rx_queues); seq_printf(seq, "\tNumber of Additional TX queues: %d\n", priv->dma_cap.number_tx_queues); seq_printf(seq, "\tEnhanced descriptors: %s\n", (priv->dma_cap.enh_desc) ? "Y" : "N"); seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); seq_printf(seq, "\tNumber of PPS Outputs: %d\n", priv->dma_cap.pps_out_num); seq_printf(seq, "\tSafety Features: %s\n", dwxgmac_safety_feature_desc[priv->dma_cap.asp]); seq_printf(seq, "\tFlexible RX Parser: %s\n", priv->dma_cap.frpsel ? "Y" : "N"); seq_printf(seq, "\tEnhanced Addressing: %d\n", priv->dma_cap.host_dma_width); seq_printf(seq, "\tReceive Side Scaling: %s\n", priv->dma_cap.rssen ? "Y" : "N"); seq_printf(seq, "\tVLAN Hash Filtering: %s\n", priv->dma_cap.vlhash ? "Y" : "N"); seq_printf(seq, "\tSplit Header: %s\n", priv->dma_cap.sphen ? "Y" : "N"); seq_printf(seq, "\tVLAN TX Insertion: %s\n", priv->dma_cap.vlins ? "Y" : "N"); seq_printf(seq, "\tDouble VLAN: %s\n", priv->dma_cap.dvlan ? "Y" : "N"); seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", priv->dma_cap.l3l4fnum); seq_printf(seq, "\tARP Offloading: %s\n", priv->dma_cap.arpoffsel ? "Y" : "N"); seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", priv->dma_cap.estsel ? "Y" : "N"); seq_printf(seq, "\tFrame Preemption (FPE): %s\n", priv->dma_cap.fpesel ? "Y" : "N"); seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", priv->dma_cap.tbssel ? "Y" : "N"); seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n", priv->dma_cap.tbs_ch_num); seq_printf(seq, "\tPer-Stream Filtering: %s\n", priv->dma_cap.sgfsel ? "Y" : "N"); seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n", BIT(priv->dma_cap.ttsfd) >> 1); seq_printf(seq, "\tNumber of Traffic Classes: %d\n", priv->dma_cap.numtc); seq_printf(seq, "\tDCB Feature: %s\n", priv->dma_cap.dcben ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n", priv->dma_cap.advthword ? "Y" : "N"); seq_printf(seq, "\tPTP Offload: %s\n", priv->dma_cap.ptoen ? "Y" : "N"); seq_printf(seq, "\tOne-Step Timestamping: %s\n", priv->dma_cap.osten ? "Y" : "N"); seq_printf(seq, "\tPriority-Based Flow Control: %s\n", priv->dma_cap.pfcen ? 
"Y" : "N"); seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n", BIT(priv->dma_cap.frpes) << 6); seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n", BIT(priv->dma_cap.frpbs) << 6); seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n", priv->dma_cap.frppipe_num); seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n", priv->dma_cap.nrvf_num ? (BIT(priv->dma_cap.nrvf_num) << 1) : 0); seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n", priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); seq_printf(seq, "\tDepth of GCL: %lu\n", priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", priv->dma_cap.cbtisel ? "Y" : "N"); seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n", priv->dma_cap.aux_snapshot_n); seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", priv->dma_cap.pou_ost_en ? "Y" : "N"); seq_printf(seq, "\tEnhanced DMA: %s\n", priv->dma_cap.edma ? "Y" : "N"); seq_printf(seq, "\tDifferent Descriptor Cache: %s\n", priv->dma_cap.ediffc ? "Y" : "N"); seq_printf(seq, "\tVxLAN/NVGRE: %s\n", priv->dma_cap.vxn ? "Y" : "N"); seq_printf(seq, "\tDebug Memory Interface: %s\n", priv->dma_cap.dbgmem ? "Y" : "N"); seq_printf(seq, "\tNumber of Policing Counters: %lu\n", priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); return 0; } DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); /* Use network device events to rename debugfs file entries. */ static int stmmac_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct stmmac_priv *priv = netdev_priv(dev); if (dev->netdev_ops != &stmmac_netdev_ops) goto done; switch (event) { case NETDEV_CHANGENAME: if (priv->dbgfs_dir) priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, priv->dbgfs_dir, stmmac_fs_dir, dev->name); break; } done: return NOTIFY_DONE; } static struct notifier_block stmmac_notifier = { .notifier_call = stmmac_device_event, }; static void stmmac_init_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); rtnl_lock(); /* Create per netdev entries */ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); /* Entry to report DMA RX/TX rings */ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, &stmmac_rings_status_fops); /* Entry to report the DMA HW features */ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, &stmmac_dma_cap_fops); rtnl_unlock(); } static void stmmac_exit_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); debugfs_remove_recursive(priv->dbgfs_dir); } #endif /* CONFIG_DEBUG_FS */ static u32 stmmac_vid_crc32_le(__le16 vid_le) { unsigned char *data = (unsigned char *)&vid_le; unsigned char data_byte = 0; u32 crc = ~0x0; u32 temp = 0; int i, bits; bits = get_bitmask_order(VLAN_VID_MASK); for (i = 0; i < bits; i++) { if ((i % 8) == 0) data_byte = data[i / 8]; temp = ((crc & 1) ^ data_byte) & 1; crc >>= 1; data_byte >>= 1; if (temp) crc ^= 0xedb88320; } return crc; } static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) { u32 crc, hash = 0; u16 pmatch = 0; int count = 0; u16 vid = 0; for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { __le16 vid_le = cpu_to_le16(vid); crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; hash |= (1 << crc); count++; } if (!priv->dma_cap.vlhash) { if (count > 2) /* VID = 0 always passes filter */ return -EOPNOTSUPP; pmatch = vid; hash = 0; } return 
stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); } static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct stmmac_priv *priv = netdev_priv(ndev); bool is_double = false; int ret; ret = pm_runtime_resume_and_get(priv->device); if (ret < 0) return ret; if (be16_to_cpu(proto) == ETH_P_8021AD) is_double = true; set_bit(vid, priv->active_vlans); ret = stmmac_vlan_update(priv, is_double); if (ret) { clear_bit(vid, priv->active_vlans); goto err_pm_put; } if (priv->hw->num_vlan) { ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); if (ret) goto err_pm_put; } err_pm_put: pm_runtime_put(priv->device); return ret; } static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct stmmac_priv *priv = netdev_priv(ndev); bool is_double = false; int ret; ret = pm_runtime_resume_and_get(priv->device); if (ret < 0) return ret; if (be16_to_cpu(proto) == ETH_P_8021AD) is_double = true; clear_bit(vid, priv->active_vlans); if (priv->hw->num_vlan) { ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); if (ret) goto del_vlan_error; } ret = stmmac_vlan_update(priv, is_double); del_vlan_error: pm_runtime_put(priv->device); return ret; } static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) { struct stmmac_priv *priv = netdev_priv(dev); switch (bpf->command) { case XDP_SETUP_PROG: return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); case XDP_SETUP_XSK_POOL: return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, bpf->xsk.queue_id); default: return -EOPNOTSUPP; } } static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, struct xdp_frame **frames, u32 flags) { struct stmmac_priv *priv = netdev_priv(dev); int cpu = smp_processor_id(); struct netdev_queue *nq; int i, nxmit = 0; int queue; if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; queue = stmmac_xdp_get_tx_queue(priv, cpu); nq = netdev_get_tx_queue(priv->dev, queue); __netif_tx_lock(nq, cpu); /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq); for (i = 0; i < num_frames; i++) { int res; res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); if (res == STMMAC_XDP_CONSUMED) break; nxmit++; } if (flags & XDP_XMIT_FLUSH) { stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); } __netif_tx_unlock(nq); return nxmit; } void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) { struct stmmac_channel *ch = &priv->channel[queue]; unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); spin_unlock_irqrestore(&ch->lock, flags); stmmac_stop_rx_dma(priv, queue); __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); } void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned long flags; u32 buf_size; int ret; ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); if (ret) { netdev_err(priv->dev, "Failed to alloc RX desc.\n"); return; } ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); if (ret) { __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); netdev_err(priv->dev, "Failed to init RX desc.\n"); return; } stmmac_reset_rx_queue(priv, queue); stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 
rx_q->dma_rx_phy, rx_q->queue_index); rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, rx_q->queue_index); if (rx_q->xsk_pool && rx_q->buf_alloc_num) { buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); stmmac_set_dma_bfsize(priv, priv->ioaddr, buf_size, rx_q->queue_index); } else { stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_conf.dma_buf_sz, rx_q->queue_index); } stmmac_start_rx_dma(priv, queue); spin_lock_irqsave(&ch->lock, flags); stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); spin_unlock_irqrestore(&ch->lock, flags); } void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) { struct stmmac_channel *ch = &priv->channel[queue]; unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); stmmac_stop_tx_dma(priv, queue); __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); } void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned long flags; int ret; ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); if (ret) { netdev_err(priv->dev, "Failed to alloc TX desc.\n"); return; } ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); if (ret) { __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); netdev_err(priv->dev, "Failed to init TX desc.\n"); return; } stmmac_reset_tx_queue(priv, queue); stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, tx_q->queue_index); if (tx_q->tbs & STMMAC_TBS_AVAIL) stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); tx_q->tx_tail_addr = tx_q->dma_tx_phy; stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, tx_q->queue_index); stmmac_start_tx_dma(priv, queue); spin_lock_irqsave(&ch->lock, flags); stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); } void stmmac_xdp_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 chan; /* Ensure tx function is not running */ netif_tx_disable(dev); /* Disable NAPI process */ stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); /* Free the IRQ lines */ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); /* Stop TX/RX DMA channels */ stmmac_stop_all_dma(priv); /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv, &priv->dma_conf); /* Disable the MAC Rx/Tx */ stmmac_mac_set(priv, priv->ioaddr, false); /* set trans_start so we don't get spurious * watchdogs during reset */ netif_trans_update(dev); netif_carrier_off(dev); } int stmmac_xdp_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 rx_cnt = priv->plat->rx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use; u32 dma_csr_ch = max(rx_cnt, tx_cnt); struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; u32 buf_size; bool sph_en; u32 chan; int ret; ret = alloc_dma_desc_resources(priv, &priv->dma_conf); if (ret < 0) { netdev_err(dev, "%s: DMA descriptors allocation failed\n", __func__); goto dma_desc_error; } ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); if (ret < 0) { netdev_err(dev, "%s: DMA descriptors initialization failed\n", __func__); goto 
init_error; } stmmac_reset_queues_param(priv); /* DMA CSR Channel configuration */ for (chan = 0; chan < dma_csr_ch; chan++) { stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); } /* Adjust Split header */ sph_en = (priv->hw->rx_csum > 0) && priv->sph; /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_cnt; chan++) { rx_q = &priv->dma_conf.rx_queue[chan]; stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, rx_q->dma_rx_phy, chan); rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, chan); if (rx_q->xsk_pool && rx_q->buf_alloc_num) { buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); stmmac_set_dma_bfsize(priv, priv->ioaddr, buf_size, rx_q->queue_index); } else { stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_conf.dma_buf_sz, rx_q->queue_index); } stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); } /* DMA TX Channel Configuration */ for (chan = 0; chan < tx_cnt; chan++) { tx_q = &priv->dma_conf.tx_queue[chan]; stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan); tx_q->tx_tail_addr = tx_q->dma_tx_phy; stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, chan); hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); tx_q->txtimer.function = stmmac_tx_timer; } /* Enable the MAC Rx/Tx */ stmmac_mac_set(priv, priv->ioaddr, true); /* Start Rx & Tx DMA Channels */ stmmac_start_all_dma(priv); ret = stmmac_request_irq(dev); if (ret) goto irq_error; /* Enable NAPI process*/ stmmac_enable_all_queues(priv); netif_carrier_on(dev); netif_tx_start_all_queues(dev); stmmac_enable_all_dma_irq(priv); return 0; irq_error: for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); stmmac_hw_teardown(dev); init_error: free_dma_desc_resources(priv, &priv->dma_conf); dma_desc_error: return ret; } int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) { struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; struct stmmac_channel *ch; if (test_bit(STMMAC_DOWN, &priv->state) || !netif_carrier_ok(priv->dev)) return -ENETDOWN; if (!stmmac_xdp_is_enabled(priv)) return -EINVAL; if (queue >= priv->plat->rx_queues_to_use || queue >= priv->plat->tx_queues_to_use) return -EINVAL; rx_q = &priv->dma_conf.rx_queue[queue]; tx_q = &priv->dma_conf.tx_queue[queue]; ch = &priv->channel[queue]; if (!rx_q->xsk_pool && !tx_q->xsk_pool) return -EINVAL; if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { /* EQoS does not have per-DMA channel SW interrupt, * so we schedule RX Napi straight-away. 
*/ if (likely(napi_schedule_prep(&ch->rxtx_napi))) __napi_schedule(&ch->rxtx_napi); } return 0; } static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct stmmac_priv *priv = netdev_priv(dev); u32 tx_cnt = priv->plat->tx_queues_to_use; u32 rx_cnt = priv->plat->rx_queues_to_use; unsigned int start; int q; for (q = 0; q < tx_cnt; q++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; u64 tx_packets; u64 tx_bytes; do { start = u64_stats_fetch_begin(&txq_stats->q_syncp); tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); do { start = u64_stats_fetch_begin(&txq_stats->napi_syncp); tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; } for (q = 0; q < rx_cnt; q++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; u64 rx_packets; u64 rx_bytes; do { start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; } stats->rx_dropped = priv->xstats.rx_dropped; stats->rx_errors = priv->xstats.rx_errors; stats->tx_dropped = priv->xstats.tx_dropped; stats->tx_errors = priv->xstats.tx_errors; stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; stats->rx_length_errors = priv->xstats.rx_length; stats->rx_crc_errors = priv->xstats.rx_crc_errors; stats->rx_over_errors = priv->xstats.rx_overflow_cntr; stats->rx_missed_errors = priv->xstats.rx_missed_cntr; } static const struct net_device_ops stmmac_netdev_ops = { .ndo_open = stmmac_open, .ndo_start_xmit = stmmac_xmit, .ndo_stop = stmmac_release, .ndo_change_mtu = stmmac_change_mtu, .ndo_fix_features = stmmac_fix_features, .ndo_set_features = stmmac_set_features, .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, .ndo_eth_ioctl = stmmac_ioctl, .ndo_get_stats64 = stmmac_get_stats64, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, .ndo_set_mac_address = stmmac_set_mac_address, .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, .ndo_bpf = stmmac_bpf, .ndo_xdp_xmit = stmmac_xdp_xmit, .ndo_xsk_wakeup = stmmac_xsk_wakeup, }; static void stmmac_reset_subtask(struct stmmac_priv *priv) { if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) return; if (test_bit(STMMAC_DOWN, &priv->state)) return; netdev_err(priv->dev, "Reset adapter.\n"); rtnl_lock(); netif_trans_update(priv->dev); while (test_and_set_bit(STMMAC_RESETING, &priv->state)) usleep_range(1000, 2000); set_bit(STMMAC_DOWN, &priv->state); dev_close(priv->dev); dev_open(priv->dev, NULL); clear_bit(STMMAC_DOWN, &priv->state); clear_bit(STMMAC_RESETING, &priv->state); rtnl_unlock(); } static void stmmac_service_task(struct work_struct *work) { struct stmmac_priv *priv = container_of(work, struct stmmac_priv, service_task); stmmac_reset_subtask(priv); clear_bit(STMMAC_SERVICE_SCHED, &priv->state); } /** * stmmac_hw_init - Init the MAC device * @priv: driver private structure * Description: this function is to configure the MAC device according to * some platform parameters or the HW capability register. 
It prepares the
 * driver to use either ring or chain modes and to set up either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en =
		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on bugged HW, this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
*/ if (((priv->synopsys_id >= DWMAC_CORE_3_50) || (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { priv->use_riwt = 1; dev_info(priv->device, "Enable RX Mitigation via HW Watchdog Timer\n"); } return 0; } static void stmmac_napi_add(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 queue, maxq; maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; ch->priv_data = priv; ch->index = queue; spin_lock_init(&ch->lock); if (queue < priv->plat->rx_queues_to_use) { netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); } if (queue < priv->plat->tx_queues_to_use) { netif_napi_add_tx(dev, &ch->tx_napi, stmmac_napi_poll_tx); } if (queue < priv->plat->rx_queues_to_use && queue < priv->plat->tx_queues_to_use) { netif_napi_add(dev, &ch->rxtx_napi, stmmac_napi_poll_rxtx); } } } static void stmmac_napi_del(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 queue, maxq; maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; if (queue < priv->plat->rx_queues_to_use) netif_napi_del(&ch->rx_napi); if (queue < priv->plat->tx_queues_to_use) netif_napi_del(&ch->tx_napi); if (queue < priv->plat->rx_queues_to_use && queue < priv->plat->tx_queues_to_use) { netif_napi_del(&ch->rxtx_napi); } } } int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) { struct stmmac_priv *priv = netdev_priv(dev); int ret = 0, i; if (netif_running(dev)) stmmac_release(dev); stmmac_napi_del(dev); priv->plat->rx_queues_to_use = rx_cnt; priv->plat->tx_queues_to_use = tx_cnt; if (!netif_is_rxfh_configured(dev)) for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) priv->rss.table[i] = ethtool_rxfh_indir_default(i, rx_cnt); stmmac_napi_add(dev); if (netif_running(dev)) ret = stmmac_open(dev); return ret; } int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) { struct stmmac_priv *priv = netdev_priv(dev); int ret = 0; if (netif_running(dev)) stmmac_release(dev); priv->dma_conf.dma_rx_size = rx_size; priv->dma_conf.dma_tx_size = tx_size; if (netif_running(dev)) ret = stmmac_open(dev); return ret; } static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) { const struct stmmac_xdp_buff *ctx = (void *)_ctx; struct dma_desc *desc_contains_ts = ctx->desc; struct stmmac_priv *priv = ctx->priv; struct dma_desc *ndesc = ctx->ndesc; struct dma_desc *desc = ctx->desc; u64 ns = 0; if (!priv->hwts_rx_en) return -ENODATA; /* For GMAC4, the valid timestamp is from CTX next desc. */ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) desc_contains_ts = ndesc; /* Check if timestamp is available */ if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); ns -= priv->plat->cdc_error_adj; *timestamp = ns_to_ktime(ns); return 0; } return -ENODATA; } static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = { .xmo_rx_timestamp = stmmac_xdp_rx_timestamp, }; /** * stmmac_dvr_probe * @device: device pointer * @plat_dat: platform data pointer * @res: stmmac resource pointer * Description: this is the main probe function used to * call the alloc_etherdev, allocate the priv structure. * Return: * returns 0 on success, otherwise errno. 
*/ int stmmac_dvr_probe(struct device *device, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res) { struct net_device *ndev = NULL; struct stmmac_priv *priv; u32 rxq; int i, ret = 0; ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); if (!ndev) return -ENOMEM; SET_NETDEV_DEV(ndev, device); priv = netdev_priv(ndev); priv->device = device; priv->dev = ndev; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); } priv->xstats.pcpu_stats = devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats); if (!priv->xstats.pcpu_stats) return -ENOMEM; stmmac_set_ethtool_ops(ndev); priv->pause = pause; priv->plat = plat_dat; priv->ioaddr = res->addr; priv->dev->base_addr = (unsigned long)res->addr; priv->plat->dma_cfg->multi_msi_en = (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); priv->dev->irq = res->irq; priv->wol_irq = res->wol_irq; priv->lpi_irq = res->lpi_irq; priv->sfty_irq = res->sfty_irq; priv->sfty_ce_irq = res->sfty_ce_irq; priv->sfty_ue_irq = res->sfty_ue_irq; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) priv->rx_irq[i] = res->rx_irq[i]; for (i = 0; i < MTL_MAX_TX_QUEUES; i++) priv->tx_irq[i] = res->tx_irq[i]; if (!is_zero_ether_addr(res->mac)) eth_hw_addr_set(priv->dev, res->mac); dev_set_drvdata(device, priv->dev); /* Verify driver arguments */ stmmac_verify_args(); priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); if (!priv->af_xdp_zc_qps) return -ENOMEM; /* Allocate workqueue */ priv->wq = create_singlethread_workqueue("stmmac_wq"); if (!priv->wq) { dev_err(priv->device, "failed to create workqueue\n"); ret = -ENOMEM; goto error_wq_init; } INIT_WORK(&priv->service_task, stmmac_service_task); /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances */ if ((phyaddr >= 0) && (phyaddr <= 31)) priv->plat->phy_addr = phyaddr; if (priv->plat->stmmac_rst) { ret = reset_control_assert(priv->plat->stmmac_rst); reset_control_deassert(priv->plat->stmmac_rst); /* Some reset controllers have only reset callback instead of * assert + deassert callbacks pair. */ if (ret == -ENOTSUPP) reset_control_reset(priv->plat->stmmac_rst); } ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); if (ret == -ENOTSUPP) dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", ERR_PTR(ret)); /* Wait a bit for the reset to take effect */ udelay(10); /* Init MAC and get the capabilities */ ret = stmmac_hw_init(priv); if (ret) goto error_hw_init; /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 
*/ if (priv->synopsys_id < DWMAC_CORE_5_20) priv->plat->dma_cfg->dche = false; stmmac_check_ether_addr(priv); ndev->netdev_ops = &stmmac_netdev_ops; ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops; ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_XSK_ZEROCOPY; ret = stmmac_tc_init(priv, priv); if (!ret) { ndev->hw_features |= NETIF_F_HW_TC; } if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; if (priv->plat->has_gmac4) ndev->hw_features |= NETIF_F_GSO_UDP_L4; priv->tso = true; dev_info(priv->device, "TSO feature enabled\n"); } if (priv->dma_cap.sphen && !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { ndev->hw_features |= NETIF_F_GRO; priv->sph_cap = true; priv->sph = priv->sph_cap; dev_info(priv->device, "SPH feature enabled\n"); } /* Ideally our host DMA address width is the same as for the * device. However, it may differ and then we have to use our * host DMA width for allocation and the device DMA width for * register handling. */ if (priv->plat->host_dma_width) priv->dma_cap.host_dma_width = priv->plat->host_dma_width; else priv->dma_cap.host_dma_width = priv->dma_cap.addr64; if (priv->dma_cap.host_dma_width) { ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(priv->dma_cap.host_dma_width)); if (!ret) { dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", priv->dma_cap.host_dma_width, priv->dma_cap.addr64); /* * If more than 32 bits can be addressed, make sure to * enable enhanced addressing mode. */ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) priv->plat->dma_cfg->eame = true; } else { ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); if (ret) { dev_err(priv->device, "Failed to set DMA Mask\n"); goto error_hw_init; } priv->dma_cap.host_dma_width = 32; } } ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; if (priv->plat->has_gmac4) { ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; priv->hw->hw_vlan_en = true; } if (priv->dma_cap.vlhash) { ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; } if (priv->dma_cap.vlins) { ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; if (priv->dma_cap.dvlan) ndev->features |= NETIF_F_HW_VLAN_STAG_TX; } #endif priv->msg_enable = netif_msg_init(debug, default_msg_level); priv->xstats.threshold = tc; /* Initialize RSS */ rxq = priv->plat->rx_queues_to_use; netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); if (priv->dma_cap.rssen && priv->plat->rss_en) ndev->features |= NETIF_F_RXHASH; ndev->vlan_features |= ndev->features; /* MTU range: 46 - hw-specific max */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; if (priv->plat->has_xgmac) ndev->max_mtu = XGMAC_JUMBO_LEN; else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) ndev->max_mtu = JUMBO_LEN; else ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 
*/ if ((priv->plat->maxmtu < ndev->max_mtu) && (priv->plat->maxmtu >= ndev->min_mtu)) ndev->max_mtu = priv->plat->maxmtu; else if (priv->plat->maxmtu < ndev->min_mtu) dev_warn(priv->device, "%s: warning: maxmtu having invalid value (%d)\n", __func__, priv->plat->maxmtu); if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; /* Setup channels NAPI */ stmmac_napi_add(ndev); mutex_init(&priv->lock); stmmac_fpe_init(priv); /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be * changed at run-time and it is fixed. Viceversa the driver'll try to * set the MDC clock dynamically according to the csr actual * clock input. */ if (priv->plat->clk_csr >= 0) priv->clk_csr = priv->plat->clk_csr; else stmmac_clk_csr_set(priv); stmmac_check_pcs_mode(priv); pm_runtime_get_noresume(device); pm_runtime_set_active(device); if (!pm_runtime_enabled(device)) pm_runtime_enable(device); ret = stmmac_mdio_register(ndev); if (ret < 0) { dev_err_probe(priv->device, ret, "MDIO bus (id: %d) registration failed\n", priv->plat->bus_id); goto error_mdio_register; } if (priv->plat->speed_mode_2500) priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); ret = stmmac_pcs_setup(ndev); if (ret) goto error_pcs_setup; ret = stmmac_phy_setup(priv); if (ret) { netdev_err(ndev, "failed to setup phy (%d)\n", ret); goto error_phy_setup; } ret = register_netdev(ndev); if (ret) { dev_err(priv->device, "%s: ERROR %i registering the device\n", __func__, ret); goto error_netdev_register; } #ifdef CONFIG_DEBUG_FS stmmac_init_fs(ndev); #endif if (priv->plat->dump_debug_regs) priv->plat->dump_debug_regs(priv->plat->bsp_priv); /* Let pm_runtime_put() disable the clocks. * If CONFIG_PM is not enabled, the clocks will stay powered. */ pm_runtime_put(device); return ret; error_netdev_register: phylink_destroy(priv->phylink); error_phy_setup: stmmac_pcs_clean(ndev); error_pcs_setup: stmmac_mdio_unregister(ndev); error_mdio_register: stmmac_napi_del(ndev); error_hw_init: destroy_workqueue(priv->wq); error_wq_init: bitmap_free(priv->af_xdp_zc_qps); return ret; } EXPORT_SYMBOL_GPL(stmmac_dvr_probe); /** * stmmac_dvr_remove * @dev: device pointer * Description: this function resets the TX/RX processes, disables the MAC RX/TX * changes the link status, releases the DMA descriptor rings. */ void stmmac_dvr_remove(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); netdev_info(priv->dev, "%s: removing driver", __func__); pm_runtime_get_sync(dev); stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); unregister_netdev(ndev); #ifdef CONFIG_DEBUG_FS stmmac_exit_fs(ndev); #endif phylink_destroy(priv->phylink); if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); reset_control_assert(priv->plat->stmmac_ahb_rst); stmmac_pcs_clean(ndev); stmmac_mdio_unregister(ndev); destroy_workqueue(priv->wq); mutex_destroy(&priv->lock); bitmap_free(priv->af_xdp_zc_qps); pm_runtime_disable(dev); pm_runtime_put_noidle(dev); } EXPORT_SYMBOL_GPL(stmmac_dvr_remove); /** * stmmac_suspend - suspend callback * @dev: device pointer * Description: this is the function to suspend the device and it is called * by the platform driver to stop the network queue, release the resources, * program the PMT register (for WoL), clean and release driver resources. 
*/ int stmmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); u32 chan; if (!ndev || !netif_running(ndev)) return 0; mutex_lock(&priv->lock); netif_device_detach(ndev); stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); if (priv->eee_enabled) { priv->tx_path_in_lpi_mode = false; del_timer_sync(&priv->eee_ctrl_timer); } /* Stop TX/RX DMA */ stmmac_stop_all_dma(priv); if (priv->plat->serdes_powerdown) priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); /* Enable Power down mode by programming the PMT regs */ if (device_may_wakeup(priv->device) && priv->plat->pmt) { stmmac_pmt(priv, priv->hw, priv->wolopts); priv->irq_wake = 1; } else { stmmac_mac_set(priv, priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); } mutex_unlock(&priv->lock); rtnl_lock(); if (device_may_wakeup(priv->device) && priv->plat->pmt) { phylink_suspend(priv->phylink, true); } else { if (device_may_wakeup(priv->device)) phylink_speed_down(priv->phylink, false); phylink_suspend(priv->phylink, false); } rtnl_unlock(); if (stmmac_fpe_supported(priv)) timer_shutdown_sync(&priv->fpe_cfg.verify_timer); priv->speed = SPEED_UNKNOWN; return 0; } EXPORT_SYMBOL_GPL(stmmac_suspend); static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; rx_q->cur_rx = 0; rx_q->dirty_rx = 0; } static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; tx_q->cur_tx = 0; tx_q->dirty_tx = 0; tx_q->mss = 0; netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); } /** * stmmac_reset_queues_param - reset queue parameters * @priv: device pointer */ static void stmmac_reset_queues_param(struct stmmac_priv *priv) { u32 rx_cnt = priv->plat->rx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queue; for (queue = 0; queue < rx_cnt; queue++) stmmac_reset_rx_queue(priv, queue); for (queue = 0; queue < tx_cnt; queue++) stmmac_reset_tx_queue(priv, queue); } /** * stmmac_resume - resume callback * @dev: device pointer * Description: when resume this function is invoked to setup the DMA and CORE * in a usable state. */ int stmmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); int ret; if (!netif_running(ndev)) return 0; /* Power Down bit, into the PM register, is cleared * automatically as soon as a magic packet or a Wake-up frame * is received. Anyway, it's better to manually clear * this bit because it can generate problems while resuming * from another devices (e.g. serial console). 
*/ if (device_may_wakeup(priv->device) && priv->plat->pmt) { mutex_lock(&priv->lock); stmmac_pmt(priv, priv->hw, 0); mutex_unlock(&priv->lock); priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); } if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && priv->plat->serdes_powerup) { ret = priv->plat->serdes_powerup(ndev, priv->plat->bsp_priv); if (ret < 0) return ret; } rtnl_lock(); if (device_may_wakeup(priv->device) && priv->plat->pmt) { phylink_resume(priv->phylink); } else { phylink_resume(priv->phylink); if (device_may_wakeup(priv->device)) phylink_speed_up(priv->phylink); } rtnl_unlock(); rtnl_lock(); mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); stmmac_free_tx_skbufs(priv); stmmac_clear_descriptors(priv, &priv->dma_conf); stmmac_hw_setup(ndev, false); stmmac_init_coalesce(priv); stmmac_set_rx_mode(ndev); stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); stmmac_enable_all_queues(priv); stmmac_enable_all_dma_irq(priv); mutex_unlock(&priv->lock); rtnl_unlock(); netif_device_attach(ndev); return 0; } EXPORT_SYMBOL_GPL(stmmac_resume); #ifndef MODULE static int __init stmmac_cmdline_opt(char *str) { char *opt; if (!str || !*str) return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "debug:", 6)) { if (kstrtoint(opt + 6, 0, &debug)) goto err; } else if (!strncmp(opt, "phyaddr:", 8)) { if (kstrtoint(opt + 8, 0, &phyaddr)) goto err; } else if (!strncmp(opt, "buf_sz:", 7)) { if (kstrtoint(opt + 7, 0, &buf_sz)) goto err; } else if (!strncmp(opt, "tc:", 3)) { if (kstrtoint(opt + 3, 0, &tc)) goto err; } else if (!strncmp(opt, "watchdog:", 9)) { if (kstrtoint(opt + 9, 0, &watchdog)) goto err; } else if (!strncmp(opt, "flow_ctrl:", 10)) { if (kstrtoint(opt + 10, 0, &flow_ctrl)) goto err; } else if (!strncmp(opt, "pause:", 6)) { if (kstrtoint(opt + 6, 0, &pause)) goto err; } else if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } else if (!strncmp(opt, "chain_mode:", 11)) { if (kstrtoint(opt + 11, 0, &chain_mode)) goto err; } } return 1; err: pr_err("%s: ERROR broken module parameter conversion", __func__); return 1; } __setup("stmmaceth=", stmmac_cmdline_opt); #endif /* MODULE */ static int __init stmmac_init(void) { #ifdef CONFIG_DEBUG_FS /* Create debugfs main directory if it doesn't exist yet */ if (!stmmac_fs_dir) stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); register_netdevice_notifier(&stmmac_notifier); #endif return 0; } static void __exit stmmac_exit(void) { #ifdef CONFIG_DEBUG_FS unregister_netdevice_notifier(&stmmac_notifier); debugfs_remove_recursive(stmmac_fs_dir); #endif } module_init(stmmac_init) module_exit(stmmac_exit) MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); MODULE_AUTHOR("Giuseppe Cavallaro <[email protected]>"); MODULE_LICENSE("GPL");
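/*
 * Editorial note (illustrative sketch, not part of the driver above): the
 * "stmmaceth=" boot argument handled by stmmac_cmdline_opt() is a simple
 * comma-separated list of "name:value" pairs, e.g. "stmmaceth=debug:16,phyaddr:1"
 * (the option names come from the parser above; the numeric values here are
 * made up). The standalone userspace program below mirrors the same
 * strsep()-based parsing pattern so the control flow is easy to follow
 * outside the kernel.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_stmmac_opts(char *str, int *debug, int *phyaddr)
{
	char *opt;

	/* Walk the comma-separated list, matching each known "name:" prefix */
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6))
			*debug = atoi(opt + 6);
		else if (!strncmp(opt, "phyaddr:", 8))
			*phyaddr = atoi(opt + 8);
	}
}

int main(void)
{
	char args[] = "debug:16,phyaddr:1";	/* hypothetical option string */
	int debug = 0, phyaddr = -1;

	parse_stmmac_opts(args, &debug, &phyaddr);
	printf("debug=%d phyaddr=%d\n", debug, phyaddr);
	return 0;
}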
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2019-2022 Bootlin * Author: Paul Kocialkowski <[email protected]> */ #ifndef _LOGICVC_LAYER_H_ #define _LOGICVC_LAYER_H_ #include <linux/of.h> #include <linux/types.h> #include <drm/drm_plane.h> #define LOGICVC_LAYER_COLORSPACE_RGB 0 #define LOGICVC_LAYER_COLORSPACE_YUV 1 #define LOGICVC_LAYER_ALPHA_LAYER 0 #define LOGICVC_LAYER_ALPHA_PIXEL 1 struct logicvc_layer_buffer_setup { u8 buffer_sel; u16 voffset; u16 hoffset; }; struct logicvc_layer_config { u32 colorspace; u32 depth; u32 alpha_mode; u32 base_offset; u32 buffer_offset; bool primary; }; struct logicvc_layer_formats { u32 colorspace; u32 depth; bool alpha; uint32_t *formats; }; struct logicvc_layer { struct logicvc_layer_config config; struct logicvc_layer_formats *formats; struct device_node *of_node; struct drm_plane drm_plane; struct list_head list; u32 index; }; int logicvc_layer_buffer_find_setup(struct logicvc_drm *logicvc, struct logicvc_layer *layer, struct drm_plane_state *state, struct logicvc_layer_buffer_setup *setup); struct logicvc_layer *logicvc_layer_get_from_index(struct logicvc_drm *logicvc, u32 index); struct logicvc_layer *logicvc_layer_get_from_type(struct logicvc_drm *logicvc, enum drm_plane_type type); struct logicvc_layer *logicvc_layer_get_primary(struct logicvc_drm *logicvc); void logicvc_layers_attach_crtc(struct logicvc_drm *logicvc); int logicvc_layers_init(struct logicvc_drm *logicvc); #endif
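/*
 * Editorial note (illustrative sketch, not part of the logicvc driver): one
 * way the structures declared in logicvc_layer.h above could be filled in.
 * All numeric values are invented for the example; in the driver itself the
 * layer configuration appears to come from the device tree (note the of_node
 * member in struct logicvc_layer).
 */
#include "logicvc_layer.h"	/* assumed include path */

static const struct logicvc_layer_config example_primary_layer = {
	.colorspace	= LOGICVC_LAYER_COLORSPACE_RGB,
	.depth		= 24,		/* hypothetical bits per pixel */
	.alpha_mode	= LOGICVC_LAYER_ALPHA_LAYER,
	.base_offset	= 0x0,		/* hypothetical framebuffer base */
	.buffer_offset	= 0x1000,	/* hypothetical per-buffer offset */
	.primary	= true,
};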
// SPDX-License-Identifier: GPL-2.0-only // // aw88395.c -- ALSA SoC AW88395 codec support // // Copyright (c) 2022-2023 AWINIC Technology CO., LTD // // Author: Bruce zhao <[email protected]> // Author: Weidong Wang <[email protected]> // #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/firmware.h> #include <linux/regmap.h> #include <sound/soc.h> #include "aw88395.h" #include "aw88395_device.h" #include "aw88395_lib.h" #include "aw88395_reg.h" static const struct regmap_config aw88395_remap_config = { .val_bits = 16, .reg_bits = 8, .max_register = AW88395_REG_MAX - 1, .reg_format_endian = REGMAP_ENDIAN_LITTLE, .val_format_endian = REGMAP_ENDIAN_BIG, }; static void aw88395_start_pa(struct aw88395 *aw88395) { int ret, i; for (i = 0; i < AW88395_START_RETRIES; i++) { ret = aw88395_dev_start(aw88395->aw_pa); if (ret) { dev_err(aw88395->aw_pa->dev, "aw88395 device start failed. retry = %d", i); ret = aw88395_dev_fw_update(aw88395->aw_pa, AW88395_DSP_FW_UPDATE_ON, true); if (ret < 0) { dev_err(aw88395->aw_pa->dev, "fw update failed"); continue; } } else { dev_info(aw88395->aw_pa->dev, "start success\n"); break; } } } static void aw88395_startup_work(struct work_struct *work) { struct aw88395 *aw88395 = container_of(work, struct aw88395, start_work.work); mutex_lock(&aw88395->lock); aw88395_start_pa(aw88395); mutex_unlock(&aw88395->lock); } static void aw88395_start(struct aw88395 *aw88395, bool sync_start) { int ret; if (aw88395->aw_pa->fw_status != AW88395_DEV_FW_OK) return; if (aw88395->aw_pa->status == AW88395_DEV_PW_ON) return; ret = aw88395_dev_fw_update(aw88395->aw_pa, AW88395_DSP_FW_UPDATE_OFF, true); if (ret < 0) { dev_err(aw88395->aw_pa->dev, "fw update failed."); return; } if (sync_start == AW88395_SYNC_START) aw88395_start_pa(aw88395); else queue_delayed_work(system_wq, &aw88395->start_work, AW88395_START_WORK_DELAY_MS); } static struct snd_soc_dai_driver aw88395_dai[] = { { .name = "aw88395-aif", .id = 1, .playback = { .stream_name = "Speaker_Playback", .channels_min = 1, .channels_max = 2, .rates = AW88395_RATES, .formats = AW88395_FORMATS, }, .capture = { .stream_name = "Speaker_Capture", .channels_min = 1, .channels_max = 2, .rates = AW88395_RATES, .formats = AW88395_FORMATS, }, }, }; static int aw88395_get_fade_in_time(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(component); struct aw_device *aw_dev = aw88395->aw_pa; ucontrol->value.integer.value[0] = aw_dev->fade_in_time; return 0; } static int aw88395_set_fade_in_time(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(component); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct aw_device *aw_dev = aw88395->aw_pa; int time; time = ucontrol->value.integer.value[0]; if (time < mc->min || time > mc->max) return -EINVAL; if (time != aw_dev->fade_in_time) { aw_dev->fade_in_time = time; return 1; } return 0; } static int aw88395_get_fade_out_time(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(component); struct aw_device *aw_dev = aw88395->aw_pa; ucontrol->value.integer.value[0] = aw_dev->fade_out_time; return 0; } 
static int aw88395_set_fade_out_time(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(component); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct aw_device *aw_dev = aw88395->aw_pa; int time; time = ucontrol->value.integer.value[0]; if (time < mc->min || time > mc->max) return -EINVAL; if (time != aw_dev->fade_out_time) { aw_dev->fade_out_time = time; return 1; } return 0; } static int aw88395_profile_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); char *prof_name, *name; int count, ret; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; count = aw88395_dev_get_profile_count(aw88395->aw_pa); if (count <= 0) { uinfo->value.enumerated.items = 0; return 0; } uinfo->value.enumerated.items = count; if (uinfo->value.enumerated.item >= count) uinfo->value.enumerated.item = count - 1; name = uinfo->value.enumerated.name; count = uinfo->value.enumerated.item; ret = aw88395_dev_get_prof_name(aw88395->aw_pa, count, &prof_name); if (ret) { strscpy(uinfo->value.enumerated.name, "null", strlen("null") + 1); return 0; } strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); return 0; } static int aw88395_profile_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); ucontrol->value.integer.value[0] = aw88395_dev_get_profile_index(aw88395->aw_pa); return 0; } static int aw88395_profile_set(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); int ret; /* pa stop or stopping just set profile */ mutex_lock(&aw88395->lock); ret = aw88395_dev_set_profile_index(aw88395->aw_pa, ucontrol->value.integer.value[0]); if (ret < 0) { dev_dbg(codec->dev, "profile index does not change"); mutex_unlock(&aw88395->lock); return 0; } if (aw88395->aw_pa->status) { aw88395_dev_stop(aw88395->aw_pa); aw88395_start(aw88395, AW88395_SYNC_START); } mutex_unlock(&aw88395->lock); return 1; } static int aw88395_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); struct aw_volume_desc *vol_desc = &aw88395->aw_pa->volume_desc; ucontrol->value.integer.value[0] = vol_desc->ctl_volume; return 0; } static int aw88395_volume_set(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); struct aw_volume_desc *vol_desc = &aw88395->aw_pa->volume_desc; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int value; value = ucontrol->value.integer.value[0]; if (value < mc->min || value > mc->max) return -EINVAL; if (vol_desc->ctl_volume != value) { vol_desc->ctl_volume = value; aw88395_dev_set_volume(aw88395->aw_pa, vol_desc->ctl_volume); return 1; } return 0; } static int aw88395_get_fade_step(struct snd_kcontrol *kcontrol, struct 
snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); ucontrol->value.integer.value[0] = aw88395->aw_pa->fade_step; return 0; } static int aw88395_set_fade_step(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int value; value = ucontrol->value.integer.value[0]; if (value < mc->min || value > mc->max) return -EINVAL; if (aw88395->aw_pa->fade_step != value) { aw88395->aw_pa->fade_step = value; return 1; } return 0; } static int aw88395_re_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); struct aw_device *aw_dev = aw88395->aw_pa; ucontrol->value.integer.value[0] = aw_dev->cali_desc.cali_re; return 0; } static int aw88395_re_set(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct aw_device *aw_dev = aw88395->aw_pa; int value; value = ucontrol->value.integer.value[0]; if (value < mc->min || value > mc->max) return -EINVAL; if (aw_dev->cali_desc.cali_re != value) { aw_dev->cali_desc.cali_re = value; return 1; } return 0; } static const struct snd_kcontrol_new aw88395_controls[] = { SOC_SINGLE_EXT("PCM Playback Volume", AW88395_SYSCTRL2_REG, 6, AW88395_MUTE_VOL, 0, aw88395_volume_get, aw88395_volume_set), SOC_SINGLE_EXT("Fade Step", 0, 0, AW88395_MUTE_VOL, 0, aw88395_get_fade_step, aw88395_set_fade_step), SOC_SINGLE_EXT("Volume Ramp Up Step", 0, 0, FADE_TIME_MAX, FADE_TIME_MIN, aw88395_get_fade_in_time, aw88395_set_fade_in_time), SOC_SINGLE_EXT("Volume Ramp Down Step", 0, 0, FADE_TIME_MAX, FADE_TIME_MIN, aw88395_get_fade_out_time, aw88395_set_fade_out_time), SOC_SINGLE_EXT("Calib", 0, 0, AW88395_CALI_RE_MAX, 0, aw88395_re_get, aw88395_re_set), AW88395_PROFILE_EXT("Profile Set", aw88395_profile_info, aw88395_profile_get, aw88395_profile_set), }; static int aw88395_playback_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(component); mutex_lock(&aw88395->lock); switch (event) { case SND_SOC_DAPM_PRE_PMU: aw88395_start(aw88395, AW88395_ASYNC_START); break; case SND_SOC_DAPM_POST_PMD: aw88395_dev_stop(aw88395->aw_pa); break; default: break; } mutex_unlock(&aw88395->lock); return 0; } static const struct snd_soc_dapm_widget aw88395_dapm_widgets[] = { /* playback */ SND_SOC_DAPM_AIF_IN_E("AIF_RX", "Speaker_Playback", 0, 0, 0, 0, aw88395_playback_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_OUTPUT("DAC Output"), /* capture */ SND_SOC_DAPM_AIF_OUT("AIF_TX", "Speaker_Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_INPUT("ADC Input"), }; static const struct snd_soc_dapm_route aw88395_audio_map[] = { {"DAC Output", NULL, "AIF_RX"}, {"AIF_TX", NULL, "ADC Input"}, }; static int aw88395_codec_probe(struct snd_soc_component *component) { struct snd_soc_dapm_context *dapm = 
snd_soc_component_get_dapm(component); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(component); int ret; INIT_DELAYED_WORK(&aw88395->start_work, aw88395_startup_work); /* add widgets */ ret = snd_soc_dapm_new_controls(dapm, aw88395_dapm_widgets, ARRAY_SIZE(aw88395_dapm_widgets)); if (ret < 0) return ret; /* add route */ ret = snd_soc_dapm_add_routes(dapm, aw88395_audio_map, ARRAY_SIZE(aw88395_audio_map)); if (ret < 0) return ret; ret = snd_soc_add_component_controls(component, aw88395_controls, ARRAY_SIZE(aw88395_controls)); return ret; } static void aw88395_codec_remove(struct snd_soc_component *aw_codec) { struct aw88395 *aw88395 = snd_soc_component_get_drvdata(aw_codec); cancel_delayed_work_sync(&aw88395->start_work); } static const struct snd_soc_component_driver soc_codec_dev_aw88395 = { .probe = aw88395_codec_probe, .remove = aw88395_codec_remove, }; static struct aw88395 *aw88395_malloc_init(struct i2c_client *i2c) { struct aw88395 *aw88395 = devm_kzalloc(&i2c->dev, sizeof(struct aw88395), GFP_KERNEL); if (!aw88395) return NULL; mutex_init(&aw88395->lock); return aw88395; } static void aw88395_hw_reset(struct aw88395 *aw88395) { if (aw88395->reset_gpio) { gpiod_set_value_cansleep(aw88395->reset_gpio, 0); usleep_range(AW88395_1000_US, AW88395_1000_US + 10); gpiod_set_value_cansleep(aw88395->reset_gpio, 1); usleep_range(AW88395_1000_US, AW88395_1000_US + 10); } else { dev_err(aw88395->aw_pa->dev, "%s failed", __func__); } } static int aw88395_request_firmware_file(struct aw88395 *aw88395) { const struct firmware *cont = NULL; int ret; aw88395->aw_pa->fw_status = AW88395_DEV_FW_FAILED; ret = request_firmware(&cont, AW88395_ACF_FILE, aw88395->aw_pa->dev); if ((ret < 0) || (!cont)) { dev_err(aw88395->aw_pa->dev, "load [%s] failed!", AW88395_ACF_FILE); return ret; } dev_info(aw88395->aw_pa->dev, "loaded %s - size: %zu\n", AW88395_ACF_FILE, cont ? 
cont->size : 0); aw88395->aw_cfg = devm_kzalloc(aw88395->aw_pa->dev, cont->size + sizeof(int), GFP_KERNEL); if (!aw88395->aw_cfg) { release_firmware(cont); return -ENOMEM; } aw88395->aw_cfg->len = (int)cont->size; memcpy(aw88395->aw_cfg->data, cont->data, cont->size); release_firmware(cont); ret = aw88395_dev_load_acf_check(aw88395->aw_pa, aw88395->aw_cfg); if (ret < 0) { dev_err(aw88395->aw_pa->dev, "Load [%s] failed ....!", AW88395_ACF_FILE); return ret; } dev_dbg(aw88395->aw_pa->dev, "%s : bin load success\n", __func__); mutex_lock(&aw88395->lock); /* aw device init */ ret = aw88395_dev_init(aw88395->aw_pa, aw88395->aw_cfg); if (ret < 0) dev_err(aw88395->aw_pa->dev, "dev init failed"); mutex_unlock(&aw88395->lock); return ret; } static int aw88395_i2c_probe(struct i2c_client *i2c) { struct aw88395 *aw88395; int ret; if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) { dev_err(&i2c->dev, "check_functionality failed"); return -EIO; } aw88395 = aw88395_malloc_init(i2c); if (!aw88395) { dev_err(&i2c->dev, "malloc aw88395 failed"); return -ENOMEM; } i2c_set_clientdata(i2c, aw88395); aw88395->reset_gpio = devm_gpiod_get_optional(&i2c->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(aw88395->reset_gpio)) dev_info(&i2c->dev, "reset gpio not defined\n"); /* hardware reset */ aw88395_hw_reset(aw88395); aw88395->regmap = devm_regmap_init_i2c(i2c, &aw88395_remap_config); if (IS_ERR(aw88395->regmap)) { ret = PTR_ERR(aw88395->regmap); dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret); return ret; } /* aw pa init */ ret = aw88395_init(&aw88395->aw_pa, i2c, aw88395->regmap); if (ret < 0) return ret; ret = aw88395_request_firmware_file(aw88395); if (ret < 0) { dev_err(&i2c->dev, "%s failed\n", __func__); return ret; } ret = devm_snd_soc_register_component(&i2c->dev, &soc_codec_dev_aw88395, aw88395_dai, ARRAY_SIZE(aw88395_dai)); if (ret < 0) { dev_err(&i2c->dev, "failed to register aw88395: %d", ret); return ret; } return 0; } static const struct i2c_device_id aw88395_i2c_id[] = { { AW88395_I2C_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, aw88395_i2c_id); static struct i2c_driver aw88395_i2c_driver = { .driver = { .name = AW88395_I2C_NAME, }, .probe = aw88395_i2c_probe, .id_table = aw88395_i2c_id, }; module_i2c_driver(aw88395_i2c_driver); MODULE_DESCRIPTION("ASoC AW88395 Smart PA Driver"); MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0+ #include <dt-bindings/clock/aspeed-clock.h> #include <dt-bindings/interrupt-controller/aspeed-scu-ic.h> / { model = "Aspeed BMC"; compatible = "aspeed,ast2500"; #address-cells = <1>; #size-cells = <1>; interrupt-parent = <&vic>; aliases { i2c0 = &i2c0; i2c1 = &i2c1; i2c2 = &i2c2; i2c3 = &i2c3; i2c4 = &i2c4; i2c5 = &i2c5; i2c6 = &i2c6; i2c7 = &i2c7; i2c8 = &i2c8; i2c9 = &i2c9; i2c10 = &i2c10; i2c11 = &i2c11; i2c12 = &i2c12; i2c13 = &i2c13; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; serial5 = &vuart; }; cpus { #address-cells = <1>; #size-cells = <0>; cpu@0 { compatible = "arm,arm1176jzf-s"; device_type = "cpu"; reg = <0>; }; }; memory@80000000 { device_type = "memory"; reg = <0x80000000 0>; }; ahb { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; fmc: spi@1e620000 { reg = <0x1e620000 0xc4>, <0x20000000 0x10000000>; #address-cells = <1>; #size-cells = <0>; compatible = "aspeed,ast2500-fmc"; clocks = <&syscon ASPEED_CLK_AHB>; status = "disabled"; interrupts = <19>; flash@0 { reg = < 0 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; spi-rx-bus-width = <2>; status = "disabled"; }; flash@1 { reg = < 1 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; spi-rx-bus-width = <2>; status = "disabled"; }; flash@2 { reg = < 2 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; spi-rx-bus-width = <2>; status = "disabled"; }; }; spi1: spi@1e630000 { reg = <0x1e630000 0xc4>, <0x30000000 0x08000000>; #address-cells = <1>; #size-cells = <0>; compatible = "aspeed,ast2500-spi"; clocks = <&syscon ASPEED_CLK_AHB>; status = "disabled"; flash@0 { reg = < 0 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; spi-rx-bus-width = <2>; status = "disabled"; }; flash@1 { reg = < 1 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; spi-rx-bus-width = <2>; status = "disabled"; }; }; spi2: spi@1e631000 { reg = <0x1e631000 0xc4>, <0x38000000 0x08000000>; #address-cells = <1>; #size-cells = <0>; compatible = "aspeed,ast2500-spi"; clocks = <&syscon ASPEED_CLK_AHB>; status = "disabled"; flash@0 { reg = < 0 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; spi-rx-bus-width = <2>; status = "disabled"; }; flash@1 { reg = < 1 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; spi-rx-bus-width = <2>; status = "disabled"; }; }; vic: interrupt-controller@1e6c0080 { compatible = "aspeed,ast2400-vic"; interrupt-controller; #interrupt-cells = <1>; valid-sources = <0xfefff7ff 0x0807ffff>; reg = <0x1e6c0080 0x80>; }; cvic: interrupt-controller@1e6c2000 { compatible = "aspeed,ast2500-cvic", "aspeed,cvic"; valid-sources = <0xffffffff>; copro-sw-interrupts = <1>; reg = <0x1e6c2000 0x80>; }; mac0: ethernet@1e660000 { compatible = "aspeed,ast2500-mac", "faraday,ftgmac100"; reg = <0x1e660000 0x180>; interrupts = <2>; clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>; status = "disabled"; }; mac1: ethernet@1e680000 { compatible = "aspeed,ast2500-mac", "faraday,ftgmac100"; reg = <0x1e680000 0x180>; interrupts = <3>; clocks = <&syscon ASPEED_CLK_GATE_MAC2CLK>; status = "disabled"; }; ehci0: usb@1e6a1000 { compatible = "aspeed,ast2500-ehci", "generic-ehci"; reg = <0x1e6a1000 0x100>; interrupts = <5>; clocks = <&syscon ASPEED_CLK_GATE_USBPORT1CLK>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb2ah_default>; status = "disabled"; }; ehci1: usb@1e6a3000 { compatible = "aspeed,ast2500-ehci", "generic-ehci"; reg = <0x1e6a3000 0x100>; interrupts = 
<13>; clocks = <&syscon ASPEED_CLK_GATE_USBPORT2CLK>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb2bh_default>; status = "disabled"; }; uhci: usb@1e6b0000 { compatible = "aspeed,ast2500-uhci", "generic-uhci"; reg = <0x1e6b0000 0x100>; interrupts = <14>; #ports = <2>; clocks = <&syscon ASPEED_CLK_GATE_USBUHCICLK>; status = "disabled"; /* * No default pinmux, it will follow EHCI, use an explicit pinmux * override if you don't enable EHCI */ }; vhub: usb-vhub@1e6a0000 { compatible = "aspeed,ast2500-usb-vhub"; reg = <0x1e6a0000 0x300>; interrupts = <5>; clocks = <&syscon ASPEED_CLK_GATE_USBPORT1CLK>; aspeed,vhub-downstream-ports = <5>; aspeed,vhub-generic-endpoints = <15>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb2ad_default>; status = "disabled"; }; apb { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; edac: memory-controller@1e6e0000 { compatible = "aspeed,ast2500-sdram-edac"; reg = <0x1e6e0000 0x174>; interrupts = <0>; status = "disabled"; }; syscon: syscon@1e6e2000 { compatible = "aspeed,ast2500-scu", "syscon", "simple-mfd"; reg = <0x1e6e2000 0x1a8>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x1e6e2000 0x1000>; #clock-cells = <1>; #reset-cells = <1>; scu_ic: interrupt-controller@18 { #interrupt-cells = <1>; compatible = "aspeed,ast2500-scu-ic"; reg = <0x18 0x4>; interrupts = <21>; interrupt-controller; }; p2a: p2a-control@2c { compatible = "aspeed,ast2500-p2a-ctrl"; reg = <0x2c 0x4>; status = "disabled"; }; silicon-id@7c { compatible = "aspeed,ast2500-silicon-id", "aspeed,silicon-id"; reg = <0x7c 0x4 0x150 0x8>; }; pinctrl: pinctrl@80 { compatible = "aspeed,ast2500-pinctrl"; reg = <0x80 0x18>, <0xa0 0x10>; aspeed,external-nodes = <&gfx>, <&lhc>; }; }; rng: hwrng@1e6e2078 { compatible = "timeriomem_rng"; reg = <0x1e6e2078 0x4>; period = <1>; quality = <100>; }; hace: crypto@1e6e3000 { compatible = "aspeed,ast2500-hace"; reg = <0x1e6e3000 0x100>; interrupts = <4>; clocks = <&syscon ASPEED_CLK_GATE_YCLK>; resets = <&syscon ASPEED_RESET_HACE>; }; gfx: display@1e6e6000 { compatible = "aspeed,ast2500-gfx", "syscon"; reg = <0x1e6e6000 0x1000>; reg-io-width = <4>; clocks = <&syscon ASPEED_CLK_GATE_D1CLK>; resets = <&syscon ASPEED_RESET_CRT1>; syscon = <&syscon>; status = "disabled"; interrupts = <0x19>; }; adc: adc@1e6e9000 { compatible = "aspeed,ast2500-adc"; reg = <0x1e6e9000 0xb0>; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_ADC>; #io-channel-cells = <1>; status = "disabled"; }; video: video@1e700000 { compatible = "aspeed,ast2500-video-engine"; reg = <0x1e700000 0x1000>; clocks = <&syscon ASPEED_CLK_GATE_VCLK>, <&syscon ASPEED_CLK_GATE_ECLK>; clock-names = "vclk", "eclk"; interrupts = <7>; status = "disabled"; }; sram: sram@1e720000 { compatible = "mmio-sram"; reg = <0x1e720000 0x9000>; // 36K ranges; #address-cells = <1>; #size-cells = <1>; }; sdmmc: sd-controller@1e740000 { compatible = "aspeed,ast2500-sd-controller"; reg = <0x1e740000 0x100>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x1e740000 0x10000>; clocks = <&syscon ASPEED_CLK_GATE_SDCLK>; status = "disabled"; sdhci0: sdhci@100 { compatible = "aspeed,ast2500-sdhci"; reg = <0x100 0x100>; interrupts = <26>; sdhci,auto-cmd12; clocks = <&syscon ASPEED_CLK_SDIO>; status = "disabled"; }; sdhci1: sdhci@200 { compatible = "aspeed,ast2500-sdhci"; reg = <0x200 0x100>; interrupts = <26>; sdhci,auto-cmd12; clocks = <&syscon ASPEED_CLK_SDIO>; status = "disabled"; }; }; gpio: gpio@1e780000 { #gpio-cells = <2>; gpio-controller; compatible = 
"aspeed,ast2500-gpio"; reg = <0x1e780000 0x200>; interrupts = <20>; gpio-ranges = <&pinctrl 0 0 232>; clocks = <&syscon ASPEED_CLK_APB>; interrupt-controller; #interrupt-cells = <2>; }; sgpio: sgpio@1e780200 { #gpio-cells = <2>; compatible = "aspeed,ast2500-sgpio"; gpio-controller; interrupts = <40>; reg = <0x1e780200 0x0100>; clocks = <&syscon ASPEED_CLK_APB>; #interrupt-cells = <2>; interrupt-controller; bus-frequency = <12000000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_sgpm_default>; status = "disabled"; }; rtc: rtc@1e781000 { compatible = "aspeed,ast2500-rtc"; reg = <0x1e781000 0x18>; status = "disabled"; }; timer: timer@1e782000 { /* This timer is a Faraday FTTMR010 derivative */ compatible = "aspeed,ast2400-timer"; reg = <0x1e782000 0x90>; interrupts = <16 17 18 35 36 37 38 39>; clocks = <&syscon ASPEED_CLK_APB>; clock-names = "PCLK"; }; uart1: serial@1e783000 { compatible = "ns16550a"; reg = <0x1e783000 0x20>; reg-shift = <2>; interrupts = <9>; clocks = <&syscon ASPEED_CLK_GATE_UART1CLK>; resets = <&lpc_reset 4>; no-loopback-test; status = "disabled"; }; uart5: serial@1e784000 { compatible = "ns16550a"; reg = <0x1e784000 0x20>; reg-shift = <2>; interrupts = <10>; clocks = <&syscon ASPEED_CLK_GATE_UART5CLK>; no-loopback-test; status = "disabled"; }; wdt1: watchdog@1e785000 { compatible = "aspeed,ast2500-wdt"; reg = <0x1e785000 0x20>; clocks = <&syscon ASPEED_CLK_APB>; }; wdt2: watchdog@1e785020 { compatible = "aspeed,ast2500-wdt"; reg = <0x1e785020 0x20>; clocks = <&syscon ASPEED_CLK_APB>; }; wdt3: watchdog@1e785040 { compatible = "aspeed,ast2500-wdt"; reg = <0x1e785040 0x20>; clocks = <&syscon ASPEED_CLK_APB>; status = "disabled"; }; pwm_tacho: pwm-tacho-controller@1e786000 { compatible = "aspeed,ast2500-pwm-tacho"; #address-cells = <1>; #size-cells = <0>; reg = <0x1e786000 0x1000>; clocks = <&syscon ASPEED_CLK_24M>; resets = <&syscon ASPEED_RESET_PWM>; status = "disabled"; }; vuart: serial@1e787000 { compatible = "aspeed,ast2500-vuart"; reg = <0x1e787000 0x40>; reg-shift = <2>; interrupts = <8>; clocks = <&syscon ASPEED_CLK_APB>; no-loopback-test; status = "disabled"; }; lpc: lpc@1e789000 { compatible = "aspeed,ast2500-lpc-v2", "simple-mfd", "syscon"; reg = <0x1e789000 0x1000>; reg-io-width = <4>; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x1e789000 0x1000>; kcs1: kcs@24 { compatible = "aspeed,ast2500-kcs-bmc-v2"; reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; interrupts = <8>; clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; kcs2: kcs@28 { compatible = "aspeed,ast2500-kcs-bmc-v2"; reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; interrupts = <8>; clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; kcs3: kcs@2c { compatible = "aspeed,ast2500-kcs-bmc-v2"; reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; interrupts = <8>; clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; kcs4: kcs@114 { compatible = "aspeed,ast2500-kcs-bmc-v2"; reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>; interrupts = <8>; clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; lpc_ctrl: lpc-ctrl@80 { compatible = "aspeed,ast2500-lpc-ctrl"; reg = <0x80 0x10>; clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; lpc_snoop: lpc-snoop@90 { compatible = "aspeed,ast2500-lpc-snoop"; reg = <0x90 0x8>; interrupts = <8>; clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; lpc_reset: reset-controller@98 { compatible = "aspeed,ast2500-lpc-reset"; reg = <0x98 0x4>; #reset-cells = <1>; }; uart_routing: uart-routing@9c { compatible = 
"aspeed,ast2500-uart-routing"; reg = <0x9c 0x4>; status = "disabled"; }; lhc: lhc@a0 { compatible = "aspeed,ast2500-lhc"; reg = <0xa0 0x24 0xc8 0x8>; }; ibt: ibt@140 { compatible = "aspeed,ast2500-ibt-bmc"; reg = <0x140 0x18>; interrupts = <8>; clocks = <&syscon ASPEED_CLK_GATE_LCLK>; status = "disabled"; }; }; peci0: peci-controller@1e78b000 { compatible = "aspeed,ast2500-peci"; reg = <0x1e78b000 0x60>; interrupts = <15>; clocks = <&syscon ASPEED_CLK_GATE_REFCLK>; resets = <&syscon ASPEED_RESET_PECI>; cmd-timeout-ms = <1000>; clock-frequency = <1000000>; status = "disabled"; }; uart2: serial@1e78d000 { compatible = "ns16550a"; reg = <0x1e78d000 0x20>; reg-shift = <2>; interrupts = <32>; clocks = <&syscon ASPEED_CLK_GATE_UART2CLK>; resets = <&lpc_reset 5>; no-loopback-test; status = "disabled"; }; uart3: serial@1e78e000 { compatible = "ns16550a"; reg = <0x1e78e000 0x20>; reg-shift = <2>; interrupts = <33>; clocks = <&syscon ASPEED_CLK_GATE_UART3CLK>; resets = <&lpc_reset 6>; no-loopback-test; status = "disabled"; }; uart4: serial@1e78f000 { compatible = "ns16550a"; reg = <0x1e78f000 0x20>; reg-shift = <2>; interrupts = <34>; clocks = <&syscon ASPEED_CLK_GATE_UART4CLK>; resets = <&lpc_reset 7>; no-loopback-test; status = "disabled"; }; i2c: bus@1e78a000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x1e78a000 0x1000>; }; }; }; }; &i2c { i2c_ic: interrupt-controller@0 { #interrupt-cells = <1>; compatible = "aspeed,ast2500-i2c-ic"; reg = <0x0 0x40>; interrupts = <12>; interrupt-controller; }; i2c0: i2c@40 { #address-cells = <1>; #size-cells = <0>; reg = <0x40 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <0>; interrupt-parent = <&i2c_ic>; status = "disabled"; /* Does not need pinctrl properties */ }; i2c1: i2c@80 { #address-cells = <1>; #size-cells = <0>; reg = <0x80 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <1>; interrupt-parent = <&i2c_ic>; status = "disabled"; /* Does not need pinctrl properties */ }; i2c2: i2c@c0 { #address-cells = <1>; #size-cells = <0>; reg = <0xc0 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <2>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c3_default>; status = "disabled"; }; i2c3: i2c@100 { #address-cells = <1>; #size-cells = <0>; reg = <0x100 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <3>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c4_default>; status = "disabled"; }; i2c4: i2c@140 { #address-cells = <1>; #size-cells = <0>; reg = <0x140 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <4>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c5_default>; status = "disabled"; }; i2c5: i2c@180 { #address-cells = <1>; #size-cells = <0>; reg = <0x180 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <5>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = 
<&pinctrl_i2c6_default>; status = "disabled"; }; i2c6: i2c@1c0 { #address-cells = <1>; #size-cells = <0>; reg = <0x1c0 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <6>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c7_default>; status = "disabled"; }; i2c7: i2c@300 { #address-cells = <1>; #size-cells = <0>; reg = <0x300 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <7>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c8_default>; status = "disabled"; }; i2c8: i2c@340 { #address-cells = <1>; #size-cells = <0>; reg = <0x340 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <8>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c9_default>; status = "disabled"; }; i2c9: i2c@380 { #address-cells = <1>; #size-cells = <0>; reg = <0x380 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <9>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c10_default>; status = "disabled"; }; i2c10: i2c@3c0 { #address-cells = <1>; #size-cells = <0>; reg = <0x3c0 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <10>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c11_default>; status = "disabled"; }; i2c11: i2c@400 { #address-cells = <1>; #size-cells = <0>; reg = <0x400 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <11>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c12_default>; status = "disabled"; }; i2c12: i2c@440 { #address-cells = <1>; #size-cells = <0>; reg = <0x440 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <12>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c13_default>; status = "disabled"; }; i2c13: i2c@480 { #address-cells = <1>; #size-cells = <0>; reg = <0x480 0x40>; compatible = "aspeed,ast2500-i2c-bus"; clocks = <&syscon ASPEED_CLK_APB>; resets = <&syscon ASPEED_RESET_I2C>; bus-frequency = <100000>; interrupts = <13>; interrupt-parent = <&i2c_ic>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c14_default>; status = "disabled"; }; }; &pinctrl { pinctrl_acpi_default: acpi_default { function = "ACPI"; groups = "ACPI"; }; pinctrl_adc0_default: adc0_default { function = "ADC0"; groups = "ADC0"; }; pinctrl_adc1_default: adc1_default { function = "ADC1"; groups = "ADC1"; }; pinctrl_adc10_default: adc10_default { function = "ADC10"; groups = "ADC10"; }; pinctrl_adc11_default: adc11_default { function = "ADC11"; groups = "ADC11"; }; pinctrl_adc12_default: adc12_default { function = "ADC12"; groups = "ADC12"; }; pinctrl_adc13_default: adc13_default { function = "ADC13"; groups = "ADC13"; }; pinctrl_adc14_default: adc14_default { function = "ADC14"; groups = "ADC14"; }; pinctrl_adc15_default: adc15_default { function = 
"ADC15"; groups = "ADC15"; }; pinctrl_adc2_default: adc2_default { function = "ADC2"; groups = "ADC2"; }; pinctrl_adc3_default: adc3_default { function = "ADC3"; groups = "ADC3"; }; pinctrl_adc4_default: adc4_default { function = "ADC4"; groups = "ADC4"; }; pinctrl_adc5_default: adc5_default { function = "ADC5"; groups = "ADC5"; }; pinctrl_adc6_default: adc6_default { function = "ADC6"; groups = "ADC6"; }; pinctrl_adc7_default: adc7_default { function = "ADC7"; groups = "ADC7"; }; pinctrl_adc8_default: adc8_default { function = "ADC8"; groups = "ADC8"; }; pinctrl_adc9_default: adc9_default { function = "ADC9"; groups = "ADC9"; }; pinctrl_bmcint_default: bmcint_default { function = "BMCINT"; groups = "BMCINT"; }; pinctrl_ddcclk_default: ddcclk_default { function = "DDCCLK"; groups = "DDCCLK"; }; pinctrl_ddcdat_default: ddcdat_default { function = "DDCDAT"; groups = "DDCDAT"; }; pinctrl_espi_default: espi_default { function = "ESPI"; groups = "ESPI"; }; pinctrl_fwspics1_default: fwspics1_default { function = "FWSPICS1"; groups = "FWSPICS1"; }; pinctrl_fwspics2_default: fwspics2_default { function = "FWSPICS2"; groups = "FWSPICS2"; }; pinctrl_gpid0_default: gpid0_default { function = "GPID0"; groups = "GPID0"; }; pinctrl_gpid2_default: gpid2_default { function = "GPID2"; groups = "GPID2"; }; pinctrl_gpid4_default: gpid4_default { function = "GPID4"; groups = "GPID4"; }; pinctrl_gpid6_default: gpid6_default { function = "GPID6"; groups = "GPID6"; }; pinctrl_gpie0_default: gpie0_default { function = "GPIE0"; groups = "GPIE0"; }; pinctrl_gpie2_default: gpie2_default { function = "GPIE2"; groups = "GPIE2"; }; pinctrl_gpie4_default: gpie4_default { function = "GPIE4"; groups = "GPIE4"; }; pinctrl_gpie6_default: gpie6_default { function = "GPIE6"; groups = "GPIE6"; }; pinctrl_i2c10_default: i2c10_default { function = "I2C10"; groups = "I2C10"; }; pinctrl_i2c11_default: i2c11_default { function = "I2C11"; groups = "I2C11"; }; pinctrl_i2c12_default: i2c12_default { function = "I2C12"; groups = "I2C12"; }; pinctrl_i2c13_default: i2c13_default { function = "I2C13"; groups = "I2C13"; }; pinctrl_i2c14_default: i2c14_default { function = "I2C14"; groups = "I2C14"; }; pinctrl_i2c3_default: i2c3_default { function = "I2C3"; groups = "I2C3"; }; pinctrl_i2c4_default: i2c4_default { function = "I2C4"; groups = "I2C4"; }; pinctrl_i2c5_default: i2c5_default { function = "I2C5"; groups = "I2C5"; }; pinctrl_i2c6_default: i2c6_default { function = "I2C6"; groups = "I2C6"; }; pinctrl_i2c7_default: i2c7_default { function = "I2C7"; groups = "I2C7"; }; pinctrl_i2c8_default: i2c8_default { function = "I2C8"; groups = "I2C8"; }; pinctrl_i2c9_default: i2c9_default { function = "I2C9"; groups = "I2C9"; }; pinctrl_lad0_default: lad0_default { function = "LAD0"; groups = "LAD0"; }; pinctrl_lad1_default: lad1_default { function = "LAD1"; groups = "LAD1"; }; pinctrl_lad2_default: lad2_default { function = "LAD2"; groups = "LAD2"; }; pinctrl_lad3_default: lad3_default { function = "LAD3"; groups = "LAD3"; }; pinctrl_lclk_default: lclk_default { function = "LCLK"; groups = "LCLK"; }; pinctrl_lframe_default: lframe_default { function = "LFRAME"; groups = "LFRAME"; }; pinctrl_lpchc_default: lpchc_default { function = "LPCHC"; groups = "LPCHC"; }; pinctrl_lpcpd_default: lpcpd_default { function = "LPCPD"; groups = "LPCPD"; }; pinctrl_lpcplus_default: lpcplus_default { function = "LPCPLUS"; groups = "LPCPLUS"; }; pinctrl_lpcpme_default: lpcpme_default { function = "LPCPME"; groups = "LPCPME"; }; pinctrl_lpcrst_default: 
lpcrst_default { function = "LPCRST"; groups = "LPCRST"; }; pinctrl_lpcsmi_default: lpcsmi_default { function = "LPCSMI"; groups = "LPCSMI"; }; pinctrl_lsirq_default: lsirq_default { function = "LSIRQ"; groups = "LSIRQ"; }; pinctrl_mac1link_default: mac1link_default { function = "MAC1LINK"; groups = "MAC1LINK"; }; pinctrl_mac2link_default: mac2link_default { function = "MAC2LINK"; groups = "MAC2LINK"; }; pinctrl_mdio1_default: mdio1_default { function = "MDIO1"; groups = "MDIO1"; }; pinctrl_mdio2_default: mdio2_default { function = "MDIO2"; groups = "MDIO2"; }; pinctrl_ncts1_default: ncts1_default { function = "NCTS1"; groups = "NCTS1"; }; pinctrl_ncts2_default: ncts2_default { function = "NCTS2"; groups = "NCTS2"; }; pinctrl_ncts3_default: ncts3_default { function = "NCTS3"; groups = "NCTS3"; }; pinctrl_ncts4_default: ncts4_default { function = "NCTS4"; groups = "NCTS4"; }; pinctrl_ndcd1_default: ndcd1_default { function = "NDCD1"; groups = "NDCD1"; }; pinctrl_ndcd2_default: ndcd2_default { function = "NDCD2"; groups = "NDCD2"; }; pinctrl_ndcd3_default: ndcd3_default { function = "NDCD3"; groups = "NDCD3"; }; pinctrl_ndcd4_default: ndcd4_default { function = "NDCD4"; groups = "NDCD4"; }; pinctrl_ndsr1_default: ndsr1_default { function = "NDSR1"; groups = "NDSR1"; }; pinctrl_ndsr2_default: ndsr2_default { function = "NDSR2"; groups = "NDSR2"; }; pinctrl_ndsr3_default: ndsr3_default { function = "NDSR3"; groups = "NDSR3"; }; pinctrl_ndsr4_default: ndsr4_default { function = "NDSR4"; groups = "NDSR4"; }; pinctrl_ndtr1_default: ndtr1_default { function = "NDTR1"; groups = "NDTR1"; }; pinctrl_ndtr2_default: ndtr2_default { function = "NDTR2"; groups = "NDTR2"; }; pinctrl_ndtr3_default: ndtr3_default { function = "NDTR3"; groups = "NDTR3"; }; pinctrl_ndtr4_default: ndtr4_default { function = "NDTR4"; groups = "NDTR4"; }; pinctrl_nri1_default: nri1_default { function = "NRI1"; groups = "NRI1"; }; pinctrl_nri2_default: nri2_default { function = "NRI2"; groups = "NRI2"; }; pinctrl_nri3_default: nri3_default { function = "NRI3"; groups = "NRI3"; }; pinctrl_nri4_default: nri4_default { function = "NRI4"; groups = "NRI4"; }; pinctrl_nrts1_default: nrts1_default { function = "NRTS1"; groups = "NRTS1"; }; pinctrl_nrts2_default: nrts2_default { function = "NRTS2"; groups = "NRTS2"; }; pinctrl_nrts3_default: nrts3_default { function = "NRTS3"; groups = "NRTS3"; }; pinctrl_nrts4_default: nrts4_default { function = "NRTS4"; groups = "NRTS4"; }; pinctrl_oscclk_default: oscclk_default { function = "OSCCLK"; groups = "OSCCLK"; }; pinctrl_pewake_default: pewake_default { function = "PEWAKE"; groups = "PEWAKE"; }; pinctrl_pnor_default: pnor_default { function = "PNOR"; groups = "PNOR"; }; pinctrl_pwm0_default: pwm0_default { function = "PWM0"; groups = "PWM0"; }; pinctrl_pwm1_default: pwm1_default { function = "PWM1"; groups = "PWM1"; }; pinctrl_pwm2_default: pwm2_default { function = "PWM2"; groups = "PWM2"; }; pinctrl_pwm3_default: pwm3_default { function = "PWM3"; groups = "PWM3"; }; pinctrl_pwm4_default: pwm4_default { function = "PWM4"; groups = "PWM4"; }; pinctrl_pwm5_default: pwm5_default { function = "PWM5"; groups = "PWM5"; }; pinctrl_pwm6_default: pwm6_default { function = "PWM6"; groups = "PWM6"; }; pinctrl_pwm7_default: pwm7_default { function = "PWM7"; groups = "PWM7"; }; pinctrl_rgmii1_default: rgmii1_default { function = "RGMII1"; groups = "RGMII1"; }; pinctrl_rgmii2_default: rgmii2_default { function = "RGMII2"; groups = "RGMII2"; }; pinctrl_rmii1_default: rmii1_default { function = "RMII1"; 
groups = "RMII1"; }; pinctrl_rmii2_default: rmii2_default { function = "RMII2"; groups = "RMII2"; }; pinctrl_rxd1_default: rxd1_default { function = "RXD1"; groups = "RXD1"; }; pinctrl_rxd2_default: rxd2_default { function = "RXD2"; groups = "RXD2"; }; pinctrl_rxd3_default: rxd3_default { function = "RXD3"; groups = "RXD3"; }; pinctrl_rxd4_default: rxd4_default { function = "RXD4"; groups = "RXD4"; }; pinctrl_salt1_default: salt1_default { function = "SALT1"; groups = "SALT1"; }; pinctrl_salt10_default: salt10_default { function = "SALT10"; groups = "SALT10"; }; pinctrl_salt11_default: salt11_default { function = "SALT11"; groups = "SALT11"; }; pinctrl_salt12_default: salt12_default { function = "SALT12"; groups = "SALT12"; }; pinctrl_salt13_default: salt13_default { function = "SALT13"; groups = "SALT13"; }; pinctrl_salt14_default: salt14_default { function = "SALT14"; groups = "SALT14"; }; pinctrl_salt2_default: salt2_default { function = "SALT2"; groups = "SALT2"; }; pinctrl_salt3_default: salt3_default { function = "SALT3"; groups = "SALT3"; }; pinctrl_salt4_default: salt4_default { function = "SALT4"; groups = "SALT4"; }; pinctrl_salt5_default: salt5_default { function = "SALT5"; groups = "SALT5"; }; pinctrl_salt6_default: salt6_default { function = "SALT6"; groups = "SALT6"; }; pinctrl_salt7_default: salt7_default { function = "SALT7"; groups = "SALT7"; }; pinctrl_salt8_default: salt8_default { function = "SALT8"; groups = "SALT8"; }; pinctrl_salt9_default: salt9_default { function = "SALT9"; groups = "SALT9"; }; pinctrl_scl1_default: scl1_default { function = "SCL1"; groups = "SCL1"; }; pinctrl_scl2_default: scl2_default { function = "SCL2"; groups = "SCL2"; }; pinctrl_sd1_default: sd1_default { function = "SD1"; groups = "SD1"; }; pinctrl_sd2_default: sd2_default { function = "SD2"; groups = "SD2"; }; pinctrl_sda1_default: sda1_default { function = "SDA1"; groups = "SDA1"; }; pinctrl_sda2_default: sda2_default { function = "SDA2"; groups = "SDA2"; }; pinctrl_sgpm_default: sgpm_default { function = "SGPM"; groups = "SGPM"; }; pinctrl_sgps1_default: sgps1_default { function = "SGPS1"; groups = "SGPS1"; }; pinctrl_sgps2_default: sgps2_default { function = "SGPS2"; groups = "SGPS2"; }; pinctrl_sioonctrl_default: sioonctrl_default { function = "SIOONCTRL"; groups = "SIOONCTRL"; }; pinctrl_siopbi_default: siopbi_default { function = "SIOPBI"; groups = "SIOPBI"; }; pinctrl_siopbo_default: siopbo_default { function = "SIOPBO"; groups = "SIOPBO"; }; pinctrl_siopwreq_default: siopwreq_default { function = "SIOPWREQ"; groups = "SIOPWREQ"; }; pinctrl_siopwrgd_default: siopwrgd_default { function = "SIOPWRGD"; groups = "SIOPWRGD"; }; pinctrl_sios3_default: sios3_default { function = "SIOS3"; groups = "SIOS3"; }; pinctrl_sios5_default: sios5_default { function = "SIOS5"; groups = "SIOS5"; }; pinctrl_siosci_default: siosci_default { function = "SIOSCI"; groups = "SIOSCI"; }; pinctrl_spi1_default: spi1_default { function = "SPI1"; groups = "SPI1"; }; pinctrl_spi1cs1_default: spi1cs1_default { function = "SPI1CS1"; groups = "SPI1CS1"; }; pinctrl_spi1debug_default: spi1debug_default { function = "SPI1DEBUG"; groups = "SPI1DEBUG"; }; pinctrl_spi1passthru_default: spi1passthru_default { function = "SPI1PASSTHRU"; groups = "SPI1PASSTHRU"; }; pinctrl_spi2ck_default: spi2ck_default { function = "SPI2CK"; groups = "SPI2CK"; }; pinctrl_spi2cs0_default: spi2cs0_default { function = "SPI2CS0"; groups = "SPI2CS0"; }; pinctrl_spi2cs1_default: spi2cs1_default { function = "SPI2CS1"; groups = "SPI2CS1"; }; 
pinctrl_spi2miso_default: spi2miso_default { function = "SPI2MISO"; groups = "SPI2MISO"; }; pinctrl_spi2mosi_default: spi2mosi_default { function = "SPI2MOSI"; groups = "SPI2MOSI"; }; pinctrl_timer3_default: timer3_default { function = "TIMER3"; groups = "TIMER3"; }; pinctrl_timer4_default: timer4_default { function = "TIMER4"; groups = "TIMER4"; }; pinctrl_timer5_default: timer5_default { function = "TIMER5"; groups = "TIMER5"; }; pinctrl_timer6_default: timer6_default { function = "TIMER6"; groups = "TIMER6"; }; pinctrl_timer7_default: timer7_default { function = "TIMER7"; groups = "TIMER7"; }; pinctrl_timer8_default: timer8_default { function = "TIMER8"; groups = "TIMER8"; }; pinctrl_txd1_default: txd1_default { function = "TXD1"; groups = "TXD1"; }; pinctrl_txd2_default: txd2_default { function = "TXD2"; groups = "TXD2"; }; pinctrl_txd3_default: txd3_default { function = "TXD3"; groups = "TXD3"; }; pinctrl_txd4_default: txd4_default { function = "TXD4"; groups = "TXD4"; }; pinctrl_uart6_default: uart6_default { function = "UART6"; groups = "UART6"; }; pinctrl_usbcki_default: usbcki_default { function = "USBCKI"; groups = "USBCKI"; }; pinctrl_usb2ah_default: usb2ah_default { function = "USB2AH"; groups = "USB2AH"; }; pinctrl_usb2ad_default: usb2ad_default { function = "USB2AD"; groups = "USB2AD"; }; pinctrl_usb11bhid_default: usb11bhid_default { function = "USB11BHID"; groups = "USB11BHID"; }; pinctrl_usb2bh_default: usb2bh_default { function = "USB2BH"; groups = "USB2BH"; }; pinctrl_vgabiosrom_default: vgabiosrom_default { function = "VGABIOSROM"; groups = "VGABIOSROM"; }; pinctrl_vgahs_default: vgahs_default { function = "VGAHS"; groups = "VGAHS"; }; pinctrl_vgavs_default: vgavs_default { function = "VGAVS"; groups = "VGAVS"; }; pinctrl_vpi24_default: vpi24_default { function = "VPI24"; groups = "VPI24"; }; pinctrl_vpo_default: vpo_default { function = "VPO"; groups = "VPO"; }; pinctrl_wdtrst1_default: wdtrst1_default { function = "WDTRST1"; groups = "WDTRST1"; }; pinctrl_wdtrst2_default: wdtrst2_default { function = "WDTRST2"; groups = "WDTRST2"; }; };
// SPDX-License-Identifier: GPL-2.0-only /**************************************************************************** * Driver for Solarflare network controllers and boards * Copyright 2018 Solarflare Communications Inc. * Copyright 2019-2020 Xilinx Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <net/ip6_checksum.h> #include "net_driver.h" #include "tx_common.h" #include "nic_common.h" #include "mcdi_functions.h" #include "ef100_regs.h" #include "io.h" #include "ef100_tx.h" #include "ef100_nic.h" int ef100_tx_probe(struct efx_tx_queue *tx_queue) { /* Allocate an extra descriptor for the QMDA status completion entry */ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, (tx_queue->ptr_mask + 2) * sizeof(efx_oword_t), GFP_KERNEL); } void ef100_tx_init(struct efx_tx_queue *tx_queue) { /* must be the inverse of lookup in efx_get_tx_channel */ tx_queue->core_txq = netdev_get_tx_queue(tx_queue->efx->net_dev, tx_queue->channel->channel - tx_queue->efx->tx_channel_offset); /* This value is purely documentational; as EF100 never passes through * the switch statement in tx.c:__efx_enqueue_skb(), that switch does * not handle case 3. EF100's TSOv3 descriptors are generated by * ef100_make_tso_desc(). * Meanwhile, all efx_mcdi_tx_init() cares about is that it's not 2. */ tx_queue->tso_version = 3; if (efx_mcdi_tx_init(tx_queue)) netdev_WARN(tx_queue->efx->net_dev, "failed to initialise TXQ %d\n", tx_queue->queue); } static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; struct ef100_nic_data *nic_data; struct efx_tx_buffer *buffer; size_t header_len; u32 mss; nic_data = efx->nic_data; if (!skb_is_gso_tcp(skb)) return false; if (!(efx->net_dev->features & NETIF_F_TSO)) return false; mss = skb_shinfo(skb)->gso_size; if (unlikely(mss < 4)) { WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss); return false; } header_len = efx_tx_tso_header_length(skb); if (header_len > nic_data->tso_max_hdr_len) return false; if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) { /* net_dev->gso_max_segs should've caught this */ WARN_ON_ONCE(1); return false; } if (skb->data_len / mss > nic_data->tso_max_frames) return false; /* net_dev->gso_max_size should've caught this */ if (WARN_ON_ONCE(skb->data_len > nic_data->tso_max_payload_len)) return false; /* Reserve an empty buffer for the TSO V3 descriptor. * Convey the length of the header since we already know it. 
*/ buffer = efx_tx_queue_get_insert_buffer(tx_queue); buffer->flags = EFX_TX_BUF_TSO_V3 | EFX_TX_BUF_CONT; buffer->len = header_len; buffer->unmap_len = 0; buffer->skb = skb; ++tx_queue->insert_count; return true; } static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) { if (likely(tx_queue->txd.addr)) return ((efx_oword_t *)tx_queue->txd.addr) + index; else return NULL; } static void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue) { unsigned int write_ptr; efx_dword_t reg; tx_queue->xmit_pending = false; if (unlikely(tx_queue->notify_count == tx_queue->write_count)) return; write_ptr = tx_queue->write_count & tx_queue->ptr_mask; /* The write pointer goes into the high word */ EFX_POPULATE_DWORD_1(reg, ERF_GZ_TX_RING_PIDX, write_ptr); efx_writed_page(tx_queue->efx, &reg, ER_GZ_TX_RING_DOORBELL, tx_queue->queue); tx_queue->notify_count = tx_queue->write_count; } static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue) { ef100_notify_tx_desc(tx_queue); ++tx_queue->pushes; } static void ef100_set_tx_csum_partial(const struct sk_buff *skb, struct efx_tx_buffer *buffer, efx_oword_t *txd) { efx_oword_t csum; int csum_start; if (!skb || skb->ip_summed != CHECKSUM_PARTIAL) return; /* skb->csum_start has the offset from head, but we need the offset * from data. */ csum_start = skb_checksum_start_offset(skb); EFX_POPULATE_OWORD_3(csum, ESF_GZ_TX_SEND_CSO_PARTIAL_EN, 1, ESF_GZ_TX_SEND_CSO_PARTIAL_START_W, csum_start >> 1, ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W, skb->csum_offset >> 1); EFX_OR_OWORD(*txd, *txd, csum); } static void ef100_set_tx_hw_vlan(const struct sk_buff *skb, efx_oword_t *txd) { u16 vlan_tci = skb_vlan_tag_get(skb); efx_oword_t vlan; EFX_POPULATE_OWORD_2(vlan, ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1, ESF_GZ_TX_SEND_VLAN_INSERT_TCI, vlan_tci); EFX_OR_OWORD(*txd, *txd, vlan); } static void ef100_make_send_desc(struct efx_nic *efx, const struct sk_buff *skb, struct efx_tx_buffer *buffer, efx_oword_t *txd, unsigned int segment_count) { /* TX send descriptor */ EFX_POPULATE_OWORD_3(*txd, ESF_GZ_TX_SEND_NUM_SEGS, segment_count, ESF_GZ_TX_SEND_LEN, buffer->len, ESF_GZ_TX_SEND_ADDR, buffer->dma_addr); if (likely(efx->net_dev->features & NETIF_F_HW_CSUM)) ef100_set_tx_csum_partial(skb, buffer, txd); if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX && skb && skb_vlan_tag_present(skb)) ef100_set_tx_hw_vlan(skb, txd); } static void ef100_make_tso_desc(struct efx_nic *efx, const struct sk_buff *skb, struct efx_tx_buffer *buffer, efx_oword_t *txd, unsigned int segment_count) { bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; unsigned int len, ip_offset, tcp_offset, payload_segs; u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16; unsigned int outer_ip_offset, outer_l4_offset; u16 vlan_tci = skb_vlan_tag_get(skb); u32 mss = skb_shinfo(skb)->gso_size; bool encap = skb->encapsulation; bool udp_encap = false; u16 vlan_enable = 0; struct tcphdr *tcp; bool outer_csum; u32 paylen; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID) mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP; if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX) vlan_enable = skb_vlan_tag_present(skb); len = skb->len - buffer->len; /* We use 1 for the TSO descriptor and 1 for the header */ payload_segs = segment_count - 2; if (encap) { outer_ip_offset = skb_network_offset(skb); outer_l4_offset = skb_transport_offset(skb); ip_offset = skb_inner_network_offset(skb); tcp_offset = skb_inner_transport_offset(skb); if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) 
udp_encap = true; } else { ip_offset = skb_network_offset(skb); tcp_offset = skb_transport_offset(skb); outer_ip_offset = outer_l4_offset = 0; } outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM; /* subtract TCP payload length from inner checksum */ tcp = (void *)skb->data + tcp_offset; paylen = skb->len - tcp_offset; csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen)); EFX_POPULATE_OWORD_19(*txd, ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO, ESF_GZ_TX_TSO_MSS, mss, ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1, ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs, ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1, ESF_GZ_TX_TSO_PAYLOAD_LEN, len, ESF_GZ_TX_TSO_CSO_OUTER_L4, outer_csum, ESF_GZ_TX_TSO_CSO_INNER_L4, 1, ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1, ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1, ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid, ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1, ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_ip_offset >> 1, ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1, ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial, ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial, ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid : ESE_GZ_TX_DESC_IP4_ID_NO_OP, ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable, ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci ); } static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, unsigned int segment_count, struct efx_rep *efv) { unsigned int old_write_count = tx_queue->write_count; unsigned int new_write_count = old_write_count; struct efx_tx_buffer *buffer; unsigned int next_desc_type; unsigned int write_ptr; efx_oword_t *txd; unsigned int nr_descs = tx_queue->insert_count - old_write_count; if (unlikely(nr_descs == 0)) return; if (segment_count) next_desc_type = ESE_GZ_TX_DESC_TYPE_TSO; else next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND; if (unlikely(efv)) { /* Create TX override descriptor */ write_ptr = new_write_count & tx_queue->ptr_mask; txd = ef100_tx_desc(tx_queue, write_ptr); ++new_write_count; tx_queue->packet_write_count = new_write_count; EFX_POPULATE_OWORD_3(*txd, ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX, ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport, ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1); nr_descs--; } /* if it's a raw write (such as XDP) then always SEND single frames */ if (!skb) nr_descs = 1; do { write_ptr = new_write_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[write_ptr]; txd = ef100_tx_desc(tx_queue, write_ptr); ++new_write_count; /* Create TX descriptor ring entry */ tx_queue->packet_write_count = new_write_count; switch (next_desc_type) { case ESE_GZ_TX_DESC_TYPE_SEND: ef100_make_send_desc(tx_queue->efx, skb, buffer, txd, nr_descs); break; case ESE_GZ_TX_DESC_TYPE_TSO: /* TX TSO descriptor */ WARN_ON_ONCE(!(buffer->flags & EFX_TX_BUF_TSO_V3)); ef100_make_tso_desc(tx_queue->efx, skb, buffer, txd, nr_descs); break; default: /* TX segment descriptor */ EFX_POPULATE_OWORD_3(*txd, ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG, ESF_GZ_TX_SEG_LEN, buffer->len, ESF_GZ_TX_SEG_ADDR, buffer->dma_addr); } /* if it's a raw write (such as XDP) then always SEND */ next_desc_type = skb ? 
ESE_GZ_TX_DESC_TYPE_SEG : ESE_GZ_TX_DESC_TYPE_SEND; /* mark as an EFV buffer if applicable */ if (unlikely(efv)) buffer->flags |= EFX_TX_BUF_EFV; } while (new_write_count != tx_queue->insert_count); wmb(); /* Ensure descriptors are written before they are fetched */ tx_queue->write_count = new_write_count; /* The write_count above must be updated before reading * channel->holdoff_doorbell to avoid a race with the * completion path, so ensure these operations are not * re-ordered. This also flushes the update of write_count * back into the cache. */ smp_mb(); } void ef100_tx_write(struct efx_tx_queue *tx_queue) { ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL); ef100_tx_push_buffers(tx_queue); } int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event) { unsigned int tx_done = EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC); unsigned int qlabel = EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_Q_LABEL); struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, qlabel); unsigned int tx_index = (tx_queue->read_count + tx_done - 1) & tx_queue->ptr_mask; return efx_xmit_done(tx_queue, tx_index); } /* Add a socket buffer to a TX queue * * You must hold netif_tx_lock() to call this function. * * Returns 0 on success, error code otherwise. In case of an error this * function will free the SKB. */ netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { return __ef100_enqueue_skb(tx_queue, skb, NULL); } int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb, struct efx_rep *efv) { unsigned int old_insert_count = tx_queue->insert_count; struct efx_nic *efx = tx_queue->efx; bool xmit_more = netdev_xmit_more(); unsigned int fill_level; unsigned int segments; int rc; if (!tx_queue->buffer || !tx_queue->ptr_mask) { netif_stop_queue(efx->net_dev); dev_kfree_skb_any(skb); return -ENODEV; } segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; if (segments == 1) segments = 0; /* Don't use TSO/GSO for a single segment. */ if (segments && !ef100_tx_can_tso(tx_queue, skb)) { rc = efx_tx_tso_fallback(tx_queue, skb); tx_queue->tso_fallbacks++; if (rc) goto err; else return 0; } if (unlikely(efv)) { struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue); /* Drop representor packets if the queue is stopped. * We currently don't assert backoff to representors so this is * to make sure representor traffic can't starve the main * net device. * And, of course, if there are no TX descriptors left. */ if (netif_tx_queue_stopped(tx_queue->core_txq) || unlikely(efx_tx_buffer_in_use(buffer))) { atomic64_inc(&efv->stats.tx_errors); rc = -ENOSPC; goto err; } /* Also drop representor traffic if it could cause us to * stop the queue. If we assert backoff and we haven't * received traffic on the main net device recently then the * TX watchdog can go off erroneously. 
*/ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); fill_level += efx_tx_max_skb_descs(efx); if (fill_level > efx->txq_stop_thresh) { struct efx_tx_queue *txq2; /* Refresh cached fill level and re-check */ efx_for_each_channel_tx_queue(txq2, tx_queue->channel) txq2->old_read_count = READ_ONCE(txq2->read_count); fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); fill_level += efx_tx_max_skb_descs(efx); if (fill_level > efx->txq_stop_thresh) { atomic64_inc(&efv->stats.tx_errors); rc = -ENOSPC; goto err; } } buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV; tx_queue->insert_count++; } /* Map for DMA and create descriptors */ rc = efx_tx_map_data(tx_queue, skb, segments); if (rc) goto err; ef100_tx_make_descriptors(tx_queue, skb, segments, efv); fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); if (fill_level > efx->txq_stop_thresh) { struct efx_tx_queue *txq2; /* Because of checks above, representor traffic should * not be able to stop the queue. */ WARN_ON(efv); netif_tx_stop_queue(tx_queue->core_txq); /* Re-read after a memory barrier in case we've raced with * the completion path. Otherwise there's a danger we'll never * restart the queue if all completions have just happened. */ smp_mb(); efx_for_each_channel_tx_queue(txq2, tx_queue->channel) txq2->old_read_count = READ_ONCE(txq2->read_count); fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); if (fill_level < efx->txq_stop_thresh) netif_tx_start_queue(tx_queue->core_txq); } tx_queue->xmit_pending = true; /* If xmit_more then we don't need to push the doorbell, unless there * are 256 descriptors already queued in which case we have to push to * ensure we never push more than 256 at once. * * Always push for representor traffic, and don't account it to parent * PF netdevice's BQL. */ if (unlikely(efv) || __netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) || tx_queue->write_count - tx_queue->notify_count > 255) ef100_tx_push_buffers(tx_queue); if (segments) { tx_queue->tso_bursts++; tx_queue->tso_packets += segments; tx_queue->tx_packets += segments; } else { tx_queue->tx_packets++; } return 0; err: efx_enqueue_unwind(tx_queue, old_insert_count); if (!IS_ERR_OR_NULL(skb)) dev_kfree_skb_any(skb); /* If we're not expecting another transmit and we had something to push * on this queue then we need to push here to get the previous packets * out. We only enter this branch from before the xmit_more handling * above, so xmit_pending still refers to the old state. */ if (tx_queue->xmit_pending && !xmit_more) ef100_tx_push_buffers(tx_queue); return rc; }
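/*
 * Illustrative sketch, not part of the ef100 driver above: the doorbell
 * coalescing decision at the end of __ef100_enqueue_skb(), reduced to a
 * self-contained helper so the condition is easier to follow.  The 256
 * unnotified-descriptor limit mirrors the driver; the "toy_" names and the
 * standalone form are assumptions made purely for illustration (the real
 * code also feeds BQL via __netdev_tx_sent_queue()).
 */
#include <stdbool.h>

struct toy_txq {
	unsigned int write_count;	/* descriptors created so far */
	unsigned int notify_count;	/* descriptors the NIC was last told about */
};

/* Return true when the TX doorbell must be rung now. */
static bool toy_must_push(const struct toy_txq *q, bool xmit_more, bool is_representor)
{
	if (is_representor)	/* representor traffic is always pushed */
		return true;
	if (!xmit_more)		/* no further frames are coming; flush now */
		return true;
	/* never let more than 256 descriptors accumulate unnotified */
	return q->write_count - q->notify_count > 255;
}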
// SPDX-License-Identifier: GPL-2.0-or-later /* * IP Payload Compression Protocol (IPComp) for IPv6 - RFC3173 * * Copyright (C)2003 USAGI/WIDE Project * * Author Mitsuru KANDA <[email protected]> */ /* * [Memo] * * Outbound: * The compression of IP datagram MUST be done before AH/ESP processing, * fragmentation, and the addition of Hop-by-Hop/Routing header. * * Inbound: * The decompression of IP datagram MUST be done after the reassembly, * AH/ESP processing. */ #define pr_fmt(fmt) "IPv6: " fmt #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/ipcomp.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/pfkeyv2.h> #include <linux/random.h> #include <linux/percpu.h> #include <linux/smp.h> #include <linux/list.h> #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include <net/ip6_route.h> #include <net/icmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/mutex.h> static int ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); __be32 spi; const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data; struct ip_comp_hdr *ipcomph = (struct ip_comp_hdr *)(skb->data + offset); struct xfrm_state *x; if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT) return 0; spi = htonl(ntohs(ipcomph->cpi)); x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); if (!x) return 0; if (type == NDISC_REDIRECT) ip6_redirect(skb, net, skb->dev->ifindex, 0, sock_net_uid(net, NULL)); else ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL)); xfrm_state_put(x); return 0; } static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) { struct net *net = xs_net(x); struct xfrm_state *t = NULL; t = xfrm_state_alloc(net); if (!t) goto out; t->id.proto = IPPROTO_IPV6; t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr); if (!t->id.spi) goto error; memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr)); memcpy(&t->sel, &x->sel, sizeof(t->sel)); t->props.family = AF_INET6; t->props.mode = x->props.mode; memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr)); memcpy(&t->mark, &x->mark, sizeof(t->mark)); t->if_id = x->if_id; if (xfrm_init_state(t)) goto error; atomic_set(&t->tunnel_users, 1); out: return t; error: t->km.state = XFRM_STATE_DEAD; xfrm_state_put(t); t = NULL; goto out; } static int ipcomp6_tunnel_attach(struct xfrm_state *x) { struct net *net = xs_net(x); int err = 0; struct xfrm_state *t = NULL; __be32 spi; u32 mark = x->mark.m & x->mark.v; spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&x->props.saddr); if (spi) t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr, spi, IPPROTO_IPV6, AF_INET6); if (!t) { t = ipcomp6_tunnel_create(x); if (!t) { err = -EINVAL; goto out; } xfrm_state_insert(t); xfrm_state_hold(t); } x->tunnel = t; atomic_inc(&t->tunnel_users); out: return err; } static int ipcomp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) { int err = -EINVAL; x->props.header_len = 0; switch (x->props.mode) { case XFRM_MODE_TRANSPORT: break; case XFRM_MODE_TUNNEL: x->props.header_len += sizeof(struct ipv6hdr); break; default: NL_SET_ERR_MSG(extack, "Unsupported XFRM mode for IPcomp"); goto out; } err = ipcomp_init_state(x, extack); if (err) goto out; if (x->props.mode == XFRM_MODE_TUNNEL) { err = ipcomp6_tunnel_attach(x); if (err) { 
NL_SET_ERR_MSG(extack, "Kernel error: failed to initialize the associated state"); goto out; } } err = 0; out: return err; } static int ipcomp6_rcv_cb(struct sk_buff *skb, int err) { return 0; } static const struct xfrm_type ipcomp6_type = { .owner = THIS_MODULE, .proto = IPPROTO_COMP, .init_state = ipcomp6_init_state, .destructor = ipcomp_destroy, .input = ipcomp_input, .output = ipcomp_output, }; static struct xfrm6_protocol ipcomp6_protocol = { .handler = xfrm6_rcv, .input_handler = xfrm_input, .cb_handler = ipcomp6_rcv_cb, .err_handler = ipcomp6_err, .priority = 0, }; static int __init ipcomp6_init(void) { if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) { pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (xfrm6_protocol_register(&ipcomp6_protocol, IPPROTO_COMP) < 0) { pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&ipcomp6_type, AF_INET6); return -EAGAIN; } return 0; } static void __exit ipcomp6_fini(void) { if (xfrm6_protocol_deregister(&ipcomp6_protocol, IPPROTO_COMP) < 0) pr_info("%s: can't remove protocol\n", __func__); xfrm_unregister_type(&ipcomp6_type, AF_INET6); } module_init(ipcomp6_init); module_exit(ipcomp6_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) for IPv6 - RFC3173"); MODULE_AUTHOR("Mitsuru KANDA <[email protected]>"); MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_COMP);
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Support for AltoBeam GB20600 (a.k.a DMB-TH) demodulator * ATBM8830, ATBM8831 * * Copyright (C) 2009 David T.L. Wong <[email protected]> */ #ifndef __ATBM8830_H__ #define __ATBM8830_H__ #include <linux/dvb/frontend.h> #include <linux/i2c.h> #define ATBM8830_PROD_8830 0 #define ATBM8830_PROD_8831 1 struct atbm8830_config { /* product type */ u8 prod; /* the demodulator's i2c address */ u8 demod_address; /* parallel or serial transport stream */ u8 serial_ts; /* transport stream clock output only when receiving valid stream */ u8 ts_clk_gated; /* Decoder sample TS data at rising edge of clock */ u8 ts_sampling_edge; /* Oscillator clock frequency */ u32 osc_clk_freq; /* in kHz */ /* IF frequency */ u32 if_freq; /* in kHz */ /* Swap I/Q for zero IF */ u8 zif_swap_iq; /* Tuner AGC settings */ u8 agc_min; u8 agc_max; u8 agc_hold_loop; }; #if IS_REACHABLE(CONFIG_DVB_ATBM8830) extern struct dvb_frontend *atbm8830_attach(const struct atbm8830_config *config, struct i2c_adapter *i2c); #else static inline struct dvb_frontend *atbm8830_attach(const struct atbm8830_config *config, struct i2c_adapter *i2c) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); return NULL; } #endif /* CONFIG_DVB_ATBM8830 */ #endif /* __ATBM8830_H__ */
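/*
 * Illustrative sketch, not part of the original header: one plausible way a
 * DVB adapter/board driver could fill in atbm8830_config and attach the
 * frontend declared above. All "demo_" names and the concrete config values
 * (I2C address, oscillator, IF) are assumptions for illustration only and
 * would have to match the real board wiring.
 */
#include <linux/i2c.h>
#include "atbm8830.h"

static const struct atbm8830_config demo_atbm8830_cfg = {
	.prod		= ATBM8830_PROD_8831,
	.demod_address	= 0x40,		/* hypothetical I2C address */
	.serial_ts	= 0,		/* parallel transport stream */
	.ts_clk_gated	= 0,
	.osc_clk_freq	= 30400,	/* kHz, assumed crystal */
	.if_freq	= 5000,		/* kHz, assumed IF */
	.zif_swap_iq	= 0,
	.agc_min	= 0x2e,
	.agc_max	= 0x90,
	.agc_hold_loop	= 0,
};

static struct dvb_frontend *demo_attach_atbm8830(struct i2c_adapter *i2c)
{
	/* Returns NULL if the demod is absent or the driver is disabled. */
	return atbm8830_attach(&demo_atbm8830_cfg, i2c);
}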
// SPDX-License-Identifier: GPL-2.0 // // Copyright (C) 2019 Logic PD, Inc. / { keyboard { compatible = "gpio-keys"; button-0 { gpios = <&pcf8575 0 GPIO_ACTIVE_LOW>; label = "btn0"; linux,code = <KEY_WAKEUP>; debounce-interval = <10>; wakeup-source; }; button-1 { gpios = <&pcf8575 1 GPIO_ACTIVE_LOW>; label = "btn1"; linux,code = <KEY_WAKEUP>; debounce-interval = <10>; wakeup-source; }; button-2 { gpios = <&pcf8575 2 GPIO_ACTIVE_LOW>; label = "btn2"; linux,code = <KEY_WAKEUP>; debounce-interval = <10>; wakeup-source; }; button-3 { gpios = <&pcf8575 3 GPIO_ACTIVE_LOW>; label = "btn3"; linux,code = <KEY_WAKEUP>; debounce-interval = <10>; wakeup-source; }; }; leds { compatible = "gpio-leds"; gen-led0 { label = "led0"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_led0>; gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>; linux,default-trigger = "cpu0"; }; gen-led1 { label = "led1"; gpios = <&pcf8575 8 GPIO_ACTIVE_HIGH>; }; gen-led2 { label = "led2"; gpios = <&pcf8575 9 GPIO_ACTIVE_HIGH>; linux,default-trigger = "heartbeat"; }; gen-led3 { label = "led3"; gpios = <&pcf8575 10 GPIO_ACTIVE_HIGH>; linux,default-trigger = "default-on"; }; }; reg_usb_otg_vbus: regulator-otg-vbus { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_usb_otg>; compatible = "regulator-fixed"; regulator-name = "usb_otg_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; gpio = <&gpio4 15 GPIO_ACTIVE_HIGH>; enable-active-high; }; reg_usb_h1_vbus: regulator-usb-h1-vbus { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_usb_h1_vbus>; compatible = "regulator-fixed"; regulator-name = "usb_h1_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>; startup-delay-us = <70000>; enable-active-high; }; reg_3v3: regulator-3v3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_3v3>; compatible = "regulator-fixed"; regulator-name = "reg_3v3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio1 26 GPIO_ACTIVE_HIGH>; startup-delay-us = <70000>; enable-active-high; regulator-always-on; }; reg_enet: regulator-ethernet { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_enet>; compatible = "regulator-fixed"; regulator-name = "ethernet-supply"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio3 31 GPIO_ACTIVE_HIGH>; startup-delay-us = <70000>; enable-active-high; vin-supply = <&sw4_reg>; }; reg_audio: regulator-audio { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_audio>; compatible = "regulator-fixed"; regulator-name = "3v3_aud"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio1 29 GPIO_ACTIVE_HIGH>; enable-active-high; vin-supply = <&reg_3v3>; }; reg_hdmi: regulator-hdmi { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_hdmi>; compatible = "regulator-fixed"; regulator-name = "hdmi-supply"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>; enable-active-high; vin-supply = <&reg_3v3>; }; reg_uart3: regulator-uart3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_uart3>; compatible = "regulator-fixed"; regulator-name = "uart3-supply"; gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>; enable-active-high; regulator-always-on; vin-supply = <&reg_3v3>; }; reg_1v8: regulator-1v8 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_1v8>; compatible = "regulator-fixed"; regulator-name = "1v8-supply"; gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>; enable-active-high; regulator-always-on; 
vin-supply = <&reg_3v3>; }; reg_pcie: regulator-pcie { compatible = "regulator-fixed"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_pcie>; regulator-name = "mpcie_3v3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>; enable-active-high; }; reg_mipi: regulator-mipi { compatible = "regulator-fixed"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_mipi>; regulator-name = "mipi_pwr_en"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>; enable-active-high; }; sound { compatible = "fsl,imx-audio-wm8962"; model = "wm8962-audio"; ssi-controller = <&ssi2>; audio-codec = <&wm8962>; audio-routing = "Headphone Jack", "HPOUTL", "Headphone Jack", "HPOUTR", "Ext Spk", "SPKOUTL", "Ext Spk", "SPKOUTR", "AMIC", "MICBIAS", "IN3R", "AMIC"; mux-int-port = <2>; mux-ext-port = <4>; }; }; &audmux { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_audmux>; status = "okay"; }; &ecspi1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_ecspi1>; cs-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>; status = "disabled"; }; &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; phy-mode = "rgmii-id"; phy-reset-duration = <10>; phy-reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; phy-supply = <&reg_enet>; interrupt-parent = <&gpio1>; interrupts = <25 IRQ_TYPE_EDGE_FALLING>; status = "okay"; }; &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1>; clock-frequency = <400000>; status = "okay"; wm8962: audio-codec@1a { compatible = "wlf,wm8962"; reg = <0x1a>; clocks = <&clks IMX6QDL_CLK_CKO>; DCVDD-supply = <&reg_audio>; DBVDD-supply = <&reg_audio>; AVDD-supply = <&reg_audio>; CPVDD-supply = <&reg_audio>; MICVDD-supply = <&reg_audio>; PLLVDD-supply = <&reg_audio>; SPKVDD1-supply = <&reg_audio>; SPKVDD2-supply = <&reg_audio>; gpio-cfg = < 0x0000 /* 0:Default */ 0x0000 /* 1:Default */ 0x0000 /* 2:FN_DMICCLK */ 0x0000 /* 3:Default */ 0x0000 /* 4:FN_DMICCDAT */ 0x0000 /* 5:Default */ >; }; }; &i2c3 { ov5640: camera@10 { compatible = "ovti,ov5640"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_ov5640>; reg = <0x10>; clocks = <&clks IMX6QDL_CLK_CKO>; clock-names = "xclk"; DOVDD-supply = <&reg_mipi>; AVDD-supply = <&reg_mipi>; DVDD-supply = <&reg_mipi>; reset-gpios = <&gpio3 26 GPIO_ACTIVE_LOW>; powerdown-gpios = <&gpio3 27 GPIO_ACTIVE_HIGH>; port { ov5640_to_mipi_csi2: endpoint { remote-endpoint = <&mipi_csi2_in>; clock-lanes = <0>; data-lanes = <1 2>; }; }; }; pcf8575: gpio@20 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pcf8574>; compatible = "nxp,pcf8575"; reg = <0x20>; interrupt-parent = <&gpio6>; interrupts = <31 IRQ_TYPE_EDGE_FALLING>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; lines-initial-states = <0x0710>; wakeup-source; }; }; &ipu1_csi1_from_mipi_vc1 { clock-lanes = <0>; data-lanes = <1 2>; }; &mipi_csi { status = "okay"; port@0 { reg = <0>; mipi_csi2_in: endpoint { remote-endpoint = <&ov5640_to_mipi_csi2>; clock-lanes = <0>; data-lanes = <1 2>; }; }; }; &pcie { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pcie>; reset-gpio = <&gpio1 9 GPIO_ACTIVE_LOW>; vpcie-supply = <&reg_pcie>; status = "okay"; }; &pwm3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pwm3>; }; &snvs_pwrkey { status = "okay"; }; &ssi2 { status = "okay"; }; &uart3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart3>; status = "okay"; }; &usbh1 { vbus-supply = <&reg_usb_h1_vbus>; status = "okay"; }; &usbotg { vbus-supply = <&reg_usb_otg_vbus>; pinctrl-names = 
"default"; pinctrl-0 = <&pinctrl_usbotg>; disable-over-current; dr_mode = "otg"; status = "okay"; }; &usdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; pinctrl-1 = <&pinctrl_usdhc2_100mhz>; pinctrl-2 = <&pinctrl_usdhc2_200mhz>; vmmc-supply = <&reg_3v3>; no-1-8-v; keep-power-in-suspend; cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; status = "okay"; }; &iomuxc { pinctrl_audmux: audmuxgrp { fsl,pins = < MX6QDL_PAD_DISP0_DAT20__AUD4_TXC 0x130b0 MX6QDL_PAD_DISP0_DAT21__AUD4_TXD 0x110b0 MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0 MX6QDL_PAD_DISP0_DAT23__AUD4_RXD 0x130b0 >; }; pinctrl_ecspi1: ecspi1grp { fsl,pins = < MX6QDL_PAD_KEY_COL0__ECSPI1_SCLK 0x100b1 MX6QDL_PAD_KEY_ROW0__ECSPI1_MOSI 0x100b1 MX6QDL_PAD_KEY_COL1__ECSPI1_MISO 0x100b1 MX6QDL_PAD_KEY_ROW1__GPIO4_IO09 0x1b0b0 >; }; pinctrl_enet: enetgrp { fsl,pins = < MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b8b0 MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030 MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030 MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030 MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030 MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030 MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0 MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030 MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8 MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030 MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x13030 MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x13030 MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030 MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030 MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x13030 MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0 /* ENET_INT */ MX6QDL_PAD_ENET_RX_ER__GPIO1_IO24 0x1b0b0 /* ETHR_nRST */ >; }; pinctrl_i2c1: i2c1grp { fsl,pins = < MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1 MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1 >; }; pinctrl_led0: led0grp { fsl,pins = < MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x1b0b0 >; }; pinctrl_ov5640: ov5640grp { fsl,pins = < MX6QDL_PAD_EIM_D26__GPIO3_IO26 0x1b0b1 MX6QDL_PAD_EIM_D27__GPIO3_IO27 0x1b0b1 >; }; pinctrl_pcf8574: pcf8575grp { fsl,pins = < MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x1b0b0 >; }; pinctrl_pcie: pciegrp { fsl,pins = < MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0 MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b0 >; }; pinctrl_pwm3: pwm3grp { fsl,pins = < MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1 >; }; pinctrl_reg_1v8: reg1v8grp { fsl,pins = < MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x1b0b0 >; }; pinctrl_reg_3v3: reg3v3grp { fsl,pins = < MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1b0b0 >; }; pinctrl_reg_audio: reg-audiogrp { fsl,pins = < MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x1b0b0 >; }; pinctrl_reg_enet: reg-enetgrp { fsl,pins = < MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x1b0b0 >; }; pinctrl_reg_hdmi: reg-hdmigrp { fsl,pins = < MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x1b0b0 >; }; pinctrl_reg_mipi: reg-mipigrp { fsl,pins = <MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x1b0b1>; }; pinctrl_reg_pcie: reg-pciegrp { fsl,pins = < MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0 >; }; pinctrl_reg_uart3: reguart3grp { fsl,pins = < MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b0 >; }; pinctrl_reg_usb_h1_vbus: usbh1grp { fsl,pins = < MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b0b0 >; }; pinctrl_reg_usb_otg: reg-usb-otggrp { fsl,pins = < MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x1b0b0 >; }; pinctrl_uart3: uart3grp { fsl,pins = < MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1 MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1 MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1 MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1 >; }; pinctrl_usbotg: usbotggrp { fsl,pins = < MX6QDL_PAD_GPIO_1__USB_OTG_ID 0xd17059 >; }; pinctrl_usdhc2: usdhc2grp { fsl,pins = < MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */ 
MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17069 MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10069 MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17069 MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17069 MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17069 MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17069 >; }; pinctrl_usdhc2_100mhz: h100-usdhc2-100mhzgrp { fsl,pins = < MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9 MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9 MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9 MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9 MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9 MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170b9 >; }; pinctrl_usdhc2_200mhz: h100-usdhc2-200mhzgrp { fsl,pins = < MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9 MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9 MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9 MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9 MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9 MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170f9 >; }; };
/* * Copyright (c) 2017, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef MLX5_IB_CMD_H #define MLX5_IB_CMD_H #include "mlx5_ib.h" #include <linux/kernel.h> #include <linux/mlx5/driver.h> int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev); int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point, void *out); int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid); void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid); void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid); int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid); int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn, u16 uid); void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn, u16 uid); int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn, u16 uid); int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn, u16 uid); int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid); int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid); int mlx5_cmd_mad_ifc(struct mlx5_ib_dev *dev, const void *inb, void *outb, u16 opmod, u8 port); int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid); int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid); int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct, char *out_vuid); #endif /* MLX5_IB_CMD_H */
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2011 Florian Tobias Schandinat <[email protected]> */ /* * driver for Chrontel CH7301 DVI Transmitter */ #include <linux/slab.h> #include "via_aux.h" static const char *name = "CH7301 DVI Transmitter"; static void probe(struct via_aux_bus *bus, u8 addr) { struct via_aux_drv drv = { .bus = bus, .addr = addr, .name = name}; u8 tmp; if (!via_aux_read(&drv, 0x4B, &tmp, 1) || tmp != 0x17) return; printk(KERN_INFO "viafb: Found %s at address 0x%x\n", name, addr); via_aux_add(&drv); } void via_aux_ch7301_probe(struct via_aux_bus *bus) { probe(bus, 0x75); probe(bus, 0x76); }
/* SPDX-License-Identifier: MIT */ /* * Copyright © 2023 Intel Corporation */ #ifndef _XE_GT_SRIOV_PRINTK_H_ #define _XE_GT_SRIOV_PRINTK_H_ #include "xe_gt_printk.h" #include "xe_sriov_printk.h" #define __xe_gt_sriov_printk(gt, _level, fmt, ...) \ xe_gt_printk((gt), _level, "%s" fmt, xe_sriov_printk_prefix(gt_to_xe(gt)), ##__VA_ARGS__) #define xe_gt_sriov_err(_gt, _fmt, ...) \ __xe_gt_sriov_printk(_gt, err, _fmt, ##__VA_ARGS__) #define xe_gt_sriov_notice(_gt, _fmt, ...) \ __xe_gt_sriov_printk(_gt, notice, _fmt, ##__VA_ARGS__) #define xe_gt_sriov_info(_gt, _fmt, ...) \ __xe_gt_sriov_printk(_gt, info, _fmt, ##__VA_ARGS__) #define xe_gt_sriov_dbg(_gt, _fmt, ...) \ __xe_gt_sriov_printk(_gt, dbg, _fmt, ##__VA_ARGS__) /* for low level noisy debug messages */ #ifdef CONFIG_DRM_XE_DEBUG_SRIOV #define xe_gt_sriov_dbg_verbose(_gt, _fmt, ...) xe_gt_sriov_dbg(_gt, _fmt, ##__VA_ARGS__) #else #define xe_gt_sriov_dbg_verbose(_gt, _fmt, ...) typecheck(struct xe_gt *, (_gt)) #endif #endif
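/*
 * Illustrative sketch, not part of the original header: how the wrappers
 * above are meant to be used from GT-level SR-IOV code. The demo_ function
 * and its vfid/err parameters are hypothetical; each macro expands to
 * xe_gt_printk() with the SR-IOV prefix prepended to the format string.
 */
static void demo_report_vf_provisioning(struct xe_gt *gt, unsigned int vfid, int err)
{
	if (err)
		xe_gt_sriov_err(gt, "VF%u provisioning failed (%d)\n", vfid, err);
	else
		xe_gt_sriov_dbg(gt, "VF%u provisioned\n", vfid);
}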
/* * Copyright 2009 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <drm/display/drm_dp_helper.h> #include "nouveau_drv.h" #include "nouveau_connector.h" #include "nouveau_encoder.h" #include "nouveau_crtc.h" #include <nvif/if0011.h> MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)"); static int nouveau_mst = 1; module_param_named(mst, nouveau_mst, int, 0400); static bool nouveau_dp_has_sink_count(struct drm_connector *connector, struct nouveau_encoder *outp) { return drm_dp_read_sink_count_cap(connector, outp->dp.dpcd, &outp->dp.desc); } static bool nouveau_dp_probe_lttpr(struct nouveau_encoder *outp) { u8 rev, size = sizeof(rev); int ret; ret = nvif_outp_dp_aux_xfer(&outp->outp, DP_AUX_NATIVE_READ, &size, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, &rev); if (ret || size < sizeof(rev) || rev < 0x14) return false; return true; } static enum drm_connector_status nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector, struct nouveau_encoder *outp) { struct drm_connector *connector = &nv_connector->base; struct drm_dp_aux *aux = &nv_connector->aux; struct nv50_mstm *mstm = NULL; enum drm_connector_status status = connector_status_disconnected; int ret; u8 *dpcd = outp->dp.dpcd; outp->dp.lttpr.nr = 0; outp->dp.rate_nr = 0; outp->dp.link_nr = 0; outp->dp.link_bw = 0; if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && nouveau_dp_probe_lttpr(outp) && !drm_dp_read_dpcd_caps(aux, dpcd) && !drm_dp_read_lttpr_common_caps(aux, dpcd, outp->dp.lttpr.caps)) { int nr = drm_dp_lttpr_count(outp->dp.lttpr.caps); if (nr) { drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE, DP_PHY_REPEATER_MODE_TRANSPARENT); if (nr > 0) { ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE, DP_PHY_REPEATER_MODE_NON_TRANSPARENT); if (ret != 1) { drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE, DP_PHY_REPEATER_MODE_TRANSPARENT); } else { outp->dp.lttpr.nr = nr; } } } } ret = drm_dp_read_dpcd_caps(aux, dpcd); if (ret < 0) goto out; outp->dp.link_nr = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; if (outp->dcb->dpconf.link_nr < outp->dp.link_nr) outp->dp.link_nr = outp->dcb->dpconf.link_nr; if (outp->dp.lttpr.nr) { int links = drm_dp_lttpr_max_lane_count(outp->dp.lttpr.caps); if (links && links < outp->dp.link_nr) outp->dp.link_nr = links; } if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) { __le16 rates[DP_MAX_SUPPORTED_RATES]; ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, rates, sizeof(rates)); if 
(ret == sizeof(rates)) { for (int i = 0; i < ARRAY_SIZE(rates); i++) { u32 rate = (le16_to_cpu(rates[i]) * 200) / 10; int j; if (!rate) break; for (j = 0; j < outp->dp.rate_nr; j++) { if (rate > outp->dp.rate[j].rate) { for (int k = outp->dp.rate_nr; k > j; k--) outp->dp.rate[k] = outp->dp.rate[k - 1]; break; } } outp->dp.rate[j].dpcd = i; outp->dp.rate[j].rate = rate; outp->dp.rate_nr++; } } } if (!outp->dp.rate_nr) { const u32 rates[] = { 810000, 540000, 270000, 162000 }; u32 max_rate = dpcd[DP_MAX_LINK_RATE] * 27000; if (outp->dp.lttpr.nr) { int rate = drm_dp_lttpr_max_link_rate(outp->dp.lttpr.caps); if (rate && rate < max_rate) max_rate = rate; } max_rate = min_t(int, max_rate, outp->dcb->dpconf.link_bw); for (int i = 0; i < ARRAY_SIZE(rates); i++) { if (rates[i] <= max_rate) { outp->dp.rate[outp->dp.rate_nr].dpcd = -1; outp->dp.rate[outp->dp.rate_nr].rate = rates[i]; outp->dp.rate_nr++; } } if (WARN_ON(!outp->dp.rate_nr)) goto out; } ret = nvif_outp_dp_rates(&outp->outp, outp->dp.rate, outp->dp.rate_nr); if (ret) goto out; for (int i = 0; i < outp->dp.rate_nr; i++) { u32 link_bw = outp->dp.rate[i].rate; if (link_bw > outp->dp.link_bw) outp->dp.link_bw = link_bw; } ret = drm_dp_read_desc(aux, &outp->dp.desc, drm_dp_is_branch(dpcd)); if (ret < 0) goto out; if (nouveau_mst) { mstm = outp->dp.mstm; if (mstm) mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST; } if (nouveau_dp_has_sink_count(connector, outp)) { ret = drm_dp_read_sink_count(aux); if (ret < 0) goto out; outp->dp.sink_count = ret; /* * Dongle connected, but no display. Don't bother reading * downstream port info */ if (!outp->dp.sink_count) return connector_status_disconnected; } ret = drm_dp_read_downstream_info(aux, dpcd, outp->dp.downstream_ports); if (ret < 0) goto out; status = connector_status_connected; out: if (status != connector_status_connected) { /* Clear any cached info */ outp->dp.sink_count = 0; } return status; } int nouveau_dp_detect(struct nouveau_connector *nv_connector, struct nouveau_encoder *nv_encoder) { struct drm_device *dev = nv_encoder->base.base.dev; struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector = &nv_connector->base; struct nv50_mstm *mstm = nv_encoder->dp.mstm; enum drm_connector_status status; u8 *dpcd = nv_encoder->dp.dpcd; int ret = NOUVEAU_DP_NONE, hpd; /* eDP ports don't support hotplugging - so there's no point in probing eDP ports unless we * haven't probed them once before. */ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { if (connector->status == connector_status_connected) return NOUVEAU_DP_SST; else if (connector->status == connector_status_disconnected) return NOUVEAU_DP_NONE; } // Ensure that the aux bus is enabled for probing drm_dp_dpcd_set_powered(&nv_connector->aux, true); mutex_lock(&nv_encoder->dp.hpd_irq_lock); if (mstm) { /* If we're not ready to handle MST state changes yet, just * report the last status of the connector. We'll reprobe it * once we've resumed. 
*/ if (mstm->suspended) { if (mstm->is_mst) ret = NOUVEAU_DP_MST; else if (connector->status == connector_status_connected) ret = NOUVEAU_DP_SST; goto out; } } hpd = nvif_outp_detect(&nv_encoder->outp); if (hpd == NOT_PRESENT) { nvif_outp_dp_aux_pwr(&nv_encoder->outp, false); goto out; } nvif_outp_dp_aux_pwr(&nv_encoder->outp, true); status = nouveau_dp_probe_dpcd(nv_connector, nv_encoder); if (status == connector_status_disconnected) { nvif_outp_dp_aux_pwr(&nv_encoder->outp, false); goto out; } /* If we're in MST mode, we're done here */ if (mstm && mstm->can_mst && mstm->is_mst) { ret = NOUVEAU_DP_MST; goto out; } NV_DEBUG(drm, "sink dpcd version: 0x%02x\n", dpcd[DP_DPCD_REV]); for (int i = 0; i < nv_encoder->dp.rate_nr; i++) NV_DEBUG(drm, "sink rate %d: %d\n", i, nv_encoder->dp.rate[i].rate); NV_DEBUG(drm, "encoder: %dx%d\n", nv_encoder->dcb->dpconf.link_nr, nv_encoder->dcb->dpconf.link_bw); NV_DEBUG(drm, "maximum: %dx%d\n", nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); if (mstm && mstm->can_mst) { ret = nv50_mstm_detect(nv_encoder); if (ret == 1) { ret = NOUVEAU_DP_MST; goto out; } else if (ret != 0) { nvif_outp_dp_aux_pwr(&nv_encoder->outp, false); goto out; } } ret = NOUVEAU_DP_SST; out: if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST) nv50_mstm_remove(mstm); /* GSP doesn't like when we try to do aux transactions on a port it considers disconnected, * and since we don't really have a usecase for that anyway - just disable the aux bus here * if we've decided the connector is disconnected */ if (ret == NOUVEAU_DP_NONE) drm_dp_dpcd_set_powered(&nv_connector->aux, false); mutex_unlock(&nv_encoder->dp.hpd_irq_lock); return ret; } void nouveau_dp_power_down(struct nouveau_encoder *outp) { struct drm_dp_aux *aux = &outp->conn->aux; int ret; u8 pwr; mutex_lock(&outp->dp.hpd_irq_lock); ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr); if (ret == 1) { pwr &= ~DP_SET_POWER_MASK; pwr |= DP_SET_POWER_D3; drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr); } outp->dp.lt.nr = 0; mutex_unlock(&outp->dp.hpd_irq_lock); } static bool nouveau_dp_train_link(struct nouveau_encoder *outp, bool retrain) { struct drm_dp_aux *aux = &outp->conn->aux; bool post_lt = false; int ret, retries = 0; if ( (outp->dp.dpcd[DP_MAX_LANE_COUNT] & 0x20) && !(outp->dp.dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED)) post_lt = true; retry: ret = nvif_outp_dp_train(&outp->outp, outp->dp.dpcd, outp->dp.lttpr.nr, outp->dp.lt.nr, outp->dp.lt.bw, outp->dp.lt.mst, post_lt, retrain); if (ret) return false; if (post_lt) { u8 stat[DP_LINK_STATUS_SIZE]; u8 prev[2]; u8 time = 0, adjusts = 0, tmp; ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat); if (ret) return false; for (;;) { if (!drm_dp_channel_eq_ok(stat, outp->dp.lt.nr)) { ret = 1; break; } if (!(stat[2] & 0x02)) break; msleep(5); time += 5; memcpy(prev, &stat[4], sizeof(prev)); ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat); if (ret) break; if (!memcmp(prev, &stat[4], sizeof(prev))) { if (time > 200) break; } else { u8 pe[4], vs[4]; if (adjusts++ == 6) break; for (int i = 0; i < outp->dp.lt.nr; i++) { pe[i] = drm_dp_get_adjust_request_pre_emphasis(stat, i) >> DP_TRAIN_PRE_EMPHASIS_SHIFT; vs[i] = drm_dp_get_adjust_request_voltage(stat, i) >> DP_TRAIN_VOLTAGE_SWING_SHIFT; } ret = nvif_outp_dp_drive(&outp->outp, outp->dp.lt.nr, pe, vs); if (ret) break; time = 0; } } if (drm_dp_dpcd_readb(aux, DP_LANE_COUNT_SET, &tmp) == 1) { tmp &= ~0x20; drm_dp_dpcd_writeb(aux, DP_LANE_COUNT_SET, tmp); } } if (ret == 1 && retries++ < 3) goto retry; return ret == 0; } 
bool nouveau_dp_train(struct nouveau_encoder *outp, bool mst, u32 khz, u8 bpc) { struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev); struct drm_dp_aux *aux = &outp->conn->aux; u32 min_rate; u8 pwr; bool ret = true; if (mst) min_rate = outp->dp.link_nr * outp->dp.rate[0].rate; else min_rate = DIV_ROUND_UP(khz * bpc * 3, 8); NV_DEBUG(drm, "%s link training (mst:%d min_rate:%d)\n", outp->base.base.name, mst, min_rate); mutex_lock(&outp->dp.hpd_irq_lock); if (drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr) == 1) { if ((pwr & DP_SET_POWER_MASK) != DP_SET_POWER_D0) { pwr &= ~DP_SET_POWER_MASK; pwr |= DP_SET_POWER_D0; drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr); } } for (int nr = outp->dp.link_nr; nr; nr >>= 1) { for (int rate = 0; rate < outp->dp.rate_nr; rate++) { if (outp->dp.rate[rate].rate * nr >= min_rate) { outp->dp.lt.nr = nr; outp->dp.lt.bw = outp->dp.rate[rate].rate; outp->dp.lt.mst = mst; if (nouveau_dp_train_link(outp, false)) goto done; } } } ret = false; done: mutex_unlock(&outp->dp.hpd_irq_lock); return ret; } static bool nouveau_dp_link_check_locked(struct nouveau_encoder *outp) { u8 link_status[DP_LINK_STATUS_SIZE]; if (!outp || !outp->dp.lt.nr) return true; if (drm_dp_dpcd_read_phy_link_status(&outp->conn->aux, DP_PHY_DPRX, link_status) < 0) return false; if (drm_dp_channel_eq_ok(link_status, outp->dp.lt.nr)) return true; return nouveau_dp_train_link(outp, true); } bool nouveau_dp_link_check(struct nouveau_connector *nv_connector) { struct nouveau_encoder *outp = nv_connector->dp_encoder; bool link_ok = true; if (outp) { mutex_lock(&outp->dp.hpd_irq_lock); if (outp->dp.lt.nr) link_ok = nouveau_dp_link_check_locked(outp); mutex_unlock(&outp->dp.hpd_irq_lock); } return link_ok; } void nouveau_dp_irq(struct work_struct *work) { struct nouveau_connector *nv_connector = container_of(work, typeof(*nv_connector), irq_work); struct drm_connector *connector = &nv_connector->base; struct nouveau_encoder *outp = find_encoder(connector, DCB_OUTPUT_DP); struct nouveau_drm *drm; struct nv50_mstm *mstm; u64 hpd = 0; int ret; if (!outp) return; /* only dereference outp once we know find_encoder() succeeded */ drm = nouveau_drm(outp->base.base.dev); mstm = outp->dp.mstm; NV_DEBUG(drm, "service %s\n", connector->name); mutex_lock(&outp->dp.hpd_irq_lock); if (mstm && mstm->is_mst) { if (!nv50_mstm_service(drm, nv_connector, mstm)) hpd |= NVIF_CONN_EVENT_V0_UNPLUG; } else { drm_dp_cec_irq(&nv_connector->aux); if (nouveau_dp_has_sink_count(connector, outp)) { ret = drm_dp_read_sink_count(&nv_connector->aux); if (ret != outp->dp.sink_count) hpd |= NVIF_CONN_EVENT_V0_PLUG; if (ret >= 0) outp->dp.sink_count = ret; } } mutex_unlock(&outp->dp.hpd_irq_lock); nouveau_connector_hpd(nv_connector, NVIF_CONN_EVENT_V0_IRQ | hpd); } /* TODO: * - Validate against the DP caps advertised by the GPU (we don't check these * yet) */ enum drm_mode_status nv50_dp_mode_valid(struct nouveau_encoder *outp, const struct drm_display_mode *mode, unsigned *out_clock) { const unsigned int min_clock = 25000; unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock; /* Check with the minimum bpc always, so we can advertise better modes. * In particular not doing this causes modes to be dropped on HDR * displays as we might check with a bpc of 16 even.
*/ const u8 bpp = 6 * 3; if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace) return MODE_NO_INTERLACE; if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) clock *= 2; max_rate = outp->dp.link_nr * outp->dp.link_bw; mode_rate = DIV_ROUND_UP(clock * bpp, 8); if (mode_rate > max_rate) return MODE_CLOCK_HIGH; ds_max_dotclock = drm_dp_downstream_max_dotclock(outp->dp.dpcd, outp->dp.downstream_ports); if (ds_max_dotclock && clock > ds_max_dotclock) return MODE_CLOCK_HIGH; if (clock < min_clock) return MODE_CLOCK_LOW; if (out_clock) *out_clock = clock; return MODE_OK; }
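/*
 * Illustrative sketch, not from the original file: roughly how a connector
 * .mode_valid implementation could delegate to nv50_dp_mode_valid() above.
 * The demo_ name and the lookup through nv_connector->dp_encoder are
 * assumptions for illustration only.
 */
static enum drm_mode_status
demo_dp_connector_mode_valid(struct nouveau_connector *nv_connector,
			     const struct drm_display_mode *mode)
{
	struct nouveau_encoder *outp = nv_connector->dp_encoder;
	unsigned int clock;

	if (!outp)
		return MODE_ERROR;

	/* Rejects interlace without HW support and modes above link bandwidth. */
	return nv50_dp_mode_valid(outp, mode, &clock);
}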
// SPDX-License-Identifier: GPL-2.0 /* * arch/sh/kernel/machvec.c * * The SuperH machine vector setup handlers, yanked from setup.c * * Copyright (C) 1999 Niibe Yutaka * Copyright (C) 2002 - 2007 Paul Mundt */ #include <linux/init.h> #include <linux/string.h> #include <asm/machvec.h> #include <asm/sections.h> #include <asm/addrspace.h> #include <asm/setup.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/processor.h> #define MV_NAME_SIZE 32 #define for_each_mv(mv) \ for ((mv) = (struct sh_machine_vector *)__machvec_start; \ (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \ (mv)++) static struct sh_machine_vector * __init get_mv_byname(const char *name) { struct sh_machine_vector *mv; for_each_mv(mv) if (strcasecmp(name, mv->mv_name) == 0) return mv; return NULL; } static unsigned int __initdata machvec_selected; static int __init early_parse_mv(char *from) { char mv_name[MV_NAME_SIZE] = ""; char *mv_end; char *mv_comma; int mv_len; struct sh_machine_vector *mvp; mv_end = strchr(from, ' '); if (mv_end == NULL) mv_end = from + strlen(from); mv_comma = strchr(from, ','); mv_len = mv_end - from; if (mv_len > (MV_NAME_SIZE-1)) mv_len = MV_NAME_SIZE-1; memcpy(mv_name, from, mv_len); mv_name[mv_len] = '\0'; from = mv_end; machvec_selected = 1; /* Boot with the generic vector */ if (strcmp(mv_name, "generic") == 0) return 0; mvp = get_mv_byname(mv_name); if (unlikely(!mvp)) { pr_info("Available vectors:\n\n\t'%s', ", sh_mv.mv_name); for_each_mv(mvp) pr_cont("'%s', ", mvp->mv_name); pr_cont("\n\n"); panic("Failed to select machvec '%s' -- halting.\n", mv_name); } else sh_mv = *mvp; return 0; } early_param("sh_mv", early_parse_mv); void __init sh_mv_setup(void) { /* * Only overload the machvec if one hasn't been selected on * the command line with sh_mv= */ if (!machvec_selected) { unsigned long machvec_size; machvec_size = ((unsigned long)__machvec_end - (unsigned long)__machvec_start); /* * Sanity check for machvec section alignment. Ensure * __initmv hasn't been misused. */ if (machvec_size % sizeof(struct sh_machine_vector)) panic("machvec misaligned, invalid __initmv use?"); /* * If the machvec hasn't been preselected, use the first * vector (usually the only one) from .machvec.init. */ if (machvec_size >= sizeof(struct sh_machine_vector)) sh_mv = *(struct sh_machine_vector *)__machvec_start; } pr_notice("Booting machvec: %s\n", get_system_type()); /* * Manually walk the vec, fill in anything that the board hasn't yet * by hand, wrapping to the generic implementation. */ #define mv_set(elem) do { \ if (!sh_mv.mv_##elem) \ sh_mv.mv_##elem = generic_##elem; \ } while (0) mv_set(irq_demux); mv_set(mode_pins); mv_set(mem_init); }
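/*
 * Illustrative sketch, not part of the original file: how a board would
 * normally provide the vector that sh_mv_setup()/early_parse_mv() above
 * discover. The "demoboard" names are hypothetical; __initmv is assumed to
 * place the structure in the .machvec.init section bounded by
 * __machvec_start/__machvec_end, and any hook left NULL is later filled in
 * with its generic_* fallback by mv_set().
 */
static int demoboard_irq_demux(int irq)
{
	return irq;	/* no board-specific IRQ demux in this sketch */
}

static struct sh_machine_vector mv_demoboard __initmv = {
	.mv_name	= "demoboard",
	.mv_irq_demux	= demoboard_irq_demux,
};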
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2021 Rafał Miłecki <[email protected]> */ #include <linux/bcm47xx_nvram.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/io.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/nvmem-provider.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #define NVRAM_MAGIC "FLSH" /** * struct brcm_nvram - driver state internal struct * * @dev: NVMEM device pointer * @nvmem_size: Size of the whole space available for NVRAM * @data: NVRAM data copy stored to avoid poking underlying flash controller * @data_len: NVRAM data size * @padding_byte: Padding value used to fill remaining space * @cells: Array of discovered NVMEM cells * @ncells: Number of elements in cells */ struct brcm_nvram { struct device *dev; size_t nvmem_size; uint8_t *data; size_t data_len; uint8_t padding_byte; struct nvmem_cell_info *cells; int ncells; }; struct brcm_nvram_header { char magic[4]; __le32 len; __le32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ __le32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ __le32 config_ncdl; /* ncdl values for memc */ }; static int brcm_nvram_read(void *context, unsigned int offset, void *val, size_t bytes) { struct brcm_nvram *priv = context; size_t to_copy; if (offset + bytes > priv->data_len) to_copy = max_t(ssize_t, (ssize_t)priv->data_len - offset, 0); else to_copy = bytes; memcpy(val, priv->data + offset, to_copy); memset((uint8_t *)val + to_copy, priv->padding_byte, bytes - to_copy); return 0; } static int brcm_nvram_copy_data(struct brcm_nvram *priv, struct platform_device *pdev) { struct resource *res; void __iomem *base; base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); priv->nvmem_size = resource_size(res); priv->padding_byte = readb(base + priv->nvmem_size - 1); for (priv->data_len = priv->nvmem_size; priv->data_len; priv->data_len--) { if (readb(base + priv->data_len - 1) != priv->padding_byte) break; } WARN(priv->data_len > SZ_128K, "Unexpected (big) NVRAM size: %zu B\n", priv->data_len); priv->data = devm_kzalloc(priv->dev, priv->data_len, GFP_KERNEL); if (!priv->data) return -ENOMEM; memcpy_fromio(priv->data, base, priv->data_len); bcm47xx_nvram_init_from_iomem(base, priv->data_len); return 0; } static int brcm_nvram_read_post_process_macaddr(void *context, const char *id, int index, unsigned int offset, void *buf, size_t bytes) { u8 mac[ETH_ALEN]; if (bytes != 3 * ETH_ALEN - 1) return -EINVAL; if (!mac_pton(buf, mac)) return -EINVAL; if (index) eth_addr_add(mac, index); ether_addr_copy(buf, mac); return 0; } static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data, size_t len) { struct device *dev = priv->dev; char *var, *value; uint8_t tmp; int idx; int err = 0; tmp = priv->data[len - 1]; priv->data[len - 1] = '\0'; priv->ncells = 0; for (var = data + sizeof(struct brcm_nvram_header); var < (char *)data + len && *var; var += strlen(var) + 1) { priv->ncells++; } priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL); if (!priv->cells) { err = -ENOMEM; goto out; } for (var = data + sizeof(struct brcm_nvram_header), idx = 0; var < (char *)data + len && *var; var = value + strlen(value) + 1, idx++) { char *eq, *name; eq = strchr(var, '='); if (!eq) break; *eq = '\0'; name = devm_kstrdup(dev, var, GFP_KERNEL); *eq = '='; if (!name) { err = -ENOMEM; goto out; } value = eq + 1; 
priv->cells[idx].name = name; priv->cells[idx].offset = value - (char *)data; priv->cells[idx].bytes = strlen(value); priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name); if (!strcmp(name, "et0macaddr") || !strcmp(name, "et1macaddr") || !strcmp(name, "et2macaddr")) { priv->cells[idx].raw_len = strlen(value); priv->cells[idx].bytes = ETH_ALEN; priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr; } } out: priv->data[len - 1] = tmp; return err; } static int brcm_nvram_parse(struct brcm_nvram *priv) { struct brcm_nvram_header *header = (struct brcm_nvram_header *)priv->data; struct device *dev = priv->dev; size_t len; int err; if (memcmp(header->magic, NVRAM_MAGIC, 4)) { dev_err(dev, "Invalid NVRAM magic\n"); return -EINVAL; } len = le32_to_cpu(header->len); if (len > priv->nvmem_size) { dev_err(dev, "NVRAM length (%zd) exceeds mapped size (%zd)\n", len, priv->nvmem_size); return -EINVAL; } err = brcm_nvram_add_cells(priv, priv->data, len); if (err) dev_err(dev, "Failed to add cells: %d\n", err); return 0; } static int brcm_nvram_probe(struct platform_device *pdev) { struct nvmem_config config = { .name = "brcm-nvram", .reg_read = brcm_nvram_read, }; struct device *dev = &pdev->dev; struct brcm_nvram *priv; int err; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = dev; err = brcm_nvram_copy_data(priv, pdev); if (err) return err; err = brcm_nvram_parse(priv); if (err) return err; config.dev = dev; config.cells = priv->cells; config.ncells = priv->ncells; config.priv = priv; config.size = priv->nvmem_size; return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config)); } static const struct of_device_id brcm_nvram_of_match_table[] = { { .compatible = "brcm,nvram", }, {}, }; static struct platform_driver brcm_nvram_driver = { .probe = brcm_nvram_probe, .driver = { .name = "brcm_nvram", .of_match_table = brcm_nvram_of_match_table, }, }; static int __init brcm_nvram_init(void) { return platform_driver_register(&brcm_nvram_driver); } subsys_initcall_sync(brcm_nvram_init); MODULE_AUTHOR("Rafał Miłecki"); MODULE_DESCRIPTION("Broadcom I/O-mapped NVRAM support driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(of, brcm_nvram_of_match_table);
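/*
 * Illustrative sketch, not part of the driver above: one way another driver
 * could consume a cell exposed by this NVMEM provider through the generic
 * consumer API. The cell name "et0macaddr" comes from the parsing code above
 * (its read_post_process hook converts the textual MAC to 6 raw bytes); the
 * demo_ function itself is hypothetical.
 */
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int demo_read_et0macaddr(struct device *dev, u8 mac[ETH_ALEN])
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = devm_nvmem_cell_get(dev, "et0macaddr");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len == ETH_ALEN)
		ether_addr_copy(mac, buf);
	kfree(buf);

	return len == ETH_ALEN ? 0 : -EINVAL;
}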
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) * * Copyright (C) 2022 Renesas Electronics Corp. */ #ifndef __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ #define __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ #include <dt-bindings/clock/renesas-cpg-mssr.h> /* R9A07G054 CPG Core Clocks */ #define R9A07G054_CLK_I 0 #define R9A07G054_CLK_I2 1 #define R9A07G054_CLK_G 2 #define R9A07G054_CLK_S0 3 #define R9A07G054_CLK_S1 4 #define R9A07G054_CLK_SPI0 5 #define R9A07G054_CLK_SPI1 6 #define R9A07G054_CLK_SD0 7 #define R9A07G054_CLK_SD1 8 #define R9A07G054_CLK_M0 9 #define R9A07G054_CLK_M1 10 #define R9A07G054_CLK_M2 11 #define R9A07G054_CLK_M3 12 #define R9A07G054_CLK_M4 13 #define R9A07G054_CLK_HP 14 #define R9A07G054_CLK_TSU 15 #define R9A07G054_CLK_ZT 16 #define R9A07G054_CLK_P0 17 #define R9A07G054_CLK_P1 18 #define R9A07G054_CLK_P2 19 #define R9A07G054_CLK_AT 20 #define R9A07G054_OSCCLK 21 #define R9A07G054_CLK_P0_DIV2 22 #define R9A07G054_CLK_DRP_M 23 #define R9A07G054_CLK_DRP_D 24 #define R9A07G054_CLK_DRP_A 25 /* R9A07G054 Module Clocks */ #define R9A07G054_CA55_SCLK 0 #define R9A07G054_CA55_PCLK 1 #define R9A07G054_CA55_ATCLK 2 #define R9A07G054_CA55_GICCLK 3 #define R9A07G054_CA55_PERICLK 4 #define R9A07G054_CA55_ACLK 5 #define R9A07G054_CA55_TSCLK 6 #define R9A07G054_GIC600_GICCLK 7 #define R9A07G054_IA55_CLK 8 #define R9A07G054_IA55_PCLK 9 #define R9A07G054_MHU_PCLK 10 #define R9A07G054_SYC_CNT_CLK 11 #define R9A07G054_DMAC_ACLK 12 #define R9A07G054_DMAC_PCLK 13 #define R9A07G054_OSTM0_PCLK 14 #define R9A07G054_OSTM1_PCLK 15 #define R9A07G054_OSTM2_PCLK 16 #define R9A07G054_MTU_X_MCK_MTU3 17 #define R9A07G054_POE3_CLKM_POE 18 #define R9A07G054_GPT_PCLK 19 #define R9A07G054_POEG_A_CLKP 20 #define R9A07G054_POEG_B_CLKP 21 #define R9A07G054_POEG_C_CLKP 22 #define R9A07G054_POEG_D_CLKP 23 #define R9A07G054_WDT0_PCLK 24 #define R9A07G054_WDT0_CLK 25 #define R9A07G054_WDT1_PCLK 26 #define R9A07G054_WDT1_CLK 27 #define R9A07G054_WDT2_PCLK 28 #define R9A07G054_WDT2_CLK 29 #define R9A07G054_SPI_CLK2 30 #define R9A07G054_SPI_CLK 31 #define R9A07G054_SDHI0_IMCLK 32 #define R9A07G054_SDHI0_IMCLK2 33 #define R9A07G054_SDHI0_CLK_HS 34 #define R9A07G054_SDHI0_ACLK 35 #define R9A07G054_SDHI1_IMCLK 36 #define R9A07G054_SDHI1_IMCLK2 37 #define R9A07G054_SDHI1_CLK_HS 38 #define R9A07G054_SDHI1_ACLK 39 #define R9A07G054_GPU_CLK 40 #define R9A07G054_GPU_AXI_CLK 41 #define R9A07G054_GPU_ACE_CLK 42 #define R9A07G054_ISU_ACLK 43 #define R9A07G054_ISU_PCLK 44 #define R9A07G054_H264_CLK_A 45 #define R9A07G054_H264_CLK_P 46 #define R9A07G054_CRU_SYSCLK 47 #define R9A07G054_CRU_VCLK 48 #define R9A07G054_CRU_PCLK 49 #define R9A07G054_CRU_ACLK 50 #define R9A07G054_MIPI_DSI_PLLCLK 51 #define R9A07G054_MIPI_DSI_SYSCLK 52 #define R9A07G054_MIPI_DSI_ACLK 53 #define R9A07G054_MIPI_DSI_PCLK 54 #define R9A07G054_MIPI_DSI_VCLK 55 #define R9A07G054_MIPI_DSI_LPCLK 56 #define R9A07G054_LCDC_CLK_A 57 #define R9A07G054_LCDC_CLK_P 58 #define R9A07G054_LCDC_CLK_D 59 #define R9A07G054_SSI0_PCLK2 60 #define R9A07G054_SSI0_PCLK_SFR 61 #define R9A07G054_SSI1_PCLK2 62 #define R9A07G054_SSI1_PCLK_SFR 63 #define R9A07G054_SSI2_PCLK2 64 #define R9A07G054_SSI2_PCLK_SFR 65 #define R9A07G054_SSI3_PCLK2 66 #define R9A07G054_SSI3_PCLK_SFR 67 #define R9A07G054_SRC_CLKP 68 #define R9A07G054_USB_U2H0_HCLK 69 #define R9A07G054_USB_U2H1_HCLK 70 #define R9A07G054_USB_U2P_EXR_CPUCLK 71 #define R9A07G054_USB_PCLK 72 #define R9A07G054_ETH0_CLK_AXI 73 #define R9A07G054_ETH0_CLK_CHI 74 #define R9A07G054_ETH1_CLK_AXI 75 #define R9A07G054_ETH1_CLK_CHI 76 
#define R9A07G054_I2C0_PCLK 77 #define R9A07G054_I2C1_PCLK 78 #define R9A07G054_I2C2_PCLK 79 #define R9A07G054_I2C3_PCLK 80 #define R9A07G054_SCIF0_CLK_PCK 81 #define R9A07G054_SCIF1_CLK_PCK 82 #define R9A07G054_SCIF2_CLK_PCK 83 #define R9A07G054_SCIF3_CLK_PCK 84 #define R9A07G054_SCIF4_CLK_PCK 85 #define R9A07G054_SCI0_CLKP 86 #define R9A07G054_SCI1_CLKP 87 #define R9A07G054_IRDA_CLKP 88 #define R9A07G054_RSPI0_CLKB 89 #define R9A07G054_RSPI1_CLKB 90 #define R9A07G054_RSPI2_CLKB 91 #define R9A07G054_CANFD_PCLK 92 #define R9A07G054_GPIO_HCLK 93 #define R9A07G054_ADC_ADCLK 94 #define R9A07G054_ADC_PCLK 95 #define R9A07G054_TSU_PCLK 96 #define R9A07G054_STPAI_INITCLK 97 #define R9A07G054_STPAI_ACLK 98 #define R9A07G054_STPAI_MCLK 99 #define R9A07G054_STPAI_DCLKIN 100 #define R9A07G054_STPAI_ACLK_DRP 101 /* R9A07G054 Resets */ #define R9A07G054_CA55_RST_1_0 0 #define R9A07G054_CA55_RST_1_1 1 #define R9A07G054_CA55_RST_3_0 2 #define R9A07G054_CA55_RST_3_1 3 #define R9A07G054_CA55_RST_4 4 #define R9A07G054_CA55_RST_5 5 #define R9A07G054_CA55_RST_6 6 #define R9A07G054_CA55_RST_7 7 #define R9A07G054_CA55_RST_8 8 #define R9A07G054_CA55_RST_9 9 #define R9A07G054_CA55_RST_10 10 #define R9A07G054_CA55_RST_11 11 #define R9A07G054_CA55_RST_12 12 #define R9A07G054_GIC600_GICRESET_N 13 #define R9A07G054_GIC600_DBG_GICRESET_N 14 #define R9A07G054_IA55_RESETN 15 #define R9A07G054_MHU_RESETN 16 #define R9A07G054_DMAC_ARESETN 17 #define R9A07G054_DMAC_RST_ASYNC 18 #define R9A07G054_SYC_RESETN 19 #define R9A07G054_OSTM0_PRESETZ 20 #define R9A07G054_OSTM1_PRESETZ 21 #define R9A07G054_OSTM2_PRESETZ 22 #define R9A07G054_MTU_X_PRESET_MTU3 23 #define R9A07G054_POE3_RST_M_REG 24 #define R9A07G054_GPT_RST_C 25 #define R9A07G054_POEG_A_RST 26 #define R9A07G054_POEG_B_RST 27 #define R9A07G054_POEG_C_RST 28 #define R9A07G054_POEG_D_RST 29 #define R9A07G054_WDT0_PRESETN 30 #define R9A07G054_WDT1_PRESETN 31 #define R9A07G054_WDT2_PRESETN 32 #define R9A07G054_SPI_RST 33 #define R9A07G054_SDHI0_IXRST 34 #define R9A07G054_SDHI1_IXRST 35 #define R9A07G054_GPU_RESETN 36 #define R9A07G054_GPU_AXI_RESETN 37 #define R9A07G054_GPU_ACE_RESETN 38 #define R9A07G054_ISU_ARESETN 39 #define R9A07G054_ISU_PRESETN 40 #define R9A07G054_H264_X_RESET_VCP 41 #define R9A07G054_H264_CP_PRESET_P 42 #define R9A07G054_CRU_CMN_RSTB 43 #define R9A07G054_CRU_PRESETN 44 #define R9A07G054_CRU_ARESETN 45 #define R9A07G054_MIPI_DSI_CMN_RSTB 46 #define R9A07G054_MIPI_DSI_ARESET_N 47 #define R9A07G054_MIPI_DSI_PRESET_N 48 #define R9A07G054_LCDC_RESET_N 49 #define R9A07G054_SSI0_RST_M2_REG 50 #define R9A07G054_SSI1_RST_M2_REG 51 #define R9A07G054_SSI2_RST_M2_REG 52 #define R9A07G054_SSI3_RST_M2_REG 53 #define R9A07G054_SRC_RST 54 #define R9A07G054_USB_U2H0_HRESETN 55 #define R9A07G054_USB_U2H1_HRESETN 56 #define R9A07G054_USB_U2P_EXL_SYSRST 57 #define R9A07G054_USB_PRESETN 58 #define R9A07G054_ETH0_RST_HW_N 59 #define R9A07G054_ETH1_RST_HW_N 60 #define R9A07G054_I2C0_MRST 61 #define R9A07G054_I2C1_MRST 62 #define R9A07G054_I2C2_MRST 63 #define R9A07G054_I2C3_MRST 64 #define R9A07G054_SCIF0_RST_SYSTEM_N 65 #define R9A07G054_SCIF1_RST_SYSTEM_N 66 #define R9A07G054_SCIF2_RST_SYSTEM_N 67 #define R9A07G054_SCIF3_RST_SYSTEM_N 68 #define R9A07G054_SCIF4_RST_SYSTEM_N 69 #define R9A07G054_SCI0_RST 70 #define R9A07G054_SCI1_RST 71 #define R9A07G054_IRDA_RST 72 #define R9A07G054_RSPI0_RST 73 #define R9A07G054_RSPI1_RST 74 #define R9A07G054_RSPI2_RST 75 #define R9A07G054_CANFD_RSTP_N 76 #define R9A07G054_CANFD_RSTC_N 77 #define R9A07G054_GPIO_RSTN 78 #define 
R9A07G054_GPIO_PORT_RESETN 79 #define R9A07G054_GPIO_SPARE_RESETN 80 #define R9A07G054_ADC_PRESETN 81 #define R9A07G054_ADC_ADRST_N 82 #define R9A07G054_TSU_PRESETN 83 #define R9A07G054_STPAI_ARESETN 84 /* Power domain IDs. */ #define R9A07G054_PD_ALWAYS_ON 0 #define R9A07G054_PD_GIC 1 #define R9A07G054_PD_IA55 2 #define R9A07G054_PD_MHU 3 #define R9A07G054_PD_CORESIGHT 4 #define R9A07G054_PD_SYC 5 #define R9A07G054_PD_DMAC 6 #define R9A07G054_PD_GTM0 7 #define R9A07G054_PD_GTM1 8 #define R9A07G054_PD_GTM2 9 #define R9A07G054_PD_MTU 10 #define R9A07G054_PD_POE3 11 #define R9A07G054_PD_GPT 12 #define R9A07G054_PD_POEGA 13 #define R9A07G054_PD_POEGB 14 #define R9A07G054_PD_POEGC 15 #define R9A07G054_PD_POEGD 16 #define R9A07G054_PD_WDT0 17 #define R9A07G054_PD_WDT1 18 #define R9A07G054_PD_SPI 19 #define R9A07G054_PD_SDHI0 20 #define R9A07G054_PD_SDHI1 21 #define R9A07G054_PD_3DGE 22 #define R9A07G054_PD_ISU 23 #define R9A07G054_PD_VCPL4 24 #define R9A07G054_PD_CRU 25 #define R9A07G054_PD_MIPI_DSI 26 #define R9A07G054_PD_LCDC 27 #define R9A07G054_PD_SSI0 28 #define R9A07G054_PD_SSI1 29 #define R9A07G054_PD_SSI2 30 #define R9A07G054_PD_SSI3 31 #define R9A07G054_PD_SRC 32 #define R9A07G054_PD_USB0 33 #define R9A07G054_PD_USB1 34 #define R9A07G054_PD_USB_PHY 35 #define R9A07G054_PD_ETHER0 36 #define R9A07G054_PD_ETHER1 37 #define R9A07G054_PD_I2C0 38 #define R9A07G054_PD_I2C1 39 #define R9A07G054_PD_I2C2 40 #define R9A07G054_PD_I2C3 41 #define R9A07G054_PD_SCIF0 42 #define R9A07G054_PD_SCIF1 43 #define R9A07G054_PD_SCIF2 44 #define R9A07G054_PD_SCIF3 45 #define R9A07G054_PD_SCIF4 46 #define R9A07G054_PD_SCI0 47 #define R9A07G054_PD_SCI1 48 #define R9A07G054_PD_IRDA 49 #define R9A07G054_PD_RSPI0 50 #define R9A07G054_PD_RSPI1 51 #define R9A07G054_PD_RSPI2 52 #define R9A07G054_PD_CANFD 53 #define R9A07G054_PD_ADC 54 #define R9A07G054_PD_TSU 55 #endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */
/* * Copyright 2012 Nouveau Community * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Martin Peres */ #include <subdev/bios.h> #include <subdev/bios/bit.h> #include <subdev/bios/vmap.h> u32 nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { struct bit_entry bit_P; u32 vmap = 0; if (!bit_entry(bios, 'P', &bit_P)) { if (bit_P.version == 2) { vmap = nvbios_rd32(bios, bit_P.offset + 0x20); if (vmap) { *ver = nvbios_rd08(bios, vmap + 0); switch (*ver) { case 0x10: case 0x20: *hdr = nvbios_rd08(bios, vmap + 1); *cnt = nvbios_rd08(bios, vmap + 3); *len = nvbios_rd08(bios, vmap + 2); return vmap; default: break; } } } } return 0; } u32 nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_vmap *info) { u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len); memset(info, 0x00, sizeof(*info)); switch (!!vmap * *ver) { case 0x10: info->max0 = 0xff; info->max1 = 0xff; info->max2 = 0xff; break; case 0x20: info->max0 = nvbios_rd08(bios, vmap + 0x7); info->max1 = nvbios_rd08(bios, vmap + 0x8); if (*len >= 0xc) info->max2 = nvbios_rd08(bios, vmap + 0xc); else info->max2 = 0xff; break; } return vmap; } u32 nvbios_vmap_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len) { u8 hdr, cnt; u32 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len); if (vmap && idx < cnt) { vmap = vmap + hdr + (idx * *len); return vmap; } return 0; } u32 nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len, struct nvbios_vmap_entry *info) { u32 vmap = nvbios_vmap_entry(bios, idx, ver, len); memset(info, 0x00, sizeof(*info)); switch (!!vmap * *ver) { case 0x10: info->link = 0xff; info->min = nvbios_rd32(bios, vmap + 0x00); info->max = nvbios_rd32(bios, vmap + 0x04); info->arg[0] = nvbios_rd32(bios, vmap + 0x08); info->arg[1] = nvbios_rd32(bios, vmap + 0x0c); info->arg[2] = nvbios_rd32(bios, vmap + 0x10); break; case 0x20: info->mode = nvbios_rd08(bios, vmap + 0x00); info->link = nvbios_rd08(bios, vmap + 0x01); info->min = nvbios_rd32(bios, vmap + 0x02); info->max = nvbios_rd32(bios, vmap + 0x06); info->arg[0] = nvbios_rd32(bios, vmap + 0x0a); info->arg[1] = nvbios_rd32(bios, vmap + 0x0e); info->arg[2] = nvbios_rd32(bios, vmap + 0x12); info->arg[3] = nvbios_rd32(bios, vmap + 0x16); info->arg[4] = nvbios_rd32(bios, vmap + 0x1a); info->arg[5] = nvbios_rd32(bios, vmap + 0x1e); break; } return vmap; }
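/*
 * Illustrative sketch, not part of the original file: walking every voltage
 * map entry with the parsers defined above. The demo_ name is hypothetical;
 * the loop stops when nvbios_vmap_entry_parse() returns 0, i.e. when idx is
 * past the entry count (or no vmap table exists at all).
 */
static void demo_dump_vmap(struct nvkm_bios *bios)
{
	struct nvbios_vmap_entry info;
	u8 ver, len;
	int idx;

	for (idx = 0; nvbios_vmap_entry_parse(bios, idx, &ver, &len, &info); idx++)
		nvkm_debug(&bios->subdev, "vmap %02x: min %u max %u\n",
			   idx, info.min, info.max);
}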
// SPDX-License-Identifier: GPL-2.0-or-later /* * dlmmod.c * * standalone DLM module * * Copyright (C) 2004 Oracle. All rights reserved. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/spinlock.h> #include <linux/delay.h> #include "../cluster/heartbeat.h" #include "../cluster/nodemanager.h" #include "../cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmdomain.h" #include "dlmdebug.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) #include "../cluster/masklog.h" static void dlm_mle_node_down(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle, struct o2nm_node *node, int idx); static void dlm_mle_node_up(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle, struct o2nm_node *node, int idx); static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); static int dlm_do_assert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, void *nodemap, u32 flags); static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data); static inline int dlm_mle_equal(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle, const char *name, unsigned int namelen) { if (dlm != mle->dlm) return 0; if (namelen != mle->mnamelen || memcmp(name, mle->mname, namelen) != 0) return 0; return 1; } static struct kmem_cache *dlm_lockres_cache; static struct kmem_cache *dlm_lockname_cache; static struct kmem_cache *dlm_mle_cache; static void dlm_mle_release(struct kref *kref); static void dlm_init_mle(struct dlm_master_list_entry *mle, enum dlm_mle_type type, struct dlm_ctxt *dlm, struct dlm_lock_resource *res, const char *name, unsigned int namelen); static void dlm_put_mle(struct dlm_master_list_entry *mle); static void __dlm_put_mle(struct dlm_master_list_entry *mle); static int dlm_find_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry **mle, char *name, unsigned int namelen); static int dlm_do_master_request(struct dlm_lock_resource *res, struct dlm_master_list_entry *mle, int to); static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_master_list_entry *mle, int *blocked); static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_master_list_entry *mle, int blocked); static int dlm_add_migration_mle(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_master_list_entry *mle, struct dlm_master_list_entry **oldmle, const char *name, unsigned int namelen, u8 new_master, u8 master); static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 target); static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); int dlm_is_host_down(int errno) { switch (errno) { case -EBADF: case -ECONNREFUSED: case -ENOTCONN: case -ECONNRESET: case -EPIPE: case -EHOSTDOWN: case -EHOSTUNREACH: case -ETIMEDOUT: case -ECONNABORTED: case -ENETDOWN: case -ENETUNREACH: case -ENETRESET: case -ESHUTDOWN: case -ENOPROTOOPT: case -EINVAL: /* if returned from our tcp code, this means there is no socket */ return 1; } return 0; } /* * MASTER LIST FUNCTIONS */ /* * regarding master list entries and 
heartbeat callbacks: * * in order to avoid sleeping and allocation that occurs in * heartbeat, master list entries are simply attached to the * dlm's established heartbeat callbacks. the mle is attached * when it is created, and since the dlm->spinlock is held at * that time, any heartbeat event will be properly discovered * by the mle. the mle needs to be detached from the * dlm->mle_hb_events list as soon as heartbeat events are no * longer useful to the mle, and before the mle is freed. * * as a general rule, heartbeat events are no longer needed by * the mle once an "answer" regarding the lock master has been * received. */ static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) { assert_spin_locked(&dlm->spinlock); list_add_tail(&mle->hb_events, &dlm->mle_hb_events); } static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) { if (!list_empty(&mle->hb_events)) list_del_init(&mle->hb_events); } static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) { spin_lock(&dlm->spinlock); __dlm_mle_detach_hb_events(dlm, mle); spin_unlock(&dlm->spinlock); } static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) { struct dlm_ctxt *dlm; dlm = mle->dlm; assert_spin_locked(&dlm->spinlock); assert_spin_locked(&dlm->master_lock); mle->inuse++; kref_get(&mle->mle_refs); } static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) { struct dlm_ctxt *dlm; dlm = mle->dlm; spin_lock(&dlm->spinlock); spin_lock(&dlm->master_lock); mle->inuse--; __dlm_put_mle(mle); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); } /* remove from list and free */ static void __dlm_put_mle(struct dlm_master_list_entry *mle) { struct dlm_ctxt *dlm; dlm = mle->dlm; assert_spin_locked(&dlm->spinlock); assert_spin_locked(&dlm->master_lock); if (!kref_read(&mle->mle_refs)) { /* this may or may not crash, but who cares. * it's a BUG. 
*/ mlog(ML_ERROR, "bad mle: %p\n", mle); dlm_print_one_mle(mle); BUG(); } else kref_put(&mle->mle_refs, dlm_mle_release); } /* must not have any spinlocks coming in */ static void dlm_put_mle(struct dlm_master_list_entry *mle) { struct dlm_ctxt *dlm; dlm = mle->dlm; spin_lock(&dlm->spinlock); spin_lock(&dlm->master_lock); __dlm_put_mle(mle); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); } static inline void dlm_get_mle(struct dlm_master_list_entry *mle) { kref_get(&mle->mle_refs); } static void dlm_init_mle(struct dlm_master_list_entry *mle, enum dlm_mle_type type, struct dlm_ctxt *dlm, struct dlm_lock_resource *res, const char *name, unsigned int namelen) { assert_spin_locked(&dlm->spinlock); mle->dlm = dlm; mle->type = type; INIT_HLIST_NODE(&mle->master_hash_node); INIT_LIST_HEAD(&mle->hb_events); bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); spin_lock_init(&mle->spinlock); init_waitqueue_head(&mle->wq); atomic_set(&mle->woken, 0); kref_init(&mle->mle_refs); bitmap_zero(mle->response_map, O2NM_MAX_NODES); mle->master = O2NM_MAX_NODES; mle->new_master = O2NM_MAX_NODES; mle->inuse = 0; BUG_ON(mle->type != DLM_MLE_BLOCK && mle->type != DLM_MLE_MASTER && mle->type != DLM_MLE_MIGRATION); if (mle->type == DLM_MLE_MASTER) { BUG_ON(!res); mle->mleres = res; memcpy(mle->mname, res->lockname.name, res->lockname.len); mle->mnamelen = res->lockname.len; mle->mnamehash = res->lockname.hash; } else { BUG_ON(!name); mle->mleres = NULL; memcpy(mle->mname, name, namelen); mle->mnamelen = namelen; mle->mnamehash = dlm_lockid_hash(name, namelen); } atomic_inc(&dlm->mle_tot_count[mle->type]); atomic_inc(&dlm->mle_cur_count[mle->type]); /* copy off the node_map and register hb callbacks on our copy */ bitmap_copy(mle->node_map, dlm->domain_map, O2NM_MAX_NODES); bitmap_copy(mle->vote_map, dlm->domain_map, O2NM_MAX_NODES); clear_bit(dlm->node_num, mle->vote_map); clear_bit(dlm->node_num, mle->node_map); /* attach the mle to the domain node up/down events */ __dlm_mle_attach_hb_events(dlm, mle); } void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) { assert_spin_locked(&dlm->spinlock); assert_spin_locked(&dlm->master_lock); if (!hlist_unhashed(&mle->master_hash_node)) hlist_del_init(&mle->master_hash_node); } void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) { struct hlist_head *bucket; assert_spin_locked(&dlm->master_lock); bucket = dlm_master_hash(dlm, mle->mnamehash); hlist_add_head(&mle->master_hash_node, bucket); } /* returns 1 if found, 0 if not */ static int dlm_find_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry **mle, char *name, unsigned int namelen) { struct dlm_master_list_entry *tmpmle; struct hlist_head *bucket; unsigned int hash; assert_spin_locked(&dlm->master_lock); hash = dlm_lockid_hash(name, namelen); bucket = dlm_master_hash(dlm, hash); hlist_for_each_entry(tmpmle, bucket, master_hash_node) { if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) continue; dlm_get_mle(tmpmle); *mle = tmpmle; return 1; } return 0; } void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) { struct dlm_master_list_entry *mle; assert_spin_locked(&dlm->spinlock); list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { if (node_up) dlm_mle_node_up(dlm, mle, NULL, idx); else dlm_mle_node_down(dlm, mle, NULL, idx); } } static void dlm_mle_node_down(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle, struct o2nm_node *node, int idx) { spin_lock(&mle->spinlock); if (!test_bit(idx, mle->node_map)) mlog(0, "node %u 
already removed from nodemap!\n", idx); else clear_bit(idx, mle->node_map); spin_unlock(&mle->spinlock); } static void dlm_mle_node_up(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle, struct o2nm_node *node, int idx) { spin_lock(&mle->spinlock); if (test_bit(idx, mle->node_map)) mlog(0, "node %u already in node map!\n", idx); else set_bit(idx, mle->node_map); spin_unlock(&mle->spinlock); } int dlm_init_mle_cache(void) { dlm_mle_cache = kmem_cache_create("o2dlm_mle", sizeof(struct dlm_master_list_entry), 0, SLAB_HWCACHE_ALIGN, NULL); if (dlm_mle_cache == NULL) return -ENOMEM; return 0; } void dlm_destroy_mle_cache(void) { kmem_cache_destroy(dlm_mle_cache); } static void dlm_mle_release(struct kref *kref) { struct dlm_master_list_entry *mle; struct dlm_ctxt *dlm; mle = container_of(kref, struct dlm_master_list_entry, mle_refs); dlm = mle->dlm; assert_spin_locked(&dlm->spinlock); assert_spin_locked(&dlm->master_lock); mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, mle->type); /* remove from list if not already */ __dlm_unlink_mle(dlm, mle); /* detach the mle from the domain node up/down events */ __dlm_mle_detach_hb_events(dlm, mle); atomic_dec(&dlm->mle_cur_count[mle->type]); /* NOTE: kfree under spinlock here. * if this is bad, we can move this to a freelist. */ kmem_cache_free(dlm_mle_cache, mle); } /* * LOCK RESOURCE FUNCTIONS */ int dlm_init_master_caches(void) { dlm_lockres_cache = kmem_cache_create("o2dlm_lockres", sizeof(struct dlm_lock_resource), 0, SLAB_HWCACHE_ALIGN, NULL); if (!dlm_lockres_cache) goto bail; dlm_lockname_cache = kmem_cache_create("o2dlm_lockname", DLM_LOCKID_NAME_MAX, 0, SLAB_HWCACHE_ALIGN, NULL); if (!dlm_lockname_cache) goto bail; return 0; bail: dlm_destroy_master_caches(); return -ENOMEM; } void dlm_destroy_master_caches(void) { kmem_cache_destroy(dlm_lockname_cache); dlm_lockname_cache = NULL; kmem_cache_destroy(dlm_lockres_cache); dlm_lockres_cache = NULL; } static void dlm_lockres_release(struct kref *kref) { struct dlm_lock_resource *res; struct dlm_ctxt *dlm; res = container_of(kref, struct dlm_lock_resource, refs); dlm = res->dlm; /* This should not happen -- all lockres' have a name * associated with them at init time. */ BUG_ON(!res->lockname.name); mlog(0, "destroying lockres %.*s\n", res->lockname.len, res->lockname.name); atomic_dec(&dlm->res_cur_count); if (!hlist_unhashed(&res->hash_node) || !list_empty(&res->granted) || !list_empty(&res->converting) || !list_empty(&res->blocked) || !list_empty(&res->dirty) || !list_empty(&res->recovering) || !list_empty(&res->purge)) { mlog(ML_ERROR, "Going to BUG for resource %.*s." " We're on a list! [%c%c%c%c%c%c%c]\n", res->lockname.len, res->lockname.name, !hlist_unhashed(&res->hash_node) ? 'H' : ' ', !list_empty(&res->granted) ? 'G' : ' ', !list_empty(&res->converting) ? 'C' : ' ', !list_empty(&res->blocked) ? 'B' : ' ', !list_empty(&res->dirty) ? 'D' : ' ', !list_empty(&res->recovering) ? 'R' : ' ', !list_empty(&res->purge) ? 'P' : ' '); dlm_print_one_lock_resource(res); } /* By the time we're ready to blow this guy away, we shouldn't * be on any lists. 
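 *
 * In other words: dlm_lockres_put() just dropped the last kref, so
 * the resource must already be off the hash and off every lock queue
 * (granted, converting, blocked) as well as the dirty, recovering
 * and purge lists; the BUG_ON()s below restate the check above.
 * The name buffer and the lockres itself come from two different
 * caches (dlm_lockname_cache and dlm_lockres_cache) and are freed
 * separately.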
*/ BUG_ON(!hlist_unhashed(&res->hash_node)); BUG_ON(!list_empty(&res->granted)); BUG_ON(!list_empty(&res->converting)); BUG_ON(!list_empty(&res->blocked)); BUG_ON(!list_empty(&res->dirty)); BUG_ON(!list_empty(&res->recovering)); BUG_ON(!list_empty(&res->purge)); kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); kmem_cache_free(dlm_lockres_cache, res); } void dlm_lockres_put(struct dlm_lock_resource *res) { kref_put(&res->refs, dlm_lockres_release); } static void dlm_init_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, const char *name, unsigned int namelen) { char *qname; /* If we memset here, we lose our reference to the kmalloc'd * res->lockname.name, so be sure to init every field * correctly! */ qname = (char *) res->lockname.name; memcpy(qname, name, namelen); res->lockname.len = namelen; res->lockname.hash = dlm_lockid_hash(name, namelen); init_waitqueue_head(&res->wq); spin_lock_init(&res->spinlock); INIT_HLIST_NODE(&res->hash_node); INIT_LIST_HEAD(&res->granted); INIT_LIST_HEAD(&res->converting); INIT_LIST_HEAD(&res->blocked); INIT_LIST_HEAD(&res->dirty); INIT_LIST_HEAD(&res->recovering); INIT_LIST_HEAD(&res->purge); INIT_LIST_HEAD(&res->tracking); atomic_set(&res->asts_reserved, 0); res->migration_pending = 0; res->inflight_locks = 0; res->inflight_assert_workers = 0; res->dlm = dlm; kref_init(&res->refs); atomic_inc(&dlm->res_tot_count); atomic_inc(&dlm->res_cur_count); /* just for consistency */ spin_lock(&res->spinlock); dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); spin_unlock(&res->spinlock); res->state = DLM_LOCK_RES_IN_PROGRESS; res->last_used = 0; spin_lock(&dlm->track_lock); list_add_tail(&res->tracking, &dlm->tracking_list); spin_unlock(&dlm->track_lock); memset(res->lvb, 0, DLM_LVB_LEN); bitmap_zero(res->refmap, O2NM_MAX_NODES); } struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, const char *name, unsigned int namelen) { struct dlm_lock_resource *res = NULL; res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); if (!res) goto error; res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); if (!res->lockname.name) goto error; dlm_init_lockres(dlm, res, name, namelen); return res; error: if (res) kmem_cache_free(dlm_lockres_cache, res); return NULL; } void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, int bit) { assert_spin_locked(&res->spinlock); mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, res->lockname.name, bit, __builtin_return_address(0)); set_bit(bit, res->refmap); } void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, int bit) { assert_spin_locked(&res->spinlock); mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, res->lockname.name, bit, __builtin_return_address(0)); clear_bit(bit, res->refmap); } static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { res->inflight_locks++; mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, res->lockname.len, res->lockname.name, res->inflight_locks, __builtin_return_address(0)); } void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { assert_spin_locked(&res->spinlock); __dlm_lockres_grab_inflight_ref(dlm, res); } void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { assert_spin_locked(&res->spinlock); BUG_ON(res->inflight_locks == 0); res->inflight_locks--; mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, res->lockname.len, 
res->lockname.name, res->inflight_locks, __builtin_return_address(0)); wake_up(&res->wq); } void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { assert_spin_locked(&res->spinlock); res->inflight_assert_workers++; mlog(0, "%s:%.*s: inflight assert worker++: now %u\n", dlm->name, res->lockname.len, res->lockname.name, res->inflight_assert_workers); } static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { assert_spin_locked(&res->spinlock); BUG_ON(res->inflight_assert_workers == 0); res->inflight_assert_workers--; mlog(0, "%s:%.*s: inflight assert worker--: now %u\n", dlm->name, res->lockname.len, res->lockname.name, res->inflight_assert_workers); } static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { spin_lock(&res->spinlock); __dlm_lockres_drop_inflight_worker(dlm, res); spin_unlock(&res->spinlock); } /* * lookup a lock resource by name. * may already exist in the hashtable. * lockid is null terminated * * if not, allocate enough for the lockres and for * the temporary structure used in doing the mastering. * * also, do a lookup in the dlm->master_list to see * if another node has begun mastering the same lock. * if so, there should be a block entry in there * for this name, and we should *not* attempt to master * the lock here. need to wait around for that node * to assert_master (or die). * */ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, const char *lockid, int namelen, int flags) { struct dlm_lock_resource *tmpres=NULL, *res=NULL; struct dlm_master_list_entry *mle = NULL; struct dlm_master_list_entry *alloc_mle = NULL; int blocked = 0; int ret, nodenum; struct dlm_node_iter iter; unsigned int hash; int tries = 0; int bit, wait_on_recovery = 0; BUG_ON(!lockid); hash = dlm_lockid_hash(lockid, namelen); mlog(0, "get lockres %s (len %d)\n", lockid, namelen); lookup: spin_lock(&dlm->spinlock); tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); if (tmpres) { spin_unlock(&dlm->spinlock); spin_lock(&tmpres->spinlock); /* * Right after dlm spinlock was released, dlm_thread could have * purged the lockres. Check if lockres got unhashed. If so * start over. */ if (hlist_unhashed(&tmpres->hash_node)) { spin_unlock(&tmpres->spinlock); dlm_lockres_put(tmpres); tmpres = NULL; goto lookup; } /* Wait on the thread that is mastering the resource */ if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { __dlm_wait_on_lockres(tmpres); BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); spin_unlock(&tmpres->spinlock); dlm_lockres_put(tmpres); tmpres = NULL; goto lookup; } /* Wait on the resource purge to complete before continuing */ if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { BUG_ON(tmpres->owner == dlm->node_num); __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF); spin_unlock(&tmpres->spinlock); dlm_lockres_put(tmpres); tmpres = NULL; goto lookup; } /* Grab inflight ref to pin the resource */ dlm_lockres_grab_inflight_ref(dlm, tmpres); spin_unlock(&tmpres->spinlock); if (res) { spin_lock(&dlm->track_lock); if (!list_empty(&res->tracking)) list_del_init(&res->tracking); else mlog(ML_ERROR, "Resource %.*s not " "on the Tracking list\n", res->lockname.len, res->lockname.name); spin_unlock(&dlm->track_lock); dlm_lockres_put(res); } res = tmpres; goto leave; } if (!res) { spin_unlock(&dlm->spinlock); mlog(0, "allocating a new resource\n"); /* nothing found and we need to allocate one. 
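 *
 * Both the mle and the lockres are allocated with the dlm spinlock
 * dropped (GFP_NOFS may sleep), after which we jump back to lookup:
 * and search the hash again.  If another thread inserted the same
 * lockres in the meantime, the copy allocated here is released on
 * the way out (and the unused alloc_mle is freed at leave:), so the
 * retry is safe, just potentially wasteful.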
*/ alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); if (!alloc_mle) goto leave; res = dlm_new_lockres(dlm, lockid, namelen); if (!res) goto leave; goto lookup; } mlog(0, "no lockres found, allocated our own: %p\n", res); if (flags & LKM_LOCAL) { /* caller knows it's safe to assume it's not mastered elsewhere * DONE! return right away */ spin_lock(&res->spinlock); dlm_change_lockres_owner(dlm, res, dlm->node_num); __dlm_insert_lockres(dlm, res); dlm_lockres_grab_inflight_ref(dlm, res); spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); /* lockres still marked IN_PROGRESS */ goto wake_waiters; } /* check master list to see if another node has started mastering it */ spin_lock(&dlm->master_lock); /* if we found a block, wait for lock to be mastered by another node */ blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); if (blocked) { int mig; if (mle->type == DLM_MLE_MASTER) { mlog(ML_ERROR, "master entry for nonexistent lock!\n"); BUG(); } mig = (mle->type == DLM_MLE_MIGRATION); /* if there is a migration in progress, let the migration * finish before continuing. we can wait for the absence * of the MIGRATION mle: either the migrate finished or * one of the nodes died and the mle was cleaned up. * if there is a BLOCK here, but it already has a master * set, we are too late. the master does not have a ref * for us in the refmap. detach the mle and drop it. * either way, go back to the top and start over. */ if (mig || mle->master != O2NM_MAX_NODES) { BUG_ON(mig && mle->master == dlm->node_num); /* we arrived too late. the master does not * have a ref for us. retry. */ mlog(0, "%s:%.*s: late on %s\n", dlm->name, namelen, lockid, mig ? "MIGRATION" : "BLOCK"); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); /* master is known, detach */ if (!mig) dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle(mle); mle = NULL; /* this is lame, but we can't wait on either * the mle or lockres waitqueue here */ if (mig) msleep(100); goto lookup; } } else { /* go ahead and try to master lock on this node */ mle = alloc_mle; /* make sure this does not get freed below */ alloc_mle = NULL; dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); set_bit(dlm->node_num, mle->maybe_map); __dlm_insert_mle(dlm, mle); /* still holding the dlm spinlock, check the recovery map * to see if there are any nodes that still need to be * considered. these will not appear in the mle nodemap * but they might own this lockres. wait on them. */ bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); if (bit < O2NM_MAX_NODES) { mlog(0, "%s: res %.*s, At least one node (%d) " "to recover before lock mastery can begin\n", dlm->name, namelen, (char *)lockid, bit); wait_on_recovery = 1; } } /* at this point there is either a DLM_MLE_BLOCK or a * DLM_MLE_MASTER on the master list, so it's safe to add the * lockres to the hashtable. anyone who finds the lock will * still have to wait on the IN_PROGRESS. */ /* finally add the lockres to its hash bucket */ __dlm_insert_lockres(dlm, res); /* since this lockres is new it doesn't not require the spinlock */ __dlm_lockres_grab_inflight_ref(dlm, res); /* get an extra ref on the mle in case this is a BLOCK * if so, the creator of the BLOCK may try to put the last * ref at this time in the assert master handler, so we * need an extra one to keep from a bad ptr deref. 
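 *
 * Roughly, the rest of the mastery path below is: pin the mle
 * (dlm_get_mle_inuse), drop the dlm locks, send a master request to
 * every node in mle->vote_map (dlm_do_master_request), then sit in
 * dlm_wait_for_lock_mastery() until either another node asserts
 * mastery or we decide no lower-numbered node is interested and
 * assert it ourselves.  Once an owner is known, the mle is detached
 * from heartbeat events and both references taken here are dropped.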
*/ dlm_get_mle_inuse(mle); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); redo_request: while (wait_on_recovery) { /* any cluster changes that occurred after dropping the * dlm spinlock would be detectable be a change on the mle, * so we only need to clear out the recovery map once. */ if (dlm_is_recovery_lock(lockid, namelen)) { mlog(0, "%s: Recovery map is not empty, but must " "master $RECOVERY lock now\n", dlm->name); if (!dlm_pre_master_reco_lockres(dlm, res)) wait_on_recovery = 0; else { mlog(0, "%s: waiting 500ms for heartbeat state " "change\n", dlm->name); msleep(500); } continue; } dlm_kick_recovery_thread(dlm); msleep(1000); dlm_wait_for_recovery(dlm); spin_lock(&dlm->spinlock); bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES); if (bit < O2NM_MAX_NODES) { mlog(0, "%s: res %.*s, At least one node (%d) " "to recover before lock mastery can begin\n", dlm->name, namelen, (char *)lockid, bit); wait_on_recovery = 1; } else wait_on_recovery = 0; spin_unlock(&dlm->spinlock); if (wait_on_recovery) dlm_wait_for_node_recovery(dlm, bit, 10000); } /* must wait for lock to be mastered elsewhere */ if (blocked) goto wait; ret = -EINVAL; dlm_node_iter_init(mle->vote_map, &iter); while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { ret = dlm_do_master_request(res, mle, nodenum); if (ret < 0) mlog_errno(ret); if (mle->master != O2NM_MAX_NODES) { /* found a master ! */ if (mle->master <= nodenum) break; /* if our master request has not reached the master * yet, keep going until it does. this is how the * master will know that asserts are needed back to * the lower nodes. */ mlog(0, "%s: res %.*s, Requests only up to %u but " "master is %u, keep going\n", dlm->name, namelen, lockid, nodenum, mle->master); } } wait: /* keep going until the response map includes all nodes */ ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); if (ret < 0) { wait_on_recovery = 1; mlog(0, "%s: res %.*s, Node map changed, redo the master " "request now, blocked=%d\n", dlm->name, res->lockname.len, res->lockname.name, blocked); if (++tries > 20) { mlog(ML_ERROR, "%s: res %.*s, Spinning on " "dlm_wait_for_lock_mastery, blocked = %d\n", dlm->name, res->lockname.len, res->lockname.name, blocked); dlm_print_one_lock_resource(res); dlm_print_one_mle(mle); tries = 0; } goto redo_request; } mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, res->lockname.name, res->owner); /* make sure we never continue without this */ BUG_ON(res->owner == O2NM_MAX_NODES); /* master is known, detach if not already detached */ dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle(mle); /* put the extra ref */ dlm_put_mle_inuse(mle); wake_waiters: spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; spin_unlock(&res->spinlock); wake_up(&res->wq); leave: /* need to free the unused mle */ if (alloc_mle) kmem_cache_free(dlm_mle_cache, alloc_mle); return res; } #define DLM_MASTERY_TIMEOUT_MS 5000 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_master_list_entry *mle, int *blocked) { u8 m; int ret, bit; int map_changed, voting_done; int assert, sleep; recheck: ret = 0; assert = 0; /* check if another node has already become the owner */ spin_lock(&res->spinlock); if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, res->lockname.len, res->lockname.name, res->owner); spin_unlock(&res->spinlock); /* this will cause the master to re-assert across * the whole cluster, freeing up mles */ if 
(res->owner != dlm->node_num) { ret = dlm_do_master_request(res, mle, res->owner); if (ret < 0) { /* give recovery a chance to run */ mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); msleep(500); goto recheck; } } ret = 0; goto leave; } spin_unlock(&res->spinlock); spin_lock(&mle->spinlock); m = mle->master; map_changed = !bitmap_equal(mle->vote_map, mle->node_map, O2NM_MAX_NODES); voting_done = bitmap_equal(mle->vote_map, mle->response_map, O2NM_MAX_NODES); /* restart if we hit any errors */ if (map_changed) { int b; mlog(0, "%s: %.*s: node map changed, restarting\n", dlm->name, res->lockname.len, res->lockname.name); ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); b = (mle->type == DLM_MLE_BLOCK); if ((*blocked && !b) || (!*blocked && b)) { mlog(0, "%s:%.*s: status change: old=%d new=%d\n", dlm->name, res->lockname.len, res->lockname.name, *blocked, b); *blocked = b; } spin_unlock(&mle->spinlock); if (ret < 0) { mlog_errno(ret); goto leave; } mlog(0, "%s:%.*s: restart lock mastery succeeded, " "rechecking now\n", dlm->name, res->lockname.len, res->lockname.name); goto recheck; } else { if (!voting_done) { mlog(0, "map not changed and voting not done " "for %s:%.*s\n", dlm->name, res->lockname.len, res->lockname.name); } } if (m != O2NM_MAX_NODES) { /* another node has done an assert! * all done! */ sleep = 0; } else { sleep = 1; /* have all nodes responded? */ if (voting_done && !*blocked) { bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); if (dlm->node_num <= bit) { /* my node number is lowest. * now tell other nodes that I am * mastering this. */ mle->master = dlm->node_num; /* ref was grabbed in get_lock_resource * will be dropped in dlmlock_master */ assert = 1; sleep = 0; } /* if voting is done, but we have not received * an assert master yet, we must sleep */ } } spin_unlock(&mle->spinlock); /* sleep if we haven't finished voting yet */ if (sleep) { unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); atomic_set(&mle->woken, 0); (void)wait_event_timeout(mle->wq, (atomic_read(&mle->woken) == 1), timeo); if (res->owner == O2NM_MAX_NODES) { mlog(0, "%s:%.*s: waiting again\n", dlm->name, res->lockname.len, res->lockname.name); goto recheck; } mlog(0, "done waiting, master is %u\n", res->owner); ret = 0; goto leave; } ret = 0; /* done */ if (assert) { m = dlm->node_num; mlog(0, "about to master %.*s here, this=%u\n", res->lockname.len, res->lockname.name, m); ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); if (ret) { /* This is a failure in the network path, * not in the response to the assert_master * (any nonzero response is a BUG on this node). * Most likely a socket just got disconnected * due to node death. */ mlog_errno(ret); } /* no longer need to restart lock mastery. * all living nodes have been contacted. 
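 *
 * Put differently: once every node in vote_map has answered and no
 * node with a lower number shows up in maybe_map, we claim mastery,
 * tell everyone via dlm_do_assert_master() and then commit the
 * result with dlm_change_lockres_owner() below.  A network error
 * from the assert is tolerated here; a node that died mid-assert is
 * picked up by recovery rather than by restarting mastery.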
*/ ret = 0; } /* set the lockres owner */ spin_lock(&res->spinlock); /* mastery reference obtained either during * assert_master_handler or in get_lock_resource */ dlm_change_lockres_owner(dlm, res, m); spin_unlock(&res->spinlock); leave: return ret; } struct dlm_bitmap_diff_iter { int curnode; unsigned long *orig_bm; unsigned long *cur_bm; unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; }; enum dlm_node_state_change { NODE_DOWN = -1, NODE_NO_CHANGE = 0, NODE_UP }; static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, unsigned long *orig_bm, unsigned long *cur_bm) { unsigned long p1, p2; int i; iter->curnode = -1; iter->orig_bm = orig_bm; iter->cur_bm = cur_bm; for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { p1 = *(iter->orig_bm + i); p2 = *(iter->cur_bm + i); iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); } } static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, enum dlm_node_state_change *state) { int bit; if (iter->curnode >= O2NM_MAX_NODES) return -ENOENT; bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, iter->curnode+1); if (bit >= O2NM_MAX_NODES) { iter->curnode = O2NM_MAX_NODES; return -ENOENT; } /* if it was there in the original then this node died */ if (test_bit(bit, iter->orig_bm)) *state = NODE_DOWN; else *state = NODE_UP; iter->curnode = bit; return bit; } static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_master_list_entry *mle, int blocked) { struct dlm_bitmap_diff_iter bdi; enum dlm_node_state_change sc; int node; int ret = 0; mlog(0, "something happened such that the " "master process may need to be restarted!\n"); assert_spin_locked(&mle->spinlock); dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); node = dlm_bitmap_diff_iter_next(&bdi, &sc); while (node >= 0) { if (sc == NODE_UP) { /* a node came up. clear any old vote from * the response map and set it in the vote map * then restart the mastery. */ mlog(ML_NOTICE, "node %d up while restarting\n", node); /* redo the master request, but only for the new node */ mlog(0, "sending request to new node\n"); clear_bit(node, mle->response_map); set_bit(node, mle->vote_map); } else { mlog(ML_ERROR, "node down! %d\n", node); if (blocked) { int lowest = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); /* act like it was never there */ clear_bit(node, mle->maybe_map); if (node == lowest) { mlog(0, "expected master %u died" " while this node was blocked " "waiting on it!\n", node); lowest = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, lowest+1); if (lowest < O2NM_MAX_NODES) { mlog(0, "%s:%.*s:still " "blocked. waiting on %u " "now\n", dlm->name, res->lockname.len, res->lockname.name, lowest); } else { /* mle is an MLE_BLOCK, but * there is now nothing left to * block on. we need to return * all the way back out and try * again with an MLE_MASTER. * dlm_do_local_recovery_cleanup * has already run, so the mle * refcount is ok */ mlog(0, "%s:%.*s: no " "longer blocking. 
try to " "master this here\n", dlm->name, res->lockname.len, res->lockname.name); mle->type = DLM_MLE_MASTER; mle->mleres = res; } } } /* now blank out everything, as if we had never * contacted anyone */ bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); bitmap_zero(mle->response_map, O2NM_MAX_NODES); /* reset the vote_map to the current node_map */ bitmap_copy(mle->vote_map, mle->node_map, O2NM_MAX_NODES); /* put myself into the maybe map */ if (mle->type != DLM_MLE_BLOCK) set_bit(dlm->node_num, mle->maybe_map); } ret = -EAGAIN; node = dlm_bitmap_diff_iter_next(&bdi, &sc); } return ret; } /* * DLM_MASTER_REQUEST_MSG * * returns: 0 on success, * -errno on a network error * * on error, the caller should assume the target node is "dead" * */ static int dlm_do_master_request(struct dlm_lock_resource *res, struct dlm_master_list_entry *mle, int to) { struct dlm_ctxt *dlm = mle->dlm; struct dlm_master_request request; int ret, response=0, resend; memset(&request, 0, sizeof(request)); request.node_idx = dlm->node_num; BUG_ON(mle->type == DLM_MLE_MIGRATION); request.namelen = (u8)mle->mnamelen; memcpy(request.name, mle->mname, request.namelen); again: ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, sizeof(request), to, &response); if (ret < 0) { if (ret == -ESRCH) { /* should never happen */ mlog(ML_ERROR, "TCP stack not ready!\n"); BUG(); } else if (ret == -EINVAL) { mlog(ML_ERROR, "bad args passed to o2net!\n"); BUG(); } else if (ret == -ENOMEM) { mlog(ML_ERROR, "out of memory while trying to send " "network message! retrying\n"); /* this is totally crude */ msleep(50); goto again; } else if (!dlm_is_host_down(ret)) { /* not a network error. bad. */ mlog_errno(ret); mlog(ML_ERROR, "unhandled error!"); BUG(); } /* all other errors should be network errors, * and likely indicate node death */ mlog(ML_ERROR, "link to %d went down!\n", to); goto out; } ret = 0; resend = 0; spin_lock(&mle->spinlock); switch (response) { case DLM_MASTER_RESP_YES: set_bit(to, mle->response_map); mlog(0, "node %u is the master, response=YES\n", to); mlog(0, "%s:%.*s: master node %u now knows I have a " "reference\n", dlm->name, res->lockname.len, res->lockname.name, to); mle->master = to; break; case DLM_MASTER_RESP_NO: mlog(0, "node %u not master, response=NO\n", to); set_bit(to, mle->response_map); break; case DLM_MASTER_RESP_MAYBE: mlog(0, "node %u not master, response=MAYBE\n", to); set_bit(to, mle->response_map); set_bit(to, mle->maybe_map); break; case DLM_MASTER_RESP_ERROR: mlog(0, "node %u hit an error, resending\n", to); resend = 1; response = 0; break; default: mlog(ML_ERROR, "bad response! %u\n", response); BUG(); } spin_unlock(&mle->spinlock); if (resend) { /* this is also totally crude */ msleep(50); goto again; } out: return ret; } /* * locks that can be taken here: * dlm->spinlock * res->spinlock * mle->spinlock * dlm->master_list * * if possible, TRIM THIS DOWN!!! 
*/ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { u8 response = DLM_MASTER_RESP_MAYBE; struct dlm_ctxt *dlm = data; struct dlm_lock_resource *res = NULL; struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; char *name; unsigned int namelen, hash; int found, ret; int set_maybe; int dispatch_assert = 0; int dispatched = 0; if (!dlm_grab(dlm)) return DLM_MASTER_RESP_NO; if (!dlm_domain_fully_joined(dlm)) { response = DLM_MASTER_RESP_NO; goto send_response; } name = request->name; namelen = request->namelen; hash = dlm_lockid_hash(name, namelen); if (namelen > DLM_LOCKID_NAME_MAX) { response = DLM_IVBUFLEN; goto send_response; } way_up_top: spin_lock(&dlm->spinlock); res = __dlm_lookup_lockres(dlm, name, namelen, hash); if (res) { spin_unlock(&dlm->spinlock); /* take care of the easy cases up front */ spin_lock(&res->spinlock); /* * Right after dlm spinlock was released, dlm_thread could have * purged the lockres. Check if lockres got unhashed. If so * start over. */ if (hlist_unhashed(&res->hash_node)) { spin_unlock(&res->spinlock); dlm_lockres_put(res); goto way_up_top; } if (res->state & (DLM_LOCK_RES_RECOVERING| DLM_LOCK_RES_MIGRATING)) { spin_unlock(&res->spinlock); mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " "being recovered/migrated\n"); response = DLM_MASTER_RESP_ERROR; if (mle) kmem_cache_free(dlm_mle_cache, mle); goto send_response; } if (res->owner == dlm->node_num) { dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); spin_unlock(&res->spinlock); response = DLM_MASTER_RESP_YES; if (mle) kmem_cache_free(dlm_mle_cache, mle); /* this node is the owner. * there is some extra work that needs to * happen now. the requesting node has * caused all nodes up to this one to * create mles. this node now needs to * go back and clean those up. */ dispatch_assert = 1; goto send_response; } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { spin_unlock(&res->spinlock); // mlog(0, "node %u is the master\n", res->owner); response = DLM_MASTER_RESP_NO; if (mle) kmem_cache_free(dlm_mle_cache, mle); goto send_response; } /* ok, there is no owner. either this node is * being blocked, or it is actively trying to * master this lock. */ if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { mlog(ML_ERROR, "lock with no owner should be " "in-progress!\n"); BUG(); } // mlog(0, "lockres is in progress...\n"); spin_lock(&dlm->master_lock); found = dlm_find_mle(dlm, &tmpmle, name, namelen); if (!found) { mlog(ML_ERROR, "no mle found for this lock!\n"); BUG(); } set_maybe = 1; spin_lock(&tmpmle->spinlock); if (tmpmle->type == DLM_MLE_BLOCK) { // mlog(0, "this node is waiting for " // "lockres to be mastered\n"); response = DLM_MASTER_RESP_NO; } else if (tmpmle->type == DLM_MLE_MIGRATION) { mlog(0, "node %u is master, but trying to migrate to " "node %u.\n", tmpmle->master, tmpmle->new_master); if (tmpmle->master == dlm->node_num) { mlog(ML_ERROR, "no owner on lockres, but this " "node is trying to migrate it to %u?!\n", tmpmle->new_master); BUG(); } else { /* the real master can respond on its own */ response = DLM_MASTER_RESP_NO; } } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { set_maybe = 0; if (tmpmle->master == dlm->node_num) { response = DLM_MASTER_RESP_YES; /* this node will be the owner. 
* go back and clean the mles on any * other nodes */ dispatch_assert = 1; dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); } else response = DLM_MASTER_RESP_NO; } else { // mlog(0, "this node is attempting to " // "master lockres\n"); response = DLM_MASTER_RESP_MAYBE; } if (set_maybe) set_bit(request->node_idx, tmpmle->maybe_map); spin_unlock(&tmpmle->spinlock); spin_unlock(&dlm->master_lock); spin_unlock(&res->spinlock); /* keep the mle attached to heartbeat events */ dlm_put_mle(tmpmle); if (mle) kmem_cache_free(dlm_mle_cache, mle); goto send_response; } /* * lockres doesn't exist on this node * if there is an MLE_BLOCK, return NO * if there is an MLE_MASTER, return MAYBE * otherwise, add an MLE_BLOCK, return NO */ spin_lock(&dlm->master_lock); found = dlm_find_mle(dlm, &tmpmle, name, namelen); if (!found) { /* this lockid has never been seen on this node yet */ // mlog(0, "no mle found\n"); if (!mle) { spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); if (!mle) { response = DLM_MASTER_RESP_ERROR; mlog_errno(-ENOMEM); goto send_response; } goto way_up_top; } // mlog(0, "this is second time thru, already allocated, " // "add the block.\n"); dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); set_bit(request->node_idx, mle->maybe_map); __dlm_insert_mle(dlm, mle); response = DLM_MASTER_RESP_NO; } else { spin_lock(&tmpmle->spinlock); if (tmpmle->master == dlm->node_num) { mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); BUG(); } if (tmpmle->type == DLM_MLE_BLOCK) response = DLM_MASTER_RESP_NO; else if (tmpmle->type == DLM_MLE_MIGRATION) { mlog(0, "migration mle was found (%u->%u)\n", tmpmle->master, tmpmle->new_master); /* real master can respond on its own */ response = DLM_MASTER_RESP_NO; } else response = DLM_MASTER_RESP_MAYBE; set_bit(request->node_idx, tmpmle->maybe_map); spin_unlock(&tmpmle->spinlock); } spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); if (found) { /* keep the mle attached to heartbeat events */ dlm_put_mle(tmpmle); } send_response: /* * __dlm_lookup_lockres() grabbed a reference to this lockres. * The reference is released by dlm_assert_master_worker() under * the call to dlm_dispatch_assert_master(). If * dlm_assert_master_worker() isn't called, we drop it here. */ if (dispatch_assert) { mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", dlm->node_num, res->lockname.len, res->lockname.name); spin_lock(&res->spinlock); ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, DLM_ASSERT_MASTER_MLE_CLEANUP); if (ret < 0) { mlog(ML_ERROR, "failed to dispatch assert master work\n"); response = DLM_MASTER_RESP_ERROR; spin_unlock(&res->spinlock); dlm_lockres_put(res); } else { dispatched = 1; __dlm_lockres_grab_inflight_worker(dlm, res); spin_unlock(&res->spinlock); } } else { if (res) dlm_lockres_put(res); } if (!dispatched) dlm_put(dlm); return response; } /* * DLM_ASSERT_MASTER_MSG */ /* * NOTE: this can be used for debugging * can periodically run all locks owned by this node * and re-assert across the cluster... 
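 *
 * What the function below actually does: walk the supplied nodemap
 * and send DLM_ASSERT_MASTER_MSG to each node.  Two bits can come
 * back in the response: DLM_ASSERT_RESPONSE_REASSERT means some node
 * created mles on our behalf and we should loop and assert again,
 * and DLM_ASSERT_RESPONSE_MASTERY_REF means that node holds a
 * reference, so its bit gets set in the refmap.  The lockres stays
 * marked DLM_LOCK_RES_SETREF_INPROG for the duration so that deref
 * messages are deferred until the refmap has settled.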
*/ static int dlm_do_assert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, void *nodemap, u32 flags) { struct dlm_assert_master assert; int to, tmpret; struct dlm_node_iter iter; int ret = 0; int reassert; const char *lockname = res->lockname.name; unsigned int namelen = res->lockname.len; BUG_ON(namelen > O2NM_MAX_NAME_LEN); spin_lock(&res->spinlock); res->state |= DLM_LOCK_RES_SETREF_INPROG; spin_unlock(&res->spinlock); again: reassert = 0; /* note that if this nodemap is empty, it returns 0 */ dlm_node_iter_init(nodemap, &iter); while ((to = dlm_node_iter_next(&iter)) >= 0) { int r = 0; struct dlm_master_list_entry *mle = NULL; mlog(0, "sending assert master to %d (%.*s)\n", to, namelen, lockname); memset(&assert, 0, sizeof(assert)); assert.node_idx = dlm->node_num; assert.namelen = namelen; memcpy(assert.name, lockname, namelen); assert.flags = cpu_to_be32(flags); tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, &assert, sizeof(assert), to, &r); if (tmpret < 0) { mlog(ML_ERROR, "Error %d when sending message %u (key " "0x%x) to node %u\n", tmpret, DLM_ASSERT_MASTER_MSG, dlm->key, to); if (!dlm_is_host_down(tmpret)) { mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); BUG(); } /* a node died. finish out the rest of the nodes. */ mlog(0, "link to %d went down!\n", to); /* any nonzero status return will do */ ret = tmpret; r = 0; } else if (r < 0) { /* ok, something horribly messed. kill thyself. */ mlog(ML_ERROR,"during assert master of %.*s to %u, " "got %d.\n", namelen, lockname, to, r); spin_lock(&dlm->spinlock); spin_lock(&dlm->master_lock); if (dlm_find_mle(dlm, &mle, (char *)lockname, namelen)) { dlm_print_one_mle(mle); __dlm_put_mle(mle); } spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); BUG(); } if (r & DLM_ASSERT_RESPONSE_REASSERT && !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { mlog(ML_ERROR, "%.*s: very strange, " "master MLE but no lockres on %u\n", namelen, lockname, to); } if (r & DLM_ASSERT_RESPONSE_REASSERT) { mlog(0, "%.*s: node %u create mles on other " "nodes and requests a re-assert\n", namelen, lockname, to); reassert = 1; } if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { mlog(0, "%.*s: node %u has a reference to this " "lockres, set the bit in the refmap\n", namelen, lockname, to); spin_lock(&res->spinlock); dlm_lockres_set_refmap_bit(dlm, res, to); spin_unlock(&res->spinlock); } } if (reassert) goto again; spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_SETREF_INPROG; spin_unlock(&res->spinlock); wake_up(&res->wq); return ret; } /* * locks that can be taken here: * dlm->spinlock * res->spinlock * mle->spinlock * dlm->master_list * * if possible, TRIM THIS DOWN!!! 
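 *
 * The return value is a bitmask rather than a plain status: the
 * handler may OR in DLM_ASSERT_RESPONSE_REASSERT (other nodes still
 * need cleaning up, please assert again) and
 * DLM_ASSERT_RESPONSE_MASTERY_REF (this node holds a reference to
 * the lockres).  A negative return is reserved for the kill: path,
 * where the assert contradicts local state.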
*/ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_master_list_entry *mle = NULL; struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; struct dlm_lock_resource *res = NULL; char *name; unsigned int namelen, hash; u32 flags; int master_request = 0, have_lockres_ref = 0; int ret = 0; if (!dlm_grab(dlm)) return 0; name = assert->name; namelen = assert->namelen; hash = dlm_lockid_hash(name, namelen); flags = be32_to_cpu(assert->flags); if (namelen > DLM_LOCKID_NAME_MAX) { mlog(ML_ERROR, "Invalid name length!"); goto done; } spin_lock(&dlm->spinlock); if (flags) mlog(0, "assert_master with flags: %u\n", flags); /* find the MLE */ spin_lock(&dlm->master_lock); if (!dlm_find_mle(dlm, &mle, name, namelen)) { /* not an error, could be master just re-asserting */ mlog(0, "just got an assert_master from %u, but no " "MLE for it! (%.*s)\n", assert->node_idx, namelen, name); } else { int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); if (bit >= O2NM_MAX_NODES) { /* not necessarily an error, though less likely. * could be master just re-asserting. */ mlog(0, "no bits set in the maybe_map, but %u " "is asserting! (%.*s)\n", assert->node_idx, namelen, name); } else if (bit != assert->node_idx) { if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { mlog(0, "master %u was found, %u should " "back off\n", assert->node_idx, bit); } else { /* with the fix for bug 569, a higher node * number winning the mastery will respond * YES to mastery requests, but this node * had no way of knowing. let it pass. */ mlog(0, "%u is the lowest node, " "%u is asserting. (%.*s) %u must " "have begun after %u won.\n", bit, assert->node_idx, namelen, name, bit, assert->node_idx); } } if (mle->type == DLM_MLE_MIGRATION) { if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { mlog(0, "%s:%.*s: got cleanup assert" " from %u for migration\n", dlm->name, namelen, name, assert->node_idx); } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { mlog(0, "%s:%.*s: got unrelated assert" " from %u for migration, ignoring\n", dlm->name, namelen, name, assert->node_idx); __dlm_put_mle(mle); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); goto done; } } } spin_unlock(&dlm->master_lock); /* ok everything checks out with the MLE * now check to see if there is a lockres */ res = __dlm_lookup_lockres(dlm, name, namelen, hash); if (res) { spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { mlog(ML_ERROR, "%u asserting but %.*s is " "RECOVERING!\n", assert->node_idx, namelen, name); goto kill; } if (!mle) { if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && res->owner != assert->node_idx) { mlog(ML_ERROR, "DIE! Mastery assert from %u, " "but current owner is %u! (%.*s)\n", assert->node_idx, res->owner, namelen, name); __dlm_print_one_lock_resource(res); BUG(); } } else if (mle->type != DLM_MLE_MIGRATION) { if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { /* owner is just re-asserting */ if (res->owner == assert->node_idx) { mlog(0, "owner %u re-asserting on " "lock %.*s\n", assert->node_idx, namelen, name); goto ok; } mlog(ML_ERROR, "got assert_master from " "node %u, but %u is the owner! " "(%.*s)\n", assert->node_idx, res->owner, namelen, name); goto kill; } if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { mlog(ML_ERROR, "got assert from %u, but lock " "with no owner should be " "in-progress! 
(%.*s)\n", assert->node_idx, namelen, name); goto kill; } } else /* mle->type == DLM_MLE_MIGRATION */ { /* should only be getting an assert from new master */ if (assert->node_idx != mle->new_master) { mlog(ML_ERROR, "got assert from %u, but " "new master is %u, and old master " "was %u (%.*s)\n", assert->node_idx, mle->new_master, mle->master, namelen, name); goto kill; } } ok: spin_unlock(&res->spinlock); } // mlog(0, "woo! got an assert_master from node %u!\n", // assert->node_idx); if (mle) { int extra_ref = 0; int nn = -1; int rr, err = 0; spin_lock(&mle->spinlock); if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) extra_ref = 1; else { /* MASTER mle: if any bits set in the response map * then the calling node needs to re-assert to clear * up nodes that this node contacted */ while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, nn+1)) < O2NM_MAX_NODES) { if (nn != dlm->node_num && nn != assert->node_idx) { master_request = 1; break; } } } mle->master = assert->node_idx; atomic_set(&mle->woken, 1); wake_up(&mle->wq); spin_unlock(&mle->spinlock); if (res) { int wake = 0; spin_lock(&res->spinlock); if (mle->type == DLM_MLE_MIGRATION) { mlog(0, "finishing off migration of lockres %.*s, " "from %u to %u\n", res->lockname.len, res->lockname.name, dlm->node_num, mle->new_master); res->state &= ~DLM_LOCK_RES_MIGRATING; wake = 1; dlm_change_lockres_owner(dlm, res, mle->new_master); BUG_ON(res->state & DLM_LOCK_RES_DIRTY); } else { dlm_change_lockres_owner(dlm, res, mle->master); } spin_unlock(&res->spinlock); have_lockres_ref = 1; if (wake) wake_up(&res->wq); } /* master is known, detach if not already detached. * ensures that only one assert_master call will happen * on this mle. */ spin_lock(&dlm->master_lock); rr = kref_read(&mle->mle_refs); if (mle->inuse > 0) { if (extra_ref && rr < 3) err = 1; else if (!extra_ref && rr < 2) err = 1; } else { if (extra_ref && rr < 2) err = 1; else if (!extra_ref && rr < 1) err = 1; } if (err) { mlog(ML_ERROR, "%s:%.*s: got assert master from %u " "that will mess up this node, refs=%d, extra=%d, " "inuse=%d\n", dlm->name, namelen, name, assert->node_idx, rr, extra_ref, mle->inuse); dlm_print_one_mle(mle); } __dlm_unlink_mle(dlm, mle); __dlm_mle_detach_hb_events(dlm, mle); __dlm_put_mle(mle); if (extra_ref) { /* the assert master message now balances the extra * ref given by the master / migration request message. * if this is the last put, it will be removed * from the list. */ __dlm_put_mle(mle); } spin_unlock(&dlm->master_lock); } else if (res) { if (res->owner != assert->node_idx) { mlog(0, "assert_master from %u, but current " "owner is %u (%.*s), no mle\n", assert->node_idx, res->owner, namelen, name); } } spin_unlock(&dlm->spinlock); done: ret = 0; if (res) { spin_lock(&res->spinlock); res->state |= DLM_LOCK_RES_SETREF_INPROG; spin_unlock(&res->spinlock); *ret_data = (void *)res; } dlm_put(dlm); if (master_request) { mlog(0, "need to tell master to reassert\n"); /* positive. negative would shoot down the node. */ ret |= DLM_ASSERT_RESPONSE_REASSERT; if (!have_lockres_ref) { mlog(ML_ERROR, "strange, got assert from %u, MASTER " "mle present here for %s:%.*s, but no lockres!\n", assert->node_idx, dlm->name, namelen, name); } } if (have_lockres_ref) { /* let the master know we have a reference to the lockres */ ret |= DLM_ASSERT_RESPONSE_MASTERY_REF; mlog(0, "%s:%.*s: got assert from %u, need a ref\n", dlm->name, namelen, name, assert->node_idx); } return ret; kill: /* kill the caller! 
*/ mlog(ML_ERROR, "Bad message received from another node. Dumping state " "and killing the other node now! This node is OK and can continue.\n"); __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); spin_lock(&dlm->master_lock); if (mle) __dlm_put_mle(mle); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); *ret_data = (void *)res; dlm_put(dlm); return -EINVAL; } void dlm_assert_master_post_handler(int status, void *data, void *ret_data) { struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; if (ret_data) { spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_SETREF_INPROG; spin_unlock(&res->spinlock); wake_up(&res->wq); dlm_lockres_put(res); } return; } int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, int ignore_higher, u8 request_from, u32 flags) { struct dlm_work_item *item; item = kzalloc(sizeof(*item), GFP_ATOMIC); if (!item) return -ENOMEM; /* queue up work for dlm_assert_master_worker */ dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); item->u.am.lockres = res; /* already have a ref */ /* can optionally ignore node numbers higher than this node */ item->u.am.ignore_higher = ignore_higher; item->u.am.request_from = request_from; item->u.am.flags = flags; if (ignore_higher) mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, res->lockname.name); spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); queue_work(dlm->dlm_worker, &dlm->dispatched_work); return 0; } static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) { struct dlm_ctxt *dlm = data; int ret = 0; struct dlm_lock_resource *res; unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; int ignore_higher; int bit; u8 request_from; u32 flags; dlm = item->dlm; res = item->u.am.lockres; ignore_higher = item->u.am.ignore_higher; request_from = item->u.am.request_from; flags = item->u.am.flags; spin_lock(&dlm->spinlock); bitmap_copy(nodemap, dlm->domain_map, O2NM_MAX_NODES); spin_unlock(&dlm->spinlock); clear_bit(dlm->node_num, nodemap); if (ignore_higher) { /* if is this just to clear up mles for nodes below * this node, do not send the message to the original * caller or any node number higher than this */ clear_bit(request_from, nodemap); bit = dlm->node_num; while (1) { bit = find_next_bit(nodemap, O2NM_MAX_NODES, bit+1); if (bit >= O2NM_MAX_NODES) break; clear_bit(bit, nodemap); } } /* * If we're migrating this lock to someone else, we are no * longer allowed to assert out own mastery. OTOH, we need to * prevent migration from starting while we're still asserting * our dominance. The reserved ast delays migration. */ spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_MIGRATING) { mlog(0, "Someone asked us to assert mastery, but we're " "in the middle of migration. Skipping assert, " "the new master will handle that.\n"); spin_unlock(&res->spinlock); goto put; } else __dlm_lockres_reserve_ast(res); spin_unlock(&res->spinlock); /* this call now finishes out the nodemap * even if one or more nodes die */ mlog(0, "worker about to master %.*s here, this=%u\n", res->lockname.len, res->lockname.name, dlm->node_num); ret = dlm_do_assert_master(dlm, res, nodemap, flags); if (ret < 0) { /* no need to restart, we are done */ if (!dlm_is_host_down(ret)) mlog_errno(ret); } /* Ok, we've asserted ourselves. Let's let migration start. 
*/ dlm_lockres_release_ast(dlm, res); put: dlm_lockres_drop_inflight_worker(dlm, res); dlm_lockres_put(res); mlog(0, "finished with dlm_assert_master_worker\n"); } /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. * We cannot wait for node recovery to complete to begin mastering this * lockres because this lockres is used to kick off recovery! ;-) * So, do a pre-check on all living nodes to see if any of those nodes * think that $RECOVERY is currently mastered by a dead node. If so, * we wait a short time to allow that node to get notified by its own * heartbeat stack, then check again. All $RECOVERY lock resources * mastered by dead nodes are purged when the heartbeat callback is * fired, so we can know for sure that it is safe to continue once * the node returns a live node or no node. */ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { struct dlm_node_iter iter; int nodenum; int ret = 0; u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; spin_lock(&dlm->spinlock); dlm_node_iter_init(dlm->domain_map, &iter); spin_unlock(&dlm->spinlock); while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { /* do not send to self */ if (nodenum == dlm->node_num) continue; ret = dlm_do_master_requery(dlm, res, nodenum, &master); if (ret < 0) { mlog_errno(ret); if (!dlm_is_host_down(ret)) BUG(); /* host is down, so answer for that node would be * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ ret = 0; } if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { /* check to see if this master is in the recovery map */ spin_lock(&dlm->spinlock); if (test_bit(master, dlm->recovery_map)) { mlog(ML_NOTICE, "%s: node %u has not seen " "node %u go down yet, and thinks the " "dead node is mastering the recovery " "lock. must wait.\n", dlm->name, nodenum, master); ret = -EAGAIN; } spin_unlock(&dlm->spinlock); mlog(0, "%s: reco lock master is %u\n", dlm->name, master); break; } } return ret; } /* * DLM_DEREF_LOCKRES_MSG */ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { struct dlm_deref_lockres deref; int ret = 0, r; const char *lockname; unsigned int namelen; lockname = res->lockname.name; namelen = res->lockname.len; BUG_ON(namelen > O2NM_MAX_NAME_LEN); memset(&deref, 0, sizeof(deref)); deref.node_idx = dlm->node_num; deref.namelen = namelen; memcpy(deref.name, lockname, namelen); ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, &deref, sizeof(deref), res->owner, &r); if (ret < 0) mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", dlm->name, namelen, lockname, ret, res->owner); else if (r < 0) { /* BAD. other node says I did not have a ref. 
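 *
 * A negative status means the owner refused or failed the deref
 * (the message above describes the classic case: the owner did not
 * think we held a reference at all), so the reference accounting is
 * already suspect.  We log and dump the lockres; only an explicit
 * -ENOMEM from the remote side is treated as fatal here.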
*/ mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", dlm->name, namelen, lockname, res->owner, r); dlm_print_one_lock_resource(res); if (r == -ENOMEM) BUG(); } else ret = r; return ret; } int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; struct dlm_lock_resource *res = NULL; char *name; unsigned int namelen; int ret = -EINVAL; u8 node; unsigned int hash; struct dlm_work_item *item; int cleared = 0; int dispatch = 0; if (!dlm_grab(dlm)) return 0; name = deref->name; namelen = deref->namelen; node = deref->node_idx; if (namelen > DLM_LOCKID_NAME_MAX) { mlog(ML_ERROR, "Invalid name length!"); goto done; } if (deref->node_idx >= O2NM_MAX_NODES) { mlog(ML_ERROR, "Invalid node number: %u\n", node); goto done; } hash = dlm_lockid_hash(name, namelen); spin_lock(&dlm->spinlock); res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); if (!res) { spin_unlock(&dlm->spinlock); mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", dlm->name, namelen, name); goto done; } spin_unlock(&dlm->spinlock); spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_SETREF_INPROG) dispatch = 1; else { BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); if (test_bit(node, res->refmap)) { dlm_lockres_clear_refmap_bit(dlm, res, node); cleared = 1; } } spin_unlock(&res->spinlock); if (!dispatch) { if (cleared) dlm_lockres_calc_usage(dlm, res); else { mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " "but it is already dropped!\n", dlm->name, res->lockname.len, res->lockname.name, node); dlm_print_one_lock_resource(res); } ret = DLM_DEREF_RESPONSE_DONE; goto done; } item = kzalloc(sizeof(*item), GFP_NOFS); if (!item) { ret = -ENOMEM; mlog_errno(ret); goto done; } dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); item->u.dl.deref_res = res; item->u.dl.deref_node = node; spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); queue_work(dlm->dlm_worker, &dlm->dispatched_work); return DLM_DEREF_RESPONSE_INPROG; done: if (res) dlm_lockres_put(res); dlm_put(dlm); return ret; } int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_deref_lockres_done *deref = (struct dlm_deref_lockres_done *)msg->buf; struct dlm_lock_resource *res = NULL; char *name; unsigned int namelen; int ret = -EINVAL; u8 node; unsigned int hash; if (!dlm_grab(dlm)) return 0; name = deref->name; namelen = deref->namelen; node = deref->node_idx; if (namelen > DLM_LOCKID_NAME_MAX) { mlog(ML_ERROR, "Invalid name length!"); goto done; } if (deref->node_idx >= O2NM_MAX_NODES) { mlog(ML_ERROR, "Invalid node number: %u\n", node); goto done; } hash = dlm_lockid_hash(name, namelen); spin_lock(&dlm->spinlock); res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); if (!res) { spin_unlock(&dlm->spinlock); mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", dlm->name, namelen, name); goto done; } spin_lock(&res->spinlock); if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) { spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done " "but it is already derefed!\n", dlm->name, res->lockname.len, res->lockname.name, node); ret = 0; goto done; } __dlm_do_purge_lockres(dlm, res); spin_unlock(&res->spinlock); wake_up(&res->wq); spin_unlock(&dlm->spinlock); ret = 0; done: if (res) dlm_lockres_put(res); dlm_put(dlm); return ret; } static void 
dlm_drop_lockres_ref_done(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 node) { struct dlm_deref_lockres_done deref; int ret = 0, r; const char *lockname; unsigned int namelen; lockname = res->lockname.name; namelen = res->lockname.len; BUG_ON(namelen > O2NM_MAX_NAME_LEN); memset(&deref, 0, sizeof(deref)); deref.node_idx = dlm->node_num; deref.namelen = namelen; memcpy(deref.name, lockname, namelen); ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key, &deref, sizeof(deref), node, &r); if (ret < 0) { mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE " " to node %u\n", dlm->name, namelen, lockname, ret, node); } else if (r < 0) { /* ignore the error */ mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", dlm->name, namelen, lockname, node, r); dlm_print_one_lock_resource(res); } } static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) { struct dlm_ctxt *dlm; struct dlm_lock_resource *res; u8 node; u8 cleared = 0; dlm = item->dlm; res = item->u.dl.deref_res; node = item->u.dl.deref_node; spin_lock(&res->spinlock); BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); if (test_bit(node, res->refmap)) { dlm_lockres_clear_refmap_bit(dlm, res, node); cleared = 1; } spin_unlock(&res->spinlock); dlm_drop_lockres_ref_done(dlm, res, node); if (cleared) { mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", dlm->name, res->lockname.len, res->lockname.name, node); dlm_lockres_calc_usage(dlm, res); } else { mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " "but it is already dropped!\n", dlm->name, res->lockname.len, res->lockname.name, node); dlm_print_one_lock_resource(res); } dlm_lockres_put(res); } /* * A migratable resource is one that is : * 1. locally mastered, and, * 2. zero local locks, and, * 3. one or more non-local locks, or, one or more references * Returns 1 if yes, 0 if not. 
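 *
 * Also, checked below but easy to miss from the summary above: the
 * caller must hold res->spinlock, and anything currently MIGRATING,
 * RECOVERING or RECOVERY_WAITING is skipped regardless of the state
 * of its lock lists.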
*/ static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { enum dlm_lockres_list idx; int nonlocal = 0, node_ref; struct list_head *queue; struct dlm_lock *lock; u64 cookie; assert_spin_locked(&res->spinlock); /* delay migration when the lockres is in MIGRATING state */ if (res->state & DLM_LOCK_RES_MIGRATING) return 0; /* delay migration when the lockres is in RECOVERING state */ if (res->state & (DLM_LOCK_RES_RECOVERING| DLM_LOCK_RES_RECOVERY_WAITING)) return 0; if (res->owner != dlm->node_num) return 0; for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { queue = dlm_list_idx_to_ptr(res, idx); list_for_each_entry(lock, queue, list) { if (lock->ml.node != dlm->node_num) { nonlocal++; continue; } cookie = be64_to_cpu(lock->ml.cookie); mlog(0, "%s: Not migratable res %.*s, lock %u:%llu on " "%s list\n", dlm->name, res->lockname.len, res->lockname.name, dlm_get_lock_cookie_node(cookie), dlm_get_lock_cookie_seq(cookie), dlm_list_in_text(idx)); return 0; } } if (!nonlocal) { node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES); if (node_ref >= O2NM_MAX_NODES) return 0; } mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len, res->lockname.name); return 1; } /* * DLM_MIGRATE_LOCKRES */ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 target) { struct dlm_master_list_entry *mle = NULL; struct dlm_master_list_entry *oldmle = NULL; struct dlm_migratable_lockres *mres = NULL; int ret = 0; const char *name; unsigned int namelen; int mle_added = 0; int wake = 0; if (!dlm_grab(dlm)) return -EINVAL; name = res->lockname.name; namelen = res->lockname.len; mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, target); /* preallocate up front. if this fails, abort */ ret = -ENOMEM; mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); if (!mres) { mlog_errno(ret); goto leave; } mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); if (!mle) { mlog_errno(ret); goto leave; } ret = 0; /* * clear any existing master requests and * add the migration mle to the list */ spin_lock(&dlm->spinlock); spin_lock(&dlm->master_lock); ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, namelen, target, dlm->node_num); /* get an extra reference on the mle. * otherwise the assert_master from the new * master will destroy this.
*/ if (ret != -EEXIST) dlm_get_mle_inuse(mle); spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); if (ret == -EEXIST) { mlog(0, "another process is already migrating it\n"); goto fail; } mle_added = 1; /* * set the MIGRATING flag and flush asts * if we fail after this we need to re-dirty the lockres */ if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { mlog(ML_ERROR, "tried to migrate %.*s to %u, but " "the target went down.\n", res->lockname.len, res->lockname.name, target); spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_MIGRATING; wake = 1; spin_unlock(&res->spinlock); ret = -EINVAL; } fail: if (ret != -EEXIST && oldmle) { /* master is known, detach if not already detached */ dlm_mle_detach_hb_events(dlm, oldmle); dlm_put_mle(oldmle); } if (ret < 0) { if (mle_added) { dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle(mle); dlm_put_mle_inuse(mle); } else if (mle) { kmem_cache_free(dlm_mle_cache, mle); mle = NULL; } goto leave; } /* * at this point, we have a migration target, an mle * in the master list, and the MIGRATING flag set on * the lockres */ /* now that remote nodes are spinning on the MIGRATING flag, * ensure that all assert_master work is flushed. */ flush_workqueue(dlm->dlm_worker); /* notify new node and send all lock state */ /* call send_one_lockres with migration flag. * this serves as notice to the target node that a * migration is starting. */ ret = dlm_send_one_lockres(dlm, res, mres, target, DLM_MRES_MIGRATION); if (ret < 0) { mlog(0, "migration to node %u failed with %d\n", target, ret); /* migration failed, detach and clean up mle */ dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle(mle); dlm_put_mle_inuse(mle); spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_MIGRATING; wake = 1; spin_unlock(&res->spinlock); if (dlm_is_host_down(ret)) dlm_wait_for_node_death(dlm, target, DLM_NODE_DEATH_WAIT_MAX); goto leave; } /* at this point, the target sends a message to all nodes, * (using dlm_do_migrate_request). this node is skipped since * we had to put an mle in the list to begin the process. this * node now waits for target to do an assert master. this node * will be the last one notified, ensuring that the migration * is complete everywhere. if the target dies while this is * going on, some nodes could potentially see the target as the * master, so it is important that my recovery finds the migration * mle and sets the master to UNKNOWN. 
*/ /* wait for new node to assert master */ while (1) { ret = wait_event_interruptible_timeout(mle->wq, (atomic_read(&mle->woken) == 1), msecs_to_jiffies(5000)); if (ret >= 0) { if (atomic_read(&mle->woken) == 1 || res->owner == target) break; mlog(0, "%s:%.*s: timed out during migration\n", dlm->name, res->lockname.len, res->lockname.name); /* avoid hang during shutdown when migrating lockres * to a node which also goes down */ if (dlm_is_node_dead(dlm, target)) { mlog(0, "%s:%.*s: expected migration " "target %u is no longer up, restarting\n", dlm->name, res->lockname.len, res->lockname.name, target); ret = -EINVAL; /* migration failed, detach and clean up mle */ dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle(mle); dlm_put_mle_inuse(mle); spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_MIGRATING; wake = 1; spin_unlock(&res->spinlock); goto leave; } } else mlog(0, "%s:%.*s: caught signal during migration\n", dlm->name, res->lockname.len, res->lockname.name); } /* all done, set the owner, clear the flag */ spin_lock(&res->spinlock); dlm_set_lockres_owner(dlm, res, target); res->state &= ~DLM_LOCK_RES_MIGRATING; dlm_remove_nonlocal_locks(dlm, res); spin_unlock(&res->spinlock); wake_up(&res->wq); /* master is known, detach if not already detached */ dlm_mle_detach_hb_events(dlm, mle); dlm_put_mle_inuse(mle); ret = 0; dlm_lockres_calc_usage(dlm, res); leave: /* re-dirty the lockres if we failed */ if (ret < 0) dlm_kick_thread(dlm, res); /* wake up waiters if the MIGRATING flag got set * but migration failed */ if (wake) wake_up(&res->wq); if (mres) free_page((unsigned long)mres); dlm_put(dlm); mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, name, target, ret); return ret; } /* * Should be called only after beginning the domain leave process. * There should not be any remaining locks on nonlocal lock resources, * and there should be no local locks left on locally mastered resources. * * Called with the dlm spinlock held, may drop it to do migration, but * will re-acquire before exit. * * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) __must_hold(&dlm->spinlock) { int ret; int lock_dropped = 0; u8 target = O2NM_MAX_NODES; assert_spin_locked(&dlm->spinlock); spin_lock(&res->spinlock); if (dlm_is_lockres_migratable(dlm, res)) target = dlm_pick_migration_target(dlm, res); spin_unlock(&res->spinlock); if (target == O2NM_MAX_NODES) goto leave; /* Wheee! Migrate lockres here! Will sleep so drop spinlock. 
*/ spin_unlock(&dlm->spinlock); lock_dropped = 1; ret = dlm_migrate_lockres(dlm, res, target); if (ret) mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n", dlm->name, res->lockname.len, res->lockname.name, target, ret); spin_lock(&dlm->spinlock); leave: return lock_dropped; } int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) { int ret; spin_lock(&dlm->ast_lock); spin_lock(&lock->spinlock); ret = (list_empty(&lock->bast_list) && !lock->bast_pending); spin_unlock(&lock->spinlock); spin_unlock(&dlm->ast_lock); return ret; } static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 mig_target) { int can_proceed; spin_lock(&res->spinlock); can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); spin_unlock(&res->spinlock); /* target has died, so make the caller break out of the * wait_event, but caller must recheck the domain_map */ spin_lock(&dlm->spinlock); if (!test_bit(mig_target, dlm->domain_map)) can_proceed = 1; spin_unlock(&dlm->spinlock); return can_proceed; } static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { int ret; spin_lock(&res->spinlock); ret = !!(res->state & DLM_LOCK_RES_DIRTY); spin_unlock(&res->spinlock); return ret; } static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 target) { int ret = 0; mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", res->lockname.len, res->lockname.name, dlm->node_num, target); /* need to set MIGRATING flag on lockres. this is done by * ensuring that all asts have been flushed for this lockres. */ spin_lock(&res->spinlock); BUG_ON(res->migration_pending); res->migration_pending = 1; /* strategy is to reserve an extra ast then release * it below, letting the release do all of the work */ __dlm_lockres_reserve_ast(res); spin_unlock(&res->spinlock); /* now flush all the pending asts */ dlm_kick_thread(dlm, res); /* before waiting on DIRTY, block processes which may * try to dirty the lockres before MIGRATING is set */ spin_lock(&res->spinlock); BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); res->state |= DLM_LOCK_RES_BLOCK_DIRTY; spin_unlock(&res->spinlock); /* now wait on any pending asts and the DIRTY state */ wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); dlm_lockres_release_ast(dlm, res); mlog(0, "about to wait on migration_wq, dirty=%s\n", res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); /* if the extra ref we just put was the final one, this * will pass thru immediately. otherwise, we need to wait * for the last ast to finish. */ again: ret = wait_event_interruptible_timeout(dlm->migration_wq, dlm_migration_can_proceed(dlm, res, target), msecs_to_jiffies(1000)); if (ret < 0) { mlog(0, "woken again: migrating? %s, dead? %s\n", res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", test_bit(target, dlm->domain_map) ? "no":"yes"); } else { mlog(0, "all is well: migrating? %s, dead? %s\n", res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", test_bit(target, dlm->domain_map) ? "no":"yes"); } if (!dlm_migration_can_proceed(dlm, res, target)) { mlog(0, "trying again...\n"); goto again; } ret = 0; /* did the target go down or die? */ spin_lock(&dlm->spinlock); if (!test_bit(target, dlm->domain_map)) { mlog(ML_ERROR, "aha. 
migration target %u just went down\n", target); ret = -EHOSTDOWN; } spin_unlock(&dlm->spinlock); /* * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for * another try; otherwise, we are sure the MIGRATING state is there, * drop the unneeded state which blocked threads trying to DIRTY */ spin_lock(&res->spinlock); BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; if (!ret) BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); else res->migration_pending = 0; spin_unlock(&res->spinlock); /* * at this point: * * o the DLM_LOCK_RES_MIGRATING flag is set if target not down * o there are no pending asts on this lockres * o all processes trying to reserve an ast on this * lockres must wait for the MIGRATING flag to clear */ return ret; } /* last step in the migration process. * original master calls this to free all of the dlm_lock * structures that used to be for other nodes. */ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { struct list_head *queue = &res->granted; int i, bit; struct dlm_lock *lock, *next; assert_spin_locked(&res->spinlock); BUG_ON(res->owner == dlm->node_num); for (i=0; i<3; i++) { list_for_each_entry_safe(lock, next, queue, list) { if (lock->ml.node != dlm->node_num) { mlog(0, "putting lock for node %u\n", lock->ml.node); /* be extra careful */ BUG_ON(!list_empty(&lock->ast_list)); BUG_ON(!list_empty(&lock->bast_list)); BUG_ON(lock->ast_pending); BUG_ON(lock->bast_pending); dlm_lockres_clear_refmap_bit(dlm, res, lock->ml.node); list_del_init(&lock->list); dlm_lock_put(lock); /* In a normal unlock, we would have added a * DLM_UNLOCK_FREE_LOCK action. Force it. */ dlm_lock_put(lock); } } queue++; } bit = 0; while (1) { bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); if (bit >= O2NM_MAX_NODES) break; /* do not clear the local node reference, if there is a * process holding this, let it drop the ref itself */ if (bit != dlm->node_num) { mlog(0, "%s:%.*s: node %u had a ref to this " "migrating lockres, clearing\n", dlm->name, res->lockname.len, res->lockname.name, bit); dlm_lockres_clear_refmap_bit(dlm, res, bit); } bit++; } } /* * Pick a node to migrate the lock resource to. This function selects a * potential target based first on the locks and then on refmap. It skips * nodes that are in the process of exiting the domain. 
*/ static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { enum dlm_lockres_list idx; struct list_head *queue; struct dlm_lock *lock; int noderef; u8 nodenum = O2NM_MAX_NODES; assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); /* Go through all the locks */ for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { queue = dlm_list_idx_to_ptr(res, idx); list_for_each_entry(lock, queue, list) { if (lock->ml.node == dlm->node_num) continue; if (test_bit(lock->ml.node, dlm->exit_domain_map)) continue; nodenum = lock->ml.node; goto bail; } } /* Go thru the refmap */ noderef = -1; while (1) { noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, noderef + 1); if (noderef >= O2NM_MAX_NODES) break; if (noderef == dlm->node_num) continue; if (test_bit(noderef, dlm->exit_domain_map)) continue; nodenum = noderef; goto bail; } bail: return nodenum; } /* this is called by the new master once all lockres * data has been received */ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 master, u8 new_master, struct dlm_node_iter *iter) { struct dlm_migrate_request migrate; int ret, skip, status = 0; int nodenum; memset(&migrate, 0, sizeof(migrate)); migrate.namelen = res->lockname.len; memcpy(migrate.name, res->lockname.name, migrate.namelen); migrate.new_master = new_master; migrate.master = master; ret = 0; /* send message to all nodes, except the master and myself */ while ((nodenum = dlm_node_iter_next(iter)) >= 0) { if (nodenum == master || nodenum == new_master) continue; /* We could race exit domain. If exited, skip. */ spin_lock(&dlm->spinlock); skip = (!test_bit(nodenum, dlm->domain_map)); spin_unlock(&dlm->spinlock); if (skip) { clear_bit(nodenum, iter->node_map); continue; } ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, &migrate, sizeof(migrate), nodenum, &status); if (ret < 0) { mlog(ML_ERROR, "%s: res %.*s, Error %d send " "MIGRATE_REQUEST to node %u\n", dlm->name, migrate.namelen, migrate.name, ret, nodenum); if (!dlm_is_host_down(ret)) { mlog(ML_ERROR, "unhandled error=%d!\n", ret); BUG(); } clear_bit(nodenum, iter->node_map); ret = 0; } else if (status < 0) { mlog(0, "migrate request (node %u) returned %d!\n", nodenum, status); ret = status; } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { /* during the migration request we short-circuited * the mastery of the lockres. make sure we have * a mastery ref for nodenum */ mlog(0, "%s:%.*s: need ref for node %u\n", dlm->name, res->lockname.len, res->lockname.name, nodenum); spin_lock(&res->spinlock); dlm_lockres_set_refmap_bit(dlm, res, nodenum); spin_unlock(&res->spinlock); } } if (ret < 0) mlog_errno(ret); mlog(0, "returning ret=%d\n", ret); return ret; } /* if there is an existing mle for this lockres, we now know who the master is. * (the one who sent us *this* message) we can clear it up right away. * since the process that put the mle on the list still has a reference to it, * we can unhash it now, set the master and wake the process. as a result, * we will have no mle in the list to start with. now we can add an mle for * the migration and this should be the only one found for those scanning the * list. 
*/ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_lock_resource *res = NULL; struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; const char *name; unsigned int namelen, hash; int ret = 0; if (!dlm_grab(dlm)) return 0; name = migrate->name; namelen = migrate->namelen; hash = dlm_lockid_hash(name, namelen); /* preallocate.. if this fails, abort */ mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); if (!mle) { ret = -ENOMEM; goto leave; } /* check for pre-existing lock */ spin_lock(&dlm->spinlock); res = __dlm_lookup_lockres(dlm, name, namelen, hash); if (res) { spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { /* if all is working ok, this can only mean that we got * a migrate request from a node that we now see as * dead. what can we do here? drop it to the floor? */ spin_unlock(&res->spinlock); mlog(ML_ERROR, "Got a migrate request, but the " "lockres is marked as recovering!"); kmem_cache_free(dlm_mle_cache, mle); ret = -EINVAL; /* need a better solution */ goto unlock; } res->state |= DLM_LOCK_RES_MIGRATING; spin_unlock(&res->spinlock); } spin_lock(&dlm->master_lock); /* ignore status. only nonzero status would BUG. */ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, namelen, migrate->new_master, migrate->master); if (ret < 0) kmem_cache_free(dlm_mle_cache, mle); spin_unlock(&dlm->master_lock); unlock: spin_unlock(&dlm->spinlock); if (oldmle) { /* master is known, detach if not already detached */ dlm_mle_detach_hb_events(dlm, oldmle); dlm_put_mle(oldmle); } if (res) dlm_lockres_put(res); leave: dlm_put(dlm); return ret; } /* must be holding dlm->spinlock and dlm->master_lock * when adding a migration mle, we can clear any other mles * in the master list because we know with certainty that * the master is "master". so we remove any old mle from * the list after setting it's master field, and then add * the new migration mle. this way we can hold with the rule * of having only one mle for a given lock name at all times. */ static int dlm_add_migration_mle(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_master_list_entry *mle, struct dlm_master_list_entry **oldmle, const char *name, unsigned int namelen, u8 new_master, u8 master) { int found; int ret = 0; *oldmle = NULL; assert_spin_locked(&dlm->spinlock); assert_spin_locked(&dlm->master_lock); /* caller is responsible for any ref taken here on oldmle */ found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); if (found) { struct dlm_master_list_entry *tmp = *oldmle; spin_lock(&tmp->spinlock); if (tmp->type == DLM_MLE_MIGRATION) { if (master == dlm->node_num) { /* ah another process raced me to it */ mlog(0, "tried to migrate %.*s, but some " "process beat me to it\n", namelen, name); spin_unlock(&tmp->spinlock); return -EEXIST; } else { /* bad. 2 NODES are trying to migrate! 
*/ mlog(ML_ERROR, "migration error mle: " "master=%u new_master=%u // request: " "master=%u new_master=%u // " "lockres=%.*s\n", tmp->master, tmp->new_master, master, new_master, namelen, name); BUG(); } } else { /* this is essentially what assert_master does */ tmp->master = master; atomic_set(&tmp->woken, 1); wake_up(&tmp->wq); /* remove it so that only one mle will be found */ __dlm_unlink_mle(dlm, tmp); __dlm_mle_detach_hb_events(dlm, tmp); if (tmp->type == DLM_MLE_MASTER) { ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; mlog(0, "%s:%.*s: master=%u, newmaster=%u, " "telling master to get ref " "for cleared out mle during " "migration\n", dlm->name, namelen, name, master, new_master); } } spin_unlock(&tmp->spinlock); } /* now add a migration mle to the tail of the list */ dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); mle->new_master = new_master; /* the new master will be sending an assert master for this. * at that point we will get the refmap reference */ mle->master = master; /* do this for consistency with other mle types */ set_bit(new_master, mle->maybe_map); __dlm_insert_mle(dlm, mle); return ret; } /* * Sets the owner of the lockres, associated to the mle, to UNKNOWN */ static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) { struct dlm_lock_resource *res; /* Find the lockres associated to the mle and set its owner to UNK */ res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, mle->mnamehash); if (res) { spin_unlock(&dlm->master_lock); /* move lockres onto recovery list */ spin_lock(&res->spinlock); dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); dlm_move_lockres_to_recovery_list(dlm, res); spin_unlock(&res->spinlock); dlm_lockres_put(res); /* about to get rid of mle, detach from heartbeat */ __dlm_mle_detach_hb_events(dlm, mle); /* dump the mle */ spin_lock(&dlm->master_lock); __dlm_put_mle(mle); spin_unlock(&dlm->master_lock); } return res; } static void dlm_clean_migration_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle) { __dlm_mle_detach_hb_events(dlm, mle); spin_lock(&mle->spinlock); __dlm_unlink_mle(dlm, mle); atomic_set(&mle->woken, 1); spin_unlock(&mle->spinlock); wake_up(&mle->wq); } static void dlm_clean_block_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle, u8 dead_node) { int bit; BUG_ON(mle->type != DLM_MLE_BLOCK); spin_lock(&mle->spinlock); bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES); if (bit != dead_node) { mlog(0, "mle found, but dead node %u would not have been " "master\n", dead_node); spin_unlock(&mle->spinlock); } else { /* Must drop the refcount by one since the assert_master will * never arrive. This may result in the mle being unlinked and * freed, but there may still be a process waiting in the * dlmlock path which is fine. 
*/ mlog(0, "node %u was expected master\n", dead_node); atomic_set(&mle->woken, 1); spin_unlock(&mle->spinlock); wake_up(&mle->wq); /* Do not need events any longer, so detach from heartbeat */ __dlm_mle_detach_hb_events(dlm, mle); __dlm_put_mle(mle); } } void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) { struct dlm_master_list_entry *mle; struct dlm_lock_resource *res; struct hlist_head *bucket; struct hlist_node *tmp; unsigned int i; mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); top: assert_spin_locked(&dlm->spinlock); /* clean the master list */ spin_lock(&dlm->master_lock); for (i = 0; i < DLM_HASH_BUCKETS; i++) { bucket = dlm_master_hash(dlm, i); hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { BUG_ON(mle->type != DLM_MLE_BLOCK && mle->type != DLM_MLE_MASTER && mle->type != DLM_MLE_MIGRATION); /* MASTER mles are initiated locally. The waiting * process will notice the node map change shortly. * Let that happen as normal. */ if (mle->type == DLM_MLE_MASTER) continue; /* BLOCK mles are initiated by other nodes. Need to * clean up if the dead node would have been the * master. */ if (mle->type == DLM_MLE_BLOCK) { dlm_clean_block_mle(dlm, mle, dead_node); continue; } /* Everything else is a MIGRATION mle */ /* The rule for MIGRATION mles is that the master * becomes UNKNOWN if *either* the original or the new * master dies. All UNKNOWN lockres' are sent to * whichever node becomes the recovery master. The new * master is responsible for determining if there is * still a master for this lockres, or if he needs to * take over mastery. Either way, this node should * expect another message to resolve this. */ if (mle->master != dead_node && mle->new_master != dead_node) continue; if (mle->new_master == dead_node && mle->inuse) { mlog(ML_NOTICE, "%s: target %u died during " "migration from %u, the MLE is " "still keep used, ignore it!\n", dlm->name, dead_node, mle->master); continue; } /* If we have reached this point, this mle needs to be * removed from the list and freed. */ dlm_clean_migration_mle(dlm, mle); mlog(0, "%s: node %u died during migration from " "%u to %u!\n", dlm->name, dead_node, mle->master, mle->new_master); /* If we find a lockres associated with the mle, we've * hit this rare case that messes up our lock ordering. * If so, we need to drop the master lock so that we can * take the lockres lock, meaning that we will have to * restart from the head of list. */ res = dlm_reset_mleres_owner(dlm, mle); if (res) /* restart */ goto top; /* This may be the last reference */ __dlm_put_mle(mle); } } spin_unlock(&dlm->master_lock); } int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 old_master) { struct dlm_node_iter iter; int ret = 0; spin_lock(&dlm->spinlock); dlm_node_iter_init(dlm->domain_map, &iter); clear_bit(old_master, iter.node_map); clear_bit(dlm->node_num, iter.node_map); spin_unlock(&dlm->spinlock); /* ownership of the lockres is changing. 
account for the * mastery reference here since old_master will briefly have * a reference after the migration completes */ spin_lock(&res->spinlock); dlm_lockres_set_refmap_bit(dlm, res, old_master); spin_unlock(&res->spinlock); mlog(0, "now time to do a migrate request to other nodes\n"); ret = dlm_do_migrate_request(dlm, res, old_master, dlm->node_num, &iter); if (ret < 0) { mlog_errno(ret); goto leave; } mlog(0, "doing assert master of %.*s to all except the original node\n", res->lockname.len, res->lockname.name); /* this call now finishes out the nodemap * even if one or more nodes die */ ret = dlm_do_assert_master(dlm, res, iter.node_map, DLM_ASSERT_MASTER_FINISH_MIGRATION); if (ret < 0) { /* no longer need to retry. all living nodes contacted. */ mlog_errno(ret); ret = 0; } bitmap_zero(iter.node_map, O2NM_MAX_NODES); set_bit(old_master, iter.node_map); mlog(0, "doing assert master of %.*s back to %u\n", res->lockname.len, res->lockname.name, old_master); ret = dlm_do_assert_master(dlm, res, iter.node_map, DLM_ASSERT_MASTER_FINISH_MIGRATION); if (ret < 0) { mlog(0, "assert master to original master failed " "with %d.\n", ret); /* the only nonzero status here would be because of * a dead original node. we're done. */ ret = 0; } /* all done, set the owner, clear the flag */ spin_lock(&res->spinlock); dlm_set_lockres_owner(dlm, res, dlm->node_num); res->state &= ~DLM_LOCK_RES_MIGRATING; spin_unlock(&res->spinlock); /* re-dirty it on the new master */ dlm_kick_thread(dlm, res); wake_up(&res->wq); leave: return ret; } /* * LOCKRES AST REFCOUNT * this is integral to migration */ /* for future intent to call an ast, reserve one ahead of time. * this should be called only after waiting on the lockres * with dlm_wait_on_lockres, and while still holding the * spinlock after the call. */ void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) { assert_spin_locked(&res->spinlock); if (res->state & DLM_LOCK_RES_MIGRATING) { __dlm_print_one_lock_resource(res); } BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); atomic_inc(&res->asts_reserved); } /* * used to drop the reserved ast, either because it went unused, * or because the ast/bast was actually called. * * also, if there is a pending migration on this lockres, * and this was the last pending ast on the lockres, * atomically set the MIGRATING flag before we drop the lock. * this is how we ensure that migration can proceed with no * asts in progress. note that it is ok if the state of the * queues is such that a lock should be granted in the future * or that a bast should be fired, because the new master will * shuffle the lists on this lockres as soon as it is migrated. */ void dlm_lockres_release_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) return; if (!res->migration_pending) { spin_unlock(&res->spinlock); return; } BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); res->migration_pending = 0; res->state |= DLM_LOCK_RES_MIGRATING; spin_unlock(&res->spinlock); wake_up(&res->wq); wake_up(&dlm->migration_wq); } void dlm_force_free_mles(struct dlm_ctxt *dlm) { int i; struct hlist_head *bucket; struct dlm_master_list_entry *mle; struct hlist_node *tmp; /* * We notified all other nodes that we are exiting the domain and * marked the dlm state to DLM_CTXT_LEAVING. 
If any mles are still * around we force free them and wake any processes that are waiting * on the mles */ spin_lock(&dlm->spinlock); spin_lock(&dlm->master_lock); BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES)); for (i = 0; i < DLM_HASH_BUCKETS; i++) { bucket = dlm_master_hash(dlm, i); hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { if (mle->type != DLM_MLE_BLOCK) { mlog(ML_ERROR, "bad mle: %p\n", mle); dlm_print_one_mle(mle); } atomic_set(&mle->woken, 1); wake_up(&mle->wq); __dlm_unlink_mle(dlm, mle); __dlm_mle_detach_hb_events(dlm, mle); __dlm_put_mle(mle); } } spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); }
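/*
 * A minimal usage sketch of the AST reservation pair above, assuming the
 * caller already holds res->spinlock as __dlm_lockres_reserve_ast() requires;
 * it mirrors the reserve/kick/release sequence used by
 * dlm_mark_lockres_migrating() earlier in this file:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	// ...queue and deliver the pending ast/bast work...
 *	dlm_lockres_release_ast(dlm, res);	// dropping the last reserved ast
 *						// may set DLM_LOCK_RES_MIGRATING
 *						// and wake the migration waiters
 */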
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_CONNTRACK_SCTP_H #define _NF_CONNTRACK_SCTP_H /* SCTP tracking. */ #include <uapi/linux/netfilter/nf_conntrack_sctp.h> struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; u8 init[IP_CT_DIR_MAX]; u8 last_dir; u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */
// SPDX-License-Identifier: GPL-2.0-only /* * Synopsys G210 Test Chip driver * * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) * * Authors: Joao Pinto <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include "ufshcd-pltfrm.h" #include "ufshcd-dwc.h" #include "tc-dwc-g210.h" /* * UFS DWC specific variant operations */ static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = { .name = "tc-dwc-g210-pltfm", .link_startup_notify = ufshcd_dwc_link_startup_notify, .phy_initialization = tc_dwc_g210_config_20_bit, }; static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = { .name = "tc-dwc-g210-pltfm", .link_startup_notify = ufshcd_dwc_link_startup_notify, .phy_initialization = tc_dwc_g210_config_40_bit, }; static const struct of_device_id tc_dwc_g210_pltfm_match[] = { { .compatible = "snps,g210-tc-6.00-20bit", .data = &tc_dwc_g210_20bit_pltfm_hba_vops, }, { .compatible = "snps,g210-tc-6.00-40bit", .data = &tc_dwc_g210_40bit_pltfm_hba_vops, }, { }, }; MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match); /** * tc_dwc_g210_pltfm_probe() * @pdev: pointer to platform device structure * */ static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev) { int err; const struct of_device_id *of_id; struct ufs_hba_variant_ops *vops; struct device *dev = &pdev->dev; of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node); vops = (struct ufs_hba_variant_ops *)of_id->data; /* Perform generic probe */ err = ufshcd_pltfrm_init(pdev, vops); if (err) dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); return err; } /** * tc_dwc_g210_pltfm_remove() * @pdev: pointer to platform device structure * */ static void tc_dwc_g210_pltfm_remove(struct platform_device *pdev) { ufshcd_pltfrm_remove(pdev); } static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) }; static struct platform_driver tc_dwc_g210_pltfm_driver = { .probe = tc_dwc_g210_pltfm_probe, .remove = tc_dwc_g210_pltfm_remove, .driver = { .name = "tc-dwc-g210-pltfm", .pm = &tc_dwc_g210_pltfm_pm_ops, .of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match), }, }; module_platform_driver(tc_dwc_g210_pltfm_driver); MODULE_ALIAS("platform:tc-dwc-g210-pltfm"); MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver"); MODULE_AUTHOR("Joao Pinto <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
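/*
 * A hypothetical device-tree fragment that would bind to this driver through
 * tc_dwc_g210_pltfm_match above; the compatible string is taken from the
 * match table, while the node name, reg and interrupts values are
 * illustrative placeholders only:
 *
 *	ufs@d0000000 {
 *		compatible = "snps,g210-tc-6.00-40bit";
 *		reg = <0xd0000000 0x10000>;
 *		interrupts = <24>;
 *	};
 */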
/* SPDX-License-Identifier: GPL-2.0 */ /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. */ #ifndef __IA_CSS_CSC_TYPES_H #define __IA_CSS_CSC_TYPES_H /* @file * CSS-API header file for Color Space Conversion parameters. */ /* Color Correction configuration. * * This structure is used for 3 cases. * ("YCgCo" is the output format of Demosaic.) * * 1. Color Space Conversion (YCgCo to YUV) for ISP1. * ISP block: CSC1 (Color Space Conversion) * struct ia_css_cc_config *cc_config * * 2. Color Correction Matrix (YCgCo to RGB) for ISP2. * ISP block: CCM2 (Color Correction Matrix) * struct ia_css_cc_config *yuv2rgb_cc_config * * 3. Color Space Conversion (RGB to YUV) for ISP2. * ISP block: CSC2 (Color Space Conversion) * struct ia_css_cc_config *rgb2yuv_cc_config * * default/ineffective: * 1. YCgCo -> YUV * 1 0.174 0.185 * 0 -0.66252 -0.66874 * 0 -0.83738 0.58131 * * fraction_bits = 12 * 4096 713 758 * 0 -2714 -2739 * 0 -3430 2381 * * 2. YCgCo -> RGB * 1 -1 1 * 1 1 0 * 1 -1 -1 * * fraction_bits = 12 * 4096 -4096 4096 * 4096 4096 0 * 4096 -4096 -4096 * * 3. RGB -> YUV * 0.299 0.587 0.114 * -0.16874 -0.33126 0.5 * 0.5 -0.41869 -0.08131 * * fraction_bits = 13 * 2449 4809 934 * -1382 -2714 4096 * 4096 -3430 -666 */ struct ia_css_cc_config { u32 fraction_bits;/** Fractional bits of matrix. u8.0, [0,13] */ s32 matrix[3 * 3]; /** Conversion matrix. s[13-fraction_bits].[fraction_bits], [-8192,8191] */ }; #endif /* __IA_CSS_CSC_TYPES_H */
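/*
 * Worked example of the fixed-point layout documented above, assuming
 * coefficients are rounded to the nearest integer: a coefficient c is stored
 * as (s32)(c * (1 << fraction_bits)).  With fraction_bits = 13 (RGB -> YUV):
 *
 *	0.299 * 8192 = 2449.4  -> 2449
 *	0.587 * 8192 = 4808.7  -> 4809
 *	0.114 * 8192 =  933.9  ->  934
 *
 * which matches the first row {2449, 4809, 934} of the default RGB -> YUV
 * matrix.  A hypothetical helper (not part of this header) could be:
 *
 *	#define IA_CSS_CC_FIXED(c, fb) \
 *		((s32)((c) * (1 << (fb)) + ((c) < 0 ? -0.5 : 0.5)))
 */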
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2016 - 2018 Intel Corporation. */ #ifndef DEF_RVTCQ_H #define DEF_RVTCQ_H #include <rdma/rdma_vt.h> #include <rdma/rdmavt_cq.h> int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct uverbs_attr_bundle *attrs); int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); int rvt_driver_cq_init(void); void rvt_cq_exit(void); #endif /* DEF_RVTCQ_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AMD MP2 common macros and structures * * Copyright (c) 2022, Advanced Micro Devices, Inc. * All Rights Reserved. * * Author: Basavaraj Natikar <[email protected]> */ #ifndef AMD_SFH_COMMON_H #define AMD_SFH_COMMON_H #include <linux/pci.h> #include "amd_sfh_hid.h" #define PCI_DEVICE_ID_AMD_MP2 0x15E4 #define PCI_DEVICE_ID_AMD_MP2_1_1 0x164A #define AMD_C2P_MSG(regno) (0x10500 + ((regno) * 4)) #define AMD_P2C_MSG(regno) (0x10680 + ((regno) * 4)) #define AMD_C2P_MSG_V1(regno) (0x10900 + ((regno) * 4)) #define AMD_P2C_MSG_V1(regno) (0x10500 + ((regno) * 4)) #define SENSOR_ENABLED 4 #define SENSOR_DISABLED 5 #define AMD_SFH_IDLE_LOOP 200 enum cmd_id { NO_OP, ENABLE_SENSOR, DISABLE_SENSOR, STOP_ALL_SENSORS = 8, }; struct amd_mp2_sensor_info { u8 sensor_idx; u32 period; dma_addr_t dma_address; }; struct sfh_dev_status { bool is_hpd_present; bool is_als_present; }; struct amd_mp2_dev { struct pci_dev *pdev; struct amdtp_cl_data *cl_data; void __iomem *mmio; void __iomem *vsbase; const struct amd_sfh1_1_ops *sfh1_1_ops; struct amd_mp2_ops *mp2_ops; struct amd_input_data in_data; /* mp2 active control status */ u32 mp2_acs; struct sfh_dev_status dev_en; struct work_struct work; u8 init_done; u8 rver; }; struct amd_mp2_ops { void (*start)(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info); void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx); void (*stop_all)(struct amd_mp2_dev *privdata); int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts); void (*clear_intr)(struct amd_mp2_dev *privdata); int (*init_intr)(struct amd_mp2_dev *privdata); int (*discovery_status)(struct amd_mp2_dev *privdata); void (*suspend)(struct amd_mp2_dev *mp2); void (*resume)(struct amd_mp2_dev *mp2); void (*remove)(void *privdata); int (*get_rep_desc)(int sensor_idx, u8 rep_desc[]); u32 (*get_desc_sz)(int sensor_idx, int descriptor_name); u8 (*get_feat_rep)(int sensor_idx, int report_id, u8 *feature_report); u8 (*get_in_rep)(u8 current_index, int sensor_idx, int report_id, struct amd_input_data *in_data); }; void amd_sfh_work(struct work_struct *work); void amd_sfh_work_buffer(struct work_struct *work); void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata); int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata); void amd_sfh_clear_intr(struct amd_mp2_dev *privdata); int amd_sfh_irq_init(struct amd_mp2_dev *privdata); static inline u64 amd_get_c2p_val(struct amd_mp2_dev *mp2, u32 idx) { return mp2->rver == 1 ? AMD_C2P_MSG_V1(idx) : AMD_C2P_MSG(idx); } static inline u64 amd_get_p2c_val(struct amd_mp2_dev *mp2, u32 idx) { return mp2->rver == 1 ? AMD_P2C_MSG_V1(idx) : AMD_P2C_MSG(idx); } #endif
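/*
 * Worked example of the mailbox register selection above, assuming mp2->rver
 * encodes the MP2 interface revision: for register index 2,
 *
 *	AMD_C2P_MSG(2)    = 0x10500 + 2 * 4 = 0x10508
 *	AMD_C2P_MSG_V1(2) = 0x10900 + 2 * 4 = 0x10908
 *
 * so amd_get_c2p_val(mp2, 2) evaluates to 0x10908 when mp2->rver == 1 and to
 * 0x10508 otherwise; amd_get_p2c_val() selects between AMD_P2C_MSG() and
 * AMD_P2C_MSG_V1() the same way.
 */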
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Device Tree file for MikroTik CRS328-4C-20S-4S+ Bit board * * Copyright (C) 2020 Sartura Ltd. * Author: Luka Kovacic <[email protected]> */ #include "armada-xp-crs328-4c-20s-4s.dtsi" / { model = "MikroTik CRS328-4C-20S-4S+ Bit"; }; &spi0 { status = "okay"; flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "jedec,spi-nor"; reg = <0>; /* Chip select 0 */ spi-max-frequency = <108000000>; m25p,fast-read; partition@u-boot { reg = <0x00000000 0x001f0000>; label = "u-boot"; }; partition@u-boot-env { reg = <0x001f0000 0x00010000>; label = "u-boot-env"; }; partition@ubi1 { reg = <0x00200000 0x03f00000>; label = "ubi1"; }; partition@ubi2 { reg = <0x04100000 0x03f00000>; label = "ubi2"; }; }; };
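/*
 * Partition map implied by the reg values above (offset/size pairs), assuming
 * the partitions are meant to cover the whole SPI flash:
 *
 *	u-boot      0x00000000 - 0x001effff  (1984 KiB)
 *	u-boot-env  0x001f0000 - 0x001fffff  (  64 KiB)
 *	ubi1        0x00200000 - 0x040fffff  (  63 MiB)
 *	ubi2        0x04100000 - 0x07ffffff  (  63 MiB)
 *
 * i.e. a contiguous 128 MiB layout with no gaps between partitions.
 */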
/* * arch/powerpc/kernel/mpic.c * * Driver for interrupt controllers following the OpenPIC standard, the * common implementation being IBM's MPIC. This driver also can deal * with various broken implementations of this HW. * * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. * Copyright 2010-2012 Freescale Semiconductor, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #undef DEBUG #undef DEBUG_IPI #undef DEBUG_IRQ #undef DEBUG_LOW #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <linux/ratelimit.h> #include <linux/pgtable.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/ptrace.h> #include <asm/signal.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/machdep.h> #include <asm/mpic.h> #include <asm/smp.h> #include "mpic.h" #ifdef DEBUG #define DBG(fmt...) printk(fmt) #else #define DBG(fmt...) #endif const struct bus_type mpic_subsys = { .name = "mpic", .dev_name = "mpic", }; EXPORT_SYMBOL_GPL(mpic_subsys); static struct mpic *mpics; static struct mpic *mpic_primary; static DEFINE_RAW_SPINLOCK(mpic_lock); #ifdef CONFIG_PPC32 /* XXX for now */ #ifdef CONFIG_IRQ_ALL_CPUS #define distribute_irqs (1) #else #define distribute_irqs (0) #endif #endif #ifdef CONFIG_MPIC_WEIRD static u32 mpic_infos[][MPIC_IDX_END] = { [0] = { /* Original OpenPIC compatible MPIC */ MPIC_GREG_BASE, MPIC_GREG_FEATURE_0, MPIC_GREG_GLOBAL_CONF_0, MPIC_GREG_VENDOR_ID, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_GREG_IPI_STRIDE, MPIC_GREG_SPURIOUS, MPIC_GREG_TIMER_FREQ, MPIC_TIMER_BASE, MPIC_TIMER_STRIDE, MPIC_TIMER_CURRENT_CNT, MPIC_TIMER_BASE_CNT, MPIC_TIMER_VECTOR_PRI, MPIC_TIMER_DESTINATION, MPIC_CPU_BASE, MPIC_CPU_STRIDE, MPIC_CPU_IPI_DISPATCH_0, MPIC_CPU_IPI_DISPATCH_STRIDE, MPIC_CPU_CURRENT_TASK_PRI, MPIC_CPU_WHOAMI, MPIC_CPU_INTACK, MPIC_CPU_EOI, MPIC_CPU_MCACK, MPIC_IRQ_BASE, MPIC_IRQ_STRIDE, MPIC_IRQ_VECTOR_PRI, MPIC_VECPRI_VECTOR_MASK, MPIC_VECPRI_POLARITY_POSITIVE, MPIC_VECPRI_POLARITY_NEGATIVE, MPIC_VECPRI_SENSE_LEVEL, MPIC_VECPRI_SENSE_EDGE, MPIC_VECPRI_POLARITY_MASK, MPIC_VECPRI_SENSE_MASK, MPIC_IRQ_DESTINATION }, [1] = { /* Tsi108/109 PIC */ TSI108_GREG_BASE, TSI108_GREG_FEATURE_0, TSI108_GREG_GLOBAL_CONF_0, TSI108_GREG_VENDOR_ID, TSI108_GREG_IPI_VECTOR_PRI_0, TSI108_GREG_IPI_STRIDE, TSI108_GREG_SPURIOUS, TSI108_GREG_TIMER_FREQ, TSI108_TIMER_BASE, TSI108_TIMER_STRIDE, TSI108_TIMER_CURRENT_CNT, TSI108_TIMER_BASE_CNT, TSI108_TIMER_VECTOR_PRI, TSI108_TIMER_DESTINATION, TSI108_CPU_BASE, TSI108_CPU_STRIDE, TSI108_CPU_IPI_DISPATCH_0, TSI108_CPU_IPI_DISPATCH_STRIDE, TSI108_CPU_CURRENT_TASK_PRI, TSI108_CPU_WHOAMI, TSI108_CPU_INTACK, TSI108_CPU_EOI, TSI108_CPU_MCACK, TSI108_IRQ_BASE, TSI108_IRQ_STRIDE, TSI108_IRQ_VECTOR_PRI, TSI108_VECPRI_VECTOR_MASK, TSI108_VECPRI_POLARITY_POSITIVE, TSI108_VECPRI_POLARITY_NEGATIVE, TSI108_VECPRI_SENSE_LEVEL, TSI108_VECPRI_SENSE_EDGE, TSI108_VECPRI_POLARITY_MASK, TSI108_VECPRI_SENSE_MASK, TSI108_IRQ_DESTINATION }, }; #define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name] #else /* CONFIG_MPIC_WEIRD */ #define MPIC_INFO(name) MPIC_##name #endif /* CONFIG_MPIC_WEIRD */ static inline unsigned int mpic_processor_id(struct mpic *mpic) { unsigned int cpu = 0; if (!(mpic->flags & MPIC_SECONDARY)) cpu = hard_smp_processor_id(); 
return cpu; } /* * Register accessor functions */ static inline u32 _mpic_read(enum mpic_reg_type type, struct mpic_reg_bank *rb, unsigned int reg) { switch(type) { #ifdef CONFIG_PPC_DCR case mpic_access_dcr: return dcr_read(rb->dhost, reg); #endif case mpic_access_mmio_be: return in_be32(rb->base + (reg >> 2)); case mpic_access_mmio_le: default: return in_le32(rb->base + (reg >> 2)); } } static inline void _mpic_write(enum mpic_reg_type type, struct mpic_reg_bank *rb, unsigned int reg, u32 value) { switch(type) { #ifdef CONFIG_PPC_DCR case mpic_access_dcr: dcr_write(rb->dhost, reg, value); break; #endif case mpic_access_mmio_be: out_be32(rb->base + (reg >> 2), value); break; case mpic_access_mmio_le: default: out_le32(rb->base + (reg >> 2), value); break; } } static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) { enum mpic_reg_type type = mpic->reg_type; unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) + (ipi * MPIC_INFO(GREG_IPI_STRIDE)); if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le) type = mpic_access_mmio_be; return _mpic_read(type, &mpic->gregs, offset); } static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) { unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) + (ipi * MPIC_INFO(GREG_IPI_STRIDE)); _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); } static inline unsigned int mpic_tm_offset(struct mpic *mpic, unsigned int tm) { return (tm >> 2) * MPIC_TIMER_GROUP_STRIDE + (tm & 3) * MPIC_INFO(TIMER_STRIDE); } static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm) { unsigned int offset = mpic_tm_offset(mpic, tm) + MPIC_INFO(TIMER_VECTOR_PRI); return _mpic_read(mpic->reg_type, &mpic->tmregs, offset); } static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value) { unsigned int offset = mpic_tm_offset(mpic, tm) + MPIC_INFO(TIMER_VECTOR_PRI); _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value); } static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) { unsigned int cpu = mpic_processor_id(mpic); return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg); } static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) { unsigned int cpu = mpic_processor_id(mpic); _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value); } static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg) { unsigned int isu = src_no >> mpic->isu_shift; unsigned int idx = src_no & mpic->isu_mask; unsigned int val; val = _mpic_read(mpic->reg_type, &mpic->isus[isu], reg + (idx * MPIC_INFO(IRQ_STRIDE))); #ifdef CONFIG_MPIC_BROKEN_REGREAD if (reg == 0) val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) | mpic->isu_reg0_shadow[src_no]; #endif return val; } static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, unsigned int reg, u32 value) { unsigned int isu = src_no >> mpic->isu_shift; unsigned int idx = src_no & mpic->isu_mask; _mpic_write(mpic->reg_type, &mpic->isus[isu], reg + (idx * MPIC_INFO(IRQ_STRIDE)), value); #ifdef CONFIG_MPIC_BROKEN_REGREAD if (reg == 0) mpic->isu_reg0_shadow[src_no] = value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY); #endif } #define mpic_read(b,r) _mpic_read(mpic->reg_type,&(b),(r)) #define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) #define mpic_tm_read(i) _mpic_tm_read(mpic,(i)) #define mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v)) #define 
mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) #define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v)) /* * Low level utility functions */ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) { rb->base = ioremap(phys_addr + offset, size); BUG_ON(rb->base == NULL); } #ifdef CONFIG_PPC_DCR static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) { phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0); rb->dhost = dcr_map(mpic->node, phys_addr + offset, size); BUG_ON(!DCR_MAP_OK(rb->dhost)); } static inline void mpic_map(struct mpic *mpic, phys_addr_t phys_addr, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) { if (mpic->flags & MPIC_USES_DCR) _mpic_map_dcr(mpic, rb, offset, size); else _mpic_map_mmio(mpic, phys_addr, rb, offset, size); } #else /* CONFIG_PPC_DCR */ #define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) #endif /* !CONFIG_PPC_DCR */ /* Check if we have one of those nice broken MPICs with a flipped endian on * reads from IPI registers */ static void __init mpic_test_broken_ipi(struct mpic *mpic) { u32 r; mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK); r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0)); if (r == swab32(MPIC_VECPRI_MASK)) { printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); mpic->flags |= MPIC_BROKEN_IPI; } } #ifdef CONFIG_MPIC_U3_HT_IRQS /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) * to force the edge setting on the MPIC and do the ack workaround. */ static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) { if (source >= 128 || !mpic->fixups) return 0; return mpic->fixups[source].base != NULL; } static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) { struct mpic_irq_fixup *fixup = &mpic->fixups[source]; if (fixup->applebase) { unsigned int soff = (fixup->index >> 3) & ~3; unsigned int mask = 1U << (fixup->index & 0x1f); writel(mask, fixup->applebase + soff); } else { raw_spin_lock(&mpic->fixup_lock); writeb(0x11 + 2 * fixup->index, fixup->base + 2); writel(fixup->data, fixup->base + 4); raw_spin_unlock(&mpic->fixup_lock); } } static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, bool level) { struct mpic_irq_fixup *fixup = &mpic->fixups[source]; unsigned long flags; u32 tmp; if (fixup->base == NULL) return; DBG("startup_ht_interrupt(0x%x) index: %d\n", source, fixup->index); raw_spin_lock_irqsave(&mpic->fixup_lock, flags); /* Enable and configure */ writeb(0x10 + 2 * fixup->index, fixup->base + 2); tmp = readl(fixup->base + 4); tmp &= ~(0x23U); if (level) tmp |= 0x22; writel(tmp, fixup->base + 4); raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); #ifdef CONFIG_PM /* use the lowest bit inverted to the actual HW, * set if this fixup was enabled, clear otherwise */ mpic->save_data[source].fixup_data = tmp | 1; #endif } static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source) { struct mpic_irq_fixup *fixup = &mpic->fixups[source]; unsigned long flags; u32 tmp; if (fixup->base == NULL) return; DBG("shutdown_ht_interrupt(0x%x)\n", source); /* Disable */ raw_spin_lock_irqsave(&mpic->fixup_lock, flags); writeb(0x10 + 2 * fixup->index, fixup->base + 2); tmp = readl(fixup->base + 4); tmp |= 1; writel(tmp, fixup->base + 4); 
raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); #ifdef CONFIG_PM /* use the lowest bit inverted to the actual HW, * set if this fixup was enabled, clear otherwise */ mpic->save_data[source].fixup_data = tmp & ~1; #endif } #ifdef CONFIG_PCI_MSI static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, unsigned int devfn) { u8 __iomem *base; u8 pos, flags; u64 addr = 0; for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); if (id == PCI_CAP_ID_HT) { id = readb(devbase + pos + 3); if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING) break; } } if (pos == 0) return; base = devbase + pos; flags = readb(base + HT_MSI_FLAGS); if (!(flags & HT_MSI_FLAGS_FIXED)) { addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK; addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32); } printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", PCI_SLOT(devfn), PCI_FUNC(devfn), flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr); if (!(flags & HT_MSI_FLAGS_ENABLE)) writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS); } #else static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, unsigned int devfn) { return; } #endif static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase, unsigned int devfn, u32 vdid) { int i, irq, n; u8 __iomem *base; u32 tmp; u8 pos; for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); if (id == PCI_CAP_ID_HT) { id = readb(devbase + pos + 3); if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ) break; } } if (pos == 0) return; base = devbase + pos; writeb(0x01, base + 2); n = (readl(base + 4) >> 16) & 0xff; printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x" " has %d irqs\n", devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1); for (i = 0; i <= n; i++) { writeb(0x10 + 2 * i, base + 2); tmp = readl(base + 4); irq = (tmp >> 16) & 0xff; DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp); /* mask it , will be unmasked later */ tmp |= 0x1; writel(tmp, base + 4); mpic->fixups[irq].index = i; mpic->fixups[irq].base = base; /* Apple HT PIC has a non-standard way of doing EOIs */ if ((vdid & 0xffff) == 0x106b) mpic->fixups[irq].applebase = devbase + 0x60; else mpic->fixups[irq].applebase = NULL; writeb(0x11 + 2 * i, base + 2); mpic->fixups[irq].data = readl(base + 4) | 0x80000000; } } static void __init mpic_scan_ht_pics(struct mpic *mpic) { unsigned int devfn; u8 __iomem *cfgspace; printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n"); /* Allocate fixups array */ mpic->fixups = kcalloc(128, sizeof(*mpic->fixups), GFP_KERNEL); BUG_ON(mpic->fixups == NULL); /* Init spinlock */ raw_spin_lock_init(&mpic->fixup_lock); /* Map U3 config space. We assume all IO-APICs are on the primary bus * so we only need to map 64kB. */ cfgspace = ioremap(0xf2000000, 0x10000); BUG_ON(cfgspace == NULL); /* Now we scan all slots. 
We do a very quick scan, we read the header * type, vendor ID and device ID only, that's plenty enough */ for (devfn = 0; devfn < 0x100; devfn++) { u8 __iomem *devbase = cfgspace + (devfn << 8); u8 hdr_type = readb(devbase + PCI_HEADER_TYPE); u32 l = readl(devbase + PCI_VENDOR_ID); u16 s; DBG("devfn %x, l: %x\n", devfn, l); /* If no device, skip */ if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000) goto next; /* Check if is supports capability lists */ s = readw(devbase + PCI_STATUS); if (!(s & PCI_STATUS_CAP_LIST)) goto next; mpic_scan_ht_pic(mpic, devbase, devfn, l); mpic_scan_ht_msi(mpic, devbase, devfn); next: /* next device, if function 0 */ if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0) devfn += 7; } } #else /* CONFIG_MPIC_U3_HT_IRQS */ static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) { return 0; } static void __init mpic_scan_ht_pics(struct mpic *mpic) { } #endif /* CONFIG_MPIC_U3_HT_IRQS */ /* Find an mpic associated with a given linux interrupt */ static struct mpic *mpic_find(unsigned int irq) { if (irq < NR_IRQS_LEGACY) return NULL; return irq_get_chip_data(irq); } /* Determine if the linux irq is an IPI */ static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src) { return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); } /* Determine if the linux irq is a timer */ static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src) { return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]); } /* Convert a cpu mask from logical to physical cpu numbers. */ static inline u32 mpic_physmask(u32 cpumask) { int i; u32 mask = 0; for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1) mask |= (cpumask & 1) << get_hard_smp_processor_id(i); return mask; } #ifdef CONFIG_SMP /* Get the mpic structure from the IPI number */ static inline struct mpic * mpic_from_ipi(struct irq_data *d) { return irq_data_get_irq_chip_data(d); } #endif /* Get the mpic structure from the irq number */ static inline struct mpic * mpic_from_irq(unsigned int irq) { return irq_get_chip_data(irq); } /* Get the mpic structure from the irq data */ static inline struct mpic * mpic_from_irq_data(struct irq_data *d) { return irq_data_get_irq_chip_data(d); } /* Send an EOI */ static inline void mpic_eoi(struct mpic *mpic) { mpic_cpu_write(MPIC_INFO(CPU_EOI), 0); } /* * Linux descriptor level callbacks */ void mpic_unmask_irq(struct irq_data *d) { unsigned int loops = 100000; struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = irqd_to_hwirq(d); DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src); mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & ~MPIC_VECPRI_MASK); /* make sure mask gets to controller before we return to user */ do { if (!loops--) { printk(KERN_ERR "%s: timeout on hwirq %u\n", __func__, src); break; } } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK); } void mpic_mask_irq(struct irq_data *d) { unsigned int loops = 100000; struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = irqd_to_hwirq(d); DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src); mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) | MPIC_VECPRI_MASK); /* make sure mask gets to controller before we return to user */ do { if (!loops--) { printk(KERN_ERR "%s: timeout on hwirq %u\n", __func__, src); break; } } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK)); } 
void mpic_end_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); #ifdef DEBUG_IRQ DBG("%s: end_irq: %d\n", mpic->name, d->irq); #endif /* We always EOI on end_irq() even for edge interrupts since that * should only lower the priority, the MPIC should have properly * latched another edge interrupt coming in anyway */ mpic_eoi(mpic); } #ifdef CONFIG_MPIC_U3_HT_IRQS static void mpic_unmask_ht_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = irqd_to_hwirq(d); mpic_unmask_irq(d); if (irqd_is_level_type(d)) mpic_ht_end_irq(mpic, src); } static unsigned int mpic_startup_ht_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = irqd_to_hwirq(d); mpic_unmask_irq(d); mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d)); return 0; } static void mpic_shutdown_ht_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = irqd_to_hwirq(d); mpic_shutdown_ht_interrupt(mpic, src); mpic_mask_irq(d); } static void mpic_end_ht_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = irqd_to_hwirq(d); #ifdef DEBUG_IRQ DBG("%s: end_irq: %d\n", mpic->name, d->irq); #endif /* We always EOI on end_irq() even for edge interrupts since that * should only lower the priority, the MPIC should have properly * latched another edge interrupt coming in anyway */ if (irqd_is_level_type(d)) mpic_ht_end_irq(mpic, src); mpic_eoi(mpic); } #endif /* !CONFIG_MPIC_U3_HT_IRQS */ #ifdef CONFIG_SMP static void mpic_unmask_ipi(struct irq_data *d) { struct mpic *mpic = mpic_from_ipi(d); unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0]; DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src); mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); } static void mpic_mask_ipi(struct irq_data *d) { /* NEVER disable an IPI... that's just plain wrong! */ } static void mpic_end_ipi(struct irq_data *d) { struct mpic *mpic = mpic_from_ipi(d); /* * IPIs are marked IRQ_PER_CPU. This has the side effect of * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from * applying to them. We EOI them late to avoid re-entering. 
*/ mpic_eoi(mpic); } #endif /* CONFIG_SMP */ static void mpic_unmask_tm(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src); mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK); mpic_tm_read(src); } static void mpic_mask_tm(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK); mpic_tm_read(src); } int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = irqd_to_hwirq(d); if (mpic->flags & MPIC_SINGLE_DEST_CPU) { int cpuid = irq_choose_cpu(cpumask); mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); } else { u32 mask = cpumask_bits(cpumask)[0]; mask &= cpumask_bits(cpu_online_mask)[0]; mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), mpic_physmask(mask)); } return IRQ_SET_MASK_OK; } static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) { /* Now convert sense value */ switch(type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: return MPIC_INFO(VECPRI_SENSE_EDGE) | MPIC_INFO(VECPRI_POLARITY_POSITIVE); case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_EDGE_BOTH: return MPIC_INFO(VECPRI_SENSE_EDGE) | MPIC_INFO(VECPRI_POLARITY_NEGATIVE); case IRQ_TYPE_LEVEL_HIGH: return MPIC_INFO(VECPRI_SENSE_LEVEL) | MPIC_INFO(VECPRI_POLARITY_POSITIVE); case IRQ_TYPE_LEVEL_LOW: default: return MPIC_INFO(VECPRI_SENSE_LEVEL) | MPIC_INFO(VECPRI_POLARITY_NEGATIVE); } } int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) { struct mpic *mpic = mpic_from_irq_data(d); unsigned int src = irqd_to_hwirq(d); unsigned int vecpri, vold, vnew; DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", mpic, d->irq, src, flow_type); if (src >= mpic->num_sources) return -EINVAL; vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); /* We don't support "none" type */ if (flow_type == IRQ_TYPE_NONE) flow_type = IRQ_TYPE_DEFAULT; /* Default: read HW settings */ if (flow_type == IRQ_TYPE_DEFAULT) { int vold_ps; vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | MPIC_INFO(VECPRI_SENSE_MASK)); if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | MPIC_INFO(VECPRI_POLARITY_POSITIVE))) flow_type = IRQ_TYPE_EDGE_RISING; else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) flow_type = IRQ_TYPE_EDGE_FALLING; else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | MPIC_INFO(VECPRI_POLARITY_POSITIVE))) flow_type = IRQ_TYPE_LEVEL_HIGH; else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) flow_type = IRQ_TYPE_LEVEL_LOW; else WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold); } /* Apply to irq desc */ irqd_set_trigger_type(d, flow_type); /* Apply to HW */ if (mpic_is_ht_interrupt(mpic, src)) vecpri = MPIC_VECPRI_POLARITY_POSITIVE | MPIC_VECPRI_SENSE_EDGE; else vecpri = mpic_type_to_vecpri(mpic, flow_type); vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) | MPIC_INFO(VECPRI_SENSE_MASK)); vnew |= vecpri; if (vold != vnew) mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); return IRQ_SET_MASK_OK_NOCOPY; } void mpic_set_vector(unsigned int virq, unsigned int vector) { struct mpic *mpic = mpic_from_irq(virq); unsigned int src = virq_to_hw(virq); unsigned int vecpri; DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", mpic, virq, src, vector); if (src >= 
mpic->num_sources) return; vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK); vecpri |= vector; mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); } static void mpic_set_destination(unsigned int virq, unsigned int cpuid) { struct mpic *mpic = mpic_from_irq(virq); unsigned int src = virq_to_hw(virq); DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n", mpic, virq, src, cpuid); if (src >= mpic->num_sources) return; mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); } static struct irq_chip mpic_irq_chip = { .irq_mask = mpic_mask_irq, .irq_unmask = mpic_unmask_irq, .irq_eoi = mpic_end_irq, .irq_set_type = mpic_set_irq_type, }; #ifdef CONFIG_SMP static const struct irq_chip mpic_ipi_chip = { .irq_mask = mpic_mask_ipi, .irq_unmask = mpic_unmask_ipi, .irq_eoi = mpic_end_ipi, }; #endif /* CONFIG_SMP */ static struct irq_chip mpic_tm_chip = { .irq_mask = mpic_mask_tm, .irq_unmask = mpic_unmask_tm, .irq_eoi = mpic_end_irq, }; #ifdef CONFIG_MPIC_U3_HT_IRQS static const struct irq_chip mpic_irq_ht_chip = { .irq_startup = mpic_startup_ht_irq, .irq_shutdown = mpic_shutdown_ht_irq, .irq_mask = mpic_mask_irq, .irq_unmask = mpic_unmask_ht_irq, .irq_eoi = mpic_end_ht_irq, .irq_set_type = mpic_set_irq_type, }; #endif /* CONFIG_MPIC_U3_HT_IRQS */ static int mpic_host_match(struct irq_domain *h, struct device_node *node, enum irq_domain_bus_token bus_token) { /* Exact match, unless mpic node is NULL */ struct device_node *of_node = irq_domain_get_of_node(h); return of_node == NULL || of_node == node; } static int mpic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct mpic *mpic = h->host_data; struct irq_chip *chip; DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw); if (hw == mpic->spurious_vec) return -EINVAL; if (mpic->protected && test_bit(hw, mpic->protected)) { pr_warn("mpic: Mapping of source 0x%x failed, source protected by firmware !\n", (unsigned int)hw); return -EPERM; } #ifdef CONFIG_SMP else if (hw >= mpic->ipi_vecs[0]) { WARN_ON(mpic->flags & MPIC_SECONDARY); DBG("mpic: mapping as IPI\n"); irq_set_chip_data(virq, mpic); irq_set_chip_and_handler(virq, &mpic->hc_ipi, handle_percpu_irq); return 0; } #endif /* CONFIG_SMP */ if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) { WARN_ON(mpic->flags & MPIC_SECONDARY); DBG("mpic: mapping as timer\n"); irq_set_chip_data(virq, mpic); irq_set_chip_and_handler(virq, &mpic->hc_tm, handle_fasteoi_irq); return 0; } if (mpic_map_error_int(mpic, virq, hw)) return 0; if (hw >= mpic->num_sources) { pr_warn("mpic: Mapping of source 0x%x failed, source out of range !\n", (unsigned int)hw); return -EINVAL; } mpic_msi_reserve_hwirq(mpic, hw); /* Default chip */ chip = &mpic->hc_irq; #ifdef CONFIG_MPIC_U3_HT_IRQS /* Check for HT interrupts, override vecpri */ if (mpic_is_ht_interrupt(mpic, hw)) chip = &mpic->hc_ht_irq; #endif /* CONFIG_MPIC_U3_HT_IRQS */ DBG("mpic: mapping to irq chip @%p\n", chip); irq_set_chip_data(virq, mpic); irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq); /* Set default irq type */ irq_set_irq_type(virq, IRQ_TYPE_DEFAULT); /* If the MPIC was reset, then all vectors have already been * initialized. Otherwise, a per source lazy initialization * is done here. 
*/ if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) { int cpu; preempt_disable(); cpu = mpic_processor_id(mpic); preempt_enable(); mpic_set_vector(virq, hw); mpic_set_destination(virq, cpu); mpic_irq_set_priority(virq, 8); } return 0; } static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { struct mpic *mpic = h->host_data; static unsigned char map_mpic_senses[4] = { IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW, IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_FALLING, }; *out_hwirq = intspec[0]; if (intsize >= 4 && (mpic->flags & MPIC_FSL)) { /* * Freescale MPIC with extended intspec: * First two cells are as usual. Third specifies * an "interrupt type". Fourth is type-specific data. * * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt */ switch (intspec[2]) { case 0: break; case 1: if (!(mpic->flags & MPIC_FSL_HAS_EIMR)) break; if (intspec[3] >= ARRAY_SIZE(mpic->err_int_vecs)) return -EINVAL; *out_hwirq = mpic->err_int_vecs[intspec[3]]; break; case 2: if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs)) return -EINVAL; *out_hwirq = mpic->ipi_vecs[intspec[0]]; break; case 3: if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs)) return -EINVAL; *out_hwirq = mpic->timer_vecs[intspec[0]]; break; default: pr_debug("%s: unknown irq type %u\n", __func__, intspec[2]); return -EINVAL; } *out_flags = map_mpic_senses[intspec[1] & 3]; } else if (intsize > 1) { u32 mask = 0x3; /* Apple invented a new race of encoding on machines with * an HT APIC. They encode, among others, the index within * the HT APIC. We don't care about it here since thankfully, * it appears that they have the APIC already properly * configured, and thus our current fixup code that reads the * APIC config works fine. However, we still need to mask out * bits in the specifier to make sure we only get bit 0 which * is the level/edge bit (the only sense bit exposed by Apple), * as their bit 1 means something else. 
*/ if (machine_is(powermac)) mask = 0x1; *out_flags = map_mpic_senses[intspec[1] & mask]; } else *out_flags = IRQ_TYPE_NONE; DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n", intsize, intspec[0], intspec[1], *out_hwirq, *out_flags); return 0; } /* IRQ handler for a secondary MPIC cascaded from another IRQ controller */ static void mpic_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct mpic *mpic = irq_desc_get_handler_data(desc); unsigned int virq; BUG_ON(!(mpic->flags & MPIC_SECONDARY)); virq = mpic_get_one_irq(mpic); if (virq) generic_handle_irq(virq); chip->irq_eoi(&desc->irq_data); } static const struct irq_domain_ops mpic_host_ops = { .match = mpic_host_match, .map = mpic_host_map, .xlate = mpic_host_xlate, }; static u32 fsl_mpic_get_version(struct mpic *mpic) { u32 brr1; if (!(mpic->flags & MPIC_FSL)) return 0; brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs, MPIC_FSL_BRR1); return brr1 & MPIC_FSL_BRR1_VER; } /* * Exported functions */ u32 fsl_mpic_primary_get_version(void) { struct mpic *mpic = mpic_primary; if (mpic) return fsl_mpic_get_version(mpic); return 0; } struct mpic * __init mpic_alloc(struct device_node *node, phys_addr_t phys_addr, unsigned int flags, unsigned int isu_size, unsigned int irq_count, const char *name) { int i, psize, intvec_top; struct mpic *mpic; u32 greg_feature; const char *vers; const u32 *psrc; u32 last_irq; u32 fsl_version = 0; /* Default MPIC search parameters */ static const struct of_device_id __initconst mpic_device_id[] = { { .type = "open-pic", }, { .compatible = "open-pic", }, {}, }; /* * If we were not passed a device-tree node, then perform the default * search for standardized a standardized OpenPIC. */ if (node) { node = of_node_get(node); } else { node = of_find_matching_node(NULL, mpic_device_id); if (!node) return NULL; } /* Pick the physical address from the device tree if unspecified */ if (!phys_addr) { /* Check if it is DCR-based */ if (of_property_read_bool(node, "dcr-reg")) { flags |= MPIC_USES_DCR; } else { struct resource r; if (of_address_to_resource(node, 0, &r)) goto err_of_node_put; phys_addr = r.start; } } /* Read extra device-tree properties into the flags variable */ if (of_property_read_bool(node, "big-endian")) flags |= MPIC_BIG_ENDIAN; if (of_property_read_bool(node, "pic-no-reset")) flags |= MPIC_NO_RESET; if (of_property_read_bool(node, "single-cpu-affinity")) flags |= MPIC_SINGLE_DEST_CPU; if (of_device_is_compatible(node, "fsl,mpic")) { flags |= MPIC_FSL | MPIC_LARGE_VECTORS; mpic_irq_chip.flags |= IRQCHIP_SKIP_SET_WAKE; mpic_tm_chip.flags |= IRQCHIP_SKIP_SET_WAKE; } mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL); if (mpic == NULL) goto err_of_node_put; mpic->name = name; mpic->node = node; mpic->paddr = phys_addr; mpic->flags = flags; mpic->hc_irq = mpic_irq_chip; mpic->hc_irq.name = name; if (!(mpic->flags & MPIC_SECONDARY)) mpic->hc_irq.irq_set_affinity = mpic_set_affinity; #ifdef CONFIG_MPIC_U3_HT_IRQS mpic->hc_ht_irq = mpic_irq_ht_chip; mpic->hc_ht_irq.name = name; if (!(mpic->flags & MPIC_SECONDARY)) mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity; #endif /* CONFIG_MPIC_U3_HT_IRQS */ #ifdef CONFIG_SMP mpic->hc_ipi = mpic_ipi_chip; mpic->hc_ipi.name = name; #endif /* CONFIG_SMP */ mpic->hc_tm = mpic_tm_chip; mpic->hc_tm.name = name; mpic->num_sources = 0; /* so far */ if (mpic->flags & MPIC_LARGE_VECTORS) intvec_top = 2047; else intvec_top = 255; mpic->timer_vecs[0] = intvec_top - 12; mpic->timer_vecs[1] = intvec_top - 11; 
mpic->timer_vecs[2] = intvec_top - 10; mpic->timer_vecs[3] = intvec_top - 9; mpic->timer_vecs[4] = intvec_top - 8; mpic->timer_vecs[5] = intvec_top - 7; mpic->timer_vecs[6] = intvec_top - 6; mpic->timer_vecs[7] = intvec_top - 5; mpic->ipi_vecs[0] = intvec_top - 4; mpic->ipi_vecs[1] = intvec_top - 3; mpic->ipi_vecs[2] = intvec_top - 2; mpic->ipi_vecs[3] = intvec_top - 1; mpic->spurious_vec = intvec_top; /* Look for protected sources */ psrc = of_get_property(mpic->node, "protected-sources", &psize); if (psrc) { /* Allocate a bitmap with one bit per interrupt */ mpic->protected = bitmap_zalloc(intvec_top + 1, GFP_KERNEL); BUG_ON(mpic->protected == NULL); for (i = 0; i < psize/sizeof(u32); i++) { if (psrc[i] > intvec_top) continue; __set_bit(psrc[i], mpic->protected); } } #ifdef CONFIG_MPIC_WEIRD mpic->hw_set = mpic_infos[MPIC_GET_REGSET(mpic->flags)]; #endif /* default register type */ if (mpic->flags & MPIC_BIG_ENDIAN) mpic->reg_type = mpic_access_mmio_be; else mpic->reg_type = mpic_access_mmio_le; /* * An MPIC with a "dcr-reg" property must be accessed that way, but * only if the kernel includes DCR support. */ #ifdef CONFIG_PPC_DCR if (mpic->flags & MPIC_USES_DCR) mpic->reg_type = mpic_access_dcr; #else BUG_ON(mpic->flags & MPIC_USES_DCR); #endif /* Map the global registers */ mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); if (mpic->flags & MPIC_FSL) { int ret; /* * Yes, Freescale really did put global registers in the * magic per-cpu area -- and they don't even show up in the * non-magic per-cpu copies that this driver normally uses. */ mpic_map(mpic, mpic->paddr, &mpic->thiscpuregs, MPIC_CPU_THISBASE, 0x1000); fsl_version = fsl_mpic_get_version(mpic); /* Error interrupt mask register (EIMR) is required for * handling individual device error interrupts. EIMR * was added in MPIC version 4.1. * * Over here we reserve vector number space for error * interrupt vectors. This space is stolen from the * global vector number space, as in case of ipis * and timer interrupts. * * Available vector space = intvec_top - 13, where 13 * is the number of vectors which have been consumed by * ipis, timer interrupts and spurious. */ if (fsl_version >= 0x401) { ret = mpic_setup_error_int(mpic, intvec_top - 13); if (ret) return NULL; } } /* * EPR is only available starting with v4.0. To support * platforms that don't know the MPIC version at compile-time, * such as qemu-e500, turn off coreint if this MPIC doesn't * support it. Note that we never enable it if it wasn't * requested in the first place. * * This is done outside the MPIC_FSL check, so that we * also disable coreint if the MPIC node doesn't have * an "fsl,mpic" compatible at all. This will be the case * with device trees generated by older versions of QEMU. * fsl_version will be zero if MPIC_FSL is not set. */ if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT)) ppc_md.get_irq = mpic_get_irq; /* Reset */ /* When using a device-node, reset requests are only honored if the MPIC * is allowed to reset. 
*/ if (!(mpic->flags & MPIC_NO_RESET)) { printk(KERN_DEBUG "mpic: Resetting\n"); mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_RESET); while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) & MPIC_GREG_GCONF_RESET) mb(); } /* CoreInt */ if (mpic->flags & MPIC_ENABLE_COREINT) mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_COREINT); if (mpic->flags & MPIC_ENABLE_MCK) mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_MCK); /* * The MPIC driver will crash if there are more cores than we * can initialize, so we may as well catch that problem here. */ BUG_ON(num_possible_cpus() > MPIC_MAX_CPUS); /* Map the per-CPU registers */ for_each_possible_cpu(i) { unsigned int cpu = get_hard_smp_processor_id(i); mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu], MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE), 0x1000); } /* * Read feature register. For non-ISU MPICs, num sources as well. On * ISU MPICs, sources are counted as ISUs are added */ greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0)); /* * By default, the last source number comes from the MPIC, but the * device-tree and board support code can override it on buggy hw. * If we get passed an isu_size (multi-isu MPIC) then we use that * as a default instead of the value read from the HW. */ last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK) >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT; if (isu_size) last_irq = isu_size * MPIC_MAX_ISU - 1; of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq); if (irq_count) last_irq = irq_count - 1; /* Initialize main ISU if none provided */ if (!isu_size) { isu_size = last_irq + 1; mpic->num_sources = isu_size; mpic_map(mpic, mpic->paddr, &mpic->isus[0], MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * isu_size); } mpic->isu_size = isu_size; mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); mpic->isu_mask = (1 << mpic->isu_shift) - 1; mpic->irqhost = irq_domain_add_linear(mpic->node, intvec_top, &mpic_host_ops, mpic); /* * FIXME: The code leaks the MPIC object and mappings here; this * is very unlikely to fail but it ought to be fixed anyways. 
*/ if (mpic->irqhost == NULL) return NULL; /* Display version */ switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { case 1: vers = "1.0"; break; case 2: vers = "1.2"; break; case 3: vers = "1.3"; break; default: vers = "<unknown>"; break; } printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx," " max %d CPUs\n", name, vers, (unsigned long long)mpic->paddr, num_possible_cpus()); printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size, mpic->isu_shift, mpic->isu_mask); mpic->next = mpics; mpics = mpic; if (!(mpic->flags & MPIC_SECONDARY)) { mpic_primary = mpic; irq_set_default_host(mpic->irqhost); } return mpic; err_of_node_put: of_node_put(node); return NULL; } void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, phys_addr_t paddr) { unsigned int isu_first = isu_num * mpic->isu_size; BUG_ON(isu_num >= MPIC_MAX_ISU); mpic_map(mpic, paddr, &mpic->isus[isu_num], 0, MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); if ((isu_first + mpic->isu_size) > mpic->num_sources) mpic->num_sources = isu_first + mpic->isu_size; } void __init mpic_init(struct mpic *mpic) { int i, cpu; int num_timers = 4; BUG_ON(mpic->num_sources == 0); printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); /* Set current processor priority to max */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); if (mpic->flags & MPIC_FSL) { u32 version = fsl_mpic_get_version(mpic); /* * Timer group B is present at the latest in MPIC 3.1 (e.g. * mpc8536). It is not present in MPIC 2.0 (e.g. mpc8544). * I don't know about the status of intermediate versions (or * whether they even exist). */ if (version >= 0x0301) num_timers = 8; } /* Initialize timers to our reserved vectors and mask them for now */ for (i = 0; i < num_timers; i++) { unsigned int offset = mpic_tm_offset(mpic, i); mpic_write(mpic->tmregs, offset + MPIC_INFO(TIMER_DESTINATION), 1 << hard_smp_processor_id()); mpic_write(mpic->tmregs, offset + MPIC_INFO(TIMER_VECTOR_PRI), MPIC_VECPRI_MASK | (9 << MPIC_VECPRI_PRIORITY_SHIFT) | (mpic->timer_vecs[0] + i)); } /* Initialize IPIs to our reserved vectors and mark them disabled for now */ mpic_test_broken_ipi(mpic); for (i = 0; i < 4; i++) { mpic_ipi_write(i, MPIC_VECPRI_MASK | (10 << MPIC_VECPRI_PRIORITY_SHIFT) | (mpic->ipi_vecs[0] + i)); } /* Do the HT PIC fixups on U3 broken mpic */ DBG("MPIC flags: %x\n", mpic->flags); if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) { mpic_scan_ht_pics(mpic); mpic_u3msi_init(mpic); } mpic_pasemi_msi_init(mpic); cpu = mpic_processor_id(mpic); if (!(mpic->flags & MPIC_NO_RESET)) { for (i = 0; i < mpic->num_sources; i++) { /* start with vector = source number, and masked */ u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); /* check if protected */ if (mpic->protected && test_bit(i, mpic->protected)) continue; /* init hw */ mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu); } } /* Init spurious vector */ mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec); /* Disable 8259 passthrough, if supported */ if (!(mpic->flags & MPIC_NO_PTHROU_DIS)) mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_8259_PTHROU_DIS); if (mpic->flags & MPIC_NO_BIAS) mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_NO_BIAS); /* Set current processor priority to 0 */ 
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0); #ifdef CONFIG_PM /* allocate memory to save mpic state */ mpic->save_data = kmalloc_array(mpic->num_sources, sizeof(*mpic->save_data), GFP_KERNEL); BUG_ON(mpic->save_data == NULL); #endif /* Check if this MPIC is chained from a parent interrupt controller */ if (mpic->flags & MPIC_SECONDARY) { int virq = irq_of_parse_and_map(mpic->node, 0); if (virq) { printk(KERN_INFO "%pOF: hooking up to IRQ %d\n", mpic->node, virq); irq_set_handler_data(virq, mpic); irq_set_chained_handler(virq, &mpic_cascade); } } /* FSL mpic error interrupt initialization */ if (mpic->flags & MPIC_FSL_HAS_EIMR) mpic_err_int_init(mpic, MPIC_FSL_ERR_INT); } void mpic_irq_set_priority(unsigned int irq, unsigned int pri) { struct mpic *mpic = mpic_find(irq); unsigned int src = virq_to_hw(irq); unsigned long flags; u32 reg; if (!mpic) return; raw_spin_lock_irqsave(&mpic_lock, flags); if (mpic_is_ipi(mpic, src)) { reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) & ~MPIC_VECPRI_PRIORITY_MASK; mpic_ipi_write(src - mpic->ipi_vecs[0], reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); } else if (mpic_is_tm(mpic, src)) { reg = mpic_tm_read(src - mpic->timer_vecs[0]) & ~MPIC_VECPRI_PRIORITY_MASK; mpic_tm_write(src - mpic->timer_vecs[0], reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); } else { reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & ~MPIC_VECPRI_PRIORITY_MASK; mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); } raw_spin_unlock_irqrestore(&mpic_lock, flags); } void mpic_setup_this_cpu(void) { #ifdef CONFIG_SMP struct mpic *mpic = mpic_primary; unsigned long flags; u32 msk = 1 << hard_smp_processor_id(); unsigned int i; BUG_ON(mpic == NULL); DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); raw_spin_lock_irqsave(&mpic_lock, flags); /* let the mpic know we want intrs. default affinity is 0xffffffff * until changed via /proc. That's how it's done on x86. If we want * it differently, then we should make sure we also change the default * values of irq_desc[].affinity in irq.c. */ if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) { for (i = 0; i < mpic->num_sources ; i++) mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk); } /* Set current processor priority to 0 */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0); raw_spin_unlock_irqrestore(&mpic_lock, flags); #endif /* CONFIG_SMP */ } int mpic_cpu_get_priority(void) { struct mpic *mpic = mpic_primary; return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI)); } void mpic_cpu_set_priority(int prio) { struct mpic *mpic = mpic_primary; prio &= MPIC_CPU_TASKPRI_MASK; mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio); } void mpic_teardown_this_cpu(int secondary) { struct mpic *mpic = mpic_primary; unsigned long flags; u32 msk = 1 << hard_smp_processor_id(); unsigned int i; BUG_ON(mpic == NULL); DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); raw_spin_lock_irqsave(&mpic_lock, flags); /* let the mpic know we don't want intrs. */ for (i = 0; i < mpic->num_sources ; i++) mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk); /* Set current processor priority to max */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); /* We need to EOI the IPI since not all platforms reset the MPIC * on boot and new interrupts wouldn't get delivered otherwise. 
*/ mpic_eoi(mpic); raw_spin_unlock_irqrestore(&mpic_lock, flags); } static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) { u32 src; src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK); #ifdef DEBUG_LOW DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src); #endif if (unlikely(src == mpic->spurious_vec)) { if (mpic->flags & MPIC_SPV_EOI) mpic_eoi(mpic); return 0; } if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n", mpic->name, (int)src); mpic_eoi(mpic); return 0; } return irq_linear_revmap(mpic->irqhost, src); } unsigned int mpic_get_one_irq(struct mpic *mpic) { return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK)); } unsigned int mpic_get_irq(void) { struct mpic *mpic = mpic_primary; BUG_ON(mpic == NULL); return mpic_get_one_irq(mpic); } unsigned int mpic_get_coreint_irq(void) { #ifdef CONFIG_BOOKE struct mpic *mpic = mpic_primary; u32 src; BUG_ON(mpic == NULL); src = mfspr(SPRN_EPR); if (unlikely(src == mpic->spurious_vec)) { if (mpic->flags & MPIC_SPV_EOI) mpic_eoi(mpic); return 0; } if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n", mpic->name, (int)src); return 0; } return irq_linear_revmap(mpic->irqhost, src); #else return 0; #endif } unsigned int mpic_get_mcirq(void) { struct mpic *mpic = mpic_primary; BUG_ON(mpic == NULL); return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK)); } #ifdef CONFIG_SMP void __init mpic_request_ipis(void) { struct mpic *mpic = mpic_primary; int i; BUG_ON(mpic == NULL); printk(KERN_INFO "mpic: requesting IPIs...\n"); for (i = 0; i < 4; i++) { unsigned int vipi = irq_create_mapping(mpic->irqhost, mpic->ipi_vecs[0] + i); if (!vipi) { printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]); continue; } smp_request_message_ipi(vipi, i); } } void smp_mpic_message_pass(int cpu, int msg) { struct mpic *mpic = mpic_primary; u32 physmask; BUG_ON(mpic == NULL); /* make sure we're sending something that translates to an IPI */ if ((unsigned int)msg > 3) { printk("SMP %d: smp_message_pass: unknown msg %d\n", smp_processor_id(), msg); return; } #ifdef DEBUG_IPI DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg); #endif physmask = 1 << get_hard_smp_processor_id(cpu); mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask); } void __init smp_mpic_probe(void) { int nr_cpus; DBG("smp_mpic_probe()...\n"); nr_cpus = num_possible_cpus(); DBG("nr_cpus: %d\n", nr_cpus); if (nr_cpus > 1) mpic_request_ipis(); } void smp_mpic_setup_cpu(int cpu) { mpic_setup_this_cpu(); } void mpic_reset_core(int cpu) { struct mpic *mpic = mpic_primary; u32 pir; int cpuid = get_hard_smp_processor_id(cpu); int i; /* Set target bit for core reset */ pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); pir |= (1 << cpuid); mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir); mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); /* Restore target bit after reset complete */ pir &= ~(1 << cpuid); mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir); mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); /* Perform 15 EOI on each reset core to clear pending interrupts. 
* This is required for FSL CoreNet based devices */ if (mpic->flags & MPIC_FSL) { for (i = 0; i < 15; i++) { _mpic_write(mpic->reg_type, &mpic->cpuregs[cpuid], MPIC_CPU_EOI, 0); } } } #endif /* CONFIG_SMP */ #ifdef CONFIG_PM static void mpic_suspend_one(struct mpic *mpic) { int i; for (i = 0; i < mpic->num_sources; i++) { mpic->save_data[i].vecprio = mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI)); mpic->save_data[i].dest = mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); } } static int mpic_suspend(void) { struct mpic *mpic = mpics; while (mpic) { mpic_suspend_one(mpic); mpic = mpic->next; } return 0; } static void mpic_resume_one(struct mpic *mpic) { int i; for (i = 0; i < mpic->num_sources; i++) { mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), mpic->save_data[i].vecprio); mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), mpic->save_data[i].dest); #ifdef CONFIG_MPIC_U3_HT_IRQS if (mpic->fixups) { struct mpic_irq_fixup *fixup = &mpic->fixups[i]; if (fixup->base) { /* we use the lowest bit in an inverted meaning */ if ((mpic->save_data[i].fixup_data & 1) == 0) continue; /* Enable and configure */ writeb(0x10 + 2 * fixup->index, fixup->base + 2); writel(mpic->save_data[i].fixup_data & ~1, fixup->base + 4); } } #endif } /* end for loop */ } static void mpic_resume(void) { struct mpic *mpic = mpics; while (mpic) { mpic_resume_one(mpic); mpic = mpic->next; } } static struct syscore_ops mpic_syscore_ops = { .resume = mpic_resume, .suspend = mpic_suspend, }; static int mpic_init_sys(void) { int rc; register_syscore_ops(&mpic_syscore_ops); rc = subsys_system_register(&mpic_subsys, NULL); if (rc) { unregister_syscore_ops(&mpic_syscore_ops); pr_err("mpic: Failed to register subsystem!\n"); return rc; } return 0; } device_initcall(mpic_init_sys); #endif
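/*
 * Illustrative sketch only, not part of this driver: a minimal example of
 * the calling sequence board setup code typically uses with the interfaces
 * above. The function name and the " OpenPIC " label are placeholders, and
 * the flags are left at zero so the device tree properties fill them in.
 */
#if 0
static void __init example_init_IRQ(void)
{
	struct mpic *mpic;

	/* NULL node and address: mpic_alloc() searches for an "open-pic" node */
	mpic = mpic_alloc(NULL, 0, 0, 0, 0, " OpenPIC ");
	BUG_ON(mpic == NULL);

	/* Program timers, IPIs and per-source vectors, then unmask at the CPU */
	mpic_init(mpic);
}
#endif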
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #include <linux/delay.h> #include "amdgpu.h" #include "lsdma_v6_0.h" #include "amdgpu_lsdma.h" #include "lsdma/lsdma_6_0_0_offset.h" #include "lsdma/lsdma_6_0_0_sh_mask.h" static int lsdma_v6_0_wait_pio_status(struct amdgpu_device *adev) { return amdgpu_lsdma_wait_for(adev, SOC15_REG_OFFSET(LSDMA, 0, regLSDMA_PIO_STATUS), LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK, LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK); } static int lsdma_v6_0_copy_mem(struct amdgpu_device *adev, uint64_t src_addr, uint64_t dst_addr, uint64_t size) { int ret; uint32_t tmp; WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_LO, lower_32_bits(src_addr)); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_HI, upper_32_bits(src_addr)); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr)); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr)); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0); tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 0); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp); ret = lsdma_v6_0_wait_pio_status(adev); if (ret) dev_err(adev->dev, "LSDMA PIO failed to copy memory!\n"); return ret; } static int lsdma_v6_0_fill_mem(struct amdgpu_device *adev, uint64_t dst_addr, uint32_t data, uint64_t size) { int ret; uint32_t tmp; WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONSTFILL_DATA, data); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr)); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr)); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0); tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, 
OVERLAP_DISABLE, 0); tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 1); WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp); ret = lsdma_v6_0_wait_pio_status(adev); if (ret) dev_err(adev->dev, "LSDMA PIO failed to fill memory!\n"); return ret; } static void lsdma_v6_0_update_memory_power_gating(struct amdgpu_device *adev, bool enable) { uint32_t tmp; tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL); tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, 0); WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp); tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, enable); WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp); } const struct amdgpu_lsdma_funcs lsdma_v6_0_funcs = { .copy_mem = lsdma_v6_0_copy_mem, .fill_mem = lsdma_v6_0_fill_mem, .update_memory_power_gating = lsdma_v6_0_update_memory_power_gating };
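/*
 * Illustrative sketch only: callers are expected to dispatch through the
 * function table exported above rather than the static helpers. The function
 * name and the address/size arguments below are placeholders.
 */
#if 0
static int example_lsdma_clear(struct amdgpu_device *adev,
			       uint64_t gpu_addr, uint64_t size)
{
	/* Constant-fill the region with zeroes via the LSDMA PIO path */
	return lsdma_v6_0_funcs.fill_mem(adev, gpu_addr, 0x0, size);
}
#endif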
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Copyright (C) 2020 Arm Ltd. * * DT nodes common between Orange Pi Zero 2 and Orange Pi Zero 3. * Excludes PMIC nodes and properties, since they are different between the two. */ #include "sun50i-h616.dtsi" #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/leds/common.h> / { aliases { ethernet0 = &emac0; serial0 = &uart0; }; chosen { stdout-path = "serial0:115200n8"; }; leds { compatible = "gpio-leds"; led-0 { function = LED_FUNCTION_POWER; color = <LED_COLOR_ID_RED>; gpios = <&pio 2 12 GPIO_ACTIVE_HIGH>; /* PC12 */ default-state = "on"; }; led-1 { function = LED_FUNCTION_STATUS; color = <LED_COLOR_ID_GREEN>; gpios = <&pio 2 13 GPIO_ACTIVE_HIGH>; /* PC13 */ }; }; reg_vcc5v: vcc5v { /* board wide 5V supply directly from the USB-C socket */ compatible = "regulator-fixed"; regulator-name = "vcc-5v"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-always-on; }; reg_usb1_vbus: regulator-usb1-vbus { compatible = "regulator-fixed"; regulator-name = "usb1-vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; vin-supply = <&reg_vcc5v>; enable-active-high; gpio = <&pio 2 16 GPIO_ACTIVE_HIGH>; /* PC16 */ }; }; &codec { allwinner,audio-routing = "Line Out", "LINEOUT"; status = "okay"; }; &ehci1 { status = "okay"; }; /* USB 2 & 3 are on headers only. */ &emac0 { pinctrl-names = "default"; pinctrl-0 = <&ext_rgmii_pins>; phy-handle = <&ext_rgmii_phy>; status = "okay"; }; &mdio0 { ext_rgmii_phy: ethernet-phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; reg = <1>; }; }; &mmc0 { cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */ disable-wp; bus-width = <4>; status = "okay"; }; &ohci1 { status = "okay"; }; &spi0 { status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&spi0_pins>, <&spi0_cs0_pin>; flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <40000000>; }; }; &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_ph_pins>; status = "okay"; }; &usbotg { /* * PHY0 pins are connected to a USB-C socket, but a role switch * is not implemented: both CC pins are pulled to GND. * The VBUS pins power the device, so a fixed peripheral mode * is the best choice. * The board can be powered via GPIOs, in this case port0 *can* * act as a host (with a cable/adapter ignoring CC), as VBUS is * then provided by the GPIOs. Any user of this setup would * need to adjust the DT accordingly: dr_mode set to "host", * enabling OHCI0 and EHCI0. */ dr_mode = "peripheral"; status = "okay"; }; &usbphy { usb1_vbus-supply = <&reg_usb1_vbus>; status = "okay"; };
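/*
 * Illustrative sketch only, not part of this .dtsi: a board powered through
 * the GPIO header could switch port0 to host mode as described in the
 * &usbotg comment above. In a board-level .dts this would look roughly like:
 *
 *	&usbotg {
 *		dr_mode = "host";
 *	};
 *
 *	&ehci0 {
 *		status = "okay";
 *	};
 *
 *	&ohci0 {
 *		status = "okay";
 *	};
 */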
// SPDX-License-Identifier: GPL-2.0 /* * ACPI Time and Alarm (TAD) Device Driver * * Copyright (C) 2018 Intel Corporation * Author: Rafael J. Wysocki <[email protected]> * * This driver is based on Section 9.18 of the ACPI 6.2 specification revision. * * It only supports the system wakeup capabilities of the TAD. * * Provided are sysfs attributes, available under the TAD platform device, * allowing user space to manage the AC and DC wakeup timers of the TAD: * set and read their values, set and check their expire timer wake policies, * check and clear their status and check the capabilities of the TAD reported * by AML. The DC timer attributes are only present if the TAD supports a * separate DC alarm timer. * * The wakeup events handling and power management of the TAD is expected to * be taken care of by the ACPI PM domain attached to its platform device. */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/suspend.h> MODULE_DESCRIPTION("ACPI Time and Alarm (TAD) Device Driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Rafael J. Wysocki"); /* ACPI TAD capability flags (ACPI 6.2, Section 9.18.2) */ #define ACPI_TAD_AC_WAKE BIT(0) #define ACPI_TAD_DC_WAKE BIT(1) #define ACPI_TAD_RT BIT(2) #define ACPI_TAD_RT_IN_MS BIT(3) #define ACPI_TAD_S4_S5__GWS BIT(4) #define ACPI_TAD_AC_S4_WAKE BIT(5) #define ACPI_TAD_AC_S5_WAKE BIT(6) #define ACPI_TAD_DC_S4_WAKE BIT(7) #define ACPI_TAD_DC_S5_WAKE BIT(8) /* ACPI TAD alarm timer selection */ #define ACPI_TAD_AC_TIMER (u32)0 #define ACPI_TAD_DC_TIMER (u32)1 /* Special value for disabled timer or expired timer wake policy. */ #define ACPI_TAD_WAKE_DISABLED (~(u32)0) struct acpi_tad_driver_data { u32 capabilities; }; struct acpi_tad_rt { u16 year; /* 1900 - 9999 */ u8 month; /* 1 - 12 */ u8 day; /* 1 - 31 */ u8 hour; /* 0 - 23 */ u8 minute; /* 0 - 59 */ u8 second; /* 0 - 59 */ u8 valid; /* 0 (failed) or 1 (success) for reads, 0 for writes */ u16 msec; /* 1 - 1000 */ s16 tz; /* -1440 to 1440 or 2047 (unspecified) */ u8 daylight; u8 padding[3]; /* must be 0 */ } __packed; static int acpi_tad_set_real_time(struct device *dev, struct acpi_tad_rt *rt) { acpi_handle handle = ACPI_HANDLE(dev); union acpi_object args[] = { { .type = ACPI_TYPE_BUFFER, }, }; struct acpi_object_list arg_list = { .pointer = args, .count = ARRAY_SIZE(args), }; unsigned long long retval; acpi_status status; if (rt->year < 1900 || rt->year > 9999 || rt->month < 1 || rt->month > 12 || rt->hour > 23 || rt->minute > 59 || rt->second > 59 || rt->tz < -1440 || (rt->tz > 1440 && rt->tz != 2047) || rt->daylight > 3) return -ERANGE; args[0].buffer.pointer = (u8 *)rt; args[0].buffer.length = sizeof(*rt); pm_runtime_get_sync(dev); status = acpi_evaluate_integer(handle, "_SRT", &arg_list, &retval); pm_runtime_put_sync(dev); if (ACPI_FAILURE(status) || retval) return -EIO; return 0; } static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt) { acpi_handle handle = ACPI_HANDLE(dev); struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER }; union acpi_object *out_obj; struct acpi_tad_rt *data; acpi_status status; int ret = -EIO; pm_runtime_get_sync(dev); status = acpi_evaluate_object(handle, "_GRT", NULL, &output); pm_runtime_put_sync(dev); if (ACPI_FAILURE(status)) goto out_free; out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_BUFFER) goto out_free; if (out_obj->buffer.length != sizeof(*rt)) goto out_free; data = (struct acpi_tad_rt *)(out_obj->buffer.pointer); if 
(!data->valid) goto out_free; memcpy(rt, data, sizeof(*rt)); ret = 0; out_free: ACPI_FREE(output.pointer); return ret; } static char *acpi_tad_rt_next_field(char *s, int *val) { char *p; p = strchr(s, ':'); if (!p) return NULL; *p = '\0'; if (kstrtoint(s, 10, val)) return NULL; return p + 1; } static ssize_t time_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_tad_rt rt; char *str, *s; int val, ret = -ENODATA; str = kmemdup_nul(buf, count, GFP_KERNEL); if (!str) return -ENOMEM; s = acpi_tad_rt_next_field(str, &val); if (!s) goto out_free; rt.year = val; s = acpi_tad_rt_next_field(s, &val); if (!s) goto out_free; rt.month = val; s = acpi_tad_rt_next_field(s, &val); if (!s) goto out_free; rt.day = val; s = acpi_tad_rt_next_field(s, &val); if (!s) goto out_free; rt.hour = val; s = acpi_tad_rt_next_field(s, &val); if (!s) goto out_free; rt.minute = val; s = acpi_tad_rt_next_field(s, &val); if (!s) goto out_free; rt.second = val; s = acpi_tad_rt_next_field(s, &val); if (!s) goto out_free; rt.tz = val; if (kstrtoint(s, 10, &val)) goto out_free; rt.daylight = val; rt.valid = 0; rt.msec = 0; memset(rt.padding, 0, 3); ret = acpi_tad_set_real_time(dev, &rt); out_free: kfree(str); return ret ? ret : count; } static ssize_t time_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_tad_rt rt; int ret; ret = acpi_tad_get_real_time(dev, &rt); if (ret) return ret; return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n", rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second, rt.tz, rt.daylight); } static DEVICE_ATTR_RW(time); static struct attribute *acpi_tad_time_attrs[] = { &dev_attr_time.attr, NULL, }; static const struct attribute_group acpi_tad_time_attr_group = { .attrs = acpi_tad_time_attrs, }; static int acpi_tad_wake_set(struct device *dev, char *method, u32 timer_id, u32 value) { acpi_handle handle = ACPI_HANDLE(dev); union acpi_object args[] = { { .type = ACPI_TYPE_INTEGER, }, { .type = ACPI_TYPE_INTEGER, }, }; struct acpi_object_list arg_list = { .pointer = args, .count = ARRAY_SIZE(args), }; unsigned long long retval; acpi_status status; args[0].integer.value = timer_id; args[1].integer.value = value; pm_runtime_get_sync(dev); status = acpi_evaluate_integer(handle, method, &arg_list, &retval); pm_runtime_put_sync(dev); if (ACPI_FAILURE(status) || retval) return -EIO; return 0; } static int acpi_tad_wake_write(struct device *dev, const char *buf, char *method, u32 timer_id, const char *specval) { u32 value; if (sysfs_streq(buf, specval)) { value = ACPI_TAD_WAKE_DISABLED; } else { int ret = kstrtou32(buf, 0, &value); if (ret) return ret; if (value == ACPI_TAD_WAKE_DISABLED) return -EINVAL; } return acpi_tad_wake_set(dev, method, timer_id, value); } static ssize_t acpi_tad_wake_read(struct device *dev, char *buf, char *method, u32 timer_id, const char *specval) { acpi_handle handle = ACPI_HANDLE(dev); union acpi_object args[] = { { .type = ACPI_TYPE_INTEGER, }, }; struct acpi_object_list arg_list = { .pointer = args, .count = ARRAY_SIZE(args), }; unsigned long long retval; acpi_status status; args[0].integer.value = timer_id; pm_runtime_get_sync(dev); status = acpi_evaluate_integer(handle, method, &arg_list, &retval); pm_runtime_put_sync(dev); if (ACPI_FAILURE(status)) return -EIO; if ((u32)retval == ACPI_TAD_WAKE_DISABLED) return sprintf(buf, "%s\n", specval); return sprintf(buf, "%u\n", (u32)retval); } static const char *alarm_specval = "disabled"; static int acpi_tad_alarm_write(struct device *dev, const char *buf, 
u32 timer_id) { return acpi_tad_wake_write(dev, buf, "_STV", timer_id, alarm_specval); } static ssize_t acpi_tad_alarm_read(struct device *dev, char *buf, u32 timer_id) { return acpi_tad_wake_read(dev, buf, "_TIV", timer_id, alarm_specval); } static const char *policy_specval = "never"; static int acpi_tad_policy_write(struct device *dev, const char *buf, u32 timer_id) { return acpi_tad_wake_write(dev, buf, "_STP", timer_id, policy_specval); } static ssize_t acpi_tad_policy_read(struct device *dev, char *buf, u32 timer_id) { return acpi_tad_wake_read(dev, buf, "_TIP", timer_id, policy_specval); } static int acpi_tad_clear_status(struct device *dev, u32 timer_id) { acpi_handle handle = ACPI_HANDLE(dev); union acpi_object args[] = { { .type = ACPI_TYPE_INTEGER, }, }; struct acpi_object_list arg_list = { .pointer = args, .count = ARRAY_SIZE(args), }; unsigned long long retval; acpi_status status; args[0].integer.value = timer_id; pm_runtime_get_sync(dev); status = acpi_evaluate_integer(handle, "_CWS", &arg_list, &retval); pm_runtime_put_sync(dev); if (ACPI_FAILURE(status) || retval) return -EIO; return 0; } static int acpi_tad_status_write(struct device *dev, const char *buf, u32 timer_id) { int ret, value; ret = kstrtoint(buf, 0, &value); if (ret) return ret; if (value) return -EINVAL; return acpi_tad_clear_status(dev, timer_id); } static ssize_t acpi_tad_status_read(struct device *dev, char *buf, u32 timer_id) { acpi_handle handle = ACPI_HANDLE(dev); union acpi_object args[] = { { .type = ACPI_TYPE_INTEGER, }, }; struct acpi_object_list arg_list = { .pointer = args, .count = ARRAY_SIZE(args), }; unsigned long long retval; acpi_status status; args[0].integer.value = timer_id; pm_runtime_get_sync(dev); status = acpi_evaluate_integer(handle, "_GWS", &arg_list, &retval); pm_runtime_put_sync(dev); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "0x%02X\n", (u32)retval); } static ssize_t caps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_tad_driver_data *dd = dev_get_drvdata(dev); return sprintf(buf, "0x%02X\n", dd->capabilities); } static DEVICE_ATTR_RO(caps); static ssize_t ac_alarm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = acpi_tad_alarm_write(dev, buf, ACPI_TAD_AC_TIMER); return ret ? ret : count; } static ssize_t ac_alarm_show(struct device *dev, struct device_attribute *attr, char *buf) { return acpi_tad_alarm_read(dev, buf, ACPI_TAD_AC_TIMER); } static DEVICE_ATTR_RW(ac_alarm); static ssize_t ac_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = acpi_tad_policy_write(dev, buf, ACPI_TAD_AC_TIMER); return ret ? ret : count; } static ssize_t ac_policy_show(struct device *dev, struct device_attribute *attr, char *buf) { return acpi_tad_policy_read(dev, buf, ACPI_TAD_AC_TIMER); } static DEVICE_ATTR_RW(ac_policy); static ssize_t ac_status_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = acpi_tad_status_write(dev, buf, ACPI_TAD_AC_TIMER); return ret ? 
ret : count; } static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr, char *buf) { return acpi_tad_status_read(dev, buf, ACPI_TAD_AC_TIMER); } static DEVICE_ATTR_RW(ac_status); static struct attribute *acpi_tad_attrs[] = { &dev_attr_caps.attr, &dev_attr_ac_alarm.attr, &dev_attr_ac_policy.attr, &dev_attr_ac_status.attr, NULL, }; static const struct attribute_group acpi_tad_attr_group = { .attrs = acpi_tad_attrs, }; static ssize_t dc_alarm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = acpi_tad_alarm_write(dev, buf, ACPI_TAD_DC_TIMER); return ret ? ret : count; } static ssize_t dc_alarm_show(struct device *dev, struct device_attribute *attr, char *buf) { return acpi_tad_alarm_read(dev, buf, ACPI_TAD_DC_TIMER); } static DEVICE_ATTR_RW(dc_alarm); static ssize_t dc_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = acpi_tad_policy_write(dev, buf, ACPI_TAD_DC_TIMER); return ret ? ret : count; } static ssize_t dc_policy_show(struct device *dev, struct device_attribute *attr, char *buf) { return acpi_tad_policy_read(dev, buf, ACPI_TAD_DC_TIMER); } static DEVICE_ATTR_RW(dc_policy); static ssize_t dc_status_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = acpi_tad_status_write(dev, buf, ACPI_TAD_DC_TIMER); return ret ? ret : count; } static ssize_t dc_status_show(struct device *dev, struct device_attribute *attr, char *buf) { return acpi_tad_status_read(dev, buf, ACPI_TAD_DC_TIMER); } static DEVICE_ATTR_RW(dc_status); static struct attribute *acpi_tad_dc_attrs[] = { &dev_attr_dc_alarm.attr, &dev_attr_dc_policy.attr, &dev_attr_dc_status.attr, NULL, }; static const struct attribute_group acpi_tad_dc_attr_group = { .attrs = acpi_tad_dc_attrs, }; static int acpi_tad_disable_timer(struct device *dev, u32 timer_id) { return acpi_tad_wake_set(dev, "_STV", timer_id, ACPI_TAD_WAKE_DISABLED); } static void acpi_tad_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; acpi_handle handle = ACPI_HANDLE(dev); struct acpi_tad_driver_data *dd = dev_get_drvdata(dev); device_init_wakeup(dev, false); pm_runtime_get_sync(dev); if (dd->capabilities & ACPI_TAD_DC_WAKE) sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group); sysfs_remove_group(&dev->kobj, &acpi_tad_attr_group); acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER); acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER); if (dd->capabilities & ACPI_TAD_DC_WAKE) { acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER); acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER); } pm_runtime_put_sync(dev); pm_runtime_disable(dev); acpi_remove_cmos_rtc_space_handler(handle); } static int acpi_tad_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; acpi_handle handle = ACPI_HANDLE(dev); struct acpi_tad_driver_data *dd; acpi_status status; unsigned long long caps; int ret; ret = acpi_install_cmos_rtc_space_handler(handle); if (ret < 0) { dev_info(dev, "Unable to install space handler\n"); return -ENODEV; } /* * Initialization failure messages are mostly about firmware issues, so * print them at the "info" level. 
*/ status = acpi_evaluate_integer(handle, "_GCP", NULL, &caps); if (ACPI_FAILURE(status)) { dev_info(dev, "Unable to get capabilities\n"); ret = -ENODEV; goto remove_handler; } if (!(caps & ACPI_TAD_AC_WAKE)) { dev_info(dev, "Unsupported capabilities\n"); ret = -ENODEV; goto remove_handler; } if (!acpi_has_method(handle, "_PRW")) { dev_info(dev, "Missing _PRW\n"); ret = -ENODEV; goto remove_handler; } dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL); if (!dd) { ret = -ENOMEM; goto remove_handler; } dd->capabilities = caps; dev_set_drvdata(dev, dd); /* * Assume that the ACPI PM domain has been attached to the device and * simply enable system wakeup and runtime PM and put the device into * runtime suspend. Everything else should be taken care of by the ACPI * PM domain callbacks. */ device_init_wakeup(dev, true); dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND | DPM_FLAG_MAY_SKIP_RESUME); /* * The platform bus type layer tells the ACPI PM domain powers up the * device, so set the runtime PM status of it to "active". */ pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_runtime_suspend(dev); ret = sysfs_create_group(&dev->kobj, &acpi_tad_attr_group); if (ret) goto fail; if (caps & ACPI_TAD_DC_WAKE) { ret = sysfs_create_group(&dev->kobj, &acpi_tad_dc_attr_group); if (ret) goto fail; } if (caps & ACPI_TAD_RT) { ret = sysfs_create_group(&dev->kobj, &acpi_tad_time_attr_group); if (ret) goto fail; } return 0; fail: acpi_tad_remove(pdev); /* Don't fallthrough because cmos rtc space handler is removed in acpi_tad_remove() */ return ret; remove_handler: acpi_remove_cmos_rtc_space_handler(handle); return ret; } static const struct acpi_device_id acpi_tad_ids[] = { {"ACPI000E", 0}, {} }; static struct platform_driver acpi_tad_driver = { .driver = { .name = "acpi-tad", .acpi_match_table = acpi_tad_ids, }, .probe = acpi_tad_probe, .remove = acpi_tad_remove, }; MODULE_DEVICE_TABLE(acpi, acpi_tad_ids); module_platform_driver(acpi_tad_driver);
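/*
 * Illustrative usage sketch, not part of the driver: once the TAD platform
 * device is bound, the attributes above can be exercised from user space.
 * The sysfs path below assumes the usual ACPI platform-device naming, and
 * the "time" attribute is only present when the TAD reports the real-time
 * capability.
 *
 *	# cat /sys/bus/platform/devices/ACPI000E:00/caps
 *	# echo 3600 > /sys/bus/platform/devices/ACPI000E:00/ac_alarm
 *	# echo 2024:7:14:12:0:0:0:0 > /sys/bus/platform/devices/ACPI000E:00/time
 */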
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 50
#include "pyperf.h"
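/*
 * Note: this file only sets the loop bound; the actual BPF program body is
 * provided by pyperf.h, which is shared by the other pyperf* test variants
 * that mainly differ in STACK_MAX_LEN.
 */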
/* SPDX-License-Identifier: GPL-2.0 */ /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. */ #ifndef __ISP_PRIVATE_H_INCLUDED__ #define __ISP_PRIVATE_H_INCLUDED__ #ifdef HRT_MEMORY_ACCESS #include <hrt/api.h> #endif #include "isp_public.h" #include "device_access.h" #include "assert_support.h" #include "type_support.h" STORAGE_CLASS_ISP_C void isp_ctrl_store( const isp_ID_t ID, const unsigned int reg, const hrt_data value) { assert(ID < N_ISP_ID); assert(ISP_CTRL_BASE[ID] != (hrt_address) - 1); #if !defined(HRT_MEMORY_ACCESS) ia_css_device_store_uint32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data), value); #else hrt_master_port_store_32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data), value); #endif return; } STORAGE_CLASS_ISP_C hrt_data isp_ctrl_load( const isp_ID_t ID, const unsigned int reg) { assert(ID < N_ISP_ID); assert(ISP_CTRL_BASE[ID] != (hrt_address) - 1); #if !defined(HRT_MEMORY_ACCESS) return ia_css_device_load_uint32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data)); #else return hrt_master_port_uload_32(ISP_CTRL_BASE[ID] + reg * sizeof(hrt_data)); #endif } STORAGE_CLASS_ISP_C bool isp_ctrl_getbit( const isp_ID_t ID, const unsigned int reg, const unsigned int bit) { hrt_data val = isp_ctrl_load(ID, reg); return (val & (1UL << bit)) != 0; } STORAGE_CLASS_ISP_C void isp_ctrl_setbit( const isp_ID_t ID, const unsigned int reg, const unsigned int bit) { hrt_data data = isp_ctrl_load(ID, reg); isp_ctrl_store(ID, reg, (data | (1UL << bit))); return; } STORAGE_CLASS_ISP_C void isp_ctrl_clearbit( const isp_ID_t ID, const unsigned int reg, const unsigned int bit) { hrt_data data = isp_ctrl_load(ID, reg); isp_ctrl_store(ID, reg, (data & ~(1UL << bit))); return; } STORAGE_CLASS_ISP_C void isp_dmem_store( const isp_ID_t ID, unsigned int addr, const void *data, const size_t size) { assert(ID < N_ISP_ID); assert(ISP_DMEM_BASE[ID] != (hrt_address) - 1); #if !defined(HRT_MEMORY_ACCESS) ia_css_device_store(ISP_DMEM_BASE[ID] + addr, data, size); #else hrt_master_port_store(ISP_DMEM_BASE[ID] + addr, data, size); #endif return; } STORAGE_CLASS_ISP_C void isp_dmem_load( const isp_ID_t ID, const unsigned int addr, void *data, const size_t size) { assert(ID < N_ISP_ID); assert(ISP_DMEM_BASE[ID] != (hrt_address) - 1); #if !defined(HRT_MEMORY_ACCESS) ia_css_device_load(ISP_DMEM_BASE[ID] + addr, data, size); #else hrt_master_port_load(ISP_DMEM_BASE[ID] + addr, data, size); #endif return; } STORAGE_CLASS_ISP_C void isp_dmem_store_uint32( const isp_ID_t ID, unsigned int addr, const uint32_t data) { assert(ID < N_ISP_ID); assert(ISP_DMEM_BASE[ID] != (hrt_address) - 1); (void)ID; #if !defined(HRT_MEMORY_ACCESS) ia_css_device_store_uint32(ISP_DMEM_BASE[ID] + addr, data); #else hrt_master_port_store_32(ISP_DMEM_BASE[ID] + addr, data); #endif return; } STORAGE_CLASS_ISP_C uint32_t isp_dmem_load_uint32( const isp_ID_t ID, const unsigned int addr) { assert(ID < N_ISP_ID); assert(ISP_DMEM_BASE[ID] != (hrt_address) - 1); (void)ID; #if !defined(HRT_MEMORY_ACCESS) return ia_css_device_load_uint32(ISP_DMEM_BASE[ID] + addr); #else return hrt_master_port_uload_32(ISP_DMEM_BASE[ID] + addr); #endif } STORAGE_CLASS_ISP_C uint32_t isp_2w_cat_1w( const u16 x0, const uint16_t x1) { u32 out = ((uint32_t)(x1 & HIVE_ISP_VMEM_MASK) << ISP_VMEM_ELEMBITS) | (x0 & HIVE_ISP_VMEM_MASK); return out; } #endif /* __ISP_PRIVATE_H_INCLUDED__ */
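/*
 * Illustrative sketch only: typical use of the accessors defined above from
 * ISP control code. The ID, register index, bit and address below are
 * placeholders.
 *
 *	if (!isp_ctrl_getbit(ISP0_ID, reg, bit))
 *		isp_ctrl_setbit(ISP0_ID, reg, bit);
 *	isp_dmem_store_uint32(ISP0_ID, addr, value);
 */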
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2016 Texas Instruments, Inc. */ #ifndef __OMAPFB_DSS_H #define __OMAPFB_DSS_H #include <linux/list.h> #include <linux/kobject.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/platform_data/omapdss.h> #include <video/videomode.h> #define DISPC_IRQ_FRAMEDONE (1 << 0) #define DISPC_IRQ_VSYNC (1 << 1) #define DISPC_IRQ_EVSYNC_EVEN (1 << 2) #define DISPC_IRQ_EVSYNC_ODD (1 << 3) #define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4) #define DISPC_IRQ_PROG_LINE_NUM (1 << 5) #define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6) #define DISPC_IRQ_GFX_END_WIN (1 << 7) #define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8) #define DISPC_IRQ_OCP_ERR (1 << 9) #define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10) #define DISPC_IRQ_VID1_END_WIN (1 << 11) #define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12) #define DISPC_IRQ_VID2_END_WIN (1 << 13) #define DISPC_IRQ_SYNC_LOST (1 << 14) #define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15) #define DISPC_IRQ_WAKEUP (1 << 16) #define DISPC_IRQ_SYNC_LOST2 (1 << 17) #define DISPC_IRQ_VSYNC2 (1 << 18) #define DISPC_IRQ_VID3_END_WIN (1 << 19) #define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20) #define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21) #define DISPC_IRQ_FRAMEDONE2 (1 << 22) #define DISPC_IRQ_FRAMEDONEWB (1 << 23) #define DISPC_IRQ_FRAMEDONETV (1 << 24) #define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25) #define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26) #define DISPC_IRQ_SYNC_LOST3 (1 << 27) #define DISPC_IRQ_VSYNC3 (1 << 28) #define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29) #define DISPC_IRQ_FRAMEDONE3 (1 << 30) struct omap_dss_device; struct omap_overlay_manager; struct dss_lcd_mgr_config; struct snd_aes_iec958; struct snd_cea_861_aud_if; struct hdmi_avi_infoframe; enum omap_display_type { OMAP_DISPLAY_TYPE_NONE = 0, OMAP_DISPLAY_TYPE_DPI = 1 << 0, OMAP_DISPLAY_TYPE_DBI = 1 << 1, OMAP_DISPLAY_TYPE_SDI = 1 << 2, OMAP_DISPLAY_TYPE_DSI = 1 << 3, OMAP_DISPLAY_TYPE_VENC = 1 << 4, OMAP_DISPLAY_TYPE_HDMI = 1 << 5, OMAP_DISPLAY_TYPE_DVI = 1 << 6, }; enum omap_plane { OMAP_DSS_GFX = 0, OMAP_DSS_VIDEO1 = 1, OMAP_DSS_VIDEO2 = 2, OMAP_DSS_VIDEO3 = 3, OMAP_DSS_WB = 4, }; enum omap_channel { OMAP_DSS_CHANNEL_LCD = 0, OMAP_DSS_CHANNEL_DIGIT = 1, OMAP_DSS_CHANNEL_LCD2 = 2, OMAP_DSS_CHANNEL_LCD3 = 3, OMAP_DSS_CHANNEL_WB = 4, }; enum omap_color_mode { OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */ OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */ OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */ OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */ OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */ OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */ OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */ OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */ OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */ OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */ OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */ OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */ OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */ OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */ OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */ OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */ OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */ OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */ OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */ }; enum omap_dss_load_mode { OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, OMAP_DSS_LOAD_CLUT_ONLY = 1, OMAP_DSS_LOAD_FRAME_ONLY = 2, OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3, }; enum omap_dss_trans_key_type { OMAP_DSS_COLOR_KEY_GFX_DST = 0, 
OMAP_DSS_COLOR_KEY_VID_SRC = 1, }; enum omap_dss_signal_level { OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, }; enum omap_dss_signal_edge { OMAPDSS_DRIVE_SIG_FALLING_EDGE, OMAPDSS_DRIVE_SIG_RISING_EDGE, }; enum omap_dss_venc_type { OMAP_DSS_VENC_TYPE_COMPOSITE, OMAP_DSS_VENC_TYPE_SVIDEO, }; enum omap_dss_dsi_pixel_format { OMAP_DSS_DSI_FMT_RGB888, OMAP_DSS_DSI_FMT_RGB666, OMAP_DSS_DSI_FMT_RGB666_PACKED, OMAP_DSS_DSI_FMT_RGB565, }; enum omap_dss_dsi_mode { OMAP_DSS_DSI_CMD_MODE = 0, OMAP_DSS_DSI_VIDEO_MODE, }; enum omap_display_caps { OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0, OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1, }; enum omap_dss_display_state { OMAP_DSS_DISPLAY_DISABLED = 0, OMAP_DSS_DISPLAY_ACTIVE, }; enum omap_dss_rotation_type { OMAP_DSS_ROT_DMA = 1 << 0, OMAP_DSS_ROT_VRFB = 1 << 1, OMAP_DSS_ROT_TILER = 1 << 2, }; /* clockwise rotation angle */ enum omap_dss_rotation_angle { OMAP_DSS_ROT_0 = 0, OMAP_DSS_ROT_90 = 1, OMAP_DSS_ROT_180 = 2, OMAP_DSS_ROT_270 = 3, }; enum omap_overlay_caps { OMAP_DSS_OVL_CAP_SCALE = 1 << 0, OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1, OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2, OMAP_DSS_OVL_CAP_ZORDER = 1 << 3, OMAP_DSS_OVL_CAP_POS = 1 << 4, OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5, }; enum omap_dss_output_id { OMAP_DSS_OUTPUT_DPI = 1 << 0, OMAP_DSS_OUTPUT_DBI = 1 << 1, OMAP_DSS_OUTPUT_SDI = 1 << 2, OMAP_DSS_OUTPUT_DSI1 = 1 << 3, OMAP_DSS_OUTPUT_DSI2 = 1 << 4, OMAP_DSS_OUTPUT_VENC = 1 << 5, OMAP_DSS_OUTPUT_HDMI = 1 << 6, }; /* DSI */ enum omap_dss_dsi_trans_mode { /* Sync Pulses: both sync start and end packets sent */ OMAP_DSS_DSI_PULSE_MODE, /* Sync Events: only sync start packets sent */ OMAP_DSS_DSI_EVENT_MODE, /* Burst: only sync start packets sent, pixels are time compressed */ OMAP_DSS_DSI_BURST_MODE, }; struct omap_dss_dsi_videomode_timings { unsigned long hsclk; unsigned ndl; unsigned bitspp; /* pixels */ u16 hact; /* lines */ u16 vact; /* DSI video mode blanking data */ /* Unit: byte clock cycles */ u16 hss; u16 hsa; u16 hse; u16 hfp; u16 hbp; /* Unit: line clocks */ u16 vsa; u16 vfp; u16 vbp; /* DSI blanking modes */ int blanking_mode; int hsa_blanking_mode; int hbp_blanking_mode; int hfp_blanking_mode; enum omap_dss_dsi_trans_mode trans_mode; bool ddr_clk_always_on; int window_sync; }; struct omap_dss_dsi_config { enum omap_dss_dsi_mode mode; enum omap_dss_dsi_pixel_format pixel_format; const struct omap_video_timings *timings; unsigned long hs_clk_min, hs_clk_max; unsigned long lp_clk_min, lp_clk_max; bool ddr_clk_always_on; enum omap_dss_dsi_trans_mode trans_mode; }; struct omap_video_timings { /* Unit: pixels */ u16 x_res; /* Unit: pixels */ u16 y_res; /* Unit: Hz */ u32 pixelclock; /* Unit: pixel clocks */ u16 hsw; /* Horizontal synchronization pulse width */ /* Unit: pixel clocks */ u16 hfp; /* Horizontal front porch */ /* Unit: pixel clocks */ u16 hbp; /* Horizontal back porch */ /* Unit: line clocks */ u16 vsw; /* Vertical synchronization pulse width */ /* Unit: line clocks */ u16 vfp; /* Vertical front porch */ /* Unit: line clocks */ u16 vbp; /* Vertical back porch */ /* Vsync logic level */ enum omap_dss_signal_level vsync_level; /* Hsync logic level */ enum omap_dss_signal_level hsync_level; /* Interlaced or Progressive timings */ bool interlace; /* Pixel clock edge to drive LCD data */ enum omap_dss_signal_edge data_pclk_edge; /* Data enable logic level */ enum omap_dss_signal_level de_level; /* Pixel clock edges to drive HSYNC and VSYNC signals */ enum omap_dss_signal_edge sync_pclk_edge; bool double_pixel; }; /* Hardcoded timings for 
tv modes. Venc only uses these to * identify the mode, and does not actually use the configs * itself. However, the configs should be something that * a normal monitor can also show */ extern const struct omap_video_timings omap_dss_pal_timings; extern const struct omap_video_timings omap_dss_ntsc_timings; struct omap_dss_cpr_coefs { s16 rr, rg, rb; s16 gr, gg, gb; s16 br, bg, bb; }; struct omap_overlay_info { dma_addr_t paddr; dma_addr_t p_uv_addr; /* for NV12 format */ u16 screen_width; u16 width; u16 height; enum omap_color_mode color_mode; u8 rotation; enum omap_dss_rotation_type rotation_type; bool mirror; u16 pos_x; u16 pos_y; u16 out_width; /* if 0, out_width == width */ u16 out_height; /* if 0, out_height == height */ u8 global_alpha; u8 pre_mult_alpha; u8 zorder; }; struct omap_overlay { struct kobject kobj; struct list_head list; /* static fields */ const char *name; enum omap_plane id; enum omap_color_mode supported_modes; enum omap_overlay_caps caps; /* dynamic fields */ struct omap_overlay_manager *manager; /* * The following functions do not block: * * is_enabled * set_overlay_info * get_overlay_info * * The rest of the functions may block and cannot be called from * interrupt context */ int (*enable)(struct omap_overlay *ovl); int (*disable)(struct omap_overlay *ovl); bool (*is_enabled)(struct omap_overlay *ovl); int (*set_manager)(struct omap_overlay *ovl, struct omap_overlay_manager *mgr); int (*unset_manager)(struct omap_overlay *ovl); int (*set_overlay_info)(struct omap_overlay *ovl, struct omap_overlay_info *info); void (*get_overlay_info)(struct omap_overlay *ovl, struct omap_overlay_info *info); int (*wait_for_go)(struct omap_overlay *ovl); struct omap_dss_device *(*get_device)(struct omap_overlay *ovl); }; struct omap_overlay_manager_info { u32 default_color; enum omap_dss_trans_key_type trans_key_type; u32 trans_key; bool trans_enabled; bool partial_alpha_enabled; bool cpr_enable; struct omap_dss_cpr_coefs cpr_coefs; }; struct omap_overlay_manager { struct kobject kobj; /* static fields */ const char *name; enum omap_channel id; struct list_head overlays; enum omap_display_type supported_displays; enum omap_dss_output_id supported_outputs; /* dynamic fields */ struct omap_dss_device *output; /* * The following functions do not block: * * set_manager_info * get_manager_info * apply * * The rest of the functions may block and cannot be called from * interrupt context */ int (*set_output)(struct omap_overlay_manager *mgr, struct omap_dss_device *output); int (*unset_output)(struct omap_overlay_manager *mgr); int (*set_manager_info)(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info); void (*get_manager_info)(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info); int (*apply)(struct omap_overlay_manager *mgr); int (*wait_for_go)(struct omap_overlay_manager *mgr); int (*wait_for_vsync)(struct omap_overlay_manager *mgr); struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr); }; /* 22 pins means 1 clk lane and 10 data lanes */ #define OMAP_DSS_MAX_DSI_PINS 22 struct omap_dsi_pin_config { int num_pins; /* * pin numbers in the following order: * clk+, clk- * data1+, data1- * data2+, data2- * ... 
*/ int pins[OMAP_DSS_MAX_DSI_PINS]; }; struct omap_dss_writeback_info { u32 paddr; u32 p_uv_addr; u16 buf_width; u16 width; u16 height; enum omap_color_mode color_mode; u8 rotation; enum omap_dss_rotation_type rotation_type; bool mirror; u8 pre_mult_alpha; }; struct omapdss_dpi_ops { int (*connect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); void (*disconnect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); int (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev); int (*check_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*get_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines); }; struct omapdss_sdi_ops { int (*connect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); void (*disconnect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); int (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev); int (*check_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*get_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs); }; struct omapdss_dvi_ops { int (*connect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); void (*disconnect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); int (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev); int (*check_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*get_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); }; struct omapdss_atv_ops { int (*connect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); void (*disconnect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); int (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev); int (*check_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*get_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_type)(struct omap_dss_device *dssdev, enum omap_dss_venc_type type); void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev, bool invert_polarity); int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); u32 (*get_wss)(struct omap_dss_device *dssdev); }; struct omapdss_hdmi_ops { int (*connect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); void (*disconnect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); int (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev); int (*check_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*get_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); bool (*detect)(struct omap_dss_device *dssdev); int 
(*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); int (*set_infoframe)(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi); }; struct omapdss_dsi_ops { int (*connect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); void (*disconnect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); int (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes, bool enter_ulps); /* bus configuration */ int (*set_config)(struct omap_dss_device *dssdev, const struct omap_dss_dsi_config *cfg); int (*configure_pins)(struct omap_dss_device *dssdev, const struct omap_dsi_pin_config *pin_cfg); void (*enable_hs)(struct omap_dss_device *dssdev, int channel, bool enable); int (*enable_te)(struct omap_dss_device *dssdev, bool enable); int (*update)(struct omap_dss_device *dssdev, int channel, void (*callback)(int, void *), void *data); void (*bus_lock)(struct omap_dss_device *dssdev); void (*bus_unlock)(struct omap_dss_device *dssdev); int (*enable_video_output)(struct omap_dss_device *dssdev, int channel); void (*disable_video_output)(struct omap_dss_device *dssdev, int channel); int (*request_vc)(struct omap_dss_device *dssdev, int *channel); int (*set_vc_id)(struct omap_dss_device *dssdev, int channel, int vc_id); void (*release_vc)(struct omap_dss_device *dssdev, int channel); /* data transfer */ int (*dcs_write)(struct omap_dss_device *dssdev, int channel, u8 *data, int len); int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel, u8 *data, int len); int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, u8 *data, int len); int (*gen_write)(struct omap_dss_device *dssdev, int channel, u8 *data, int len); int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel, u8 *data, int len); int (*gen_read)(struct omap_dss_device *dssdev, int channel, u8 *reqdata, int reqlen, u8 *data, int len); int (*bta_sync)(struct omap_dss_device *dssdev, int channel); int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev, int channel, u16 plen); }; struct omap_dss_device { struct kobject kobj; struct device *dev; struct module *owner; struct list_head panel_list; /* alias in the form of "display%d" */ char alias[16]; enum omap_display_type type; enum omap_display_type output_type; union { struct { u8 data_lines; } dpi; struct { u8 datapairs; } sdi; struct { int module; } dsi; struct { enum omap_dss_venc_type type; bool invert_polarity; } venc; } phy; struct { struct omap_video_timings timings; enum omap_dss_dsi_pixel_format dsi_pix_fmt; enum omap_dss_dsi_mode dsi_mode; } panel; struct { u8 pixel_size; } ctrl; const char *name; /* used to match device to driver */ const char *driver_name; void *data; struct omap_dss_driver *driver; union { const struct omapdss_dpi_ops *dpi; const struct omapdss_sdi_ops *sdi; const struct omapdss_dvi_ops *dvi; const struct omapdss_hdmi_ops *hdmi; const struct omapdss_atv_ops *atv; const struct omapdss_dsi_ops *dsi; } ops; /* helper variable for driver suspend/resume */ bool activate_after_resume; enum omap_display_caps caps; struct omap_dss_device *src; enum omap_dss_display_state state; /* OMAP DSS output specific fields */ struct list_head list; /* DISPC channel for this output */ enum omap_channel dispc_channel; bool dispc_channel_connected; /* output instance */ enum omap_dss_output_id id; /* the port number in the DT node */ int port_num; /* dynamic fields */ struct omap_overlay_manager *manager; struct omap_dss_device *dst; }; struct 
omap_dss_driver { int (*probe)(struct omap_dss_device *); void (*remove)(struct omap_dss_device *); int (*connect)(struct omap_dss_device *dssdev); void (*disconnect)(struct omap_dss_device *dssdev); int (*enable)(struct omap_dss_device *display); void (*disable)(struct omap_dss_device *display); int (*run_test)(struct omap_dss_device *display, int test); int (*update)(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h); int (*sync)(struct omap_dss_device *dssdev); int (*enable_te)(struct omap_dss_device *dssdev, bool enable); int (*get_te)(struct omap_dss_device *dssdev); u8 (*get_rotate)(struct omap_dss_device *dssdev); int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate); bool (*get_mirror)(struct omap_dss_device *dssdev); int (*set_mirror)(struct omap_dss_device *dssdev, bool enable); int (*memory_read)(struct omap_dss_device *dssdev, void *buf, size_t size, u16 x, u16 y, u16 w, u16 h); void (*get_resolution)(struct omap_dss_device *dssdev, u16 *xres, u16 *yres); void (*get_dimensions)(struct omap_dss_device *dssdev, u32 *width, u32 *height); int (*get_recommended_bpp)(struct omap_dss_device *dssdev); int (*check_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*set_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); void (*get_timings)(struct omap_dss_device *dssdev, struct omap_video_timings *timings); int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); u32 (*get_wss)(struct omap_dss_device *dssdev); int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); bool (*detect)(struct omap_dss_device *dssdev); int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi); }; #define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); #if IS_ENABLED(CONFIG_FB_OMAP2) enum omapdss_version omapdss_get_version(void); bool omapdss_is_initialized(void); int omap_dss_register_driver(struct omap_dss_driver *); void omap_dss_unregister_driver(struct omap_dss_driver *); int omapdss_register_display(struct omap_dss_device *dssdev); void omapdss_unregister_display(struct omap_dss_device *dssdev); struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); void omap_dss_put_device(struct omap_dss_device *dssdev); struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); struct omap_dss_device *omap_dss_find_device(void *data, int (*match)(struct omap_dss_device *dssdev, void *data)); const char *omapdss_get_default_display_name(void); void videomode_to_omap_video_timings(const struct videomode *vm, struct omap_video_timings *ovt); void omap_video_timings_to_videomode(const struct omap_video_timings *ovt, struct videomode *vm); int dss_feat_get_num_mgrs(void); int dss_feat_get_num_ovls(void); enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); int omap_dss_get_num_overlay_managers(void); struct omap_overlay_manager *omap_dss_get_overlay_manager(int num); int omap_dss_get_num_overlays(void); struct omap_overlay *omap_dss_get_overlay(int num); int omapdss_register_output(struct omap_dss_device *output); void omapdss_unregister_output(struct omap_dss_device *output); struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); struct omap_dss_device *omap_dss_find_output(const char *name); struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node 
*port); int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev); int omapdss_output_unset_device(struct omap_dss_device *out); struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev); struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev); void omapdss_default_get_resolution(struct omap_dss_device *dssdev, u16 *xres, u16 *yres); int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev); void omapdss_default_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings); int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); int omapdss_compat_init(void); void omapdss_compat_uninit(void); static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev) { return dssdev->src; } static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) { return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE; } struct omap_dss_device * omapdss_of_find_source_for_first_ep(struct device_node *node); #else static inline enum omapdss_version omapdss_get_version(void) { return OMAPDSS_VER_UNKNOWN; }; static inline bool omapdss_is_initialized(void) { return false; }; static inline int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask) { return 0; }; static inline int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask) { return 0; }; static inline struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev) { return NULL; }; static inline struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from) {return NULL; }; static inline void omap_dss_put_device(struct omap_dss_device *dssdev) {}; static inline int omapdss_compat_init(void) { return 0; }; static inline void omapdss_compat_uninit(void) {}; static inline int omap_dss_get_num_overlay_managers(void) { return 0; }; static inline struct omap_overlay_manager *omap_dss_get_overlay_manager(int num) { return NULL; }; static inline int omap_dss_get_num_overlays(void) { return 0; }; static inline struct omap_overlay *omap_dss_get_overlay(int num) { return NULL; }; #endif /* FB_OMAP2 */ #endif /* __OMAPFB_DSS_H */
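/*
 * Illustrative, hedged sketch (user space, not part of the omapfb-dss
 * header above): how the fields of struct omap_video_timings combine into
 * a refresh rate.  Total line length is x_res + hsw + hfp + hbp in pixel
 * clocks, total frame height is y_res + vsw + vfp + vbp in line clocks, so
 * refresh = pixelclock / (total_x * total_y).  The local struct mirrors
 * only the fields used here; the numbers are the standard 65 MHz
 * 1024x768@60 mode, used purely as an example.  Builds with gcc.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_video_timings {
	uint16_t x_res, y_res;
	uint32_t pixelclock;		/* Hz */
	uint16_t hsw, hfp, hbp;		/* pixel clocks */
	uint16_t vsw, vfp, vbp;		/* line clocks */
};

static double demo_refresh_hz(const struct demo_video_timings *t)
{
	uint32_t total_x = t->x_res + t->hsw + t->hfp + t->hbp;
	uint32_t total_y = t->y_res + t->vsw + t->vfp + t->vbp;

	return (double)t->pixelclock / ((double)total_x * total_y);
}

int main(void)
{
	struct demo_video_timings t = {
		.x_res = 1024, .y_res = 768,
		.pixelclock = 65000000,
		.hsw = 136, .hfp = 24, .hbp = 160,
		.vsw = 6, .vfp = 3, .vbp = 29,
	};

	printf("total %ux%u, refresh ~%.2f Hz\n",
	       t.x_res + t.hsw + t.hfp + t.hbp,
	       t.y_res + t.vsw + t.vfp + t.vbp,
	       demo_refresh_hz(&t));
	return 0;
}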
// SPDX-License-Identifier: GPL-2.0 #include <sys/select.h> #include <unistd.h> #include <errno.h> int read_with_timeout(int fd, char *buf, size_t count, long usec) { const long M = 1000 * 1000; struct timeval tv = { usec / M, usec % M }; fd_set fds; int err; FD_ZERO(&fds); FD_SET(fd, &fds); err = select(fd + 1, &fds, NULL, NULL, &tv); if (err < 0) return err; if (FD_ISSET(fd, &fds)) return read(fd, buf, count); return -EAGAIN; }
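/*
 * Illustrative usage sketch for read_with_timeout() above, assuming it is
 * compiled and linked together with the file that defines it (the prototype
 * below just repeats its signature).  It waits up to half a second for data
 * on stdin and reports a timeout otherwise.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int read_with_timeout(int fd, char *buf, size_t count, long usec);

int main(void)
{
	char buf[64];
	int n = read_with_timeout(STDIN_FILENO, buf, sizeof(buf) - 1,
				  500 * 1000 /* 0.5 s */);

	if (n == -EAGAIN) {
		puts("timed out, no input within 0.5 s");
	} else if (n < 0) {
		fprintf(stderr, "select failed: %d\n", n);
	} else {
		buf[n] = '\0';
		printf("read %d bytes: %s\n", n, buf);
	}
	return 0;
}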
// SPDX-License-Identifier: GPL-2.0-only /* * amdtp-dot.c - a part of driver for Digidesign Digi 002/003 family * * Copyright (c) 2014-2015 Takashi Sakamoto * Copyright (C) 2012 Robin Gareus <[email protected]> * Copyright (C) 2012 Damien Zammit <[email protected]> */ #include <sound/pcm.h> #include "digi00x.h" #define CIP_FMT_AM 0x10 /* 'Clock-based rate control mode' is just supported. */ #define AMDTP_FDF_AM824 0x00 /* * Nominally 3125 bytes/second, but the MIDI port's clock might be * 1% too slow, and the bus clock 100 ppm too fast. */ #define MIDI_BYTES_PER_SECOND 3093 /* * Several devices look only at the first eight data blocks. * In any case, this is more than enough for the MIDI data rate. */ #define MAX_MIDI_RX_BLOCKS 8 /* 3 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) + 1. */ #define MAX_MIDI_PORTS 3 /* * The double-oh-three algorithm was discovered by Robin Gareus and Damien * Zammit in 2012, with reverse-engineering for Digi 003 Rack. */ struct dot_state { u8 carry; u8 idx; unsigned int off; }; struct amdtp_dot { unsigned int pcm_channels; struct dot_state state; struct snd_rawmidi_substream *midi[MAX_MIDI_PORTS]; int midi_fifo_used[MAX_MIDI_PORTS]; int midi_fifo_limit; }; /* * double-oh-three look up table * * @param idx index byte (audio-sample data) 0x00..0xff * @param off channel offset shift * @return salt to XOR with given data */ #define BYTE_PER_SAMPLE (4) #define MAGIC_DOT_BYTE (2) #define MAGIC_BYTE_OFF(x) (((x) * BYTE_PER_SAMPLE) + MAGIC_DOT_BYTE) static u8 dot_scrt(const u8 idx, const unsigned int off) { /* * the length of the added pattern only depends on the lower nibble * of the last non-zero data */ static const u8 len[16] = {0, 1, 3, 5, 7, 9, 11, 13, 14, 12, 10, 8, 6, 4, 2, 0}; /* * the lower nibble of the salt. Interleaved sequence. * this is walked backwards according to len[] */ static const u8 nib[15] = {0x8, 0x7, 0x9, 0x6, 0xa, 0x5, 0xb, 0x4, 0xc, 0x3, 0xd, 0x2, 0xe, 0x1, 0xf}; /* circular list for the salt's hi nibble. */ static const u8 hir[15] = {0x0, 0x6, 0xf, 0x8, 0x7, 0x5, 0x3, 0x4, 0xc, 0xd, 0xe, 0x1, 0x2, 0xb, 0xa}; /* * start offset for upper nibble mapping. * note: 9 is /special/. In the case where the high nibble == 0x9, * hir[] is not used and - coincidentally - the salt's hi nibble is * 0x09 regardless of the offset. */ static const u8 hio[16] = {0, 11, 12, 6, 7, 5, 1, 4, 3, 0x00, 14, 13, 8, 9, 10, 2}; const u8 ln = idx & 0xf; const u8 hn = (idx >> 4) & 0xf; const u8 hr = (hn == 0x9) ? 0x9 : hir[(hio[hn] + off) % 15]; if (len[ln] < off) return 0x00; return ((nib[14 + off - len[ln]]) | (hr << 4)); } static void dot_encode_step(struct dot_state *state, __be32 *const buffer) { u8 * const data = (u8 *) buffer; if (data[MAGIC_DOT_BYTE] != 0x00) { state->off = 0; state->idx = data[MAGIC_DOT_BYTE] ^ state->carry; } data[MAGIC_DOT_BYTE] ^= state->carry; state->carry = dot_scrt(state->idx, ++(state->off)); } int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate, unsigned int pcm_channels) { struct amdtp_dot *p = s->protocol; int err; if (amdtp_stream_running(s)) return -EBUSY; /* * A first data channel is for MIDI messages, the rest is Multi Bit * Linear Audio data channel. */ err = amdtp_stream_set_parameters(s, rate, pcm_channels + 1, 1); if (err < 0) return err; s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc; p->pcm_channels = pcm_channels; /* * We do not know the actual MIDI FIFO size of most devices. Just * assume two bytes, i.e., one byte can be received over the bus while * the previous one is transmitted over MIDI. 
* (The value here is adjusted for midi_ratelimit_per_packet().) */ p->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1; return 0; } static void write_pcm_s32(struct amdtp_stream *s, struct snd_pcm_substream *pcm, __be32 *buffer, unsigned int frames, unsigned int pcm_frames) { struct amdtp_dot *p = s->protocol; unsigned int channels = p->pcm_channels; struct snd_pcm_runtime *runtime = pcm->runtime; unsigned int pcm_buffer_pointer; int remaining_frames; const u32 *src; int i, c; pcm_buffer_pointer = s->pcm_buffer_pointer + pcm_frames; pcm_buffer_pointer %= runtime->buffer_size; src = (void *)runtime->dma_area + frames_to_bytes(runtime, pcm_buffer_pointer); remaining_frames = runtime->buffer_size - pcm_buffer_pointer; buffer++; for (i = 0; i < frames; ++i) { for (c = 0; c < channels; ++c) { buffer[c] = cpu_to_be32((*src >> 8) | 0x40000000); dot_encode_step(&p->state, &buffer[c]); src++; } buffer += s->data_block_quadlets; if (--remaining_frames == 0) src = (void *)runtime->dma_area; } } static void read_pcm_s32(struct amdtp_stream *s, struct snd_pcm_substream *pcm, __be32 *buffer, unsigned int frames, unsigned int pcm_frames) { struct amdtp_dot *p = s->protocol; unsigned int channels = p->pcm_channels; struct snd_pcm_runtime *runtime = pcm->runtime; unsigned int pcm_buffer_pointer; int remaining_frames; u32 *dst; int i, c; pcm_buffer_pointer = s->pcm_buffer_pointer + pcm_frames; pcm_buffer_pointer %= runtime->buffer_size; dst = (void *)runtime->dma_area + frames_to_bytes(runtime, pcm_buffer_pointer); remaining_frames = runtime->buffer_size - pcm_buffer_pointer; buffer++; for (i = 0; i < frames; ++i) { for (c = 0; c < channels; ++c) { *dst = be32_to_cpu(buffer[c]) << 8; dst++; } buffer += s->data_block_quadlets; if (--remaining_frames == 0) dst = (void *)runtime->dma_area; } } static void write_pcm_silence(struct amdtp_stream *s, __be32 *buffer, unsigned int data_blocks) { struct amdtp_dot *p = s->protocol; unsigned int channels, i, c; channels = p->pcm_channels; buffer++; for (i = 0; i < data_blocks; ++i) { for (c = 0; c < channels; ++c) buffer[c] = cpu_to_be32(0x40000000); buffer += s->data_block_quadlets; } } static bool midi_ratelimit_per_packet(struct amdtp_stream *s, unsigned int port) { struct amdtp_dot *p = s->protocol; int used; used = p->midi_fifo_used[port]; if (used == 0) return true; used -= MIDI_BYTES_PER_SECOND * s->syt_interval; used = max(used, 0); p->midi_fifo_used[port] = used; return used < p->midi_fifo_limit; } static inline void midi_use_bytes(struct amdtp_stream *s, unsigned int port, unsigned int count) { struct amdtp_dot *p = s->protocol; p->midi_fifo_used[port] += amdtp_rate_table[s->sfc] * count; } static void write_midi_messages(struct amdtp_stream *s, __be32 *buffer, unsigned int data_blocks, unsigned int data_block_counter) { struct amdtp_dot *p = s->protocol; unsigned int f, port; int len; u8 *b; for (f = 0; f < data_blocks; f++) { port = (data_block_counter + f) % 8; b = (u8 *)&buffer[0]; len = 0; if (port < MAX_MIDI_PORTS && midi_ratelimit_per_packet(s, port) && p->midi[port] != NULL) len = snd_rawmidi_transmit(p->midi[port], b + 1, 2); if (len > 0) { /* * Upper 4 bits of LSB represent port number. * - 0000b: physical MIDI port 1. * - 0010b: physical MIDI port 2. * - 1110b: console MIDI port. 
*/ if (port == 2) b[3] = 0xe0; else if (port == 1) b[3] = 0x20; else b[3] = 0x00; b[3] |= len; midi_use_bytes(s, port, len); } else { b[1] = 0; b[2] = 0; b[3] = 0; } b[0] = 0x80; buffer += s->data_block_quadlets; } } static void read_midi_messages(struct amdtp_stream *s, __be32 *buffer, unsigned int data_blocks) { struct amdtp_dot *p = s->protocol; unsigned int f, port, len; u8 *b; for (f = 0; f < data_blocks; f++) { b = (u8 *)&buffer[0]; len = b[3] & 0x0f; if (len > 0) { /* * Upper 4 bits of LSB represent port number. * - 0000b: physical MIDI port 1. Use port 0. * - 1110b: console MIDI port. Use port 2. */ if (b[3] >> 4 > 0) port = 2; else port = 0; if (port < MAX_MIDI_PORTS && p->midi[port]) snd_rawmidi_receive(p->midi[port], b + 1, len); } buffer += s->data_block_quadlets; } } int amdtp_dot_add_pcm_hw_constraints(struct amdtp_stream *s, struct snd_pcm_runtime *runtime) { int err; /* This protocol delivers 24 bit data in 32bit data channel. */ err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); if (err < 0) return err; return amdtp_stream_add_pcm_hw_constraints(s, runtime); } void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port, struct snd_rawmidi_substream *midi) { struct amdtp_dot *p = s->protocol; if (port < MAX_MIDI_PORTS) WRITE_ONCE(p->midi[port], midi); } static void process_ir_ctx_payloads(struct amdtp_stream *s, const struct pkt_desc *desc, unsigned int count, struct snd_pcm_substream *pcm) { unsigned int pcm_frames = 0; int i; for (i = 0; i < count; ++i) { __be32 *buf = desc->ctx_payload; unsigned int data_blocks = desc->data_blocks; if (pcm) { read_pcm_s32(s, pcm, buf, data_blocks, pcm_frames); pcm_frames += data_blocks; } read_midi_messages(s, buf, data_blocks); desc = amdtp_stream_next_packet_desc(s, desc); } } static void process_it_ctx_payloads(struct amdtp_stream *s, const struct pkt_desc *desc, unsigned int count, struct snd_pcm_substream *pcm) { unsigned int pcm_frames = 0; int i; for (i = 0; i < count; ++i) { __be32 *buf = desc->ctx_payload; unsigned int data_blocks = desc->data_blocks; if (pcm) { write_pcm_s32(s, pcm, buf, data_blocks, pcm_frames); pcm_frames += data_blocks; } else { write_pcm_silence(s, buf, data_blocks); } write_midi_messages(s, buf, data_blocks, desc->data_block_counter); desc = amdtp_stream_next_packet_desc(s, desc); } } int amdtp_dot_init(struct amdtp_stream *s, struct fw_unit *unit, enum amdtp_stream_direction dir) { amdtp_stream_process_ctx_payloads_t process_ctx_payloads; unsigned int flags = CIP_NONBLOCKING | CIP_UNAWARE_SYT; // Use different mode between incoming/outgoing. if (dir == AMDTP_IN_STREAM) process_ctx_payloads = process_ir_ctx_payloads; else process_ctx_payloads = process_it_ctx_payloads; return amdtp_stream_init(s, unit, dir, flags, CIP_FMT_AM, process_ctx_payloads, sizeof(struct amdtp_dot)); } void amdtp_dot_reset(struct amdtp_stream *s) { struct amdtp_dot *p = s->protocol; p->state.carry = 0x00; p->state.idx = 0x00; p->state.off = 0; }
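/*
 * Illustrative, hedged model (user space, not driver code) of the MIDI
 * throttling arithmetic in amdtp-dot.c above: midi_use_bytes() charges the
 * per-port counter with one "sample rate" unit per transmitted byte, and
 * midi_ratelimit_per_packet() drains MIDI_BYTES_PER_SECOND * syt_interval
 * per data packet, so the long-run MIDI rate converges on
 * MIDI_BYTES_PER_SECOND.  The driver charges amdtp_rate_table[s->sfc]; this
 * sketch assumes the nominal sample rate instead.  Builds with gcc.
 */
#include <stdio.h>

#define MIDI_BYTES_PER_SECOND	3093

int main(void)
{
	const int rate = 44100;		/* frames per second */
	const int syt_interval = 8;	/* frames per data packet */
	const int fifo_limit = rate - MIDI_BYTES_PER_SECOND * syt_interval + 1;
	const int packets = rate / syt_interval;	/* roughly one second */
	long long used = 0;
	long bytes_sent = 0;

	for (int i = 0; i < packets; i++) {
		int allowed;

		/* midi_ratelimit_per_packet(): drain, then compare to limit. */
		if (used == 0) {
			allowed = 1;
		} else {
			used -= (long long)MIDI_BYTES_PER_SECOND * syt_interval;
			if (used < 0)
				used = 0;
			allowed = used < fifo_limit;
		}

		/* Up to two MIDI bytes fit in one data packet (see above). */
		if (allowed) {
			used += 2LL * rate;
			bytes_sent += 2;
		}
	}

	printf("sent %ld MIDI bytes in ~1 s (target %d)\n",
	       bytes_sent, MIDI_BYTES_PER_SECOND);
	return 0;
}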
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Copyright (c) 2013 MundoReader S.L. * Author: Heiko Stuebner <[email protected]> */ #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/soc/rockchip,boot-mode.h> / { #address-cells = <1>; #size-cells = <1>; interrupt-parent = <&gic>; aliases { ethernet0 = &emac; gpio0 = &gpio0; gpio1 = &gpio1; gpio2 = &gpio2; gpio3 = &gpio3; i2c0 = &i2c0; i2c1 = &i2c1; i2c2 = &i2c2; i2c3 = &i2c3; i2c4 = &i2c4; serial0 = &uart0; serial1 = &uart1; serial2 = &uart2; serial3 = &uart3; spi0 = &spi0; spi1 = &spi1; }; xin24m: oscillator { compatible = "fixed-clock"; clock-frequency = <24000000>; #clock-cells = <0>; clock-output-names = "xin24m"; }; gpu: gpu@10090000 { compatible = "arm,mali-400"; reg = <0x10090000 0x10000>; clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>; clock-names = "bus", "core"; assigned-clocks = <&cru ACLK_GPU>; assigned-clock-rates = <100000000>; resets = <&cru SRST_GPU>; status = "disabled"; }; vpu: video-codec@10104000 { compatible = "rockchip,rk3066-vpu"; reg = <0x10104000 0x800>; interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "vepu", "vdpu"; clocks = <&cru ACLK_VDPU>, <&cru HCLK_VDPU>, <&cru ACLK_VEPU>, <&cru HCLK_VEPU>; clock-names = "aclk_vdpu", "hclk_vdpu", "aclk_vepu", "hclk_vepu"; }; L2: cache-controller@10138000 { compatible = "arm,pl310-cache"; reg = <0x10138000 0x1000>; cache-unified; cache-level = <2>; }; scu@1013c000 { compatible = "arm,cortex-a9-scu"; reg = <0x1013c000 0x100>; }; global_timer: global-timer@1013c200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x1013c200 0x20>; interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>; clocks = <&cru CORE_PERI>; status = "disabled"; /* The clock source and the sched_clock provided by the arm_global_timer * on Rockchip rk3066a/rk3188 are quite unstable because their rates * depend on the CPU frequency. * Keep the arm_global_timer disabled in order to have the * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default. 
*/ }; local_timer: local-timer@1013c600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0x1013c600 0x20>; interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>; clocks = <&cru CORE_PERI>; }; gic: interrupt-controller@1013d000 { compatible = "arm,cortex-a9-gic"; interrupt-controller; #interrupt-cells = <3>; reg = <0x1013d000 0x1000>, <0x1013c100 0x0100>; }; uart0: serial@10124000 { compatible = "snps,dw-apb-uart"; reg = <0x10124000 0x400>; interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <1>; clock-names = "baudclk", "apb_pclk"; clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>; status = "disabled"; }; uart1: serial@10126000 { compatible = "snps,dw-apb-uart"; reg = <0x10126000 0x400>; interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <1>; clock-names = "baudclk", "apb_pclk"; clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>; status = "disabled"; }; qos_gpu: qos@1012d000 { compatible = "rockchip,rk3066-qos", "syscon"; reg = <0x1012d000 0x20>; }; qos_vpu: qos@1012e000 { compatible = "rockchip,rk3066-qos", "syscon"; reg = <0x1012e000 0x20>; }; qos_lcdc0: qos@1012f000 { compatible = "rockchip,rk3066-qos", "syscon"; reg = <0x1012f000 0x20>; }; qos_cif0: qos@1012f080 { compatible = "rockchip,rk3066-qos", "syscon"; reg = <0x1012f080 0x20>; }; qos_ipp: qos@1012f100 { compatible = "rockchip,rk3066-qos", "syscon"; reg = <0x1012f100 0x20>; }; qos_lcdc1: qos@1012f180 { compatible = "rockchip,rk3066-qos", "syscon"; reg = <0x1012f180 0x20>; }; qos_cif1: qos@1012f200 { compatible = "rockchip,rk3066-qos", "syscon"; reg = <0x1012f200 0x20>; }; qos_rga: qos@1012f280 { compatible = "rockchip,rk3066-qos", "syscon"; reg = <0x1012f280 0x20>; }; usb_otg: usb@10180000 { compatible = "rockchip,rk3066-usb", "snps,dwc2"; reg = <0x10180000 0x40000>; interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru HCLK_OTG0>; clock-names = "otg"; dr_mode = "otg"; g-np-tx-fifo-size = <16>; g-rx-fifo-size = <275>; g-tx-fifo-size = <256 128 128 64 64 32>; phys = <&usbphy0>; phy-names = "usb2-phy"; status = "disabled"; }; usb_host: usb@101c0000 { compatible = "snps,dwc2"; reg = <0x101c0000 0x40000>; interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru HCLK_OTG1>; clock-names = "otg"; dr_mode = "host"; phys = <&usbphy1>; phy-names = "usb2-phy"; status = "disabled"; }; emac: ethernet@10204000 { compatible = "rockchip,rk3066-emac"; reg = <0x10204000 0x3c>; interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>; clock-names = "hclk", "macref"; max-speed = <100>; phy-mode = "rmii"; rockchip,grf = <&grf>; status = "disabled"; }; mmc0: mmc@10214000 { compatible = "rockchip,rk2928-dw-mshc"; reg = <0x10214000 0x1000>; interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>; clock-names = "biu", "ciu"; dmas = <&dmac2 1>; dma-names = "rx-tx"; fifo-depth = <256>; resets = <&cru SRST_SDMMC>; reset-names = "reset"; status = "disabled"; }; mmc1: mmc@10218000 { compatible = "rockchip,rk2928-dw-mshc"; reg = <0x10218000 0x1000>; interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>; clock-names = "biu", "ciu"; dmas = <&dmac2 3>; dma-names = "rx-tx"; fifo-depth = <256>; resets = <&cru SRST_SDIO>; reset-names = "reset"; status = "disabled"; }; emmc: mmc@1021c000 { compatible = "rockchip,rk2928-dw-mshc"; reg = <0x1021c000 0x1000>; interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>; clock-names = "biu", "ciu"; dmas = 
<&dmac2 4>; dma-names = "rx-tx"; fifo-depth = <256>; resets = <&cru SRST_EMMC>; reset-names = "reset"; status = "disabled"; }; nfc: nand-controller@10500000 { compatible = "rockchip,rk2928-nfc"; reg = <0x10500000 0x4000>; interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cru HCLK_NANDC0>; clock-names = "ahb"; status = "disabled"; }; pmu: pmu@20004000 { compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd"; reg = <0x20004000 0x100>; reboot-mode { compatible = "syscon-reboot-mode"; offset = <0x40>; mode-normal = <BOOT_NORMAL>; mode-recovery = <BOOT_RECOVERY>; mode-bootloader = <BOOT_FASTBOOT>; mode-loader = <BOOT_BL_DOWNLOAD>; }; }; grf: grf@20008000 { compatible = "syscon", "simple-mfd"; reg = <0x20008000 0x200>; }; dmac1_s: dma-controller@20018000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x20018000 0x4000>; interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; arm,pl330-broken-no-flushp; arm,pl330-periph-burst; clocks = <&cru ACLK_DMA1>; clock-names = "apb_pclk"; }; dmac1_ns: dma-controller@2001c000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x2001c000 0x4000>; interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; arm,pl330-broken-no-flushp; arm,pl330-periph-burst; clocks = <&cru ACLK_DMA1>; clock-names = "apb_pclk"; status = "disabled"; }; i2c0: i2c@2002d000 { compatible = "rockchip,rk3066-i2c"; reg = <0x2002d000 0x1000>; interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; rockchip,grf = <&grf>; clock-names = "i2c"; clocks = <&cru PCLK_I2C0>; status = "disabled"; }; i2c1: i2c@2002f000 { compatible = "rockchip,rk3066-i2c"; reg = <0x2002f000 0x1000>; interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; rockchip,grf = <&grf>; clocks = <&cru PCLK_I2C1>; clock-names = "i2c"; status = "disabled"; }; pwm0: pwm@20030000 { compatible = "rockchip,rk2928-pwm"; reg = <0x20030000 0x10>; #pwm-cells = <2>; clocks = <&cru PCLK_PWM01>; status = "disabled"; }; pwm1: pwm@20030010 { compatible = "rockchip,rk2928-pwm"; reg = <0x20030010 0x10>; #pwm-cells = <2>; clocks = <&cru PCLK_PWM01>; status = "disabled"; }; wdt: watchdog@2004c000 { compatible = "snps,dw-wdt"; reg = <0x2004c000 0x100>; clocks = <&cru PCLK_WDT>; interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; pwm2: pwm@20050020 { compatible = "rockchip,rk2928-pwm"; reg = <0x20050020 0x10>; #pwm-cells = <2>; clocks = <&cru PCLK_PWM23>; status = "disabled"; }; pwm3: pwm@20050030 { compatible = "rockchip,rk2928-pwm"; reg = <0x20050030 0x10>; #pwm-cells = <2>; clocks = <&cru PCLK_PWM23>; status = "disabled"; }; i2c2: i2c@20056000 { compatible = "rockchip,rk3066-i2c"; reg = <0x20056000 0x1000>; interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; rockchip,grf = <&grf>; clocks = <&cru PCLK_I2C2>; clock-names = "i2c"; status = "disabled"; }; i2c3: i2c@2005a000 { compatible = "rockchip,rk3066-i2c"; reg = <0x2005a000 0x1000>; interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; rockchip,grf = <&grf>; clocks = <&cru PCLK_I2C3>; clock-names = "i2c"; status = "disabled"; }; i2c4: i2c@2005e000 { compatible = "rockchip,rk3066-i2c"; reg = <0x2005e000 0x1000>; interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; rockchip,grf = <&grf>; clocks = <&cru PCLK_I2C4>; clock-names = "i2c"; status = "disabled"; }; uart2: serial@20064000 { compatible = 
"snps,dw-apb-uart"; reg = <0x20064000 0x400>; interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <1>; clock-names = "baudclk", "apb_pclk"; clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>; status = "disabled"; }; uart3: serial@20068000 { compatible = "snps,dw-apb-uart"; reg = <0x20068000 0x400>; interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <1>; clock-names = "baudclk", "apb_pclk"; clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>; status = "disabled"; }; saradc: saradc@2006c000 { compatible = "rockchip,saradc"; reg = <0x2006c000 0x100>; interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; #io-channel-cells = <1>; clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; clock-names = "saradc", "apb_pclk"; resets = <&cru SRST_SARADC>; reset-names = "saradc-apb"; status = "disabled"; }; spi0: spi@20070000 { compatible = "rockchip,rk3066-spi"; clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>; clock-names = "spiclk", "apb_pclk"; interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; reg = <0x20070000 0x1000>; #address-cells = <1>; #size-cells = <0>; dmas = <&dmac2 10>, <&dmac2 11>; dma-names = "tx", "rx"; status = "disabled"; }; spi1: spi@20074000 { compatible = "rockchip,rk3066-spi"; clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>; clock-names = "spiclk", "apb_pclk"; interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>; reg = <0x20074000 0x1000>; #address-cells = <1>; #size-cells = <0>; dmas = <&dmac2 12>, <&dmac2 13>; dma-names = "tx", "rx"; status = "disabled"; }; dmac2: dma-controller@20078000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x20078000 0x4000>; interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; arm,pl330-broken-no-flushp; arm,pl330-periph-burst; clocks = <&cru ACLK_DMA2>; clock-names = "apb_pclk"; }; };
/* SPDX-License-Identifier: GPL-2.0 */ /* * Data Access Monitor Unit Tests * * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved. * * Author: SeongJae Park <[email protected]> */ #ifdef CONFIG_DAMON_VADDR_KUNIT_TEST #ifndef _DAMON_VADDR_TEST_H #define _DAMON_VADDR_TEST_H #include <kunit/test.h> static int __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas, ssize_t nr_vmas) { int i, ret = -ENOMEM; MA_STATE(mas, mt, 0, 0); if (!nr_vmas) return 0; mas_lock(&mas); for (i = 0; i < nr_vmas; i++) { mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1); if (mas_store_gfp(&mas, &vmas[i], GFP_KERNEL)) goto failed; } ret = 0; failed: mas_unlock(&mas); return ret; } /* * Test __damon_va_three_regions() function * * In the case of virtual memory address space monitoring, DAMON converts the * complex and dynamic memory mappings of each target task to three * discontiguous regions which cover every mapped area. However, the three * regions should not include the two biggest unmapped areas in the original * mapping, because the two biggest areas are normally the areas between 1) * heap and the mmap()-ed regions, and 2) the mmap()-ed regions and stack. * Because these two unmapped areas are very huge but obviously never accessed, * covering them is just a waste. * * __damon_va_three_regions() receives an address space of a process. It * first identifies the start of mappings, the end of mappings, and the two biggest * unmapped areas. After that, based on the information, it constructs the * three regions and returns. For more detail, refer to the comment of * the 'damon_init_regions_of()' function definition in the 'mm/damon.c' file. * * For example, suppose virtual address ranges of 10-20, 20-25, 200-210, * 210-220, 300-305, and 307-330 (other comments represent these mappings in * the shorter form: 10-20-25, 200-210-220, 300-305, 307-330) of a process are * mapped. To cover every mapping, the three regions should start with 10 * and end with 330. The process also has three unmapped areas, 25-200, * 220-300, and 305-307. Among those, 25-200 and 220-300 are the biggest two * unmapped areas, and thus the mappings should be converted to three regions of 10-25, * 200-220, and 300-330.
*/ static void damon_test_three_regions_in_vmas(struct kunit *test) { static struct mm_struct mm; struct damon_addr_range regions[3] = {0}; /* 10-20-25, 200-210-220, 300-305, 307-330 */ struct vm_area_struct vmas[] = { (struct vm_area_struct) {.vm_start = 10, .vm_end = 20}, (struct vm_area_struct) {.vm_start = 20, .vm_end = 25}, (struct vm_area_struct) {.vm_start = 200, .vm_end = 210}, (struct vm_area_struct) {.vm_start = 210, .vm_end = 220}, (struct vm_area_struct) {.vm_start = 300, .vm_end = 305}, (struct vm_area_struct) {.vm_start = 307, .vm_end = 330}, }; mt_init_flags(&mm.mm_mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU); if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas))) kunit_skip(test, "Failed to create VMA tree"); __damon_va_three_regions(&mm, regions); KUNIT_EXPECT_EQ(test, 10ul, regions[0].start); KUNIT_EXPECT_EQ(test, 25ul, regions[0].end); KUNIT_EXPECT_EQ(test, 200ul, regions[1].start); KUNIT_EXPECT_EQ(test, 220ul, regions[1].end); KUNIT_EXPECT_EQ(test, 300ul, regions[2].start); KUNIT_EXPECT_EQ(test, 330ul, regions[2].end); } static struct damon_region *__nth_region_of(struct damon_target *t, int idx) { struct damon_region *r; unsigned int i = 0; damon_for_each_region(r, t) { if (i++ == idx) return r; } return NULL; } /* * Test 'damon_set_regions()' * * test kunit object * regions an array containing start/end addresses of current * monitoring target regions * nr_regions the number of the addresses in 'regions' * three_regions The three regions that need to be applied now * expected start/end addresses of monitoring target regions that * 'three_regions' are applied * nr_expected the number of addresses in 'expected' * * The memory mapping of the target processes changes dynamically. To follow * the change, DAMON periodically reads the mappings, simplifies it to the * three regions, and updates the monitoring target regions to fit in the three * regions. The update of current target regions is the role of * 'damon_set_regions()'. * * This test passes the given target regions and the new three regions that * need to be applied to the function and check whether it updates the regions * as expected. */ static void damon_do_test_apply_three_regions(struct kunit *test, unsigned long *regions, int nr_regions, struct damon_addr_range *three_regions, unsigned long *expected, int nr_expected) { struct damon_target *t; struct damon_region *r; int i; t = damon_new_target(); for (i = 0; i < nr_regions / 2; i++) { r = damon_new_region(regions[i * 2], regions[i * 2 + 1]); damon_add_region(r, t); } damon_set_regions(t, three_regions, 3); for (i = 0; i < nr_expected / 2; i++) { r = __nth_region_of(t, i); KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]); KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]); } damon_destroy_target(t); } /* * This function test most common case where the three big regions are only * slightly changed. Target regions should adjust their boundary (10-20-30, * 50-55, 70-80, 90-100) to fit with the new big regions or remove target * regions (57-79) that now out of the three regions. 
*/ static void damon_test_apply_three_regions1(struct kunit *test) { /* 10-20-30, 50-55-57-59, 70-80-90-100 */ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, 70, 80, 80, 90, 90, 100}; /* 5-27, 45-55, 73-104 */ struct damon_addr_range new_three_regions[3] = { (struct damon_addr_range){.start = 5, .end = 27}, (struct damon_addr_range){.start = 45, .end = 55}, (struct damon_addr_range){.start = 73, .end = 104} }; /* 5-20-27, 45-55, 73-80-90-104 */ unsigned long expected[] = {5, 20, 20, 27, 45, 55, 73, 80, 80, 90, 90, 104}; damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), new_three_regions, expected, ARRAY_SIZE(expected)); } /* * Test slightly bigger change. Similar to above, but the second big region * now require two target regions (50-55, 57-59) to be removed. */ static void damon_test_apply_three_regions2(struct kunit *test) { /* 10-20-30, 50-55-57-59, 70-80-90-100 */ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, 70, 80, 80, 90, 90, 100}; /* 5-27, 56-57, 65-104 */ struct damon_addr_range new_three_regions[3] = { (struct damon_addr_range){.start = 5, .end = 27}, (struct damon_addr_range){.start = 56, .end = 57}, (struct damon_addr_range){.start = 65, .end = 104} }; /* 5-20-27, 56-57, 65-80-90-104 */ unsigned long expected[] = {5, 20, 20, 27, 56, 57, 65, 80, 80, 90, 90, 104}; damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), new_three_regions, expected, ARRAY_SIZE(expected)); } /* * Test a big change. The second big region has totally freed and mapped to * different area (50-59 -> 61-63). The target regions which were in the old * second big region (50-55-57-59) should be removed and new target region * covering the second big region (61-63) should be created. */ static void damon_test_apply_three_regions3(struct kunit *test) { /* 10-20-30, 50-55-57-59, 70-80-90-100 */ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, 70, 80, 80, 90, 90, 100}; /* 5-27, 61-63, 65-104 */ struct damon_addr_range new_three_regions[3] = { (struct damon_addr_range){.start = 5, .end = 27}, (struct damon_addr_range){.start = 61, .end = 63}, (struct damon_addr_range){.start = 65, .end = 104} }; /* 5-20-27, 61-63, 65-80-90-104 */ unsigned long expected[] = {5, 20, 20, 27, 61, 63, 65, 80, 80, 90, 90, 104}; damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), new_three_regions, expected, ARRAY_SIZE(expected)); } /* * Test another big change. Both of the second and third big regions (50-59 * and 70-100) has totally freed and mapped to different area (30-32 and * 65-68). The target regions which were in the old second and third big * regions should now be removed and new target regions covering the new second * and third big regions should be created. 
*/ static void damon_test_apply_three_regions4(struct kunit *test) { /* 10-20-30, 50-55-57-59, 70-80-90-100 */ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, 70, 80, 80, 90, 90, 100}; /* 5-7, 30-32, 65-68 */ struct damon_addr_range new_three_regions[3] = { (struct damon_addr_range){.start = 5, .end = 7}, (struct damon_addr_range){.start = 30, .end = 32}, (struct damon_addr_range){.start = 65, .end = 68} }; /* expect 5-7, 30-32, 65-68 */ unsigned long expected[] = {5, 7, 30, 32, 65, 68}; damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions), new_three_regions, expected, ARRAY_SIZE(expected)); } static void damon_test_split_evenly_fail(struct kunit *test, unsigned long start, unsigned long end, unsigned int nr_pieces) { struct damon_target *t = damon_new_target(); struct damon_region *r = damon_new_region(start, end); damon_add_region(r, t); KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL); KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u); damon_for_each_region(r, t) { KUNIT_EXPECT_EQ(test, r->ar.start, start); KUNIT_EXPECT_EQ(test, r->ar.end, end); } damon_free_target(t); } static void damon_test_split_evenly_succ(struct kunit *test, unsigned long start, unsigned long end, unsigned int nr_pieces) { struct damon_target *t = damon_new_target(); struct damon_region *r = damon_new_region(start, end); unsigned long expected_width = (end - start) / nr_pieces; unsigned long i = 0; damon_add_region(r, t); KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, nr_pieces), 0); KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces); damon_for_each_region(r, t) { if (i == nr_pieces - 1) { KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width); KUNIT_EXPECT_EQ(test, r->ar.end, end); break; } KUNIT_EXPECT_EQ(test, r->ar.start, start + i++ * expected_width); KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width); } damon_free_target(t); } static void damon_test_split_evenly(struct kunit *test) { KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5), -EINVAL); damon_test_split_evenly_fail(test, 0, 100, 0); damon_test_split_evenly_succ(test, 0, 100, 10); damon_test_split_evenly_succ(test, 5, 59, 5); damon_test_split_evenly_succ(test, 4, 6, 1); damon_test_split_evenly_succ(test, 0, 3, 2); damon_test_split_evenly_fail(test, 5, 6, 2); } static struct kunit_case damon_test_cases[] = { KUNIT_CASE(damon_test_three_regions_in_vmas), KUNIT_CASE(damon_test_apply_three_regions1), KUNIT_CASE(damon_test_apply_three_regions2), KUNIT_CASE(damon_test_apply_three_regions3), KUNIT_CASE(damon_test_apply_three_regions4), KUNIT_CASE(damon_test_split_evenly), {}, }; static struct kunit_suite damon_test_suite = { .name = "damon-operations", .test_cases = damon_test_cases, }; kunit_test_suite(damon_test_suite); #endif /* _DAMON_VADDR_TEST_H */ #endif /* CONFIG_DAMON_VADDR_KUNIT_TEST */
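/*
 * Illustrative, hedged sketch (user space, not DAMON code) of the idea the
 * tests above exercise: given sorted mapped ranges, drop the two largest
 * gaps and keep three covering regions.  It assumes at least three mapped
 * ranges and uses the example from the comment in this file
 * (10-20-25, 200-210-220, 300-305, 307-330), printing 10-25, 200-220,
 * 300-330.  Builds with gcc.
 */
#include <stdio.h>

struct range { unsigned long start, end; };

static void three_regions(const struct range *maps, int n, struct range out[3])
{
	int g1 = -1, g2 = -1;		/* indices of the two largest gaps */

	for (int i = 0; i < n - 1; i++) {
		unsigned long gap = maps[i + 1].start - maps[i].end;

		if (g1 < 0 || gap > maps[g1 + 1].start - maps[g1].end) {
			g2 = g1;
			g1 = i;
		} else if (g2 < 0 || gap > maps[g2 + 1].start - maps[g2].end) {
			g2 = i;
		}
	}
	/* Order the two gap positions so the regions come out sorted. */
	if (g1 > g2) { int t = g1; g1 = g2; g2 = t; }

	out[0] = (struct range){ maps[0].start, maps[g1].end };
	out[1] = (struct range){ maps[g1 + 1].start, maps[g2].end };
	out[2] = (struct range){ maps[g2 + 1].start, maps[n - 1].end };
}

int main(void)
{
	const struct range maps[] = {
		{ 10, 20 }, { 20, 25 }, { 200, 210 }, { 210, 220 },
		{ 300, 305 }, { 307, 330 },
	};
	struct range out[3];

	three_regions(maps, 6, out);
	for (int i = 0; i < 3; i++)
		printf("%lu-%lu\n", out[i].start, out[i].end);
	return 0;
}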
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion (tree-based version) * Internal non-public definitions that provide either classic * or preemptible semantics. * * Copyright Red Hat, 2009 * Copyright IBM Corporation, 2009 * Copyright SUSE, 2021 * * Author: Ingo Molnar <[email protected]> * Paul E. McKenney <[email protected]> * Frederic Weisbecker <[email protected]> */ #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp) { /* Race on early boot between thread creation and assignment */ if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread) return true; if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread) if (in_task()) return true; return false; } /* * Offload callback processing from the boot-time-specified set of CPUs * specified by rcu_nocb_mask. For the CPUs in the set, there are kthreads * created that pull the callbacks from the corresponding CPU, wait for * a grace period to elapse, and invoke the callbacks. These kthreads * are organized into GP kthreads, which manage incoming callbacks, wait for * grace periods, and awaken CB kthreads, and the CB kthreads, which only * invoke callbacks. Each GP kthread invokes its own CBs. The no-CBs CPUs * do a wake_up() on their GP kthread when they insert a callback into any * empty list, unless the rcu_nocb_poll boot parameter has been specified, * in which case each kthread actively polls its CPU. (Which isn't so great * for energy efficiency, but which does reduce RCU's overhead on that CPU.) * * This is intended to be used in conjunction with Frederic Weisbecker's * adaptive-idle work, which would seriously reduce OS jitter on CPUs * running CPU-bound user-mode computations. * * Offloading of callbacks can also be used as an energy-efficiency * measure because CPUs with no RCU callbacks queued are more aggressive * about entering dyntick-idle mode. */ /* * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. * If the list is invalid, a warning is emitted and all CPUs are offloaded. */ static int __init rcu_nocb_setup(char *str) { alloc_bootmem_cpumask_var(&rcu_nocb_mask); if (*str == '=') { if (cpulist_parse(++str, rcu_nocb_mask)) { pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); cpumask_setall(rcu_nocb_mask); } } rcu_state.nocb_is_setup = true; return 1; } __setup("rcu_nocbs", rcu_nocb_setup); static int __init parse_rcu_nocb_poll(char *arg) { rcu_nocb_poll = true; return 1; } __setup("rcu_nocb_poll", parse_rcu_nocb_poll); /* * Don't bother bypassing ->cblist if the call_rcu() rate is low. * After all, the main point of bypassing is to avoid lock contention * on ->nocb_lock, which only can happen at high call_rcu() rates. */ static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ; module_param(nocb_nobypass_lim_per_jiffy, int, 0); /* * Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the * lock isn't immediately available, perform minimal sanity check. */ static void rcu_nocb_bypass_lock(struct rcu_data *rdp) __acquires(&rdp->nocb_bypass_lock) { lockdep_assert_irqs_disabled(); if (raw_spin_trylock(&rdp->nocb_bypass_lock)) return; /* * Contention expected only when local enqueue collide with * remote flush from kthreads. 
*/ WARN_ON_ONCE(smp_processor_id() != rdp->cpu); raw_spin_lock(&rdp->nocb_bypass_lock); } /* * Conditionally acquire the specified rcu_data structure's * ->nocb_bypass_lock. */ static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp) { lockdep_assert_irqs_disabled(); return raw_spin_trylock(&rdp->nocb_bypass_lock); } /* * Release the specified rcu_data structure's ->nocb_bypass_lock. */ static void rcu_nocb_bypass_unlock(struct rcu_data *rdp) __releases(&rdp->nocb_bypass_lock) { lockdep_assert_irqs_disabled(); raw_spin_unlock(&rdp->nocb_bypass_lock); } /* * Acquire the specified rcu_data structure's ->nocb_lock, but only * if it corresponds to a no-CBs CPU. */ static void rcu_nocb_lock(struct rcu_data *rdp) { lockdep_assert_irqs_disabled(); if (!rcu_rdp_is_offloaded(rdp)) return; raw_spin_lock(&rdp->nocb_lock); } /* * Release the specified rcu_data structure's ->nocb_lock, but only * if it corresponds to a no-CBs CPU. */ static void rcu_nocb_unlock(struct rcu_data *rdp) { if (rcu_rdp_is_offloaded(rdp)) { lockdep_assert_irqs_disabled(); raw_spin_unlock(&rdp->nocb_lock); } } /* * Release the specified rcu_data structure's ->nocb_lock and restore * interrupts, but only if it corresponds to a no-CBs CPU. */ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, unsigned long flags) { if (rcu_rdp_is_offloaded(rdp)) { lockdep_assert_irqs_disabled(); raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); } else { local_irq_restore(flags); } } /* Lockdep check that ->cblist may be safely accessed. */ static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp) { lockdep_assert_irqs_disabled(); if (rcu_rdp_is_offloaded(rdp)) lockdep_assert_held(&rdp->nocb_lock); } /* * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended * grace period. */ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) { swake_up_all(sq); } static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) { return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1]; } static void rcu_init_one_nocb(struct rcu_node *rnp) { init_swait_queue_head(&rnp->nocb_gp_wq[0]); init_swait_queue_head(&rnp->nocb_gp_wq[1]); } static bool __wake_nocb_gp(struct rcu_data *rdp_gp, struct rcu_data *rdp, bool force, unsigned long flags) __releases(rdp_gp->nocb_gp_lock) { bool needwake = false; if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) { raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("AlreadyAwake")); return false; } if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); del_timer(&rdp_gp->nocb_timer); } if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); needwake = true; } raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); if (needwake) { trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); swake_up_one_online(&rdp_gp->nocb_gp_wq); } return needwake; } /* * Kick the GP kthread for this NOCB group. */ static bool wake_nocb_gp(struct rcu_data *rdp, bool force) { unsigned long flags; struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); return __wake_nocb_gp(rdp_gp, rdp, force, flags); } #ifdef CONFIG_RCU_LAZY /* * LAZY_FLUSH_JIFFIES decides the maximum amount of time that * can elapse before lazy callbacks are flushed. 
Lazy callbacks * could be flushed much earlier for a number of other reasons * however, LAZY_FLUSH_JIFFIES will ensure no lazy callbacks are * left unsubmitted to RCU after those many jiffies. */ #define LAZY_FLUSH_JIFFIES (10 * HZ) static unsigned long jiffies_lazy_flush = LAZY_FLUSH_JIFFIES; // To be called only from test code. void rcu_set_jiffies_lazy_flush(unsigned long jif) { jiffies_lazy_flush = jif; } EXPORT_SYMBOL(rcu_set_jiffies_lazy_flush); unsigned long rcu_get_jiffies_lazy_flush(void) { return jiffies_lazy_flush; } EXPORT_SYMBOL(rcu_get_jiffies_lazy_flush); #endif /* * Arrange to wake the GP kthread for this NOCB group at some future * time when it is safe to do so. */ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype, const char *reason) { unsigned long flags; struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); /* * Bypass wakeup overrides previous deferments. In case of * callback storms, no need to wake up too early. */ if (waketype == RCU_NOCB_WAKE_LAZY && rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) { mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush()); WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); } else if (waketype == RCU_NOCB_WAKE_BYPASS) { mod_timer(&rdp_gp->nocb_timer, jiffies + 2); WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); } else { if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE) mod_timer(&rdp_gp->nocb_timer, jiffies + 1); if (rdp_gp->nocb_defer_wakeup < waketype) WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); } raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); } /* * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL. * However, if there is a callback to be enqueued and if ->nocb_bypass * proves to be initially empty, just return false because the no-CB GP * kthread may need to be awakened in this case. * * Return true if there was something to be flushed and it succeeded, otherwise * false. * * Note that this function always returns true if rhp is NULL. */ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in, unsigned long j, bool lazy) { struct rcu_cblist rcl; struct rcu_head *rhp = rhp_in; WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp)); rcu_lockdep_assert_cblist_protected(rdp); lockdep_assert_held(&rdp->nocb_bypass_lock); if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { raw_spin_unlock(&rdp->nocb_bypass_lock); return false; } /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */ if (rhp) rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ /* * If the new CB requested was a lazy one, queue it onto the main * ->cblist so that we can take advantage of the grace-period that will * happen regardless. But queue it onto the bypass list first so that * the lazy CB is ordered with the existing CBs in the bypass list. */ if (lazy && rhp) { rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); rhp = NULL; } rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); WRITE_ONCE(rdp->lazy_len, 0); rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl); WRITE_ONCE(rdp->nocb_bypass_first, j); rcu_nocb_bypass_unlock(rdp); return true; } /* * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL. * However, if there is a callback to be enqueued and if ->nocb_bypass * proves to be initially empty, just return false because the no-CB GP * kthread may need to be awakened in this case. * * Note that this function always returns true if rhp is NULL. 
*/ static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, unsigned long j, bool lazy) { if (!rcu_rdp_is_offloaded(rdp)) return true; rcu_lockdep_assert_cblist_protected(rdp); rcu_nocb_bypass_lock(rdp); return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy); } /* * If the ->nocb_bypass_lock is immediately available, flush the * ->nocb_bypass queue into ->cblist. */ static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j) { rcu_lockdep_assert_cblist_protected(rdp); if (!rcu_rdp_is_offloaded(rdp) || !rcu_nocb_bypass_trylock(rdp)) return; WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false)); } /* * See whether it is appropriate to use the ->nocb_bypass list in order * to control contention on ->nocb_lock. A limited number of direct * enqueues are permitted into ->cblist per jiffy. If ->nocb_bypass * is non-empty, further callbacks must be placed into ->nocb_bypass, * otherwise rcu_barrier() breaks. Use rcu_nocb_flush_bypass() to switch * back to direct use of ->cblist. However, ->nocb_bypass should not be * used if ->cblist is empty, because otherwise callbacks can be stranded * on ->nocb_bypass because we cannot count on the current CPU ever again * invoking call_rcu(). The general rule is that if ->nocb_bypass is * non-empty, the corresponding no-CBs grace-period kthread must not be * in an indefinite sleep state. * * Finally, it is not permitted to use the bypass during early boot, * as doing so would confuse the auto-initialization code. Besides * which, there is no point in worrying about lock contention while * there is only one CPU in operation. */ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, bool *was_alldone, unsigned long flags, bool lazy) { unsigned long c; unsigned long cur_gp_seq; unsigned long j = jiffies; long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len)); lockdep_assert_irqs_disabled(); // Pure softirq/rcuc based processing: no bypassing, no // locking. if (!rcu_rdp_is_offloaded(rdp)) { *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); return false; } // Don't use ->nocb_bypass during early boot. if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) { rcu_nocb_lock(rdp); WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); return false; } // If we have advanced to a new jiffy, reset counts to allow // moving back from ->nocb_bypass to ->cblist. if (j == rdp->nocb_nobypass_last) { c = rdp->nocb_nobypass_count + 1; } else { WRITE_ONCE(rdp->nocb_nobypass_last, j); c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy; if (ULONG_CMP_LT(rdp->nocb_nobypass_count, nocb_nobypass_lim_per_jiffy)) c = 0; else if (c > nocb_nobypass_lim_per_jiffy) c = nocb_nobypass_lim_per_jiffy; } WRITE_ONCE(rdp->nocb_nobypass_count, c); // If there hasn't yet been all that many ->cblist enqueues // this jiffy, tell the caller to enqueue onto ->cblist. But flush // ->nocb_bypass first. // Lazy CBs throttle this back and do immediate bypass queuing. if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) { rcu_nocb_lock(rdp); *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); if (*was_alldone) trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstQ")); WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j, false)); WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); return false; // Caller must enqueue the callback. 
	}

	// If ->nocb_bypass has been used too long or is too full,
	// flush ->nocb_bypass to ->cblist.
	if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) ||
	    (ncbs && bypass_is_lazy &&
	     (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()))) ||
	    ncbs >= qhimark) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);

		if (!rcu_nocb_flush_bypass(rdp, rhp, j, lazy)) {
			if (*was_alldone)
				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
						    TPS("FirstQ"));
			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
			return false; // Caller must enqueue the callback.
		}
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}

		// The flush succeeded and we moved CBs into the regular list.
		// Don't wait for the wake up timer as it may be too far ahead.
		// Wake up the GP thread now instead, if the cblist was empty.
		__call_rcu_nocb_wake(rdp, *was_alldone, flags);

		return true; // Callback already enqueued.
	}

	// We need to use the bypass.
	rcu_nocb_bypass_lock(rdp);
	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);

	if (lazy)
		WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1);

	if (!ncbs) {
		WRITE_ONCE(rdp->nocb_bypass_first, j);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
	}

	rcu_nocb_bypass_unlock(rdp);

	// A wake up of the grace period kthread or timer adjustment
	// needs to be done only if:
	// 1. Bypass list was fully empty before (this is the first
	//    bypass list entry), or:
	// 2. Both of these conditions are met:
	//    a. The bypass list previously had only lazy CBs, and:
	//    b. The new CB is non-lazy.
	if (!ncbs || (bypass_is_lazy && !lazy)) {
		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQwake"));
			__call_rcu_nocb_wake(rdp, true, flags);
		} else {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQnoWake"));
			rcu_nocb_unlock(rdp);
		}
	}
	return true; // Callback already enqueued.
}

/*
 * Awaken the no-CBs grace-period kthread if needed, either due to it
 * legitimately being asleep or due to overload conditions.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
				 unsigned long flags)
				 __releases(rdp->nocb_lock)
{
	long bypass_len;
	unsigned long cur_gp_seq;
	unsigned long j;
	long lazy_len;
	long len;
	struct task_struct *t;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	// If we are being polled or there is no kthread, just leave.
	t = READ_ONCE(rdp->nocb_gp_kthread);
	if (rcu_nocb_poll || !t) {
		rcu_nocb_unlock(rdp);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	// Need to actually do a wakeup.
	len = rcu_segcblist_n_cbs(&rdp->cblist);
	bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	lazy_len = READ_ONCE(rdp->lazy_len);
	if (was_alldone) {
		rdp->qlen_last_fqs_check = len;
		// Only lazy CBs in bypass list
		if (lazy_len && bypass_len == lazy_len) {
			rcu_nocb_unlock(rdp);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
					   TPS("WakeLazy"));
		} else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) {
			/* ... if queue was empty ...
*/ rcu_nocb_unlock(rdp); wake_nocb_gp(rdp, false); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeEmpty")); } else { /* * Don't do the wake-up upfront on fragile paths. * Also offline CPUs can't call swake_up_one_online() from * (soft-)IRQs. Rely on the final deferred wake-up from * rcutree_report_cpu_dead() */ rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, TPS("WakeEmptyIsDeferred")); } } else if (len > rdp->qlen_last_fqs_check + qhimark) { /* ... or if many callbacks queued. */ rdp->qlen_last_fqs_check = len; j = jiffies; if (j != rdp->nocb_gp_adv_time && rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { rcu_advance_cbs_nowake(rdp->mynode, rdp); rdp->nocb_gp_adv_time = j; } smp_mb(); /* Enqueue before timer_pending(). */ if ((rdp->nocb_cb_sleep || !rcu_segcblist_ready_cbs(&rdp->cblist)) && !timer_pending(&rdp_gp->nocb_timer)) { rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE, TPS("WakeOvfIsDeferred")); } else { rcu_nocb_unlock(rdp); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); } } else { rcu_nocb_unlock(rdp); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); } } static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func, unsigned long flags, bool lazy) { bool was_alldone; if (!rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) { /* Not enqueued on bypass but locked, do regular enqueue */ rcutree_enqueue(rdp, head, func); __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ } } static void nocb_gp_toggle_rdp(struct rcu_data *rdp_gp, struct rcu_data *rdp) { struct rcu_segcblist *cblist = &rdp->cblist; unsigned long flags; /* * Locking orders future de-offloaded callbacks enqueue against previous * handling of this rdp. Ie: Make sure rcuog is done with this rdp before * deoffloaded callbacks can be enqueued. */ raw_spin_lock_irqsave(&rdp->nocb_lock, flags); if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) { /* * Offloading. Set our flag and notify the offload worker. * We will handle this rdp until it ever gets de-offloaded. */ list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp); rcu_segcblist_set_flags(cblist, SEGCBLIST_OFFLOADED); } else { /* * De-offloading. Clear our flag and notify the de-offload worker. * We will ignore this rdp until it ever gets re-offloaded. */ list_del(&rdp->nocb_entry_rdp); rcu_segcblist_clear_flags(cblist, SEGCBLIST_OFFLOADED); } raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); } static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu) { trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep")); swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq, !READ_ONCE(my_rdp->nocb_gp_sleep)); trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep")); } /* * No-CBs GP kthreads come here to wait for additional callbacks to show up * or for grace periods to end. */ static void nocb_gp_wait(struct rcu_data *my_rdp) { bool bypass = false; int __maybe_unused cpu = my_rdp->cpu; unsigned long cur_gp_seq; unsigned long flags; bool gotcbs = false; unsigned long j = jiffies; bool lazy = false; bool needwait_gp = false; // This prevents actual uninitialized use. bool needwake; bool needwake_gp; struct rcu_data *rdp, *rdp_toggling = NULL; struct rcu_node *rnp; unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning. bool wasempty = false; /* * Each pass through the following loop checks for CBs and for the * nearest grace period (if any) to wait for next. 
The CB kthreads * and the global grace-period kthread are awakened if needed. */ WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); /* * An rcu_data structure is removed from the list after its * CPU is de-offloaded and added to the list before that CPU is * (re-)offloaded. If the following loop happens to be referencing * that rcu_data structure during the time that the corresponding * CPU is de-offloaded and then immediately re-offloaded, this * loop's rdp pointer will be carried to the end of the list by * the resulting pair of list operations. This can cause the loop * to skip over some of the rcu_data structures that were supposed * to have been scanned. Fortunately a new iteration through the * entire loop is forced after a given CPU's rcu_data structure * is added to the list, so the skipped-over rcu_data structures * won't be ignored for long. */ list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) { long bypass_ncbs; bool flush_bypass = false; long lazy_ncbs; trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); rcu_nocb_lock_irqsave(rdp, flags); lockdep_assert_held(&rdp->nocb_lock); bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); lazy_ncbs = READ_ONCE(rdp->lazy_len); if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) && (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()) || bypass_ncbs > 2 * qhimark)) { flush_bypass = true; } else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) && (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) || bypass_ncbs > 2 * qhimark)) { flush_bypass = true; } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) { rcu_nocb_unlock_irqrestore(rdp, flags); continue; /* No callbacks here, try next. */ } if (flush_bypass) { // Bypass full or old, so flush it. (void)rcu_nocb_try_flush_bypass(rdp, j); bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); lazy_ncbs = READ_ONCE(rdp->lazy_len); } if (bypass_ncbs) { trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, bypass_ncbs == lazy_ncbs ? TPS("Lazy") : TPS("Bypass")); if (bypass_ncbs == lazy_ncbs) lazy = true; else bypass = true; } rnp = rdp->mynode; // Advance callbacks if helpful and low contention. needwake_gp = false; if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL) || (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) { raw_spin_lock_rcu_node(rnp); /* irqs disabled. */ needwake_gp = rcu_advance_cbs(rnp, rdp); wasempty = rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL); raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */ } // Need to wait on some grace period? WARN_ON_ONCE(wasempty && !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)); if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) { if (!needwait_gp || ULONG_CMP_LT(cur_gp_seq, wait_gp_seq)) wait_gp_seq = cur_gp_seq; needwait_gp = true; trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("NeedWaitGP")); } if (rcu_segcblist_ready_cbs(&rdp->cblist)) { needwake = rdp->nocb_cb_sleep; WRITE_ONCE(rdp->nocb_cb_sleep, false); } else { needwake = false; } rcu_nocb_unlock_irqrestore(rdp, flags); if (needwake) { swake_up_one(&rdp->nocb_cb_wq); gotcbs = true; } if (needwake_gp) rcu_gp_kthread_wake(); } my_rdp->nocb_gp_bypass = bypass; my_rdp->nocb_gp_gp = needwait_gp; my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0; // At least one child with non-empty ->nocb_bypass, so set // timer in order to avoid stranding its callbacks. if (!rcu_nocb_poll) { // If bypass list only has lazy CBs. Add a deferred lazy wake up. 
if (lazy && !bypass) { wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_LAZY, TPS("WakeLazyIsDeferred")); // Otherwise add a deferred bypass wake up. } else if (bypass) { wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS, TPS("WakeBypassIsDeferred")); } } if (rcu_nocb_poll) { /* Polling, so trace if first poll in the series. */ if (gotcbs) trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll")); if (list_empty(&my_rdp->nocb_head_rdp)) { raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); if (!my_rdp->nocb_toggling_rdp) WRITE_ONCE(my_rdp->nocb_gp_sleep, true); raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); /* Wait for any offloading rdp */ nocb_gp_sleep(my_rdp, cpu); } else { schedule_timeout_idle(1); } } else if (!needwait_gp) { /* Wait for callbacks to appear. */ nocb_gp_sleep(my_rdp, cpu); } else { rnp = my_rdp->mynode; trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait")); swait_event_interruptible_exclusive( rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1], rcu_seq_done(&rnp->gp_seq, wait_gp_seq) || !READ_ONCE(my_rdp->nocb_gp_sleep)); trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait")); } if (!rcu_nocb_poll) { raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); // (De-)queue an rdp to/from the group if its nocb state is changing rdp_toggling = my_rdp->nocb_toggling_rdp; if (rdp_toggling) my_rdp->nocb_toggling_rdp = NULL; if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); del_timer(&my_rdp->nocb_timer); } WRITE_ONCE(my_rdp->nocb_gp_sleep, true); raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); } else { rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp); if (rdp_toggling) { /* * Paranoid locking to make sure nocb_toggling_rdp is well * reset *before* we (re)set SEGCBLIST_KTHREAD_GP or we could * race with another round of nocb toggling for this rdp. * Nocb locking should prevent from that already but we stick * to paranoia, especially in rare path. */ raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); my_rdp->nocb_toggling_rdp = NULL; raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); } } if (rdp_toggling) { nocb_gp_toggle_rdp(my_rdp, rdp_toggling); swake_up_one(&rdp_toggling->nocb_state_wq); } my_rdp->nocb_gp_seq = -1; WARN_ON(signal_pending(current)); } /* * No-CBs grace-period-wait kthread. There is one of these per group * of CPUs, but only once at least one CPU in that group has come online * at least once since boot. This kthread checks for newly posted * callbacks from any of the CPUs it is responsible for, waits for a * grace period, then awakens all of the rcu_nocb_cb_kthread() instances * that then have callback-invocation work to do. */ static int rcu_nocb_gp_kthread(void *arg) { struct rcu_data *rdp = arg; for (;;) { WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1); nocb_gp_wait(rdp); cond_resched_tasks_rcu_qs(); } return 0; } static inline bool nocb_cb_wait_cond(struct rcu_data *rdp) { return !READ_ONCE(rdp->nocb_cb_sleep) || kthread_should_park(); } /* * Invoke any ready callbacks from the corresponding no-CBs CPU, * then, if there are no more, wait for more to appear. */ static void nocb_cb_wait(struct rcu_data *rdp) { struct rcu_segcblist *cblist = &rdp->cblist; unsigned long cur_gp_seq; unsigned long flags; bool needwake_gp = false; struct rcu_node *rnp = rdp->mynode; swait_event_interruptible_exclusive(rdp->nocb_cb_wq, nocb_cb_wait_cond(rdp)); if (kthread_should_park()) { /* * kthread_park() must be preceded by an rcu_barrier(). 
* But yet another rcu_barrier() might have sneaked in between * the barrier callback execution and the callbacks counter * decrement. */ if (rdp->nocb_cb_sleep) { rcu_nocb_lock_irqsave(rdp, flags); WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist)); rcu_nocb_unlock_irqrestore(rdp, flags); kthread_parkme(); } } else if (READ_ONCE(rdp->nocb_cb_sleep)) { WARN_ON(signal_pending(current)); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); } WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp)); local_irq_save(flags); rcu_momentary_eqs(); local_irq_restore(flags); /* * Disable BH to provide the expected environment. Also, when * transitioning to/from NOCB mode, a self-requeuing callback might * be invoked from softirq. A short grace period could cause both * instances of this callback would execute concurrently. */ local_bh_disable(); rcu_do_batch(rdp); local_bh_enable(); lockdep_assert_irqs_enabled(); rcu_nocb_lock_irqsave(rdp, flags); if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) && rcu_seq_done(&rnp->gp_seq, cur_gp_seq) && raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */ needwake_gp = rcu_advance_cbs(rdp->mynode, rdp); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ } if (!rcu_segcblist_ready_cbs(cblist)) { WRITE_ONCE(rdp->nocb_cb_sleep, true); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep")); } else { WRITE_ONCE(rdp->nocb_cb_sleep, false); } rcu_nocb_unlock_irqrestore(rdp, flags); if (needwake_gp) rcu_gp_kthread_wake(); } /* * Per-rcu_data kthread, but only for no-CBs CPUs. Repeatedly invoke * nocb_cb_wait() to do the dirty work. */ static int rcu_nocb_cb_kthread(void *arg) { struct rcu_data *rdp = arg; // Each pass through this loop does one callback batch, and, // if there are no more ready callbacks, waits for them. for (;;) { nocb_cb_wait(rdp); cond_resched_tasks_rcu_qs(); } return 0; } /* Is a deferred wakeup of rcu_nocb_kthread() required? */ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level) { return READ_ONCE(rdp->nocb_defer_wakeup) >= level; } /* Do a deferred wakeup of rcu_nocb_kthread(). */ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp, struct rcu_data *rdp, int level, unsigned long flags) __releases(rdp_gp->nocb_gp_lock) { int ndw; int ret; if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) { raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); return false; } ndw = rdp_gp->nocb_defer_wakeup; ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); return ret; } /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */ static void do_nocb_deferred_wakeup_timer(struct timer_list *t) { unsigned long flags; struct rcu_data *rdp = from_timer(rdp, t, nocb_timer); WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags); smp_mb__after_spinlock(); /* Timer expire before wakeup. */ do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags); } /* * Do a deferred wakeup of rcu_nocb_kthread() from fastpath. * This means we do an inexact common-case check. Note that if * we miss, ->nocb_timer will eventually clean things up. 
 */
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
		return false;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}

void rcu_nocb_flush_deferred_wakeup(void)
{
	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);

static int rcu_nocb_queue_toggle_rdp(struct rcu_data *rdp)
{
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
	bool wake_gp = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	// Queue this rdp for add/del to/from the list to iterate on rcuog
	WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
	if (rdp_gp->nocb_gp_sleep) {
		rdp_gp->nocb_gp_sleep = false;
		wake_gp = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	return wake_gp;
}

static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data *rdp)
{
	unsigned long flags;
	bool ret;

	/*
	 * Locking makes sure rcuog is done handling this rdp before deoffloaded
	 * enqueue can happen. Also it keeps the SEGCBLIST_OFFLOADED flag stable
	 * while the ->nocb_lock is held.
	 */
	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	ret = !rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

	return ret;
}

static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
{
	unsigned long flags;
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	/* CPU must be offline, unless it's early boot */
	WARN_ON_ONCE(cpu_online(rdp->cpu) && rdp->cpu != raw_smp_processor_id());

	pr_info("De-offloading %d\n", rdp->cpu);

	/* Flush all callbacks from segcblist and bypass */
	rcu_barrier();

	/*
	 * Make sure the rcuoc kthread isn't in the middle of a nocb locked
	 * sequence while offloading is deactivated, along with nocb locking.
	 */
	if (rdp->nocb_cb_kthread)
		kthread_park(rdp->nocb_cb_kthread);

	rcu_nocb_lock_irqsave(rdp, flags);
	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
	rcu_nocb_unlock_irqrestore(rdp, flags);

	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);

	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);

	if (rdp_gp->nocb_gp_kthread) {
		if (wake_gp)
			wake_up_process(rdp_gp->nocb_gp_kthread);

		swait_event_exclusive(rdp->nocb_state_wq,
				      rcu_nocb_rdp_deoffload_wait_cond(rdp));
	} else {
		/*
		 * No kthread to clear the flags for us or remove the rdp from the nocb list
		 * to iterate. Do it here instead. Locking doesn't look strictly necessary
		 * but we stick to paranoia in this rare path.
*/ raw_spin_lock_irqsave(&rdp->nocb_lock, flags); rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_OFFLOADED); raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); list_del(&rdp->nocb_entry_rdp); } mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); return 0; } int rcu_nocb_cpu_deoffload(int cpu) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); int ret = 0; cpus_read_lock(); mutex_lock(&rcu_state.nocb_mutex); if (rcu_rdp_is_offloaded(rdp)) { if (!cpu_online(cpu)) { ret = rcu_nocb_rdp_deoffload(rdp); if (!ret) cpumask_clear_cpu(cpu, rcu_nocb_mask); } else { pr_info("NOCB: Cannot CB-deoffload online CPU %d\n", rdp->cpu); ret = -EINVAL; } } mutex_unlock(&rcu_state.nocb_mutex); cpus_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload); static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data *rdp) { unsigned long flags; bool ret; raw_spin_lock_irqsave(&rdp->nocb_lock, flags); ret = rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED); raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); return ret; } static int rcu_nocb_rdp_offload(struct rcu_data *rdp) { int wake_gp; struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; WARN_ON_ONCE(cpu_online(rdp->cpu)); /* * For now we only support re-offload, ie: the rdp must have been * offloaded on boot first. */ if (!rdp->nocb_gp_rdp) return -EINVAL; if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread)) return -EINVAL; pr_info("Offloading %d\n", rdp->cpu); WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist)); wake_gp = rcu_nocb_queue_toggle_rdp(rdp); if (wake_gp) wake_up_process(rdp_gp->nocb_gp_kthread); swait_event_exclusive(rdp->nocb_state_wq, rcu_nocb_rdp_offload_wait_cond(rdp)); kthread_unpark(rdp->nocb_cb_kthread); return 0; } int rcu_nocb_cpu_offload(int cpu) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); int ret = 0; cpus_read_lock(); mutex_lock(&rcu_state.nocb_mutex); if (!rcu_rdp_is_offloaded(rdp)) { if (!cpu_online(cpu)) { ret = rcu_nocb_rdp_offload(rdp); if (!ret) cpumask_set_cpu(cpu, rcu_nocb_mask); } else { pr_info("NOCB: Cannot CB-offload online CPU %d\n", rdp->cpu); ret = -EINVAL; } } mutex_unlock(&rcu_state.nocb_mutex); cpus_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload); #ifdef CONFIG_RCU_LAZY static unsigned long lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { int cpu; unsigned long count = 0; if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask))) return 0; /* Protect rcu_nocb_mask against concurrent (de-)offloading. */ if (!mutex_trylock(&rcu_state.nocb_mutex)) return 0; /* Snapshot count of all CPUs */ for_each_cpu(cpu, rcu_nocb_mask) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); count += READ_ONCE(rdp->lazy_len); } mutex_unlock(&rcu_state.nocb_mutex); return count ? count : SHRINK_EMPTY; } static unsigned long lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { int cpu; unsigned long flags; unsigned long count = 0; if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask))) return 0; /* * Protect against concurrent (de-)offloading. Otherwise nocb locking * may be ignored or imbalanced. */ if (!mutex_trylock(&rcu_state.nocb_mutex)) { /* * But really don't insist if nocb_mutex is contended since we * can't guarantee that it will never engage in a dependency * chain involving memory allocation. The lock is seldom contended * anyway. 
*/ return 0; } /* Snapshot count of all CPUs */ for_each_cpu(cpu, rcu_nocb_mask) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); int _count; if (WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp))) continue; if (!READ_ONCE(rdp->lazy_len)) continue; rcu_nocb_lock_irqsave(rdp, flags); /* * Recheck under the nocb lock. Since we are not holding the bypass * lock we may still race with increments from the enqueuer but still * we know for sure if there is at least one lazy callback. */ _count = READ_ONCE(rdp->lazy_len); if (!_count) { rcu_nocb_unlock_irqrestore(rdp, flags); continue; } rcu_nocb_try_flush_bypass(rdp, jiffies); rcu_nocb_unlock_irqrestore(rdp, flags); wake_nocb_gp(rdp, false); sc->nr_to_scan -= _count; count += _count; if (sc->nr_to_scan <= 0) break; } mutex_unlock(&rcu_state.nocb_mutex); return count ? count : SHRINK_STOP; } #endif // #ifdef CONFIG_RCU_LAZY void __init rcu_init_nohz(void) { int cpu; struct rcu_data *rdp; const struct cpumask *cpumask = NULL; struct shrinker * __maybe_unused lazy_rcu_shrinker; #if defined(CONFIG_NO_HZ_FULL) if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask)) cpumask = tick_nohz_full_mask; #endif if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL) && !rcu_state.nocb_is_setup && !cpumask) cpumask = cpu_possible_mask; if (cpumask) { if (!cpumask_available(rcu_nocb_mask)) { if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) { pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n"); return; } } cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask); rcu_state.nocb_is_setup = true; } if (!rcu_state.nocb_is_setup) return; #ifdef CONFIG_RCU_LAZY lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy"); if (!lazy_rcu_shrinker) { pr_err("Failed to allocate lazy_rcu shrinker!\n"); } else { lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count; lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan; shrinker_register(lazy_rcu_shrinker); } #endif // #ifdef CONFIG_RCU_LAZY if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n"); cpumask_and(rcu_nocb_mask, cpu_possible_mask, rcu_nocb_mask); } if (cpumask_empty(rcu_nocb_mask)) pr_info("\tOffload RCU callbacks from CPUs: (none).\n"); else pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n", cpumask_pr_args(rcu_nocb_mask)); if (rcu_nocb_poll) pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); for_each_cpu(cpu, rcu_nocb_mask) { rdp = per_cpu_ptr(&rcu_data, cpu); if (rcu_segcblist_empty(&rdp->cblist)) rcu_segcblist_init(&rdp->cblist); rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_OFFLOADED); } rcu_organize_nocb_kthreads(); } /* Initialize per-rcu_data variables for no-CBs CPUs. */ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) { init_swait_queue_head(&rdp->nocb_cb_wq); init_swait_queue_head(&rdp->nocb_gp_wq); init_swait_queue_head(&rdp->nocb_state_wq); raw_spin_lock_init(&rdp->nocb_lock); raw_spin_lock_init(&rdp->nocb_bypass_lock); raw_spin_lock_init(&rdp->nocb_gp_lock); timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); rcu_cblist_init(&rdp->nocb_bypass); WRITE_ONCE(rdp->lazy_len, 0); mutex_init(&rdp->nocb_gp_kthread_mutex); } /* * If the specified CPU is a no-CBs CPU that does not already have its * rcuo CB kthread, spawn it. Additionally, if the rcuo GP kthread * for this CPU's group has not yet been created, spawn it as well. 
*/ static void rcu_spawn_cpu_nocb_kthread(int cpu) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_data *rdp_gp; struct task_struct *t; struct sched_param sp; if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup) return; /* If there already is an rcuo kthread, then nothing to do. */ if (rdp->nocb_cb_kthread) return; /* If we didn't spawn the GP kthread first, reorganize! */ sp.sched_priority = kthread_prio; rdp_gp = rdp->nocb_gp_rdp; mutex_lock(&rdp_gp->nocb_gp_kthread_mutex); if (!rdp_gp->nocb_gp_kthread) { t = kthread_run(rcu_nocb_gp_kthread, rdp_gp, "rcuog/%d", rdp_gp->cpu); if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) { mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); goto err; } WRITE_ONCE(rdp_gp->nocb_gp_kthread, t); if (kthread_prio) sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); } mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); /* Spawn the kthread for this CPU. */ t = kthread_create(rcu_nocb_cb_kthread, rdp, "rcuo%c/%d", rcu_state.abbr, cpu); if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__)) goto err; if (rcu_rdp_is_offloaded(rdp)) wake_up_process(t); else kthread_park(t); if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio) sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); WRITE_ONCE(rdp->nocb_cb_kthread, t); WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread); return; err: /* * No need to protect against concurrent rcu_barrier() * because the number of callbacks should be 0 for a non-boot CPU, * therefore rcu_barrier() shouldn't even try to grab the nocb_lock. * But hold nocb_mutex to avoid nocb_lock imbalance from shrinker. */ WARN_ON_ONCE(system_state > SYSTEM_BOOTING && rcu_segcblist_n_cbs(&rdp->cblist)); mutex_lock(&rcu_state.nocb_mutex); if (rcu_rdp_is_offloaded(rdp)) { rcu_nocb_rdp_deoffload(rdp); cpumask_clear_cpu(cpu, rcu_nocb_mask); } mutex_unlock(&rcu_state.nocb_mutex); } /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */ static int rcu_nocb_gp_stride = -1; module_param(rcu_nocb_gp_stride, int, 0444); /* * Initialize GP-CB relationships for all no-CBs CPU. */ static void __init rcu_organize_nocb_kthreads(void) { int cpu; bool firsttime = true; bool gotnocbs = false; bool gotnocbscbs = true; int ls = rcu_nocb_gp_stride; int nl = 0; /* Next GP kthread. */ struct rcu_data *rdp; struct rcu_data *rdp_gp = NULL; /* Suppress misguided gcc warn. */ if (!cpumask_available(rcu_nocb_mask)) return; if (ls == -1) { ls = nr_cpu_ids / int_sqrt(nr_cpu_ids); rcu_nocb_gp_stride = ls; } /* * Each pass through this loop sets up one rcu_data structure. * Should the corresponding CPU come online in the future, then * we will spawn the needed set of rcu_nocb_kthread() kthreads. */ for_each_possible_cpu(cpu) { rdp = per_cpu_ptr(&rcu_data, cpu); if (rdp->cpu >= nl) { /* New GP kthread, set up for CBs & next GP. */ gotnocbs = true; nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; rdp_gp = rdp; INIT_LIST_HEAD(&rdp->nocb_head_rdp); if (dump_tree) { if (!firsttime) pr_cont("%s\n", gotnocbscbs ? "" : " (self only)"); gotnocbscbs = false; firsttime = false; pr_alert("%s: No-CB GP kthread CPU %d:", __func__, cpu); } } else { /* Another CB kthread, link to previous GP kthread. */ gotnocbscbs = true; if (dump_tree) pr_cont(" %d", cpu); } rdp->nocb_gp_rdp = rdp_gp; if (cpumask_test_cpu(cpu, rcu_nocb_mask)) list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp); } if (gotnocbs && dump_tree) pr_cont("%s\n", gotnocbscbs ? 
"" : " (self only)"); } /* * Bind the current task to the offloaded CPUs. If there are no offloaded * CPUs, leave the task unbound. Splat if the bind attempt fails. */ void rcu_bind_current_to_nocb(void) { if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask)) WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask)); } EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb); // The ->on_cpu field is available only in CONFIG_SMP=y, so... #ifdef CONFIG_SMP static char *show_rcu_should_be_on_cpu(struct task_struct *tsp) { return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : ""; } #else // #ifdef CONFIG_SMP static char *show_rcu_should_be_on_cpu(struct task_struct *tsp) { return ""; } #endif // #else #ifdef CONFIG_SMP /* * Dump out nocb grace-period kthread state for the specified rcu_data * structure. */ static void show_rcu_nocb_gp_state(struct rcu_data *rdp) { struct rcu_node *rnp = rdp->mynode; pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n", rdp->cpu, "kK"[!!rdp->nocb_gp_kthread], "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)], "dD"[!!rdp->nocb_defer_wakeup], "tT"[timer_pending(&rdp->nocb_timer)], "sS"[!!rdp->nocb_gp_sleep], ".W"[swait_active(&rdp->nocb_gp_wq)], ".W"[swait_active(&rnp->nocb_gp_wq[0])], ".W"[swait_active(&rnp->nocb_gp_wq[1])], ".B"[!!rdp->nocb_gp_bypass], ".G"[!!rdp->nocb_gp_gp], (long)rdp->nocb_gp_seq, rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops), rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.', rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1, show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread)); } /* Dump out nocb kthread state for the specified rcu_data structure. */ static void show_rcu_nocb_state(struct rcu_data *rdp) { char bufw[20]; char bufr[20]; struct rcu_data *nocb_next_rdp; struct rcu_segcblist *rsclp = &rdp->cblist; bool waslocked; bool wassleep; if (rdp->nocb_gp_rdp == rdp) show_rcu_nocb_gp_state(rdp); nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp, &rdp->nocb_entry_rdp, typeof(*rdp), nocb_entry_rdp); sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]); sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]); pr_info(" CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n", rdp->cpu, rdp->nocb_gp_rdp->cpu, nocb_next_rdp ? nocb_next_rdp->cpu : -1, "kK"[!!rdp->nocb_cb_kthread], "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], "lL"[raw_spin_is_locked(&rdp->nocb_lock)], "sS"[!!rdp->nocb_cb_sleep], ".W"[swait_active(&rdp->nocb_cb_wq)], jiffies - rdp->nocb_bypass_first, jiffies - rdp->nocb_nobypass_last, rdp->nocb_nobypass_count, ".D"[rcu_segcblist_ready_cbs(rsclp)], ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)], rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw, ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)], rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr, ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)], ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)], rcu_segcblist_n_cbs(&rdp->cblist), rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.', rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1, show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread)); /* It is OK for GP kthreads to have GP state. */ if (rdp->nocb_gp_rdp == rdp) return; waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock); wassleep = swait_active(&rdp->nocb_gp_wq); if (!rdp->nocb_gp_sleep && !waslocked && !wassleep) return; /* Nothing untoward. */ pr_info(" nocb GP activity on CB-only CPU!!! 
%c%c%c %c\n", "lL"[waslocked], "dD"[!!rdp->nocb_defer_wakeup], "sS"[!!rdp->nocb_gp_sleep], ".W"[wassleep]); } #else /* #ifdef CONFIG_RCU_NOCB_CPU */ /* No ->nocb_lock to acquire. */ static void rcu_nocb_lock(struct rcu_data *rdp) { } /* No ->nocb_lock to release. */ static void rcu_nocb_unlock(struct rcu_data *rdp) { } /* No ->nocb_lock to release. */ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, unsigned long flags) { local_irq_restore(flags); } /* Lockdep check that ->cblist may be safely accessed. */ static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp) { lockdep_assert_irqs_disabled(); } static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) { } static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) { return NULL; } static void rcu_init_one_nocb(struct rcu_node *rnp) { } static bool wake_nocb_gp(struct rcu_data *rdp, bool force) { return false; } static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, unsigned long j, bool lazy) { return true; } static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func, unsigned long flags, bool lazy) { WARN_ON_ONCE(1); /* Should be dead code! */ } static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, unsigned long flags) { WARN_ON_ONCE(1); /* Should be dead code! */ } static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) { } static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level) { return false; } static bool do_nocb_deferred_wakeup(struct rcu_data *rdp) { return false; } static void rcu_spawn_cpu_nocb_kthread(int cpu) { } static void show_rcu_nocb_state(struct rcu_data *rdp) { } #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
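/*
 * Illustration only (not part of the tree_nocb code above, and not a
 * definitive recipe): a minimal sketch of how the callback offloading
 * implemented above is typically enabled and exercised.  The boot
 * parameters correspond to the __setup() handlers registered above
 * (rcu_nocb_setup() and parse_rcu_nocb_poll()); the callback itself goes
 * through the ordinary call_rcu() API, which on an offloaded CPU is routed
 * to call_rcu_nocb() and invoked by the rcuog/rcuo kthreads spawned by
 * rcu_spawn_cpu_nocb_kthread().  The struct and callback names below are
 * hypothetical and exist only for this example.
 *
 *   Kernel command line:
 *       rcu_nocbs=0-3 rcu_nocb_poll
 *
 *   struct foo {
 *           struct rcu_head rh;
 *           int data;
 *   };
 *
 *   static void foo_free_cb(struct rcu_head *rh)
 *   {
 *           kfree(container_of(rh, struct foo, rh));
 *   }
 *
 *   // After unlinking "p" from all reader-visible structures:
 *   //         call_rcu(&p->rh, foo_free_cb);
 *   // On CPUs 0-3 the callback is queued for the nocb kthreads instead of
 *   // being invoked from softirq on the enqueueing CPU.
 */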
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) // // Copyright 2021-2022 NXP // // Author: Peng Zhang <[email protected]> // // Hardware interface for audio DSP on i.MX8ULP #include <linux/arm-smccc.h> #include <linux/clk.h> #include <linux/firmware.h> #include <linux/firmware/imx/dsp.h> #include <linux/firmware/imx/ipc.h> #include <linux/firmware/imx/svc/misc.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/of_reserved_mem.h> #include <sound/sof.h> #include <sound/sof/xtensa.h> #include "../ops.h" #include "../sof-of-dev.h" #include "imx-common.h" #define FSL_SIP_HIFI_XRDC 0xc200000e /* SIM Domain register */ #define SYSCTRL0 0x8 #define EXECUTE_BIT BIT(13) #define RESET_BIT BIT(16) #define HIFI4_CLK_BIT BIT(17) #define PB_CLK_BIT BIT(18) #define PLAT_CLK_BIT BIT(19) #define DEBUG_LOGIC_BIT BIT(25) #define MBOX_OFFSET 0x800000 #define MBOX_SIZE 0x1000 struct imx8ulp_priv { struct device *dev; struct snd_sof_dev *sdev; /* DSP IPC handler */ struct imx_dsp_ipc *dsp_ipc; struct platform_device *ipc_dev; struct regmap *regmap; struct clk_bulk_data *clks; int clk_num; }; static void imx8ulp_sim_lpav_start(struct imx8ulp_priv *priv) { /* Controls the HiFi4 DSP Reset: 1 in reset, 0 out of reset */ regmap_update_bits(priv->regmap, SYSCTRL0, RESET_BIT, 0); /* Reset HiFi4 DSP Debug logic: 1 debug reset, 0 out of reset*/ regmap_update_bits(priv->regmap, SYSCTRL0, DEBUG_LOGIC_BIT, 0); /* Stall HIFI4 DSP Execution: 1 stall, 0 run */ regmap_update_bits(priv->regmap, SYSCTRL0, EXECUTE_BIT, 0); } static int imx8ulp_get_mailbox_offset(struct snd_sof_dev *sdev) { return MBOX_OFFSET; } static int imx8ulp_get_window_offset(struct snd_sof_dev *sdev, u32 id) { return MBOX_OFFSET; } static void imx8ulp_dsp_handle_reply(struct imx_dsp_ipc *ipc) { struct imx8ulp_priv *priv = imx_dsp_get_data(ipc); unsigned long flags; spin_lock_irqsave(&priv->sdev->ipc_lock, flags); snd_sof_ipc_process_reply(priv->sdev, 0); spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags); } static void imx8ulp_dsp_handle_request(struct imx_dsp_ipc *ipc) { struct imx8ulp_priv *priv = imx_dsp_get_data(ipc); u32 p; /* panic code */ /* Read the message from the debug box. 
*/ sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4, &p, sizeof(p)); /* Check to see if the message is a panic code (0x0dead***) */ if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) snd_sof_dsp_panic(priv->sdev, p, true); else snd_sof_ipc_msgs_rx(priv->sdev); } static struct imx_dsp_ops dsp_ops = { .handle_reply = imx8ulp_dsp_handle_reply, .handle_request = imx8ulp_dsp_handle_request, }; static int imx8ulp_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg) { struct imx8ulp_priv *priv = sdev->pdata->hw_pdata; sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data, msg->msg_size); imx_dsp_ring_doorbell(priv->dsp_ipc, 0); return 0; } static int imx8ulp_run(struct snd_sof_dev *sdev) { struct imx8ulp_priv *priv = sdev->pdata->hw_pdata; imx8ulp_sim_lpav_start(priv); return 0; } static int imx8ulp_reset(struct snd_sof_dev *sdev) { struct imx8ulp_priv *priv = sdev->pdata->hw_pdata; struct arm_smccc_res smc_resource; /* HiFi4 Platform Clock Enable: 1 enabled, 0 disabled */ regmap_update_bits(priv->regmap, SYSCTRL0, PLAT_CLK_BIT, PLAT_CLK_BIT); /* HiFi4 PBCLK clock enable: 1 enabled, 0 disabled */ regmap_update_bits(priv->regmap, SYSCTRL0, PB_CLK_BIT, PB_CLK_BIT); /* HiFi4 Clock Enable: 1 enabled, 0 disabled */ regmap_update_bits(priv->regmap, SYSCTRL0, HIFI4_CLK_BIT, HIFI4_CLK_BIT); regmap_update_bits(priv->regmap, SYSCTRL0, RESET_BIT, RESET_BIT); usleep_range(1, 2); /* Stall HIFI4 DSP Execution: 1 stall, 0 not stall */ regmap_update_bits(priv->regmap, SYSCTRL0, EXECUTE_BIT, EXECUTE_BIT); usleep_range(1, 2); arm_smccc_smc(FSL_SIP_HIFI_XRDC, 0, 0, 0, 0, 0, 0, 0, &smc_resource); return 0; } static int imx8ulp_probe(struct snd_sof_dev *sdev) { struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev); struct device_node *np = pdev->dev.of_node; struct device_node *res_node; struct resource *mmio; struct imx8ulp_priv *priv; struct resource res; u32 base, size; int ret = 0; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; sdev->num_cores = 1; sdev->pdata->hw_pdata = priv; priv->dev = sdev->dev; priv->sdev = sdev; /* System integration module(SIM) control dsp configuration */ priv->regmap = syscon_regmap_lookup_by_phandle(np, "fsl,dsp-ctrl"); if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); priv->ipc_dev = platform_device_register_data(sdev->dev, "imx-dsp", PLATFORM_DEVID_NONE, pdev, sizeof(*pdev)); if (IS_ERR(priv->ipc_dev)) return PTR_ERR(priv->ipc_dev); priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev); if (!priv->dsp_ipc) { /* DSP IPC driver not probed yet, try later */ ret = -EPROBE_DEFER; dev_err(sdev->dev, "Failed to get drvdata\n"); goto exit_pdev_unregister; } imx_dsp_set_data(priv->dsp_ipc, priv); priv->dsp_ipc->ops = &dsp_ops; /* DSP base */ mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (mmio) { base = mmio->start; size = resource_size(mmio); } else { dev_err(sdev->dev, "error: failed to get DSP base at idx 0\n"); ret = -EINVAL; goto exit_pdev_unregister; } sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev, base, size); if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) { dev_err(sdev->dev, "failed to ioremap base 0x%x size 0x%x\n", base, size); ret = -ENODEV; goto exit_pdev_unregister; } sdev->mmio_bar = SOF_FW_BLK_TYPE_IRAM; res_node = of_parse_phandle(np, "memory-reserved", 0); if (!res_node) { dev_err(&pdev->dev, "failed to get memory region node\n"); ret = -ENODEV; goto exit_pdev_unregister; } ret = of_address_to_resource(res_node, 0, &res); of_node_put(res_node); if 
(ret) { dev_err(&pdev->dev, "failed to get reserved region address\n"); goto exit_pdev_unregister; } sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev, res.start, resource_size(&res)); if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) { dev_err(sdev->dev, "failed to ioremap mem 0x%x size 0x%x\n", base, size); ret = -ENOMEM; goto exit_pdev_unregister; } sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM; /* set default mailbox offset for FW ready message */ sdev->dsp_box.offset = MBOX_OFFSET; ret = of_reserved_mem_device_init(sdev->dev); if (ret) { dev_err(&pdev->dev, "failed to init reserved memory region %d\n", ret); goto exit_pdev_unregister; } ret = devm_clk_bulk_get_all(sdev->dev, &priv->clks); if (ret < 0) { dev_err(sdev->dev, "failed to fetch clocks: %d\n", ret); goto exit_pdev_unregister; } priv->clk_num = ret; ret = clk_bulk_prepare_enable(priv->clk_num, priv->clks); if (ret < 0) { dev_err(sdev->dev, "failed to enable clocks: %d\n", ret); goto exit_pdev_unregister; } return 0; exit_pdev_unregister: platform_device_unregister(priv->ipc_dev); return ret; } static void imx8ulp_remove(struct snd_sof_dev *sdev) { struct imx8ulp_priv *priv = sdev->pdata->hw_pdata; clk_bulk_disable_unprepare(priv->clk_num, priv->clks); platform_device_unregister(priv->ipc_dev); } /* on i.MX8 there is 1 to 1 match between type and BAR idx */ static int imx8ulp_get_bar_index(struct snd_sof_dev *sdev, u32 type) { return type; } static int imx8ulp_suspend(struct snd_sof_dev *sdev) { int i; struct imx8ulp_priv *priv = (struct imx8ulp_priv *)sdev->pdata->hw_pdata; /*Stall DSP, release in .run() */ regmap_update_bits(priv->regmap, SYSCTRL0, EXECUTE_BIT, EXECUTE_BIT); for (i = 0; i < DSP_MU_CHAN_NUM; i++) imx_dsp_free_channel(priv->dsp_ipc, i); clk_bulk_disable_unprepare(priv->clk_num, priv->clks); return 0; } static int imx8ulp_resume(struct snd_sof_dev *sdev) { struct imx8ulp_priv *priv = (struct imx8ulp_priv *)sdev->pdata->hw_pdata; int i, ret; ret = clk_bulk_prepare_enable(priv->clk_num, priv->clks); if (ret < 0) { dev_err(sdev->dev, "failed to enable clocks: %d\n", ret); return ret; } for (i = 0; i < DSP_MU_CHAN_NUM; i++) imx_dsp_request_channel(priv->dsp_ipc, i); return 0; } static int imx8ulp_dsp_runtime_resume(struct snd_sof_dev *sdev) { const struct sof_dsp_power_state target_dsp_state = { .state = SOF_DSP_PM_D0, .substate = 0, }; imx8ulp_resume(sdev); return snd_sof_dsp_set_power_state(sdev, &target_dsp_state); } static int imx8ulp_dsp_runtime_suspend(struct snd_sof_dev *sdev) { const struct sof_dsp_power_state target_dsp_state = { .state = SOF_DSP_PM_D3, .substate = 0, }; imx8ulp_suspend(sdev); return snd_sof_dsp_set_power_state(sdev, &target_dsp_state); } static int imx8ulp_dsp_suspend(struct snd_sof_dev *sdev, unsigned int target_state) { const struct sof_dsp_power_state target_dsp_state = { .state = target_state, .substate = 0, }; if (!pm_runtime_suspended(sdev->dev)) imx8ulp_suspend(sdev); return snd_sof_dsp_set_power_state(sdev, &target_dsp_state); } static int imx8ulp_dsp_resume(struct snd_sof_dev *sdev) { const struct sof_dsp_power_state target_dsp_state = { .state = SOF_DSP_PM_D0, .substate = 0, }; imx8ulp_resume(sdev); if (pm_runtime_suspended(sdev->dev)) { pm_runtime_disable(sdev->dev); pm_runtime_set_active(sdev->dev); pm_runtime_mark_last_busy(sdev->dev); pm_runtime_enable(sdev->dev); pm_runtime_idle(sdev->dev); } return snd_sof_dsp_set_power_state(sdev, &target_dsp_state); } static struct snd_soc_dai_driver imx8ulp_dai[] = { { .name = "sai5", .playback = { .channels_min = 1, .channels_max = 32, }, 
.capture = { .channels_min = 1, .channels_max = 32, }, }, { .name = "sai6", .playback = { .channels_min = 1, .channels_max = 32, }, .capture = { .channels_min = 1, .channels_max = 32, }, }, }; static int imx8ulp_dsp_set_power_state(struct snd_sof_dev *sdev, const struct sof_dsp_power_state *target_state) { sdev->dsp_power_state = *target_state; return 0; } /* i.MX8 ops */ static const struct snd_sof_dsp_ops sof_imx8ulp_ops = { /* probe and remove */ .probe = imx8ulp_probe, .remove = imx8ulp_remove, /* DSP core boot */ .run = imx8ulp_run, .reset = imx8ulp_reset, /* Block IO */ .block_read = sof_block_read, .block_write = sof_block_write, /* Module IO */ .read64 = sof_io_read64, /* Mailbox IO */ .mailbox_read = sof_mailbox_read, .mailbox_write = sof_mailbox_write, /* ipc */ .send_msg = imx8ulp_send_msg, .get_mailbox_offset = imx8ulp_get_mailbox_offset, .get_window_offset = imx8ulp_get_window_offset, .ipc_msg_data = sof_ipc_msg_data, .set_stream_data_offset = sof_set_stream_data_offset, /* stream callbacks */ .pcm_open = sof_stream_pcm_open, .pcm_close = sof_stream_pcm_close, /* module loading */ .get_bar_index = imx8ulp_get_bar_index, /* firmware loading */ .load_firmware = snd_sof_load_firmware_memcpy, /* Debug information */ .dbg_dump = imx8_dump, /* Firmware ops */ .dsp_arch_ops = &sof_xtensa_arch_ops, /* DAI drivers */ .drv = imx8ulp_dai, .num_drv = ARRAY_SIZE(imx8ulp_dai), /* ALSA HW info flags */ .hw_info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, /* PM */ .runtime_suspend = imx8ulp_dsp_runtime_suspend, .runtime_resume = imx8ulp_dsp_runtime_resume, .suspend = imx8ulp_dsp_suspend, .resume = imx8ulp_dsp_resume, .set_power_state = imx8ulp_dsp_set_power_state, }; static struct snd_sof_of_mach sof_imx8ulp_machs[] = { { .compatible = "fsl,imx8ulp-evk", .sof_tplg_filename = "sof-imx8ulp-btsco.tplg", .drv_name = "asoc-audio-graph-card2", }, {} }; static struct sof_dev_desc sof_of_imx8ulp_desc = { .of_machines = sof_imx8ulp_machs, .ipc_supported_mask = BIT(SOF_IPC_TYPE_3), .ipc_default = SOF_IPC_TYPE_3, .default_fw_path = { [SOF_IPC_TYPE_3] = "imx/sof", }, .default_tplg_path = { [SOF_IPC_TYPE_3] = "imx/sof-tplg", }, .default_fw_filename = { [SOF_IPC_TYPE_3] = "sof-imx8ulp.ri", }, .nocodec_tplg_filename = "sof-imx8ulp-nocodec.tplg", .ops = &sof_imx8ulp_ops, }; static const struct of_device_id sof_of_imx8ulp_ids[] = { { .compatible = "fsl,imx8ulp-dsp", .data = &sof_of_imx8ulp_desc}, { } }; MODULE_DEVICE_TABLE(of, sof_of_imx8ulp_ids); /* DT driver definition */ static struct platform_driver snd_sof_of_imx8ulp_driver = { .probe = sof_of_probe, .remove = sof_of_remove, .driver = { .name = "sof-audio-of-imx8ulp", .pm = &sof_of_pm, .of_match_table = sof_of_imx8ulp_ids, }, }; module_platform_driver(snd_sof_of_imx8ulp_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("SOF support for IMX8ULP platforms"); MODULE_IMPORT_NS("SND_SOC_SOF_XTENSA");
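/*
 * Illustration only, not taken from a real board file or binding document:
 * a rough sketch of the device-tree node the imx8ulp driver above binds to,
 * restricted to the properties its probe path actually reads (the
 * "fsl,imx8ulp-dsp" compatible, the DSP "reg" window, the "fsl,dsp-ctrl"
 * syscon phandle and the "memory-reserved" phandle).  The node name, label,
 * address and size are placeholders, not authoritative values; the clocks
 * and the reserved-memory region that the driver also consumes (via
 * devm_clk_bulk_get_all() and of_reserved_mem_device_init()) are omitted.
 *
 *   dsp: dsp@21170000 {
 *           compatible = "fsl,imx8ulp-dsp";
 *           reg = <0x21170000 0x20000>;
 *           fsl,dsp-ctrl = <&sim_lpav>;
 *           memory-reserved = <&dsp_reserved>;
 *   };
 */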
// SPDX-License-Identifier: GPL-2.0-or-later // sma1307.c -- sma1307 ALSA SoC Audio driver // // Copyright 2024 Iron Device Corporation // // Auther: Gyuhwa Park <[email protected]> // Auther: Kiseok Jo <[email protected]> #include <linux/firmware.h> #include <linux/i2c.h> #include <linux/of_gpio.h> #include <linux/regmap.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include "sma1307.h" #define CHECK_PERIOD_TIME 1 /* sec per HZ */ #define PLL_MATCH(_input_clk_name, _output_clk_name, _input_clk,\ _post_n, _n, _vco, _p_cp)\ {\ .input_clk_name = _input_clk_name,\ .output_clk_name = _output_clk_name,\ .input_clk = _input_clk,\ .post_n = _post_n,\ .n = _n,\ .vco = _vco,\ .p_cp = _p_cp,\ } static const char *setting_file = "sma1307_setting.bin"; #define SMA1307_SETTING_CHECKSUM 0x100000 /* PLL clock setting Table */ struct sma1307_pll_match { char *input_clk_name; char *output_clk_name; unsigned int input_clk; unsigned int post_n; unsigned int n; unsigned int vco; unsigned int p_cp; }; struct sma1307_data { char *name; void (*init)(struct regmap *regmap); }; struct sma1307_priv { bool check_fault_status; bool force_mute_status; bool sw_ot1_prot; char *name; enum sma1307_mode amp_mode; int binary_mode; int dapm_aif_in; int dapm_aif_out0; int dapm_aif_out1; int dapm_sdo_en; int dapm_sdo_setting; int num_of_pll_matches; int check_fault_period; struct delayed_work check_fault_work; struct device *dev; struct kobject *kobj; struct mutex default_lock; struct regmap *regmap; struct sma1307_setting_file set; const struct sma1307_pll_match *pll_matches; const struct sma1307_data *data; unsigned int cur_vol; unsigned int format; unsigned int frame_size; unsigned int init_vol; unsigned int last_bclk; unsigned int otp_trm2; unsigned int otp_trm3; unsigned int rev_num; unsigned int sys_clk_id; unsigned int tdm_slot0_rx; unsigned int tdm_slot1_rx; unsigned int tdm_slot0_tx; unsigned int tdm_slot1_tx; unsigned int tsdw_cnt; }; static const struct sma1307_pll_match sma1307_pll_matches[] = { /* in_clk_name, out_clk_name, input_clk post_n, n, vco, p_cp */ PLL_MATCH("1.411MHz", "24.554MHz", 1411200, 0x06, 0xD1, 0x88, 0x00), PLL_MATCH("1.536MHz", "24.576MHz", 1536000, 0x06, 0xC0, 0x88, 0x00), PLL_MATCH("2.822MHz", "24.554MHz", 2822400, 0x06, 0xD1, 0x88, 0x04), PLL_MATCH("3.072MHz", "24.576MHz", 3072000, 0x06, 0x60, 0x88, 0x00), PLL_MATCH("6.144MHz", "24.576MHz", 6144000, 0x06, 0x60, 0x88, 0x04), PLL_MATCH("12.288MHz", "24.576MHz", 12288000, 0x06, 0x60, 0x88, 0x08), PLL_MATCH("19.2MHz", "24.48MHz", 19200000, 0x06, 0x7B, 0x88, 0x0C), PLL_MATCH("24.576MHz", "24.576MHz", 24576000, 0x06, 0x60, 0x88, 0x0C), }; static struct snd_soc_component *sma1307_amp_component; static void sma1307_startup(struct snd_soc_component *); static void sma1307_shutdown(struct snd_soc_component *); static void sma1307_reset(struct snd_soc_component *); static void sma1307_set_binary(struct snd_soc_component *); static void sma1307_set_default(struct snd_soc_component *); /* Initial register value - 6.0W SPK (8ohm load) */ static const struct reg_default sma1307_reg_def[] = { { 0x00, 0x80 }, { 0x01, 0x00 }, { 0x02, 0x52 }, { 0x03, 0x4C }, { 0x04, 0x47 }, { 0x05, 0x42 }, { 0x06, 0x40 }, { 0x07, 0x40 }, { 0x08, 0x3C }, { 0x09, 0x2F }, { 0x0A, 0x32 }, { 0x0B, 0x50 }, { 0x0C, 0x8C }, { 0x0D, 0x00 }, { 0x0E, 0x3F }, { 0x0F, 0x00 }, { 0x10, 0x00 }, { 0x11, 0x00 }, { 0x12, 0x00 }, { 0x13, 0x09 }, { 0x14, 0x12 }, { 0x1C, 0x00 }, { 0x1D, 0x85 }, { 0x1E, 0xA1 }, { 0x1F, 0x67 }, { 0x22, 0x00 }, { 0x23, 0x1F }, { 0x24, 0x7A }, { 0x25, 0x00 }, 
{ 0x26, 0xFF }, { 0x27, 0x39 }, { 0x28, 0x54 }, { 0x29, 0x92 }, { 0x2A, 0xB0 }, { 0x2B, 0xED }, { 0x2C, 0xED }, { 0x2D, 0xFF }, { 0x2E, 0xFF }, { 0x2F, 0xFF }, { 0x30, 0xFF }, { 0x31, 0xFF }, { 0x32, 0xFF }, { 0x34, 0x01 }, { 0x35, 0x17 }, { 0x36, 0x92 }, { 0x37, 0x00 }, { 0x38, 0x01 }, { 0x39, 0x10 }, { 0x3E, 0x01 }, { 0x3F, 0x08 }, { 0x8B, 0x05 }, { 0x8C, 0x50 }, { 0x8D, 0x80 }, { 0x8E, 0x10 }, { 0x8F, 0x02 }, { 0x90, 0x02 }, { 0x91, 0x83 }, { 0x92, 0xC0 }, { 0x93, 0x00 }, { 0x94, 0xA4 }, { 0x95, 0x74 }, { 0x96, 0x57 }, { 0xA2, 0xCC }, { 0xA3, 0x28 }, { 0xA4, 0x40 }, { 0xA5, 0x01 }, { 0xA6, 0x41 }, { 0xA7, 0x08 }, { 0xA8, 0x04 }, { 0xA9, 0x27 }, { 0xAA, 0x10 }, { 0xAB, 0x10 }, { 0xAC, 0x10 }, { 0xAD, 0x0F }, { 0xAE, 0xCD }, { 0xAF, 0x70 }, { 0xB0, 0x03 }, { 0xB1, 0xEF }, { 0xB2, 0x03 }, { 0xB3, 0xEF }, { 0xB4, 0xF3 }, { 0xB5, 0x3D }, }; static bool sma1307_readable_register(struct device *dev, unsigned int reg) { if (reg > SMA1307_FF_DEVICE_INDEX) return false; switch (reg) { case SMA1307_00_SYSTEM_CTRL ... SMA1307_1F_TONE_FINE_VOLUME: case SMA1307_22_COMP_HYS_SEL ... SMA1307_32_BROWN_OUT_PROT19: case SMA1307_34_OCP_SPK ... SMA1307_39_PMT_NZ_VAL: case SMA1307_3B_TEST1 ... SMA1307_3F_ATEST2: case SMA1307_8B_PLL_POST_N ... SMA1307_9A_OTP_TRM3: case SMA1307_A0_PAD_CTRL0 ... SMA1307_BE_MCBS_CTRL2: case SMA1307_F5_READY_FOR_V_SAR: case SMA1307_F7_READY_FOR_T_SAR ... SMA1307_FF_DEVICE_INDEX: break; default: return false; } return true; } static bool sma1307_writeable_register(struct device *dev, unsigned int reg) { if (reg > SMA1307_FF_DEVICE_INDEX) return false; switch (reg) { case SMA1307_00_SYSTEM_CTRL ... SMA1307_1F_TONE_FINE_VOLUME: case SMA1307_22_COMP_HYS_SEL ... SMA1307_32_BROWN_OUT_PROT19: case SMA1307_34_OCP_SPK ... SMA1307_39_PMT_NZ_VAL: case SMA1307_3B_TEST1 ... SMA1307_3F_ATEST2: case SMA1307_8B_PLL_POST_N ... SMA1307_9A_OTP_TRM3: case SMA1307_A0_PAD_CTRL0 ... SMA1307_BE_MCBS_CTRL2: break; default: return false; } return true; } static bool sma1307_volatile_register(struct device *dev, unsigned int reg) { if (reg > SMA1307_FF_DEVICE_INDEX) return false; switch (reg) { case SMA1307_F8_STATUS_T1 ... 
SMA1307_FF_DEVICE_INDEX: break; default: return false; } return true; } /* DB scale conversion of speaker volume */ static const DECLARE_TLV_DB_SCALE(sma1307_spk_tlv, -6000, 50, 0); static const char *const sma1307_aif_in_source_text[] = { "Mono", "Left", "Right" }; static const char *const sma1307_sdo_setting_text[] = { "Data_One_48k", "Data_Two_48k", "Data_Two_24k", "Clk_PLL", "Clk_OSC" }; static const char *const sma1307_aif_out_source_text[] = { "Disable", "After_FmtC", "After_Mixer", "After_DSP", "Vrms2_Avg", "Battery", "Temperature", "After_Delay" }; static const char *const sma1307_tdm_slot_text[] = { "Slot0", "Slot1", "Slot2", "Slot3", "Slot4", "Slot5", "Slot6", "Slot7" }; static const char *const sma1307_binary_mode_text[] = { "Mode0", "Mode1", "Mode2", "Mode3", "Mode4" }; static const char *const sma1307_reset_text[] = { "Reset" }; static const struct soc_enum sma1307_aif_in_source_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1307_aif_in_source_text), sma1307_aif_in_source_text); static const struct soc_enum sma1307_sdo_setting_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1307_sdo_setting_text), sma1307_sdo_setting_text); static const struct soc_enum sma1307_aif_out_source_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1307_aif_out_source_text), sma1307_aif_out_source_text); static const struct soc_enum sma1307_tdm_slot_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1307_tdm_slot_text), sma1307_tdm_slot_text); static const struct soc_enum sma1307_binary_mode_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1307_binary_mode_text), sma1307_binary_mode_text); static const struct soc_enum sma1307_reset_enum = SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(sma1307_reset_text), sma1307_reset_text); static int sma1307_force_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); ucontrol->value.integer.value[0] = (int)sma1307->force_mute_status; return 0; } static int sma1307_force_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); bool change = false, val = (bool)ucontrol->value.integer.value[0]; if (sma1307->force_mute_status == val) { change = false; } else { change = true; sma1307->force_mute_status = val; } return change; } static int sma1307_tdm_slot_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); int val1, val2; regmap_read(sma1307->regmap, SMA1307_A5_TDM1, &val1); regmap_read(sma1307->regmap, SMA1307_A6_TDM2, &val2); if (!strcmp(kcontrol->id.name, SMA1307_TDM_RX0_POS_NAME)) { ucontrol->value.integer.value[0] = (val1 & SMA1307_TDM_SLOT0_RX_POS_MASK) >> 3; sma1307->tdm_slot0_rx = ucontrol->value.integer.value[0]; } else if (!strcmp(kcontrol->id.name, SMA1307_TDM_RX1_POS_NAME)) { ucontrol->value.integer.value[0] = val1 & SMA1307_TDM_SLOT1_RX_POS_MASK; sma1307->tdm_slot1_rx = ucontrol->value.integer.value[0]; } else if (!strcmp(kcontrol->id.name, SMA1307_TDM_TX0_POS_NAME)) { ucontrol->value.integer.value[0] = (val2 & SMA1307_TDM_SLOT0_TX_POS_MASK) >> 3; sma1307->tdm_slot0_tx = ucontrol->value.integer.value[0]; } else if (!strcmp(kcontrol->id.name, SMA1307_TDM_TX1_POS_NAME)) { ucontrol->value.integer.value[0] = 
val2 & SMA1307_TDM_SLOT1_TX_POS_MASK; sma1307->tdm_slot1_tx = ucontrol->value.integer.value[0]; } else { return -EINVAL; } return 0; } static int sma1307_tdm_slot_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); int val = (int)ucontrol->value.integer.value[0]; bool change; if (!strcmp(kcontrol->id.name, SMA1307_TDM_RX0_POS_NAME)) { if (sma1307->tdm_slot0_rx == val) change = false; else { change = true; sma1307->tdm_slot0_rx = val; regmap_update_bits(sma1307->regmap, SMA1307_A5_TDM1, SMA1307_TDM_SLOT0_RX_POS_MASK, val << 3); } } else if (!strcmp(kcontrol->id.name, SMA1307_TDM_RX1_POS_NAME)) { if (sma1307->tdm_slot1_rx == val) change = false; else { change = true; sma1307->tdm_slot1_rx = val; regmap_update_bits(sma1307->regmap, SMA1307_A5_TDM1, SMA1307_TDM_SLOT1_RX_POS_MASK, val); } } else if (!strcmp(kcontrol->id.name, SMA1307_TDM_TX0_POS_NAME)) { if (sma1307->tdm_slot0_tx == val) change = false; else { change = true; sma1307->tdm_slot0_tx = val; regmap_update_bits(sma1307->regmap, SMA1307_A6_TDM2, SMA1307_TDM_SLOT0_TX_POS_MASK, val << 3); } } else if (!strcmp(kcontrol->id.name, SMA1307_TDM_TX1_POS_NAME)) { if (sma1307->tdm_slot1_tx == val) change = false; else { change = true; sma1307->tdm_slot1_tx = val; regmap_update_bits(sma1307->regmap, SMA1307_A6_TDM2, SMA1307_TDM_SLOT1_TX_POS_MASK, val); } } else { dev_err(sma1307->dev, "%s: Invalid Control ID - %s\n", __func__, kcontrol->id.name); return -EINVAL; } return change; } static int sma1307_sw_ot1_prot_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); ucontrol->value.integer.value[0] = (int)sma1307->sw_ot1_prot; return 0; } static int sma1307_sw_ot1_prot_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); bool change = false, val = (bool)ucontrol->value.integer.value[0]; if (sma1307->sw_ot1_prot == val) change = false; else { change = true; sma1307->sw_ot1_prot = val; } return change; } static int sma1307_check_fault_status_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); ucontrol->value.integer.value[0] = (int)sma1307->check_fault_status; return 0; } static int sma1307_check_fault_status_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); bool change = false, val = (bool)ucontrol->value.integer.value[0]; if (sma1307->check_fault_status == val) { change = false; } else { change = true; sma1307->check_fault_status = val; } return change; } static int sma1307_check_fault_period_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); ucontrol->value.integer.value[0] = sma1307->check_fault_period; return 0; } static int 
sma1307_check_fault_period_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; bool change = false; int val = ucontrol->value.integer.value[0]; if (val < mc->min || val > mc->max) return -EINVAL; if (sma1307->check_fault_period == val) { change = false; } else { change = true; sma1307->check_fault_period = val; } return change; } static int sma1307_reset_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); regmap_update_bits(sma1307->regmap, SMA1307_00_SYSTEM_CTRL, SMA1307_RESET_MASK, SMA1307_RESET_ON); sma1307_reset(component); snd_ctl_notify(component->card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, &kcontrol->id); return true; } static int sma1307_binary_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct sma1307_priv *sma1307 = snd_kcontrol_chip(kcontrol); sma1307->binary_mode = (int)ucontrol->value.enumerated.item[0]; if (sma1307->set.status) sma1307_set_binary(component); return snd_soc_put_enum_double(kcontrol, ucontrol); } static void sma1307_startup(struct snd_soc_component *component) { struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); regmap_update_bits(sma1307->regmap, SMA1307_A2_TOP_MAN1, SMA1307_PLL_MASK, SMA1307_PLL_ON); regmap_update_bits(sma1307->regmap, SMA1307_00_SYSTEM_CTRL, SMA1307_POWER_MASK, SMA1307_POWER_ON); if (sma1307->amp_mode == SMA1307_MONO_MODE) { regmap_update_bits(sma1307->regmap, SMA1307_10_SYSTEM_CTRL1, SMA1307_SPK_MODE_MASK, SMA1307_SPK_MONO); } else { regmap_update_bits(sma1307->regmap, SMA1307_10_SYSTEM_CTRL1, SMA1307_SPK_MODE_MASK, SMA1307_SPK_STEREO); } if (sma1307->check_fault_status) { if (sma1307->check_fault_period > 0) queue_delayed_work(system_freezable_wq, &sma1307->check_fault_work, sma1307->check_fault_period * HZ); else queue_delayed_work(system_freezable_wq, &sma1307->check_fault_work, CHECK_PERIOD_TIME * HZ); } } static void sma1307_shutdown(struct snd_soc_component *component) { struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); /* for SMA1307A */ cancel_delayed_work_sync(&sma1307->check_fault_work); regmap_update_bits(sma1307->regmap, SMA1307_0E_MUTE_VOL_CTRL, SMA1307_SPK_MUTE_MASK, SMA1307_SPK_MUTE); /* Need to wait time for mute slope */ msleep(55); regmap_update_bits(sma1307->regmap, SMA1307_10_SYSTEM_CTRL1, SMA1307_SPK_MODE_MASK, SMA1307_SPK_OFF); regmap_update_bits(sma1307->regmap, SMA1307_A2_TOP_MAN1, SMA1307_PLL_MASK, SMA1307_PLL_OFF); regmap_update_bits(sma1307->regmap, SMA1307_00_SYSTEM_CTRL, SMA1307_POWER_MASK, SMA1307_POWER_OFF); } static int sma1307_aif_in_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); unsigned int mux = sma1307->dapm_aif_in; switch (event) { case SND_SOC_DAPM_PRE_PMU: switch (mux) { case SMA1307_MONO_MODE: regmap_update_bits(sma1307->regmap, SMA1307_11_SYSTEM_CTRL2, SMA1307_MONOMIX_MASK, SMA1307_MONOMIX_ON); break; case SMA1307_LEFT_MODE: 
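		/*
		 * Left input: mono mix is disabled and the L/R data order
		 * is kept normal (see the two register writes below).
		 */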
regmap_update_bits(sma1307->regmap, SMA1307_11_SYSTEM_CTRL2, SMA1307_MONOMIX_MASK, SMA1307_MONOMIX_OFF); regmap_update_bits(sma1307->regmap, SMA1307_11_SYSTEM_CTRL2, SMA1307_LR_DATA_SW_MASK, SMA1307_LR_DATA_SW_NORMAL); break; case SMA1307_RIGHT_MODE: regmap_update_bits(sma1307->regmap, SMA1307_11_SYSTEM_CTRL2, SMA1307_MONOMIX_MASK, SMA1307_MONOMIX_OFF); regmap_update_bits(sma1307->regmap, SMA1307_11_SYSTEM_CTRL2, SMA1307_LR_DATA_SW_MASK, SMA1307_LR_DATA_SW_SWAP); break; default: dev_err(sma1307->dev, "%s: Invalid value (%d)\n", __func__, mux); return -EINVAL; } sma1307->amp_mode = mux; break; } return 0; } static int sma1307_sdo_setting_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); unsigned int mux = sma1307->dapm_sdo_setting; switch (event) { case SND_SOC_DAPM_PRE_PMU: switch (mux) { case SMA1307_OUT_DATA_ONE_48K: regmap_update_bits(sma1307->regmap, SMA1307_A2_TOP_MAN1, SMA1307_SDO_OUTPUT2_MASK, SMA1307_ONE_SDO_PER_CH); regmap_update_bits(sma1307->regmap, SMA1307_A3_TOP_MAN2, SMA1307_SDO_OUTPUT3_MASK | SMA1307_DATA_CLK_SEL_MASK, SMA1307_SDO_OUTPUT3_DIS | SMA1307_SDO_DATA); break; case SMA1307_OUT_DATA_TWO_48K: regmap_update_bits(sma1307->regmap, SMA1307_A2_TOP_MAN1, SMA1307_SDO_OUTPUT2_MASK, SMA1307_TWO_SDO_PER_CH); regmap_update_bits(sma1307->regmap, SMA1307_A3_TOP_MAN2, SMA1307_SDO_OUTPUT3_MASK | SMA1307_DATA_CLK_SEL_MASK, SMA1307_SDO_OUTPUT3_DIS | SMA1307_SDO_DATA); break; case SMA1307_OUT_DATA_TWO_24K: regmap_update_bits(sma1307->regmap, SMA1307_A2_TOP_MAN1, SMA1307_SDO_OUTPUT2_MASK, SMA1307_TWO_SDO_PER_CH); regmap_update_bits(sma1307->regmap, SMA1307_A3_TOP_MAN2, SMA1307_SDO_OUTPUT3_MASK | SMA1307_DATA_CLK_SEL_MASK, SMA1307_TWO_SDO_PER_CH_24K | SMA1307_SDO_DATA); break; case SMA1307_OUT_CLK_PLL: regmap_update_bits(sma1307->regmap, SMA1307_A3_TOP_MAN2, SMA1307_DATA_CLK_SEL_MASK, SMA1307_SDO_CLK_PLL); break; case SMA1307_OUT_CLK_OSC: regmap_update_bits(sma1307->regmap, SMA1307_A3_TOP_MAN2, SMA1307_DATA_CLK_SEL_MASK, SMA1307_SDO_CLK_OSC); break; default: dev_err(sma1307->dev, "%s: Invalid value (%d)\n", __func__, mux); return -EINVAL; } break; } return 0; } static int sma1307_aif_out_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); unsigned int mux = 0, val = 0, mask = 0; if (!strcmp(w->name, SMA1307_AIF_OUT0_NAME)) { mux = sma1307->dapm_aif_out0; val = mux; mask = SMA1307_SDO_OUT0_SEL_MASK; } else if (!strcmp(w->name, SMA1307_AIF_OUT1_NAME)) { mux = sma1307->dapm_aif_out1; val = mux << 3; mask = SMA1307_SDO_OUT1_SEL_MASK; } else { dev_err(sma1307->dev, "%s: Invalid widget - %s\n", __func__, w->name); return -EINVAL; } switch (event) { case SND_SOC_DAPM_PRE_PMU: regmap_update_bits(sma1307->regmap, SMA1307_09_OUTPUT_CTRL, mask, val); break; } return 0; } static int sma1307_sdo_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); switch (event) { case SND_SOC_DAPM_PRE_PMU: regmap_update_bits(sma1307->regmap, SMA1307_09_OUTPUT_CTRL, SMA1307_PORT_CONFIG_MASK, SMA1307_OUTPUT_PORT_ENABLE); regmap_update_bits(sma1307->regmap, SMA1307_A3_TOP_MAN2, 
SMA1307_SDO_OUTPUT_MASK, SMA1307_LOGIC_OUTPUT); break; case SND_SOC_DAPM_POST_PMD: regmap_update_bits(sma1307->regmap, SMA1307_09_OUTPUT_CTRL, SMA1307_PORT_CONFIG_MASK, SMA1307_INPUT_PORT_ONLY); regmap_update_bits(sma1307->regmap, SMA1307_A3_TOP_MAN2, SMA1307_SDO_OUTPUT_MASK, SMA1307_HIGH_Z_OUTPUT); break; } return 0; } static int sma1307_power_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); switch (event) { case SND_SOC_DAPM_POST_PMU: sma1307_startup(component); break; case SND_SOC_DAPM_PRE_PMD: sma1307_shutdown(component); break; } return 0; } static int sma1307_dapm_aif_in_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(dapm->component); ucontrol->value.enumerated.item[0] = (unsigned int)sma1307->dapm_aif_in; snd_soc_dapm_put_enum_double(kcontrol, ucontrol); return 0; } static int sma1307_dapm_aif_in_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(dapm->component); int val = (int)ucontrol->value.enumerated.item[0]; bool change; if ((val < 0) || (val >= ARRAY_SIZE(sma1307_aif_in_source_text))) { dev_err(sma1307->dev, "%s: Out of range\n", __func__); return -EINVAL; } if (sma1307->dapm_aif_in != val) { change = true; sma1307->dapm_aif_in = val; } else change = false; snd_soc_dapm_put_enum_double(kcontrol, ucontrol); return change; } static int sma1307_dapm_sdo_setting_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(dapm->component); ucontrol->value.enumerated.item[0] = (unsigned int)sma1307->dapm_sdo_setting; snd_soc_dapm_put_enum_double(kcontrol, ucontrol); return 0; } static int sma1307_dapm_sdo_setting_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(dapm->component); int val = (int)ucontrol->value.enumerated.item[0]; bool change; if ((val < 0) || (val >= ARRAY_SIZE(sma1307_sdo_setting_text))) { dev_err(sma1307->dev, "%s: Out of range\n", __func__); return -EINVAL; } if (sma1307->dapm_sdo_setting != val) { change = true; sma1307->dapm_sdo_setting = val; } else change = false; snd_soc_dapm_put_enum_double(kcontrol, ucontrol); return change; } static int sma1307_dapm_aif_out_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(dapm->component); unsigned int val = 0; if (!strcmp(kcontrol->id.name, SMA1307_AIF_OUT0_NAME)) { val = (unsigned int)sma1307->dapm_aif_out0; } else if (!strcmp(kcontrol->id.name, SMA1307_AIF_OUT1_NAME)) { val = (unsigned int)sma1307->dapm_aif_out1; } else { dev_err(sma1307->dev, "%s: Invalid Control ID - %s\n", __func__, kcontrol->id.name); return -EINVAL; } ucontrol->value.enumerated.item[0] = val; snd_soc_dapm_put_enum_double(kcontrol, ucontrol); return 0; } static int sma1307_dapm_aif_out_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct 
snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(dapm->component); int val = (int)ucontrol->value.enumerated.item[0]; bool change; if ((val < 0) || (val >= ARRAY_SIZE(sma1307_aif_out_source_text))) { dev_err(sma1307->dev, "%s: Out of range\n", __func__); return -EINVAL; } if (!strcmp(kcontrol->id.name, SMA1307_AIF_OUT0_NAME)) { if (sma1307->dapm_aif_out0 != val) { change = true; sma1307->dapm_aif_out0 = val; } else change = false; } else if (!strcmp(kcontrol->id.name, SMA1307_AIF_OUT1_NAME)) { if (sma1307->dapm_aif_out1 != val) { change = true; sma1307->dapm_aif_out1 = val; } else change = false; } else { dev_err(sma1307->dev, "%s: Invalid Control ID - %s\n", __func__, kcontrol->id.name); return -EINVAL; } snd_soc_dapm_put_enum_double(kcontrol, ucontrol); return change; } static int sma1307_dapm_sdo_enable_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(dapm->component); ucontrol->value.integer.value[0] = (long)sma1307->dapm_sdo_en; snd_soc_dapm_put_volsw(kcontrol, ucontrol); return 0; } static int sma1307_dapm_sdo_enable_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(dapm->component); int val = (int)ucontrol->value.integer.value[0]; bool change; if ((val < 0) || (val > 1)) { dev_err(sma1307->dev, "%s: Out of range\n", __func__); return -EINVAL; } if (sma1307->dapm_sdo_en != val) { change = true; sma1307->dapm_sdo_en = val; } else change = false; snd_soc_dapm_put_volsw(kcontrol, ucontrol); return change; } static const struct snd_kcontrol_new sma1307_aif_in_source_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SMA1307_AIF_IN_NAME, .info = snd_soc_info_enum_double, .get = sma1307_dapm_aif_in_get, .put = sma1307_dapm_aif_in_put, .private_value = (unsigned long)&sma1307_aif_in_source_enum }; static const struct snd_kcontrol_new sma1307_sdo_setting_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "SDO Setting", .info = snd_soc_info_enum_double, .get = sma1307_dapm_sdo_setting_get, .put = sma1307_dapm_sdo_setting_put, .private_value = (unsigned long)&sma1307_sdo_setting_enum }; static const struct snd_kcontrol_new sma1307_aif_out0_source_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SMA1307_AIF_OUT0_NAME, .info = snd_soc_info_enum_double, .get = sma1307_dapm_aif_out_get, .put = sma1307_dapm_aif_out_put, .private_value = (unsigned long)&sma1307_aif_out_source_enum }; static const struct snd_kcontrol_new sma1307_aif_out1_source_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SMA1307_AIF_OUT1_NAME, .info = snd_soc_info_enum_double, .get = sma1307_dapm_aif_out_get, .put = sma1307_dapm_aif_out_put, .private_value = (unsigned long)&sma1307_aif_out_source_enum }; static const struct snd_kcontrol_new sma1307_sdo_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Switch", .info = snd_soc_info_volsw, .get = sma1307_dapm_sdo_enable_get, .put = sma1307_dapm_sdo_enable_put, .private_value = SOC_SINGLE_VALUE(SND_SOC_NOPM, 0, 1, 0, 0) }; static const struct snd_kcontrol_new sma1307_enable_control = SOC_DAPM_SINGLE("Switch", SMA1307_00_SYSTEM_CTRL, 0, 1, 0); static const struct snd_kcontrol_new sma1307_binary_mode_control[] = { SOC_ENUM_EXT("Binary Mode", 
sma1307_binary_mode_enum, snd_soc_get_enum_double, sma1307_binary_mode_put), }; static const struct snd_kcontrol_new sma1307_snd_controls[] = { SOC_SINGLE_TLV(SMA1307_VOL_CTRL_NAME, SMA1307_0A_SPK_VOL, 0, 167, 1, sma1307_spk_tlv), SOC_ENUM_EXT(SMA1307_TDM_RX0_POS_NAME, sma1307_tdm_slot_enum, sma1307_tdm_slot_get, sma1307_tdm_slot_put), SOC_ENUM_EXT(SMA1307_TDM_RX1_POS_NAME, sma1307_tdm_slot_enum, sma1307_tdm_slot_get, sma1307_tdm_slot_put), SOC_ENUM_EXT(SMA1307_TDM_TX0_POS_NAME, sma1307_tdm_slot_enum, sma1307_tdm_slot_get, sma1307_tdm_slot_put), SOC_ENUM_EXT(SMA1307_TDM_TX1_POS_NAME, sma1307_tdm_slot_enum, sma1307_tdm_slot_get, sma1307_tdm_slot_put), SOC_ENUM_EXT(SMA1307_RESET_CTRL_NAME, sma1307_reset_enum, snd_soc_get_enum_double, sma1307_reset_put), SOC_SINGLE_BOOL_EXT(SMA1307_FORCE_MUTE_CTRL_NAME, 0, sma1307_force_mute_get, sma1307_force_mute_put), SOC_SINGLE_BOOL_EXT(SMA1307_OT1_SW_PROT_CTRL_NAME, 0, sma1307_sw_ot1_prot_get, sma1307_sw_ot1_prot_put), SOC_SINGLE_BOOL_EXT(SMA1307_CHECK_FAULT_STATUS_NAME, 0, sma1307_check_fault_status_get, sma1307_check_fault_status_put), SOC_SINGLE_EXT(SMA1307_CHECK_FAULT_PERIOD_NAME, SND_SOC_NOPM, 0, 600, 0, sma1307_check_fault_period_get, sma1307_check_fault_period_put), }; static const struct snd_soc_dapm_widget sma1307_dapm_widgets[] = { /* platform domain */ SND_SOC_DAPM_OUTPUT("SPK"), SND_SOC_DAPM_INPUT("SDO"), /* path domain */ SND_SOC_DAPM_MUX_E(SMA1307_AIF_IN_NAME, SND_SOC_NOPM, 0, 0, &sma1307_aif_in_source_control, sma1307_aif_in_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_MUX_E("SDO Setting", SND_SOC_NOPM, 0, 0, &sma1307_sdo_setting_control, sma1307_sdo_setting_event, SND_SOC_DAPM_PRE_PMU), SND_SOC_DAPM_MUX_E(SMA1307_AIF_OUT0_NAME, SND_SOC_NOPM, 0, 0, &sma1307_aif_out0_source_control, sma1307_aif_out_event, SND_SOC_DAPM_PRE_PMU), SND_SOC_DAPM_MUX_E(SMA1307_AIF_OUT1_NAME, SND_SOC_NOPM, 0, 0, &sma1307_aif_out1_source_control, sma1307_aif_out_event, SND_SOC_DAPM_PRE_PMU), SND_SOC_DAPM_SWITCH_E("SDO Enable", SND_SOC_NOPM, 0, 0, &sma1307_sdo_control, sma1307_sdo_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_MIXER("Entry", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_OUT_DRV_E("AMP Power", SND_SOC_NOPM, 0, 0, NULL, 0, sma1307_power_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SWITCH("AMP Enable", SND_SOC_NOPM, 0, 0, &sma1307_enable_control), /* stream domain */ SND_SOC_DAPM_AIF_IN("AIF IN", "Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("AIF OUT", "Capture", 0, SND_SOC_NOPM, 0, 0), }; static const struct snd_soc_dapm_route sma1307_audio_map[] = { /* Playback */ { "AIF IN Source", "Mono", "AIF IN" }, { "AIF IN Source", "Left", "AIF IN" }, { "AIF IN Source", "Right", "AIF IN" }, { "SDO Enable", "Switch", "AIF IN" }, { "SDO Setting", "Data_One_48k", "SDO Enable" }, { "SDO Setting", "Data_Two_48k", "SDO Enable" }, { "SDO Setting", "Data_Two_24k", "SDO Enable" }, { "SDO Setting", "Clk_PLL", "SDO Enable" }, { "SDO Setting", "Clk_OSC", "SDO Enable" }, { "AIF OUT0 Source", "Disable", "SDO Setting" }, { "AIF OUT0 Source", "After_FmtC", "SDO Setting" }, { "AIF OUT0 Source", "After_Mixer", "SDO Setting" }, { "AIF OUT0 Source", "After_DSP", "SDO Setting" }, { "AIF OUT0 Source", "Vrms2_Avg", "SDO Setting" }, { "AIF OUT0 Source", "Battery", "SDO Setting" }, { "AIF OUT0 Source", "Temperature", "SDO Setting" }, { "AIF OUT0 Source", "After_Delay", "SDO Setting" }, { "AIF OUT1 Source", "Disable", "SDO Setting" }, { "AIF OUT1 Source", "After_FmtC", "SDO Setting" }, { "AIF 
OUT1 Source", "After_Mixer", "SDO Setting" }, { "AIF OUT1 Source", "After_DSP", "SDO Setting" }, { "AIF OUT1 Source", "Vrms2_Avg", "SDO Setting" }, { "AIF OUT1 Source", "Battery", "SDO Setting" }, { "AIF OUT1 Source", "Temperature", "SDO Setting" }, { "AIF OUT1 Source", "After_Delay", "SDO Setting" }, { "Entry", NULL, "AIF OUT0 Source" }, { "Entry", NULL, "AIF OUT1 Source" }, { "Entry", NULL, "AIF IN Source" }, { "AMP Power", NULL, "Entry" }, { "AMP Enable", "Switch", "AMP Power" }, { "SPK", NULL, "AMP Enable" }, /* Capture */ { "AIF OUT", NULL, "AMP Enable" }, }; static void sma1307_setup_pll(struct snd_soc_component *component, unsigned int bclk) { struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); int i = 0; dev_dbg(component->dev, "%s: BCLK = %dHz\n", __func__, bclk); if (sma1307->sys_clk_id == SMA1307_PLL_CLKIN_MCLK) { dev_warn(component->dev, "%s: MCLK is not supported\n", __func__); } else if (sma1307->sys_clk_id == SMA1307_PLL_CLKIN_BCLK) { for (i = 0; i < sma1307->num_of_pll_matches; i++) { if (sma1307->pll_matches[i].input_clk == bclk) break; } if (i == sma1307->num_of_pll_matches) { dev_warn(component->dev, "%s: No matching value between pll table and SCK\n", __func__); return; } regmap_update_bits(sma1307->regmap, SMA1307_A2_TOP_MAN1, SMA1307_PLL_MASK, SMA1307_PLL_ON); } regmap_write(sma1307->regmap, SMA1307_8B_PLL_POST_N, sma1307->pll_matches[i].post_n); regmap_write(sma1307->regmap, SMA1307_8C_PLL_N, sma1307->pll_matches[i].n); regmap_write(sma1307->regmap, SMA1307_8D_PLL_A_SETTING, sma1307->pll_matches[i].vco); regmap_write(sma1307->regmap, SMA1307_8E_PLL_P_CP, sma1307->pll_matches[i].p_cp); } static int sma1307_dai_hw_params_amp(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); unsigned int bclk = 0; if (sma1307->format == SND_SOC_DAIFMT_DSP_A) bclk = params_rate(params) * sma1307->frame_size; else bclk = params_rate(params) * params_physical_width(params) * params_channels(params); dev_dbg(component->dev, "%s: rate = %d : bit size = %d : channel = %d\n", __func__, params_rate(params), params_width(params), params_channels(params)); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (sma1307->sys_clk_id == SMA1307_PLL_CLKIN_BCLK) { if (sma1307->last_bclk != bclk) { sma1307_setup_pll(component, bclk); sma1307->last_bclk = bclk; } } switch (params_rate(params)) { case 8000: case 12000: case 16000: case 24000: case 32000: case 44100: case 48000: break; case 96000: dev_warn(component->dev, "%s: %d rate not support SDO\n", __func__, params_rate(params)); break; default: dev_err(component->dev, "%s: not support rate : %d\n", __func__, params_rate(params)); return -EINVAL; } /* substream->stream is SNDRV_PCM_STREAM_CAPTURE */ } else { switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: regmap_update_bits(sma1307->regmap, SMA1307_A4_TOP_MAN3, SMA1307_SCK_RATE_MASK | SMA1307_DATA_WIDTH_MASK, SMA1307_SCK_32FS | SMA1307_DATA_16BIT); break; case SNDRV_PCM_FORMAT_S24_LE: regmap_update_bits(sma1307->regmap, SMA1307_A4_TOP_MAN3, SMA1307_SCK_RATE_MASK | SMA1307_DATA_WIDTH_MASK, SMA1307_SCK_64FS | SMA1307_DATA_24BIT); break; case SNDRV_PCM_FORMAT_S32_LE: regmap_update_bits(sma1307->regmap, SMA1307_A4_TOP_MAN3, SMA1307_SCK_RATE_MASK | SMA1307_DATA_WIDTH_MASK, SMA1307_SCK_64FS | SMA1307_DATA_24BIT); break; default: dev_err(component->dev, "%s: not support data bit : %d\n", __func__, 
params_format(params)); return -EINVAL; } } switch (sma1307->format) { case SND_SOC_DAIFMT_I2S: regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_I2S_MODE_MASK, SMA1307_STANDARD_I2S); regmap_update_bits(sma1307->regmap, SMA1307_A4_TOP_MAN3, SMA1307_INTERFACE_MASK, SMA1307_I2S_FORMAT); break; case SND_SOC_DAIFMT_LEFT_J: regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_I2S_MODE_MASK, SMA1307_LJ); regmap_update_bits(sma1307->regmap, SMA1307_A4_TOP_MAN3, SMA1307_INTERFACE_MASK, SMA1307_LJ_FORMAT); break; case SND_SOC_DAIFMT_RIGHT_J: switch (params_width(params)) { case 16: regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_I2S_MODE_MASK, SMA1307_RJ_16BIT); break; case 24: case 32: regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_I2S_MODE_MASK, SMA1307_RJ_24BIT); break; } break; case SND_SOC_DAIFMT_DSP_A: regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_I2S_MODE_MASK, SMA1307_STANDARD_I2S); regmap_update_bits(sma1307->regmap, SMA1307_A4_TOP_MAN3, SMA1307_INTERFACE_MASK, SMA1307_TDM_FORMAT); break; } switch (params_width(params)) { case 16: case 24: case 32: break; default: dev_err(component->dev, "%s: not support data bit : %d\n", __func__, params_format(params)); return -EINVAL; } return 0; } static int sma1307_dai_set_sysclk_amp(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_component *component = dai->component; struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); switch (clk_id) { case SMA1307_EXTERNAL_CLOCK_19_2: case SMA1307_EXTERNAL_CLOCK_24_576: case SMA1307_PLL_CLKIN_MCLK: case SMA1307_PLL_CLKIN_BCLK: break; default: dev_err(component->dev, "%s: Invalid clk id: %d\n", __func__, clk_id); return -EINVAL; } sma1307->sys_clk_id = clk_id; return 0; } static int sma1307_dai_set_fmt_amp(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_component *component = dai->component; struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBC_CFC: dev_dbg(component->dev, "%s: %s\n", __func__, "I2S/TDM Device mode"); regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_CONTROLLER_DEVICE_MASK, SMA1307_DEVICE_MODE); break; case SND_SOC_DAIFMT_CBP_CFP: dev_dbg(component->dev, "%s: %s\n", __func__, "I2S/TDM Controller mode"); regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_CONTROLLER_DEVICE_MASK, SMA1307_CONTROLLER_MODE); break; default: dev_err(component->dev, "%s: Unsupported Controller/Device : 0x%x\n", __func__, fmt); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_LEFT_J: case SND_SOC_DAIFMT_DSP_A: case SND_SOC_DAIFMT_DSP_B: sma1307->format = fmt & SND_SOC_DAIFMT_FORMAT_MASK; break; default: dev_err(component->dev, "%s: Unsupported Audio Interface Format : 0x%x\n", __func__, fmt); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_IB_NF: dev_dbg(component->dev, "%s: %s\n", __func__, "Invert BCLK + Normal Frame"); regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_SCK_RISING_MASK, SMA1307_SCK_RISING_EDGE); break; case SND_SOC_DAIFMT_IB_IF: dev_dbg(component->dev, "%s: %s\n", __func__, "Invert BCLK + Invert Frame"); regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_LEFTPOL_MASK | SMA1307_SCK_RISING_MASK, SMA1307_HIGH_FIRST_CH | SMA1307_SCK_RISING_EDGE); break; case SND_SOC_DAIFMT_NB_IF: 
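		/*
		 * Normal BCLK, inverted frame clock: only the frame (LRCK)
		 * polarity bit is flipped; the BCLK sampling edge is not
		 * changed here.
		 */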
dev_dbg(component->dev, "%s: %s\n", __func__, "Normal BCLK + Invert Frame"); regmap_update_bits(sma1307->regmap, SMA1307_01_INPUT_CTRL1, SMA1307_LEFTPOL_MASK, SMA1307_HIGH_FIRST_CH); break; case SND_SOC_DAIFMT_NB_NF: dev_dbg(component->dev, "%s: %s\n", __func__, "Normal BCLK + Normal Frame"); break; default: dev_err(component->dev, "%s: Unsupported Bit & Frameclock : 0x%x\n", __func__, fmt); return -EINVAL; } return 0; } static int sma1307_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct snd_soc_component *component = dai->component; struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); dev_dbg(component->dev, "%s: slots = %d, slot_width - %d\n", __func__, slots, slot_width); sma1307->frame_size = slot_width * slots; regmap_update_bits(sma1307->regmap, SMA1307_A4_TOP_MAN3, SMA1307_INTERFACE_MASK, SMA1307_TDM_FORMAT); regmap_update_bits(sma1307->regmap, SMA1307_A5_TDM1, SMA1307_TDM_TX_MODE_MASK, SMA1307_TDM_TX_MONO); switch (slot_width) { case 16: regmap_update_bits(sma1307->regmap, SMA1307_A6_TDM2, SMA1307_TDM_DL_MASK, SMA1307_TDM_DL_16); break; case 32: regmap_update_bits(sma1307->regmap, SMA1307_A6_TDM2, SMA1307_TDM_DL_MASK, SMA1307_TDM_DL_32); break; default: dev_err(component->dev, "%s: not support TDM %d slot_width\n", __func__, slot_width); return -EINVAL; } switch (slots) { case 4: regmap_update_bits(sma1307->regmap, SMA1307_A6_TDM2, SMA1307_TDM_N_SLOT_MASK, SMA1307_TDM_N_SLOT_4); break; case 8: regmap_update_bits(sma1307->regmap, SMA1307_A6_TDM2, SMA1307_TDM_N_SLOT_MASK, SMA1307_TDM_N_SLOT_8); break; default: dev_err(component->dev, "%s: not support TDM %d slots\n", __func__, slots); return -EINVAL; } if (sma1307->tdm_slot0_rx < slots) regmap_update_bits(sma1307->regmap, SMA1307_A5_TDM1, SMA1307_TDM_SLOT0_RX_POS_MASK, sma1307->tdm_slot0_rx << 3); else dev_err(component->dev, "%s: Incorrect tdm-slot0-rx %d set\n", __func__, sma1307->tdm_slot0_rx); if (sma1307->tdm_slot1_rx < slots) regmap_update_bits(sma1307->regmap, SMA1307_A5_TDM1, SMA1307_TDM_SLOT1_RX_POS_MASK, sma1307->tdm_slot1_rx); else dev_err(component->dev, "%s: Incorrect tdm-slot1-rx %d set\n", __func__, sma1307->tdm_slot1_rx); if (sma1307->tdm_slot0_tx < slots) regmap_update_bits(sma1307->regmap, SMA1307_A6_TDM2, SMA1307_TDM_SLOT0_TX_POS_MASK, sma1307->tdm_slot0_tx << 3); else dev_err(component->dev, "%s: Incorrect tdm-slot0-tx %d set\n", __func__, sma1307->tdm_slot0_tx); if (sma1307->tdm_slot1_tx < slots) regmap_update_bits(sma1307->regmap, SMA1307_A6_TDM2, SMA1307_TDM_SLOT1_TX_POS_MASK, sma1307->tdm_slot1_tx); else dev_err(component->dev, "%s: Incorrect tdm-slot1-tx %d set\n", __func__, sma1307->tdm_slot1_tx); return 0; } static int sma1307_dai_mute_stream(struct snd_soc_dai *dai, int mute, int stream) { struct snd_soc_component *component = dai->component; struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); if (stream == SNDRV_PCM_STREAM_CAPTURE) return 0; if (mute) { dev_dbg(component->dev, "%s: %s\n", __func__, "MUTE"); regmap_update_bits(sma1307->regmap, SMA1307_0E_MUTE_VOL_CTRL, SMA1307_SPK_MUTE_MASK, SMA1307_SPK_MUTE); } else { if (!sma1307->force_mute_status) { dev_dbg(component->dev, "%s: %s\n", __func__, "UNMUTE"); regmap_update_bits(sma1307->regmap, SMA1307_0E_MUTE_VOL_CTRL, SMA1307_SPK_MUTE_MASK, SMA1307_SPK_UNMUTE); } else { dev_dbg(sma1307->dev, "%s: FORCE MUTE!!!\n", __func__); } } return 0; } static const struct snd_soc_dai_ops sma1307_dai_ops_amp = { .hw_params = sma1307_dai_hw_params_amp, 
.set_fmt = sma1307_dai_set_fmt_amp, .set_sysclk = sma1307_dai_set_sysclk_amp, .set_tdm_slot = sma1307_dai_set_tdm_slot, .mute_stream = sma1307_dai_mute_stream, }; #define SMA1307_RATES_PLAYBACK SNDRV_PCM_RATE_8000_96000 #define SMA1307_RATES_CAPTURE SNDRV_PCM_RATE_8000_48000 #define SMA1307_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_driver sma1307_dai[] = { { .name = "sma1307-amplifier", .id = 0, .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = SMA1307_RATES_PLAYBACK, .formats = SMA1307_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = SMA1307_RATES_CAPTURE, .formats = SMA1307_FORMATS, }, .ops = &sma1307_dai_ops_amp, }, }; static void sma1307_check_fault_worker(struct work_struct *work) { struct sma1307_priv *sma1307 = container_of(work, struct sma1307_priv, check_fault_work.work); unsigned int status1_val, status2_val; char *envp[3] = { NULL, NULL, NULL }; if (sma1307->tsdw_cnt) regmap_read(sma1307->regmap, SMA1307_0A_SPK_VOL, &sma1307->cur_vol); else regmap_read(sma1307->regmap, SMA1307_0A_SPK_VOL, &sma1307->init_vol); regmap_read(sma1307->regmap, SMA1307_FA_STATUS1, &status1_val); regmap_read(sma1307->regmap, SMA1307_FB_STATUS2, &status2_val); if (~status1_val & SMA1307_OT1_OK_STATUS) { dev_crit(sma1307->dev, "%s: OT1(Over Temperature Level 1)\n", __func__); envp[0] = kasprintf(GFP_KERNEL, "STATUS=OT1"); if (sma1307->sw_ot1_prot) { /* Volume control (Current Volume -3dB) */ if ((sma1307->cur_vol + 6) <= 0xFA) { sma1307->cur_vol += 6; regmap_write(sma1307->regmap, SMA1307_0A_SPK_VOL, sma1307->cur_vol); envp[1] = kasprintf(GFP_KERNEL, "VOLUME=0x%02X", sma1307->cur_vol); } } sma1307->tsdw_cnt++; } else if (sma1307->tsdw_cnt) { regmap_write(sma1307->regmap, SMA1307_0A_SPK_VOL, sma1307->init_vol); sma1307->tsdw_cnt = 0; sma1307->cur_vol = sma1307->init_vol; envp[0] = kasprintf(GFP_KERNEL, "STATUS=OT1_CLEAR"); envp[1] = kasprintf(GFP_KERNEL, "VOLUME=0x%02X", sma1307->cur_vol); } if (~status1_val & SMA1307_OT2_OK_STATUS) { dev_crit(sma1307->dev, "%s: OT2(Over Temperature Level 2)\n", __func__); envp[0] = kasprintf(GFP_KERNEL, "STATUS=OT2"); } if (status1_val & SMA1307_UVLO_STATUS) { dev_crit(sma1307->dev, "%s: UVLO(Under Voltage Lock Out)\n", __func__); envp[0] = kasprintf(GFP_KERNEL, "STATUS=UVLO"); } if (status1_val & SMA1307_OVP_BST_STATUS) { dev_crit(sma1307->dev, "%s: OVP_BST(Over Voltage Protection)\n", __func__); envp[0] = kasprintf(GFP_KERNEL, "STATUS=OVP_BST"); } if (status2_val & SMA1307_OCP_SPK_STATUS) { dev_crit(sma1307->dev, "%s: OCP_SPK(Over Current Protect SPK)\n", __func__); envp[0] = kasprintf(GFP_KERNEL, "STATUS=OCP_SPK"); } if (status2_val & SMA1307_OCP_BST_STATUS) { dev_crit(sma1307->dev, "%s: OCP_BST(Over Current Protect Boost)\n", __func__); envp[0] = kasprintf(GFP_KERNEL, "STATUS=OCP_BST"); } if (status2_val & SMA1307_CLK_MON_STATUS) { dev_crit(sma1307->dev, "%s: CLK_FAULT(No clock input)\n", __func__); envp[0] = kasprintf(GFP_KERNEL, "STATUS=CLK_FAULT"); } if (envp[0] != NULL) { if (kobject_uevent_env(sma1307->kobj, KOBJ_CHANGE, envp)) dev_err(sma1307->dev, "%s: Error sending uevent\n", __func__); kfree(envp[0]); kfree(envp[1]); } if (sma1307->check_fault_status) { if (sma1307->check_fault_period > 0) queue_delayed_work(system_freezable_wq, &sma1307->check_fault_work, sma1307->check_fault_period * HZ); else queue_delayed_work(system_freezable_wq, &sma1307->check_fault_work, CHECK_PERIOD_TIME * HZ); } } static void 
sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *file) { const struct firmware *fw; int *data, size, offset, num_mode; int ret; ret = request_firmware(&fw, file, sma1307->dev); if (ret) { dev_err(sma1307->dev, "%s: failed to read \"%s\": %pe\n", __func__, setting_file, ERR_PTR(ret)); sma1307->set.status = false; return; } else if ((fw->size) < SMA1307_SETTING_HEADER_SIZE) { dev_err(sma1307->dev, "%s: Invalid file\n", __func__); release_firmware(fw); sma1307->set.status = false; return; } data = kzalloc(fw->size, GFP_KERNEL); size = fw->size >> 2; memcpy(data, fw->data, fw->size); release_firmware(fw); /* HEADER */ sma1307->set.header_size = SMA1307_SETTING_HEADER_SIZE; sma1307->set.checksum = data[sma1307->set.header_size - 2]; sma1307->set.num_mode = data[sma1307->set.header_size - 1]; num_mode = sma1307->set.num_mode; sma1307->set.header = devm_kzalloc(sma1307->dev, sma1307->set.header_size, GFP_KERNEL); memcpy(sma1307->set.header, data, sma1307->set.header_size * sizeof(int)); if ((sma1307->set.checksum >> 8) != SMA1307_SETTING_CHECKSUM) { dev_err(sma1307->dev, "%s: failed by dismatch \"%s\"\n", __func__, setting_file); sma1307->set.status = false; return; } /* DEFAULT */ sma1307->set.def_size = SMA1307_SETTING_DEFAULT_SIZE; sma1307->set.def = devm_kzalloc(sma1307->dev, sma1307->set.def_size * sizeof(int), GFP_KERNEL); memcpy(sma1307->set.def, &data[sma1307->set.header_size], sma1307->set.def_size * sizeof(int)); /* MODE */ offset = sma1307->set.header_size + sma1307->set.def_size; sma1307->set.mode_size = DIV_ROUND_CLOSEST(size - offset, num_mode + 1); for (int i = 0; i < num_mode; i++) { sma1307->set.mode_set[i] = devm_kzalloc(sma1307->dev, sma1307->set.mode_size * 2 * sizeof(int), GFP_KERNEL); for (int j = 0; j < sma1307->set.mode_size; j++) { sma1307->set.mode_set[i][2 * j] = data[offset + ((num_mode + 1) * j)]; sma1307->set.mode_set[i][2 * j + 1] = data[offset + ((num_mode + 1) * j + i + 1)]; } } kfree(data); sma1307->set.status = true; } static void sma1307_reset(struct snd_soc_component *component) { struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); unsigned int status = 0; regmap_read(sma1307->regmap, SMA1307_FF_DEVICE_INDEX, &status); sma1307->rev_num = status & SMA1307_REV_NUM_STATUS; dev_dbg(component->dev, "%s: SMA1307 Revision %d\n", __func__, sma1307->rev_num); regmap_read(sma1307->regmap, SMA1307_99_OTP_TRM2, &sma1307->otp_trm2); regmap_read(sma1307->regmap, SMA1307_9A_OTP_TRM3, &sma1307->otp_trm3); if ((sma1307->otp_trm2 & SMA1307_OTP_STAT_MASK) != SMA1307_OTP_STAT_1) dev_warn(component->dev, "%s: SMA1307 OTP Status Fail\n", __func__); /* Register Initial Value Setting */ sma1307_setting_loaded(sma1307, setting_file); if (sma1307->set.status) sma1307_set_binary(component); else sma1307_set_default(component); regmap_update_bits(sma1307->regmap, SMA1307_93_INT_CTRL, SMA1307_DIS_INT_MASK, SMA1307_HIGH_Z_INT); regmap_write(sma1307->regmap, SMA1307_0A_SPK_VOL, sma1307->init_vol); } static void sma1307_set_binary(struct snd_soc_component *component) { struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); int i = 0, mode = 0; for (i = 0; i < (sma1307->set.def_size); i++) { if (sma1307_writeable_register(sma1307->dev, i) && ((i < SMA1307_97_OTP_TRM0) || (i > SMA1307_9A_OTP_TRM3))) { regmap_write(sma1307->regmap, i, sma1307->set.def[i]); } } for (i = 0; i < (sma1307->set.mode_size); i++) { if (sma1307_writeable_register(sma1307->dev, i) && ((i < SMA1307_97_OTP_TRM0) || (i > SMA1307_9A_OTP_TRM3))) { mode = 
sma1307->binary_mode; regmap_write(sma1307->regmap, sma1307->set.mode_set[mode][2 * i], sma1307->set.mode_set[mode][2 * i + 1]); } } } static void sma1307_set_default(struct snd_soc_component *component) { struct sma1307_priv *sma1307 = snd_soc_component_get_drvdata(component); int i = 0; for (i = 0; i < (unsigned int)ARRAY_SIZE(sma1307_reg_def); i++) regmap_write(sma1307->regmap, sma1307_reg_def[i].reg, sma1307_reg_def[i].def); if (!strcmp(sma1307->name, DEVICE_NAME_SMA1307AQ)) sma1307->data->init(sma1307->regmap); } static int sma1307_probe(struct snd_soc_component *component) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); snd_soc_dapm_sync(dapm); sma1307_amp_component = component; snd_soc_add_component_controls(component, sma1307_binary_mode_control, ARRAY_SIZE(sma1307_binary_mode_control)); sma1307_reset(component); return 0; } static const struct snd_soc_component_driver sma1307_component = { .probe = sma1307_probe, .controls = sma1307_snd_controls, .num_controls = ARRAY_SIZE(sma1307_snd_controls), .dapm_widgets = sma1307_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(sma1307_dapm_widgets), .dapm_routes = sma1307_audio_map, .num_dapm_routes = ARRAY_SIZE(sma1307_audio_map), }; static const struct regmap_config sma_i2c_regmap = { .reg_bits = 8, .val_bits = 8, .max_register = SMA1307_FF_DEVICE_INDEX, .readable_reg = sma1307_readable_register, .writeable_reg = sma1307_writeable_register, .volatile_reg = sma1307_volatile_register, .reg_defaults = sma1307_reg_def, .num_reg_defaults = ARRAY_SIZE(sma1307_reg_def), }; static void sma1307aq_init(struct regmap *regmap) { /* Guidelines for driving 4ohm load */ /* Brown Out Protection */ regmap_write(regmap, SMA1307_02_BROWN_OUT_PROT1, 0x62); regmap_write(regmap, SMA1307_03_BROWN_OUT_PROT2, 0x5D); regmap_write(regmap, SMA1307_04_BROWN_OUT_PROT3, 0x57); regmap_write(regmap, SMA1307_05_BROWN_OUT_PROT8, 0x54); regmap_write(regmap, SMA1307_06_BROWN_OUT_PROT9, 0x51); regmap_write(regmap, SMA1307_07_BROWN_OUT_PROT10, 0x4D); regmap_write(regmap, SMA1307_08_BROWN_OUT_PROT11, 0x4B); regmap_write(regmap, SMA1307_27_BROWN_OUT_PROT4, 0x3C); regmap_write(regmap, SMA1307_28_BROWN_OUT_PROT5, 0x5B); regmap_write(regmap, SMA1307_29_BROWN_OUT_PROT12, 0x78); regmap_write(regmap, SMA1307_2A_BROWN_OUT_PROT13, 0x96); regmap_write(regmap, SMA1307_2B_BROWN_OUT_PROT14, 0xB4); regmap_write(regmap, SMA1307_2C_BROWN_OUT_PROT15, 0xD3); /* FDPEC Gain */ regmap_write(regmap, SMA1307_35_FDPEC_CTRL0, 0x16); /* FLT Vdd */ regmap_write(regmap, SMA1307_92_FDPEC_CTRL1, 0xA0); /* Boost Max */ regmap_write(regmap, SMA1307_AB_BOOST_CTRL4, 0x0F); } static const struct sma1307_data sma1307aq_data = { .name = DEVICE_NAME_SMA1307AQ, .init = sma1307aq_init, }; static int sma1307_i2c_probe(struct i2c_client *client) { struct sma1307_priv *sma1307; const struct sma1307_data *data; int ret = 0; unsigned int device_info; sma1307 = devm_kzalloc(&client->dev, sizeof(*sma1307), GFP_KERNEL); if (!sma1307) return -ENOMEM; sma1307->regmap = devm_regmap_init_i2c(client, &sma_i2c_regmap); if (IS_ERR(sma1307->regmap)) { return dev_err_probe(&client->dev, PTR_ERR(sma1307->regmap), "%s: failed to allocate register map\n", __func__); } data = device_get_match_data(&client->dev); if (!data) return -ENODEV; sma1307->data = data; /* set initial value as normal AMP IC status */ sma1307->name = client->name; sma1307->format = SND_SOC_DAIFMT_I2S; sma1307->sys_clk_id = SMA1307_PLL_CLKIN_BCLK; sma1307->num_of_pll_matches = ARRAY_SIZE(sma1307_pll_matches); sma1307->check_fault_period = 
		CHECK_PERIOD_TIME;
	sma1307->check_fault_status = true;
	sma1307->init_vol = 0x32;
	sma1307->cur_vol = sma1307->init_vol;
	sma1307->sw_ot1_prot = true;

	mutex_init(&sma1307->default_lock);
	INIT_DELAYED_WORK(&sma1307->check_fault_work,
			  sma1307_check_fault_worker);

	sma1307->dev = &client->dev;
	sma1307->kobj = &client->dev.kobj;

	i2c_set_clientdata(client, sma1307);

	sma1307->pll_matches = sma1307_pll_matches;

	regmap_read(sma1307->regmap, SMA1307_FF_DEVICE_INDEX, &device_info);

	if ((device_info & 0xF8) != SMA1307_DEVICE_ID) {
		dev_err(&client->dev,
			"%s: device initialization error (0x%02X)",
			__func__, device_info);
		return -ENODEV;
	}
	dev_dbg(&client->dev, "%s: chip version 0x%02X\n",
		__func__, device_info);

	ret = devm_snd_soc_register_component(&client->dev,
					      &sma1307_component,
					      sma1307_dai, 1);
	if (ret) {
		dev_err(&client->dev, "%s: failed to register component\n",
			__func__);
		return ret;
	}

	return ret;
}

static void sma1307_i2c_remove(struct i2c_client *client)
{
	struct sma1307_priv *sma1307 =
	    (struct sma1307_priv *)i2c_get_clientdata(client);

	cancel_delayed_work_sync(&sma1307->check_fault_work);
}

static const struct i2c_device_id sma1307_i2c_id[] = {
	{ "sma1307a", 0 },
	{ "sma1307aq", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, sma1307_i2c_id);

static const struct of_device_id sma1307_of_match[] = {
	{
		.compatible = "irondevice,sma1307a",
	},
	{
		.compatible = "irondevice,sma1307aq",
		.data = &sma1307aq_data /* AEC-Q100 qualified */
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sma1307_of_match);

static struct i2c_driver sma1307_i2c_driver = {
	.driver = {
		.name = "sma1307",
		.of_match_table = sma1307_of_match,
	},
	.probe = sma1307_i2c_probe,
	.remove = sma1307_i2c_remove,
	.id_table = sma1307_i2c_id,
};
module_i2c_driver(sma1307_i2c_driver);

MODULE_DESCRIPTION("ALSA SoC SMA1307 driver");
MODULE_AUTHOR("Gyuhwa Park, <[email protected]>");
MODULE_AUTHOR("KS Jo, <[email protected]>");
MODULE_LICENSE("GPL");
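/*
 * Worked example of the clocking path above (illustration only, not
 * referenced by the code): with the default SMA1307_PLL_CLKIN_BCLK source,
 * sma1307_dai_hw_params_amp() derives BCLK for plain I2S as
 * rate * physical sample width * channels, e.g.
 *
 *	48000 Hz * 32 bit * 2 ch = 3072000 Hz
 *
 * which matches the "3.072MHz" row of sma1307_pll_matches[], so
 * sma1307_setup_pll() programs post_n = 0x06, n = 0x60, vco = 0x88 and
 * p_cp = 0x00 for a 24.576 MHz PLL output. For SND_SOC_DAIFMT_DSP_A the
 * frame size configured by sma1307_dai_set_tdm_slot()
 * (slots * slot_width) is used instead of width * channels.
 */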
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#ifndef __KSMBD_TRANSPORT_IPC_H__
#define __KSMBD_TRANSPORT_IPC_H__

#include <linux/wait.h>

#define KSMBD_IPC_MAX_PAYLOAD	4096

struct ksmbd_login_response *
ksmbd_ipc_login_request(const char *account);
struct ksmbd_login_response_ext *
ksmbd_ipc_login_request_ext(const char *account);

struct ksmbd_session;
struct ksmbd_share_config;
struct ksmbd_tree_connect;
struct sockaddr;

struct ksmbd_tree_connect_response *
ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
			       struct ksmbd_share_config *share,
			       struct ksmbd_tree_connect *tree_conn,
			       struct sockaddr *peer_addr);
int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
				      unsigned long long connect_id);
int ksmbd_ipc_logout_request(const char *account, int flags);

struct ksmbd_share_config_response *
ksmbd_ipc_share_config_request(const char *name);

struct ksmbd_spnego_authen_response *
ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len);

int ksmbd_ipc_id_alloc(void);
void ksmbd_rpc_id_free(int handle);

struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess,
					 int handle);
struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess,
					  int handle);
struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess,
					  int handle, void *payload,
					  size_t payload_sz);
struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess,
					 int handle);
struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess,
					  int handle, void *payload,
					  size_t payload_sz);
struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess,
					void *payload, size_t payload_sz);

void ksmbd_ipc_release(void);
void ksmbd_ipc_soft_reset(void);
int ksmbd_ipc_init(void);

#endif /* __KSMBD_TRANSPORT_IPC_H__ */
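/*
 * Usage sketch (illustrative only; the calling convention is assumed here,
 * not defined by this header): a session-setup path would typically issue
 * a login request and examine the user-space response, e.g.
 *
 *	struct ksmbd_login_response *resp;
 *
 *	resp = ksmbd_ipc_login_request(account);
 *	if (!resp)
 *		return -EINVAL;
 *	(inspect the response fields, then release the buffer with the
 *	 allocator used by the IPC layer, which is not declared here)
 *
 * The RPC helpers (ksmbd_rpc_open/write/read/ioctl/close) follow the same
 * request/response pattern and are presumably keyed by the integer handle
 * obtained from ksmbd_ipc_id_alloc().
 */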
// SPDX-License-Identifier: GPL-2.0-or-later /* * Jack abstraction layer * * Copyright 2008 Wolfson Microelectronics */ #include <linux/input.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/mm.h> #include <linux/debugfs.h> #include <sound/jack.h> #include <sound/core.h> #include <sound/control.h> struct snd_jack_kctl { struct snd_kcontrol *kctl; struct list_head list; /* list of controls belong to the same jack */ unsigned int mask_bits; /* only masked status bits are reported via kctl */ struct snd_jack *jack; /* pointer to struct snd_jack */ bool sw_inject_enable; /* allow to inject plug event via debugfs */ #ifdef CONFIG_SND_JACK_INJECTION_DEBUG struct dentry *jack_debugfs_root; /* jack_kctl debugfs root */ #endif }; #ifdef CONFIG_SND_JACK_INPUT_DEV static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = { SW_HEADPHONE_INSERT, SW_MICROPHONE_INSERT, SW_LINEOUT_INSERT, SW_JACK_PHYSICAL_INSERT, SW_VIDEOOUT_INSERT, SW_LINEIN_INSERT, }; #endif /* CONFIG_SND_JACK_INPUT_DEV */ static void snd_jack_remove_debugfs(struct snd_jack *jack); static int snd_jack_dev_disconnect(struct snd_device *device) { struct snd_jack *jack = device->device_data; snd_jack_remove_debugfs(jack); #ifdef CONFIG_SND_JACK_INPUT_DEV guard(mutex)(&jack->input_dev_lock); if (!jack->input_dev) return 0; /* If the input device is registered with the input subsystem * then we need to use a different deallocator. */ if (jack->registered) input_unregister_device(jack->input_dev); else input_free_device(jack->input_dev); jack->input_dev = NULL; #endif /* CONFIG_SND_JACK_INPUT_DEV */ return 0; } static int snd_jack_dev_free(struct snd_device *device) { struct snd_jack *jack = device->device_data; struct snd_card *card = device->card; struct snd_jack_kctl *jack_kctl, *tmp_jack_kctl; list_for_each_entry_safe(jack_kctl, tmp_jack_kctl, &jack->kctl_list, list) { list_del_init(&jack_kctl->list); snd_ctl_remove(card, jack_kctl->kctl); } if (jack->private_free) jack->private_free(jack); snd_jack_dev_disconnect(device); kfree(jack->id); kfree(jack); return 0; } #ifdef CONFIG_SND_JACK_INPUT_DEV static int snd_jack_dev_register(struct snd_device *device) { struct snd_jack *jack = device->device_data; struct snd_card *card = device->card; int err, i; snprintf(jack->name, sizeof(jack->name), "%s %s", card->shortname, jack->id); guard(mutex)(&jack->input_dev_lock); if (!jack->input_dev) return 0; jack->input_dev->name = jack->name; /* Default to the sound card device. 
*/ if (!jack->input_dev->dev.parent) jack->input_dev->dev.parent = snd_card_get_device_link(card); /* Add capabilities for any keys that are enabled */ for (i = 0; i < ARRAY_SIZE(jack->key); i++) { int testbit = SND_JACK_BTN_0 >> i; if (!(jack->type & testbit)) continue; if (!jack->key[i]) jack->key[i] = BTN_0 + i; input_set_capability(jack->input_dev, EV_KEY, jack->key[i]); } err = input_register_device(jack->input_dev); if (err == 0) jack->registered = 1; return err; } #endif /* CONFIG_SND_JACK_INPUT_DEV */ #ifdef CONFIG_SND_JACK_INJECTION_DEBUG static void snd_jack_inject_report(struct snd_jack_kctl *jack_kctl, int status) { struct snd_jack *jack; #ifdef CONFIG_SND_JACK_INPUT_DEV int i; #endif if (!jack_kctl) return; jack = jack_kctl->jack; if (jack_kctl->sw_inject_enable) snd_kctl_jack_report(jack->card, jack_kctl->kctl, status & jack_kctl->mask_bits); #ifdef CONFIG_SND_JACK_INPUT_DEV if (!jack->input_dev) return; for (i = 0; i < ARRAY_SIZE(jack->key); i++) { int testbit = ((SND_JACK_BTN_0 >> i) & jack_kctl->mask_bits); if (jack->type & testbit) input_report_key(jack->input_dev, jack->key[i], status & testbit); } for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++) { int testbit = ((1 << i) & jack_kctl->mask_bits); if (jack->type & testbit) input_report_switch(jack->input_dev, jack_switch_types[i], status & testbit); } input_sync(jack->input_dev); #endif /* CONFIG_SND_JACK_INPUT_DEV */ } static ssize_t sw_inject_enable_read(struct file *file, char __user *to, size_t count, loff_t *ppos) { struct snd_jack_kctl *jack_kctl = file->private_data; int len, ret; char buf[128]; len = scnprintf(buf, sizeof(buf), "%s: %s\t\t%s: %i\n", "Jack", jack_kctl->kctl->id.name, "Inject Enabled", jack_kctl->sw_inject_enable); ret = simple_read_from_buffer(to, count, ppos, buf, len); return ret; } static ssize_t sw_inject_enable_write(struct file *file, const char __user *from, size_t count, loff_t *ppos) { struct snd_jack_kctl *jack_kctl = file->private_data; int ret, err; unsigned long enable; char buf[8] = { 0 }; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, from, count); err = kstrtoul(buf, 0, &enable); if (err) return err; if (jack_kctl->sw_inject_enable == (!!enable)) return ret; jack_kctl->sw_inject_enable = !!enable; if (!jack_kctl->sw_inject_enable) snd_jack_report(jack_kctl->jack, jack_kctl->jack->hw_status_cache); return ret; } static ssize_t jackin_inject_write(struct file *file, const char __user *from, size_t count, loff_t *ppos) { struct snd_jack_kctl *jack_kctl = file->private_data; int ret, err; unsigned long enable; char buf[8] = { 0 }; if (!jack_kctl->sw_inject_enable) return -EINVAL; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, from, count); err = kstrtoul(buf, 0, &enable); if (err) return err; snd_jack_inject_report(jack_kctl, !!enable ? 
jack_kctl->mask_bits : 0); return ret; } static ssize_t jack_kctl_id_read(struct file *file, char __user *to, size_t count, loff_t *ppos) { struct snd_jack_kctl *jack_kctl = file->private_data; char buf[64]; int len, ret; len = scnprintf(buf, sizeof(buf), "%s\n", jack_kctl->kctl->id.name); ret = simple_read_from_buffer(to, count, ppos, buf, len); return ret; } /* the bit definition is aligned with snd_jack_types in jack.h */ static const char * const jack_events_name[] = { "HEADPHONE(0x0001)", "MICROPHONE(0x0002)", "LINEOUT(0x0004)", "MECHANICAL(0x0008)", "VIDEOOUT(0x0010)", "LINEIN(0x0020)", "", "", "", "BTN_5(0x0200)", "BTN_4(0x0400)", "BTN_3(0x0800)", "BTN_2(0x1000)", "BTN_1(0x2000)", "BTN_0(0x4000)", "", }; /* the recommended buffer size is 256 */ static int parse_mask_bits(unsigned int mask_bits, char *buf, size_t buf_size) { int i; scnprintf(buf, buf_size, "0x%04x", mask_bits); for (i = 0; i < ARRAY_SIZE(jack_events_name); i++) if (mask_bits & (1 << i)) { strlcat(buf, " ", buf_size); strlcat(buf, jack_events_name[i], buf_size); } strlcat(buf, "\n", buf_size); return strlen(buf); } static ssize_t jack_kctl_mask_bits_read(struct file *file, char __user *to, size_t count, loff_t *ppos) { struct snd_jack_kctl *jack_kctl = file->private_data; char buf[256]; int len, ret; len = parse_mask_bits(jack_kctl->mask_bits, buf, sizeof(buf)); ret = simple_read_from_buffer(to, count, ppos, buf, len); return ret; } static ssize_t jack_kctl_status_read(struct file *file, char __user *to, size_t count, loff_t *ppos) { struct snd_jack_kctl *jack_kctl = file->private_data; char buf[16]; int len, ret; len = scnprintf(buf, sizeof(buf), "%s\n", jack_kctl->kctl->private_value ? "Plugged" : "Unplugged"); ret = simple_read_from_buffer(to, count, ppos, buf, len); return ret; } #ifdef CONFIG_SND_JACK_INPUT_DEV static ssize_t jack_type_read(struct file *file, char __user *to, size_t count, loff_t *ppos) { struct snd_jack_kctl *jack_kctl = file->private_data; char buf[256]; int len, ret; len = parse_mask_bits(jack_kctl->jack->type, buf, sizeof(buf)); ret = simple_read_from_buffer(to, count, ppos, buf, len); return ret; } static const struct file_operations jack_type_fops = { .open = simple_open, .read = jack_type_read, .llseek = default_llseek, }; #endif static const struct file_operations sw_inject_enable_fops = { .open = simple_open, .read = sw_inject_enable_read, .write = sw_inject_enable_write, .llseek = default_llseek, }; static const struct file_operations jackin_inject_fops = { .open = simple_open, .write = jackin_inject_write, .llseek = default_llseek, }; static const struct file_operations jack_kctl_id_fops = { .open = simple_open, .read = jack_kctl_id_read, .llseek = default_llseek, }; static const struct file_operations jack_kctl_mask_bits_fops = { .open = simple_open, .read = jack_kctl_mask_bits_read, .llseek = default_llseek, }; static const struct file_operations jack_kctl_status_fops = { .open = simple_open, .read = jack_kctl_status_read, .llseek = default_llseek, }; static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack, struct snd_jack_kctl *jack_kctl) { char *tname; int i; /* Don't create injection interface for Phantom jacks */ if (strstr(jack_kctl->kctl->id.name, "Phantom")) return 0; tname = kstrdup(jack_kctl->kctl->id.name, GFP_KERNEL); if (!tname) return -ENOMEM; /* replace the chars which are not suitable for folder's name with _ */ for (i = 0; tname[i]; i++) if (!isalnum(tname[i])) tname[i] = '_'; jack_kctl->jack_debugfs_root = debugfs_create_dir(tname, 
jack->card->debugfs_root); kfree(tname); debugfs_create_file("sw_inject_enable", 0644, jack_kctl->jack_debugfs_root, jack_kctl, &sw_inject_enable_fops); debugfs_create_file("jackin_inject", 0200, jack_kctl->jack_debugfs_root, jack_kctl, &jackin_inject_fops); debugfs_create_file("kctl_id", 0444, jack_kctl->jack_debugfs_root, jack_kctl, &jack_kctl_id_fops); debugfs_create_file("mask_bits", 0444, jack_kctl->jack_debugfs_root, jack_kctl, &jack_kctl_mask_bits_fops); debugfs_create_file("status", 0444, jack_kctl->jack_debugfs_root, jack_kctl, &jack_kctl_status_fops); #ifdef CONFIG_SND_JACK_INPUT_DEV debugfs_create_file("type", 0444, jack_kctl->jack_debugfs_root, jack_kctl, &jack_type_fops); #endif return 0; } static void snd_jack_remove_debugfs(struct snd_jack *jack) { struct snd_jack_kctl *jack_kctl; list_for_each_entry(jack_kctl, &jack->kctl_list, list) { debugfs_remove(jack_kctl->jack_debugfs_root); jack_kctl->jack_debugfs_root = NULL; } } #else /* CONFIG_SND_JACK_INJECTION_DEBUG */ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack, struct snd_jack_kctl *jack_kctl) { return 0; } static void snd_jack_remove_debugfs(struct snd_jack *jack) { } #endif /* CONFIG_SND_JACK_INJECTION_DEBUG */ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl) { struct snd_jack_kctl *jack_kctl; jack_kctl = kctl->private_data; if (jack_kctl) { list_del(&jack_kctl->list); kfree(jack_kctl); } } static void snd_jack_kctl_add(struct snd_jack *jack, struct snd_jack_kctl *jack_kctl) { jack_kctl->jack = jack; list_add_tail(&jack_kctl->list, &jack->kctl_list); snd_jack_debugfs_add_inject_node(jack, jack_kctl); } static struct snd_jack_kctl * snd_jack_kctl_new(struct snd_card *card, const char *name, unsigned int mask) { struct snd_kcontrol *kctl; struct snd_jack_kctl *jack_kctl; int err; kctl = snd_kctl_jack_new(name, card); if (!kctl) return NULL; err = snd_ctl_add(card, kctl); if (err < 0) return NULL; jack_kctl = kzalloc(sizeof(*jack_kctl), GFP_KERNEL); if (!jack_kctl) goto error; jack_kctl->kctl = kctl; jack_kctl->mask_bits = mask; kctl->private_data = jack_kctl; kctl->private_free = snd_jack_kctl_private_free; return jack_kctl; error: snd_ctl_free_one(kctl); return NULL; } /** * snd_jack_add_new_kctl - Create a new snd_jack_kctl and add it to jack * @jack: the jack instance which the kctl will attaching to * @name: the name for the snd_kcontrol object * @mask: a bitmask of enum snd_jack_type values that can be detected * by this snd_jack_kctl object. * * Creates a new snd_kcontrol object and adds it to the jack kctl_list. * * Return: Zero if successful, or a negative error code on failure. */ int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name, int mask) { struct snd_jack_kctl *jack_kctl; jack_kctl = snd_jack_kctl_new(jack->card, name, mask); if (!jack_kctl) return -ENOMEM; snd_jack_kctl_add(jack, jack_kctl); return 0; } EXPORT_SYMBOL(snd_jack_add_new_kctl); /** * snd_jack_new - Create a new jack * @card: the card instance * @id: an identifying string for this jack * @type: a bitmask of enum snd_jack_type values that can be detected by * this jack * @jjack: Used to provide the allocated jack object to the caller. * @initial_kctl: if true, create a kcontrol and add it to the jack list. * @phantom_jack: Don't create a input device for phantom jacks. * * Creates a new jack object. * * Return: Zero if successful, or a negative error code on failure. * On success @jjack will be initialised. 
*/ int snd_jack_new(struct snd_card *card, const char *id, int type, struct snd_jack **jjack, bool initial_kctl, bool phantom_jack) { struct snd_jack *jack; struct snd_jack_kctl *jack_kctl = NULL; int err; static const struct snd_device_ops ops = { .dev_free = snd_jack_dev_free, #ifdef CONFIG_SND_JACK_INPUT_DEV .dev_register = snd_jack_dev_register, #endif /* CONFIG_SND_JACK_INPUT_DEV */ .dev_disconnect = snd_jack_dev_disconnect, }; if (initial_kctl) { jack_kctl = snd_jack_kctl_new(card, id, type); if (!jack_kctl) return -ENOMEM; } jack = kzalloc(sizeof(struct snd_jack), GFP_KERNEL); if (jack == NULL) return -ENOMEM; jack->id = kstrdup(id, GFP_KERNEL); if (jack->id == NULL) { kfree(jack); return -ENOMEM; } #ifdef CONFIG_SND_JACK_INPUT_DEV mutex_init(&jack->input_dev_lock); /* don't create input device for phantom jack */ if (!phantom_jack) { int i; jack->input_dev = input_allocate_device(); if (jack->input_dev == NULL) { err = -ENOMEM; goto fail_input; } jack->input_dev->phys = "ALSA"; jack->type = type; for (i = 0; i < SND_JACK_SWITCH_TYPES; i++) if (type & (1 << i)) input_set_capability(jack->input_dev, EV_SW, jack_switch_types[i]); } #endif /* CONFIG_SND_JACK_INPUT_DEV */ err = snd_device_new(card, SNDRV_DEV_JACK, jack, &ops); if (err < 0) goto fail_input; jack->card = card; INIT_LIST_HEAD(&jack->kctl_list); if (initial_kctl) snd_jack_kctl_add(jack, jack_kctl); *jjack = jack; return 0; fail_input: #ifdef CONFIG_SND_JACK_INPUT_DEV input_free_device(jack->input_dev); #endif kfree(jack->id); kfree(jack); return err; } EXPORT_SYMBOL(snd_jack_new); #ifdef CONFIG_SND_JACK_INPUT_DEV /** * snd_jack_set_parent - Set the parent device for a jack * * @jack: The jack to configure * @parent: The device to set as parent for the jack. * * Set the parent for the jack devices in the device tree. This * function is only valid prior to registration of the jack. If no * parent is configured then the parent device will be the sound card. */ void snd_jack_set_parent(struct snd_jack *jack, struct device *parent) { WARN_ON(jack->registered); guard(mutex)(&jack->input_dev_lock); if (jack->input_dev) jack->input_dev->dev.parent = parent; } EXPORT_SYMBOL(snd_jack_set_parent); /** * snd_jack_set_key - Set a key mapping on a jack * * @jack: The jack to configure * @type: Jack report type for this key * @keytype: Input layer key type to be reported * * Map a SND_JACK_BTN_* button type to an input layer key, allowing * reporting of keys on accessories via the jack abstraction. If no * mapping is provided but keys are enabled in the jack type then * BTN_n numeric buttons will be reported. * * If jacks are not reporting via the input API this call will have no * effect. * * Note that this is intended to be use by simple devices with small * numbers of keys that can be reported. It is also possible to * access the input device directly - devices with complex input * capabilities on accessories should consider doing this rather than * using this abstraction. * * This function may only be called prior to registration of the jack. * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type, int keytype) { int key = fls(SND_JACK_BTN_0) - fls(type); WARN_ON(jack->registered); if (!keytype || key >= ARRAY_SIZE(jack->key)) return -EINVAL; jack->type |= type; jack->key[key] = keytype; return 0; } EXPORT_SYMBOL(snd_jack_set_key); #endif /* CONFIG_SND_JACK_INPUT_DEV */ /** * snd_jack_report - Report the current status of a jack * Note: This function uses mutexes and should be called from a * context which can sleep (such as a workqueue). * * @jack: The jack to report status for * @status: The current status of the jack */ void snd_jack_report(struct snd_jack *jack, int status) { struct snd_jack_kctl *jack_kctl; unsigned int mask_bits = 0; #ifdef CONFIG_SND_JACK_INPUT_DEV struct input_dev *idev; int i; #endif if (!jack) return; jack->hw_status_cache = status; list_for_each_entry(jack_kctl, &jack->kctl_list, list) if (jack_kctl->sw_inject_enable) mask_bits |= jack_kctl->mask_bits; else snd_kctl_jack_report(jack->card, jack_kctl->kctl, status & jack_kctl->mask_bits); #ifdef CONFIG_SND_JACK_INPUT_DEV idev = input_get_device(jack->input_dev); if (!idev) return; for (i = 0; i < ARRAY_SIZE(jack->key); i++) { int testbit = ((SND_JACK_BTN_0 >> i) & ~mask_bits); if (jack->type & testbit) input_report_key(idev, jack->key[i], status & testbit); } for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++) { int testbit = ((1 << i) & ~mask_bits); if (jack->type & testbit) input_report_switch(idev, jack_switch_types[i], status & testbit); } input_sync(idev); input_put_device(idev); #endif /* CONFIG_SND_JACK_INPUT_DEV */ } EXPORT_SYMBOL(snd_jack_report);
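/*
 * Example sketch (illustrative only): how a codec or machine driver might
 * use the jack API above, assuming it already owns a valid struct snd_card
 * and CONFIG_SND_JACK_INPUT_DEV is enabled.  The jack name, button mapping
 * and helper names are hypothetical.
 */
static struct snd_jack *example_headset_jack;

static int example_register_headset(struct snd_card *card)
{
	int err;

	/* Headphone + mic detection plus one accessory button, with a kctl. */
	err = snd_jack_new(card, "Headset Jack",
			   SND_JACK_HEADSET | SND_JACK_BTN_0,
			   &example_headset_jack, true, false);
	if (err < 0)
		return err;

	/* Key mapping must be set up before the jack is registered. */
	return snd_jack_set_key(example_headset_jack, SND_JACK_BTN_0,
				KEY_PLAYPAUSE);
}

static void example_report_plug(bool present)
{
	/* snd_jack_report() may sleep; call from workqueue/threaded context. */
	snd_jack_report(example_headset_jack,
			present ? SND_JACK_HEADSET : 0);
}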
// SPDX-License-Identifier: GPL-2.0+ /* speakup_keyhelp.c * help module for speakup * *written by David Borowski. * * Copyright (C) 2003 David Borowski. */ #include <linux/keyboard.h> #include "spk_priv.h" #include "speakup.h" #define MAXFUNCS 130 #define MAXKEYS 256 static const int num_key_names = MSG_KEYNAMES_END - MSG_KEYNAMES_START + 1; static u_short key_offsets[MAXFUNCS], key_data[MAXKEYS]; static u_short masks[] = { 32, 16, 8, 4, 2, 1 }; static short letter_offsets[26] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static u_char funcvals[] = { ATTRIB_BLEEP_DEC, ATTRIB_BLEEP_INC, BLEEPS_DEC, BLEEPS_INC, SAY_FIRST_CHAR, SAY_LAST_CHAR, SAY_CHAR, SAY_CHAR_NUM, SAY_NEXT_CHAR, SAY_PHONETIC_CHAR, SAY_PREV_CHAR, SPEAKUP_PARKED, SPEAKUP_CUT, EDIT_DELIM, EDIT_EXNUM, EDIT_MOST, EDIT_REPEAT, EDIT_SOME, SPEAKUP_GOTO, BOTTOM_EDGE, LEFT_EDGE, RIGHT_EDGE, TOP_EDGE, SPEAKUP_HELP, SAY_LINE, SAY_NEXT_LINE, SAY_PREV_LINE, SAY_LINE_INDENT, SPEAKUP_PASTE, PITCH_DEC, PITCH_INC, PUNCT_DEC, PUNCT_INC, PUNC_LEVEL_DEC, PUNC_LEVEL_INC, SPEAKUP_QUIET, RATE_DEC, RATE_INC, READING_PUNC_DEC, READING_PUNC_INC, SAY_ATTRIBUTES, SAY_FROM_LEFT, SAY_FROM_TOP, SAY_POSITION, SAY_SCREEN, SAY_TO_BOTTOM, SAY_TO_RIGHT, SPK_KEY, SPK_LOCK, SPEAKUP_OFF, SPEECH_KILL, SPELL_DELAY_DEC, SPELL_DELAY_INC, SPELL_WORD, SPELL_PHONETIC, TONE_DEC, TONE_INC, VOICE_DEC, VOICE_INC, VOL_DEC, VOL_INC, CLEAR_WIN, SAY_WIN, SET_WIN, ENABLE_WIN, SAY_WORD, SAY_NEXT_WORD, SAY_PREV_WORD, 0 }; static u_char *state_tbl; static int cur_item, nstates; static void build_key_data(void) { u_char *kp, counters[MAXFUNCS], ch, ch1; u_short *p_key, key; int i, offset = 1; nstates = (int)(state_tbl[-1]); memset(counters, 0, sizeof(counters)); memset(key_offsets, 0, sizeof(key_offsets)); kp = state_tbl + nstates + 1; while (*kp++) { /* count occurrences of each function */ for (i = 0; i < nstates; i++, kp++) { if (!*kp) continue; if ((state_tbl[i] & 16) != 0 && *kp == SPK_KEY) continue; counters[*kp]++; } } for (i = 0; i < MAXFUNCS; i++) { if (counters[i] == 0) continue; key_offsets[i] = offset; offset += (counters[i] + 1); if (offset >= MAXKEYS) break; } /* leave counters set so high keycodes come first. * this is done so num pad and other extended keys maps are spoken before * the alpha with speakup type mapping. 
*/ kp = state_tbl + nstates + 1; while ((ch = *kp++)) { for (i = 0; i < nstates; i++) { ch1 = *kp++; if (!ch1) continue; if ((state_tbl[i] & 16) != 0 && ch1 == SPK_KEY) continue; key = (state_tbl[i] << 8) + ch; counters[ch1]--; offset = key_offsets[ch1]; if (!offset) continue; p_key = key_data + offset + counters[ch1]; *p_key = key; } } } static void say_key(int key) { int i, state = key >> 8; key &= 0xff; for (i = 0; i < 6; i++) { if (state & masks[i]) synth_printf(" %s", spk_msg_get(MSG_STATES_START + i)); } if ((key > 0) && (key <= num_key_names)) synth_printf(" %s\n", spk_msg_get(MSG_KEYNAMES_START + (key - 1))); } static int help_init(void) { char start = SPACE; int i; int num_funcs = MSG_FUNCNAMES_END - MSG_FUNCNAMES_START + 1; state_tbl = spk_our_keys[0] + SHIFT_TBL_SIZE + 2; for (i = 0; i < num_funcs; i++) { char *cur_funcname = spk_msg_get(MSG_FUNCNAMES_START + i); if (start == *cur_funcname) continue; start = *cur_funcname; letter_offsets[(start & 31) - 1] = i; } return 0; } int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key) { int i, n; char *name; u_char func, *kp; u_short *p_keys, val; if (letter_offsets[0] == -1) help_init(); if (type == KT_LATIN) { if (ch == SPACE) { spk_special_handler = NULL; synth_printf("%s\n", spk_msg_get(MSG_LEAVING_HELP)); return 1; } ch |= 32; /* lower case */ if (ch < 'a' || ch > 'z') return -1; if (letter_offsets[ch - 'a'] == -1) { synth_printf(spk_msg_get(MSG_NO_COMMAND), ch); synth_printf("\n"); return 1; } cur_item = letter_offsets[ch - 'a']; } else if (type == KT_CUR) { if (ch == 0 && (MSG_FUNCNAMES_START + cur_item + 1) <= MSG_FUNCNAMES_END) cur_item++; else if (ch == 3 && cur_item > 0) cur_item--; else return -1; } else if (type == KT_SPKUP && ch == SPEAKUP_HELP && !spk_special_handler) { spk_special_handler = spk_handle_help; synth_printf("%s\n", spk_msg_get(MSG_HELP_INFO)); build_key_data(); /* rebuild each time in case new mapping */ return 1; } else { name = NULL; if ((type != KT_SPKUP) && (key > 0) && (key <= num_key_names)) { synth_printf("%s\n", spk_msg_get(MSG_KEYNAMES_START + key - 1)); return 1; } for (i = 0; funcvals[i] != 0 && !name; i++) { if (ch == funcvals[i]) name = spk_msg_get(MSG_FUNCNAMES_START + i); } if (!name) return -1; kp = spk_our_keys[key] + 1; for (i = 0; i < nstates; i++) { if (ch == kp[i]) break; } key += (state_tbl[i] << 8); say_key(key); synth_printf(spk_msg_get(MSG_KEYDESC), name); synth_printf("\n"); return 1; } name = spk_msg_get(MSG_FUNCNAMES_START + cur_item); func = funcvals[cur_item]; synth_printf("%s", name); if (key_offsets[func] == 0) { synth_printf(" %s\n", spk_msg_get(MSG_IS_UNASSIGNED)); return 1; } p_keys = key_data + key_offsets[func]; for (n = 0; p_keys[n]; n++) { val = p_keys[n]; if (n > 0) synth_printf("%s ", spk_msg_get(MSG_DISJUNCTION)); say_key(val); } return 1; }
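/*
 * Example sketch (illustrative only): the counting-sort style bucketing
 * that build_key_data() performs, reduced to plain arrays.  Pass 1 counts
 * how many key bindings each function has and carves out one slot range
 * per function; pass 2 walks the bindings again and drops each
 * (shift_state << 8) | keycode value into its function's range.  The
 * struct below is made up, and data[] is assumed zero-initialized so each
 * bucket keeps a trailing 0 terminator, matching how spk_handle_help()
 * walks p_keys[] until it hits zero.
 */
struct example_binding {
	u_short key;	/* (shift_state << 8) | keycode */
	u_char func;	/* function index, < MAXFUNCS */
};

static void example_bucket_keys(const struct example_binding *map, int nmap,
				u_short offsets[MAXFUNCS],
				u_short data[MAXKEYS])
{
	u_char counters[MAXFUNCS] = { 0 };
	int i, offset = 1;

	for (i = 0; i < nmap; i++)		/* pass 1: count per function */
		counters[map[i].func]++;
	for (i = 0; i < MAXFUNCS; i++) {	/* one slot range per function */
		if (!counters[i])
			continue;
		offsets[i] = offset;
		offset += counters[i] + 1;	/* extra slot stays 0 = terminator */
		if (offset >= MAXKEYS)
			break;
	}
	for (i = 0; i < nmap; i++) {		/* pass 2: fill the buckets */
		u_char func = map[i].func;

		data[offsets[func] + --counters[func]] = map[i].key;
	}
}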
// SPDX-License-Identifier: MIT /* * Copyright © 2019 Intel Corporation */ #include <uapi/drm/i915_drm.h> #include "intel_memory_region.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_lmem.h" #include "i915_drv.h" void __iomem * i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, unsigned long n, unsigned long size) { resource_size_t offset; GEM_BUG_ON(!i915_gem_object_is_contiguous(obj)); offset = i915_gem_object_get_dma_address(obj, n); offset -= obj->mm.region->region.start; return io_mapping_map_wc(&obj->mm.region->iomap, offset, size); } /** * i915_gem_object_is_lmem - Whether the object is resident in * lmem * @obj: The object to check. * * Even if an object is allowed to migrate and change memory region, * this function checks whether it will always be present in lmem when * valid *or* if that's not the case, whether it's currently resident in lmem. * For migratable and evictable objects, the latter only makes sense when * the object is locked. * * Return: Whether the object migratable but resident in lmem, or not * migratable and will be present in lmem when valid. */ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef CONFIG_LOCKDEP if (i915_gem_object_migratable(obj) && i915_gem_object_evictable(obj)) assert_object_held(obj); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || mr->type == INTEL_MEMORY_STOLEN_LOCAL); } /** * __i915_gem_object_is_lmem - Whether the object is resident in * lmem while in the fence signaling critical path. * @obj: The object to check. * * This function is intended to be called from within the fence signaling * path where the fence, or a pin, keeps the object from being migrated. For * example during gpu reset or similar. * * Return: Whether the object is resident in lmem. */ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) { struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef CONFIG_LOCKDEP GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) && i915_gem_object_evictable(obj)); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || mr->type == INTEL_MEMORY_STOLEN_LOCAL); } /** * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the * minimum page size for the backing pages. * @i915: The i915 instance. * @size: The size in bytes for the object. Note that we need to round the size * up depending on the @page_size. The final object size can be fished out from * the drm GEM object. * @page_size: The requested minimum page size in bytes for this object. This is * useful if we need something bigger than the regions min_page_size due to some * hw restriction, or in some very specialised cases where it needs to be * smaller, where the internal fragmentation cost is too great when rounding up * the object size. * @flags: The optional BO allocation flags. * * Note that this interface assumes you know what you are doing when forcing the * @page_size. If this is smaller than the regions min_page_size then it can * never be inserted into any GTT, otherwise it might lead to undefined * behaviour. * * Return: The object pointer, which might be an ERR_PTR in the case of failure. 
*/ struct drm_i915_gem_object * __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915, resource_size_t size, resource_size_t page_size, unsigned int flags) { return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0], size, page_size, flags); } struct drm_i915_gem_object * i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915, const void *data, size_t size) { struct drm_i915_gem_object *obj; void *map; obj = i915_gem_object_create_lmem(i915, round_up(size, PAGE_SIZE), I915_BO_ALLOC_CONTIGUOUS); if (IS_ERR(obj)) return obj; map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(map)) { i915_gem_object_put(obj); return map; } memcpy(map, data, size); i915_gem_object_flush_map(obj); __i915_gem_object_release_map(obj); return obj; } struct drm_i915_gem_object * i915_gem_object_create_lmem(struct drm_i915_private *i915, resource_size_t size, unsigned int flags) { return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0], size, 0, flags); }
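/*
 * Example sketch (illustrative only): how a caller holding a valid
 * struct drm_i915_private might stage a small data blob in device local
 * memory with the helper above.  The function name and blob are
 * hypothetical.
 */
static struct drm_i915_gem_object *
example_upload_blob(struct drm_i915_private *i915,
		    const void *blob, size_t len)
{
	struct drm_i915_gem_object *obj;

	/* Allocates a contiguous lmem object, copies the data and flushes. */
	obj = i915_gem_object_create_lmem_from_data(i915, blob, len);
	if (IS_ERR(obj))
		return obj;	/* propagate the ERR_PTR() to the caller */

	/* ... bind/use the object; drop the reference when done ... */
	return obj;
}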
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2003-2006, Cluster File Systems, Inc, [email protected] * Written by Alex Tomas <[email protected]> */ #ifndef _EXT4_EXTENTS #define _EXT4_EXTENTS #include "ext4.h" /* * With AGGRESSIVE_TEST defined, the capacity of index/leaf blocks * becomes very small, so index split, in-depth growing and * other hard changes happen much more often. * This is for debug purposes only. */ #define AGGRESSIVE_TEST_ /* * With EXTENTS_STATS defined, the number of blocks and extents * are collected in the truncate path. They'll be shown at * umount time. */ #define EXTENTS_STATS__ /* * If CHECK_BINSEARCH is defined, then the results of the binary search * will also be checked by linear search. */ #define CHECK_BINSEARCH__ /* * If EXT_STATS is defined then stats numbers are collected. * These number will be displayed at umount time. */ #define EXT_STATS_ /* * ext4_inode has i_block array (60 bytes total). * The first 12 bytes store ext4_extent_header; * the remainder stores an array of ext4_extent. * For non-inode extent blocks, ext4_extent_tail * follows the array. */ /* * This is the extent tail on-disk structure. * All other extent structures are 12 bytes long. It turns out that * block_size % 12 >= 4 for at least all powers of 2 greater than 512, which * covers all valid ext4 block sizes. Therefore, this tail structure can be * crammed into the end of the block without having to rebalance the tree. */ struct ext4_extent_tail { __le32 et_checksum; /* crc32c(uuid+inum+extent_block) */ }; /* * This is the extent on-disk structure. * It's used at the bottom of the tree. */ struct ext4_extent { __le32 ee_block; /* first logical block extent covers */ __le16 ee_len; /* number of blocks covered by extent */ __le16 ee_start_hi; /* high 16 bits of physical block */ __le32 ee_start_lo; /* low 32 bits of physical block */ }; /* * This is index on-disk structure. * It's used at all the levels except the bottom. */ struct ext4_extent_idx { __le32 ei_block; /* index covers logical blocks from 'block' */ __le32 ei_leaf_lo; /* pointer to the physical block of the next * * level. leaf or next index could be there */ __le16 ei_leaf_hi; /* high 16 bits of physical block */ __u16 ei_unused; }; /* * Each block (leaves and indexes), even inode-stored has header. */ struct ext4_extent_header { __le16 eh_magic; /* probably will support different formats */ __le16 eh_entries; /* number of valid entries */ __le16 eh_max; /* capacity of store in entries */ __le16 eh_depth; /* has tree real underlying blocks? */ __le32 eh_generation; /* generation of the tree */ }; #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) #define EXT4_MAX_EXTENT_DEPTH 5 #define EXT4_EXTENT_TAIL_OFFSET(hdr) \ (sizeof(struct ext4_extent_header) + \ (sizeof(struct ext4_extent) * le16_to_cpu((hdr)->eh_max))) static inline struct ext4_extent_tail * find_ext4_extent_tail(struct ext4_extent_header *eh) { return (struct ext4_extent_tail *)(((void *)eh) + EXT4_EXTENT_TAIL_OFFSET(eh)); } /* * Array of ext4_ext_path contains path to some extent. * Creation/lookup routines use it for traversal/splitting/etc. * Truncate uses it to simulate recursive walking. */ struct ext4_ext_path { ext4_fsblk_t p_block; __u16 p_depth; __u16 p_maxdepth; struct ext4_extent *p_ext; struct ext4_extent_idx *p_idx; struct ext4_extent_header *p_hdr; struct buffer_head *p_bh; }; /* * Used to record a portion of a cluster found at the beginning or end * of an extent while traversing the extent tree during space removal. 
* A partial cluster may be removed if it does not contain blocks shared * with extents that aren't being deleted (tofree state). Otherwise, * it cannot be removed (nofree state). */ struct partial_cluster { ext4_fsblk_t pclu; /* physical cluster number */ ext4_lblk_t lblk; /* logical block number within logical cluster */ enum {initial, tofree, nofree} state; }; /* * structure for external API */ /* * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an * initialized extent. This is 2^15 and not (2^16 - 1), since we use the * MSB of ee_len field in the extent datastructure to signify if this * particular extent is an initialized extent or an unwritten (i.e. * preallocated). * EXT_UNWRITTEN_MAX_LEN is the maximum number of blocks we can have in an * unwritten extent. * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an * unwritten one. In other words, if MSB of ee_len is set, it is an * unwritten extent with only one special scenario when ee_len = 0x8000. * In this case we can not have an unwritten extent of zero length and * thus we make it as a special case of initialized extent with 0x8000 length. * This way we get better extent-to-group alignment for initialized extents. * Hence, the maximum number of blocks we can have in an *initialized* * extent is 2^15 (32768) and in an *unwritten* extent is 2^15-1 (32767). */ #define EXT_INIT_MAX_LEN (1UL << 15) #define EXT_UNWRITTEN_MAX_LEN (EXT_INIT_MAX_LEN - 1) #define EXT_FIRST_EXTENT(__hdr__) \ ((struct ext4_extent *) (((char *) (__hdr__)) + \ sizeof(struct ext4_extent_header))) #define EXT_FIRST_INDEX(__hdr__) \ ((struct ext4_extent_idx *) (((char *) (__hdr__)) + \ sizeof(struct ext4_extent_header))) #define EXT_HAS_FREE_INDEX(__path__) \ (le16_to_cpu((__path__)->p_hdr->eh_entries) \ < le16_to_cpu((__path__)->p_hdr->eh_max)) #define EXT_LAST_EXTENT(__hdr__) \ (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) #define EXT_LAST_INDEX(__hdr__) \ (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) #define EXT_MAX_EXTENT(__hdr__) \ ((le16_to_cpu((__hdr__)->eh_max)) ? \ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ : NULL) #define EXT_MAX_INDEX(__hdr__) \ ((le16_to_cpu((__hdr__)->eh_max)) ? \ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ : NULL) static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) { return (struct ext4_extent_header *) EXT4_I(inode)->i_data; } static inline struct ext4_extent_header *ext_block_hdr(struct buffer_head *bh) { return (struct ext4_extent_header *) bh->b_data; } static inline unsigned short ext_depth(struct inode *inode) { return le16_to_cpu(ext_inode_hdr(inode)->eh_depth); } static inline void ext4_ext_mark_unwritten(struct ext4_extent *ext) { /* We can not have an unwritten extent of zero length! */ BUG_ON((le16_to_cpu(ext->ee_len) & ~EXT_INIT_MAX_LEN) == 0); ext->ee_len |= cpu_to_le16(EXT_INIT_MAX_LEN); } static inline int ext4_ext_is_unwritten(struct ext4_extent *ext) { /* Extent with ee_len of 0x8000 is treated as an initialized extent */ return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN); } static inline int ext4_ext_get_actual_len(struct ext4_extent *ext) { return (le16_to_cpu(ext->ee_len) <= EXT_INIT_MAX_LEN ? 
le16_to_cpu(ext->ee_len) : (le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN)); } static inline void ext4_ext_mark_initialized(struct ext4_extent *ext) { ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext)); } /* * ext4_ext_pblock: * combine low and high parts of physical block number into ext4_fsblk_t */ static inline ext4_fsblk_t ext4_ext_pblock(struct ext4_extent *ex) { ext4_fsblk_t block; block = le32_to_cpu(ex->ee_start_lo); block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1; return block; } /* * ext4_idx_pblock: * combine low and high parts of a leaf physical block number into ext4_fsblk_t */ static inline ext4_fsblk_t ext4_idx_pblock(struct ext4_extent_idx *ix) { ext4_fsblk_t block; block = le32_to_cpu(ix->ei_leaf_lo); block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1; return block; } /* * ext4_ext_store_pblock: * stores a large physical block number into an extent struct, * breaking it into parts */ static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb) { ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff)); ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); } /* * ext4_idx_store_pblock: * stores a large physical block number into an index struct, * breaking it into parts */ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb) { ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff)); ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); } #endif /* _EXT4_EXTENTS */
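/*
 * Example sketch (illustrative only): the lo/hi split performed by
 * ext4_ext_store_pblock()/ext4_ext_pblock() on a made-up 48-bit physical
 * block number.  Bits 0-31 land in ee_start_lo and bits 32-47 in
 * ee_start_hi; the ((x >> 31) >> 1) / ((x << 31) << 1) form keeps each
 * shift count below 32, which matters if ext4_fsblk_t is only 32 bits wide.
 */
static inline void example_pblock_roundtrip(void)
{
	ext4_fsblk_t pb = 0x123456789abcULL;	/* hypothetical block number */
	struct ext4_extent ex = { 0 };

	ext4_ext_store_pblock(&ex, pb);
	/* now ee_start_lo == cpu_to_le32(0x56789abc), ee_start_hi == cpu_to_le16(0x1234) */
	BUG_ON(ext4_ext_pblock(&ex) != pb);	/* the split round-trips losslessly */
}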
/* SPDX-License-Identifier: GPL-2.0-or-later */ /******************************************************************************* * IBM Virtual SCSI Target Driver * Copyright (C) 2003-2005 Dave Boutcher ([email protected]) IBM Corp. * Santiago Leon ([email protected]) IBM Corp. * Linda Xie ([email protected]) IBM Corp. * * Copyright (C) 2005-2011 FUJITA Tomonori <[email protected]> * Copyright (C) 2010 Nicholas A. Bellinger <[email protected]> * Copyright (C) 2016 Bryant G. Ly <[email protected]> IBM Corp. * * Authors: Bryant G. Ly <[email protected]> * Authors: Michael Cyr <[email protected]> * ****************************************************************************/ #ifndef __H_IBMVSCSI_TGT #define __H_IBMVSCSI_TGT #include <linux/interrupt.h> #include "libsrp.h" #define SYS_ID_NAME_LEN 64 #define PARTITION_NAMELEN 96 #define IBMVSCSIS_NAMELEN 32 #define MSG_HI 0 #define MSG_LOW 1 #define MAX_CMD_Q_PAGES 4 #define CRQ_PER_PAGE (PAGE_SIZE / sizeof(struct viosrp_crq)) /* in terms of number of elements */ #define DEFAULT_CMD_Q_SIZE CRQ_PER_PAGE #define MAX_CMD_Q_SIZE (DEFAULT_CMD_Q_SIZE * MAX_CMD_Q_PAGES) #define SRP_VIOLATION 0x102 /* general error code */ /* * SRP buffer formats defined as of 16.a supported by this driver. */ #define SUPPORTED_FORMATS ((SRP_DATA_DESC_DIRECT << 1) | \ (SRP_DATA_DESC_INDIRECT << 1)) #define SCSI_LUN_ADDR_METHOD_FLAT 1 struct dma_window { u32 liobn; /* Unique per vdevice */ u64 tce_base; /* Physical location of the TCE table */ u64 tce_size; /* Size of the TCE table in bytes */ }; struct target_dds { u64 unit_id; /* 64 bit will force alignment */ #define NUM_DMA_WINDOWS 2 #define LOCAL 0 #define REMOTE 1 struct dma_window window[NUM_DMA_WINDOWS]; /* root node property "ibm,partition-no" */ uint partition_num; char partition_name[PARTITION_NAMELEN]; }; #define MAX_NUM_PORTS 1 #define MAX_H_COPY_RDMA (128 * 1024) #define MAX_EYE 64 /* Return codes */ #define ADAPT_SUCCESS 0L /* choose error codes that do not conflict with PHYP */ #define ERROR -40L struct format_code { u8 reserved; u8 buffers; }; struct client_info { #define SRP_VERSION "16.a" char srp_version[8]; /* root node property ibm,partition-name */ char partition_name[PARTITION_NAMELEN]; /* root node property ibm,partition-no */ u32 partition_number; /* initially 1 */ u32 mad_version; u32 os_type; }; /* * Changing this constant changes the number of seconds to wait before * considering the client will never service its queue again. */ #define SECONDS_TO_CONSIDER_FAILED 30 /* * These constants set the polling period used to determine if the client * has freed at least one element in the response queue. */ #define WAIT_SECONDS 1 #define WAIT_NANO_SECONDS 5000 #define MAX_TIMER_POPS ((1000000 / WAIT_NANO_SECONDS) * \ SECONDS_TO_CONSIDER_FAILED) /* * general purpose timer control block * which can be used for multiple functions */ struct timer_cb { struct hrtimer timer; /* * how long has it been since the client * serviced the queue. 
The variable is incrmented * in the service_wait_q routine and cleared * in send messages */ int timer_pops; /* the timer is started */ bool started; }; struct cmd_queue { /* kva */ struct viosrp_crq *base_addr; dma_addr_t crq_token; /* used to maintain index */ uint mask; /* current element */ uint index; int size; }; #define SCSOLNT_RESP_SHIFT 1 #define UCSOLNT_RESP_SHIFT 2 #define SCSOLNT BIT(SCSOLNT_RESP_SHIFT) #define UCSOLNT BIT(UCSOLNT_RESP_SHIFT) enum cmd_type { SCSI_CDB = 0x01, TASK_MANAGEMENT = 0x02, /* MAD or addressed to port 0 */ ADAPTER_MAD = 0x04, UNSET_TYPE = 0x08, }; struct iu_rsp { u8 format; u8 sol_not; u16 len; /* tag is just to help client identify cmd, so don't translate be/le */ u64 tag; }; struct ibmvscsis_cmd { struct list_head list; /* Used for TCM Core operations */ struct se_cmd se_cmd; struct iu_entry *iue; struct iu_rsp rsp; struct work_struct work; struct scsi_info *adapter; struct ibmvscsis_cmd *abort_cmd; /* Sense buffer that will be mapped into outgoing status */ unsigned char sense_buf[TRANSPORT_SENSE_BUFFER]; u64 init_time; #define CMD_FAST_FAIL BIT(0) #define DELAY_SEND BIT(1) u32 flags; char type; }; struct ibmvscsis_nexus { struct se_session *se_sess; }; struct ibmvscsis_tport { /* SCSI protocol the tport is providing */ u8 tport_proto_id; /* ASCII formatted WWPN for SRP Target port */ char tport_name[IBMVSCSIS_NAMELEN]; /* Returned by ibmvscsis_make_tport() */ struct se_wwn tport_wwn; /* Returned by ibmvscsis_make_tpg() */ struct se_portal_group se_tpg; /* ibmvscsis port target portal group tag for TCM */ u16 tport_tpgt; /* Pointer to TCM session for I_T Nexus */ struct ibmvscsis_nexus *ibmv_nexus; bool enabled; bool releasing; }; struct scsi_info { struct list_head list; char eye[MAX_EYE]; /* commands waiting for space on repsonse queue */ struct list_head waiting_rsp; #define NO_QUEUE 0x00 #define WAIT_ENABLED 0X01 #define WAIT_CONNECTION 0x04 /* have established a connection */ #define CONNECTED 0x08 /* at least one port is processing SRP IU */ #define SRP_PROCESSING 0x10 /* remove request received */ #define UNCONFIGURING 0x20 /* disconnect by letting adapter go idle, no error */ #define WAIT_IDLE 0x40 /* disconnecting to clear an error */ #define ERR_DISCONNECT 0x80 /* disconnect to clear error state, then come back up */ #define ERR_DISCONNECT_RECONNECT 0x100 /* disconnected after clearing an error */ #define ERR_DISCONNECTED 0x200 /* A series of errors caused unexpected errors */ #define UNDEFINED 0x400 u16 state; int fast_fail; struct target_dds dds; char *cmd_pool; /* list of free commands */ struct list_head free_cmd; /* command elements ready for scheduler */ struct list_head schedule_q; /* commands sent to TCM */ struct list_head active_q; caddr_t *map_buf; /* ioba of map buffer */ dma_addr_t map_ioba; /* allowable number of outstanding SRP requests */ int request_limit; /* extra credit */ int credit; /* outstanding transactions against credit limit */ int debit; /* allow only one outstanding mad request */ #define PROCESSING_MAD 0x00002 /* Waiting to go idle */ #define WAIT_FOR_IDLE 0x00004 /* H_REG_CRQ called */ #define CRQ_CLOSED 0x00010 /* detected that client has failed */ #define CLIENT_FAILED 0x00040 /* detected that transport event occurred */ #define TRANS_EVENT 0x00080 /* don't attempt to send anything to the client */ #define RESPONSE_Q_DOWN 0x00100 /* request made to schedule disconnect handler */ #define SCHEDULE_DISCONNECT 0x00400 /* disconnect handler is scheduled */ #define DISCONNECT_SCHEDULED 0x00800 /* remove function 
is sleeping */ #define CFG_SLEEPING 0x01000 /* Register for Prepare for Suspend Transport Events */ #define PREP_FOR_SUSPEND_ENABLED 0x02000 /* Prepare for Suspend event sent */ #define PREP_FOR_SUSPEND_PENDING 0x04000 /* Resume from Suspend event sent */ #define PREP_FOR_SUSPEND_ABORTED 0x08000 /* Prepare for Suspend event overwrote another CRQ entry */ #define PREP_FOR_SUSPEND_OVERWRITE 0x10000 u32 flags; /* adapter lock */ spinlock_t intr_lock; /* information needed to manage command queue */ struct cmd_queue cmd_q; /* used in hcall to copy response back into srp buffer */ u64 empty_iu_id; /* used in crq, to tag what iu the response is for */ u64 empty_iu_tag; uint new_state; uint resume_state; /* control block for the response queue timer */ struct timer_cb rsp_q_timer; /* keep last client to enable proper accounting */ struct client_info client_data; /* what can this client do */ u32 client_cap; /* * The following two fields capture state and flag changes that * can occur when the lock is given up. In the orginal design, * the lock was held during calls into phyp; * however, phyp did not meet PAPR architecture. This is * a work around. */ u16 phyp_acr_state; u32 phyp_acr_flags; struct workqueue_struct *work_q; struct completion wait_idle; struct completion unconfig; struct device dev; struct vio_dev *dma_dev; struct srp_target target; struct ibmvscsis_tport tport; struct tasklet_struct work_task; struct work_struct proc_work; }; /* * Provide a constant that allows software to detect the adapter is * disconnecting from the client from one of several states. */ #define IS_DISCONNECTING (UNCONFIGURING | ERR_DISCONNECT_RECONNECT | \ ERR_DISCONNECT) /* * Provide a constant that can be used with interrupt handling that * essentially lets the interrupt handler know that all requests should * be thrown out, */ #define DONT_PROCESS_STATE (IS_DISCONNECTING | UNDEFINED | \ ERR_DISCONNECTED | WAIT_IDLE) /* * If any of these flag bits are set then do not allow the interrupt * handler to schedule the off level handler. */ #define BLOCK (DISCONNECT_SCHEDULED) /* State and transition events that stop the interrupt handler */ #define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \ ((VSCSI)->flags & BLOCK)) #define PREP_FOR_SUSPEND_FLAGS (PREP_FOR_SUSPEND_ENABLED | \ PREP_FOR_SUSPEND_PENDING | \ PREP_FOR_SUSPEND_ABORTED | \ PREP_FOR_SUSPEND_OVERWRITE) /* flag bit that are not reset during disconnect */ #define PRESERVE_FLAG_FIELDS (PREP_FOR_SUSPEND_FLAGS) #define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf)) #define READ_CMD(cdb) (((cdb)[0] & 0x1F) == 8) #define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA) #ifndef H_GET_PARTNER_INFO #define H_GET_PARTNER_INFO 0x0000000000000008LL #endif #ifndef H_ENABLE_PREPARE_FOR_SUSPEND #define H_ENABLE_PREPARE_FOR_SUSPEND 0x000000000000001DLL #endif #ifndef H_READY_FOR_SUSPEND #define H_READY_FOR_SUSPEND 0x000000000000001ELL #endif #define h_copy_rdma(l, sa, sb, da, db) \ plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db) #define h_vioctl(u, o, a, u1, u2, u3, u4) \ plpar_hcall_norets(H_VIOCTL, u, o, a, u1, u2) #define h_reg_crq(ua, tok, sz) \ plpar_hcall_norets(H_REG_CRQ, ua, tok, sz) #define h_free_crq(ua) \ plpar_hcall_norets(H_FREE_CRQ, ua) #define h_send_crq(ua, d1, d2) \ plpar_hcall_norets(H_SEND_CRQ, ua, d1, d2) #endif
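/*
 * Example sketch (illustrative only): how an interrupt handler is meant to
 * use TARGET_STOP() above - every request is thrown out while the adapter
 * is disconnecting (or undefined), or while the off-level disconnect
 * handler is already scheduled.  The handler name and the way the adapter
 * pointer is obtained are hypothetical.
 */
static irqreturn_t example_intr(int irq, void *data)
{
	struct scsi_info *vscsi = data;

	if (TARGET_STOP(vscsi))
		return IRQ_HANDLED;	/* drop the work */

	/* Otherwise defer the real processing to the per-adapter tasklet. */
	tasklet_schedule(&vscsi->work_task);
	return IRQ_HANDLED;
}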
/* SPDX-License-Identifier: GPL-2.0 * * Header file for the CDX Bus * * Copyright (C) 2022-2023, Advanced Micro Devices, Inc. */ #ifndef _CDX_H_ #define _CDX_H_ #include <linux/cdx/cdx_bus.h> /** * struct cdx_dev_params - CDX device parameters * @cdx: CDX controller associated with the device * @parent: Associated CDX Bus device * @vendor: Vendor ID for CDX device * @device: Device ID for CDX device * @subsys_vendor: Sub vendor ID for CDX device * @subsys_device: Sub device ID for CDX device * @bus_num: Bus number for this CDX device * @dev_num: Device number for this device * @res: array of MMIO region entries * @res_count: number of valid MMIO regions * @req_id: Requestor ID associated with CDX device * @class: Class of the CDX Device * @revision: Revision of the CDX device * @msi_dev_id: MSI device ID associated with CDX device * @num_msi: Number of MSI's supported by the device */ struct cdx_dev_params { struct cdx_controller *cdx; struct device *parent; u16 vendor; u16 device; u16 subsys_vendor; u16 subsys_device; u8 bus_num; u8 dev_num; struct resource res[MAX_CDX_DEV_RESOURCES]; u8 res_count; u32 req_id; u32 class; u8 revision; u32 msi_dev_id; u32 num_msi; }; /** * cdx_register_controller - Register a CDX controller and its ports * on the CDX bus. * @cdx: The CDX controller to register * * Return: -errno on failure, 0 on success. */ int cdx_register_controller(struct cdx_controller *cdx); /** * cdx_unregister_controller - Unregister a CDX controller * @cdx: The CDX controller to unregister */ void cdx_unregister_controller(struct cdx_controller *cdx); /** * cdx_device_add - Add a CDX device. This function adds a CDX device * on the CDX bus as per the device parameters provided * by caller. It also creates and registers an associated * Linux generic device. * @dev_params: device parameters associated with the device to be created. * * Return: -errno on failure, 0 on success. */ int cdx_device_add(struct cdx_dev_params *dev_params); /** * cdx_bus_add - Add a CDX bus. This function adds a bus on the CDX bus * subsystem. It creates a CDX device for the corresponding bus and * also registers an associated Linux generic device. * @cdx: Associated CDX controller * @bus_num: Bus number * * Return: associated Linux generic device pointer on success or NULL on failure. */ struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num); /** * cdx_msi_domain_init - Init the CDX bus MSI domain. * @dev: Device of the CDX bus controller * * Return: CDX MSI domain, NULL on failure */ struct irq_domain *cdx_msi_domain_init(struct device *dev); #endif /* _CDX_H_ */
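/*
 * Example sketch (illustrative only): how a controller driver might
 * describe one discovered device and hand it to the bus core via
 * cdx_device_add().  The numeric IDs and the single MMIO window are made
 * up; a real caller fills them from its firmware/controller query.
 * DEFINE_RES_MEM() is assumed available via linux/ioport.h.
 */
static int example_add_cdx_device(struct cdx_controller *cdx,
				  struct device *parent,
				  u8 bus_num, u8 dev_num)
{
	struct cdx_dev_params params = {
		.cdx = cdx,
		.parent = parent,
		.vendor = 0x10ee,	/* hypothetical vendor ID */
		.device = 0x0001,	/* hypothetical device ID */
		.bus_num = bus_num,
		.dev_num = dev_num,
		.res_count = 1,
	};

	/* made-up MMIO window for the device */
	params.res[0] = DEFINE_RES_MEM(0xa0000000, 0x10000);

	return cdx_device_add(&params);	/* 0 on success, -errno on failure */
}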
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) * Google virtual Ethernet (gve) driver * * Copyright (C) 2015-2021 Google, Inc. */ /* GVE DQO Descriptor formats */ #ifndef _GVE_DESC_DQO_H_ #define _GVE_DESC_DQO_H_ #include <linux/build_bug.h> #define GVE_TX_MAX_HDR_SIZE_DQO 255 #define GVE_TX_MIN_TSO_MSS_DQO 88 #ifndef __LITTLE_ENDIAN_BITFIELD #error "Only little endian supported" #endif /* Basic TX descriptor (DTYPE 0x0C) */ struct gve_tx_pkt_desc_dqo { __le64 buf_addr; /* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */ u8 dtype: 5; /* Denotes the last descriptor of a packet. */ u8 end_of_packet: 1; u8 checksum_offload_enable: 1; /* If set, will generate a descriptor completion for this descriptor. */ u8 report_event: 1; u8 reserved0; __le16 reserved1; /* The TX completion associated with this packet will contain this tag. */ __le16 compl_tag; u16 buf_size: 14; u16 reserved2: 2; } __packed; static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16); #define GVE_TX_PKT_DESC_DTYPE_DQO 0xc #define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1) /* Maximum number of data descriptors allowed per packet, or per-TSO segment. */ #define GVE_TX_MAX_DATA_DESCS 10 /* Min gap between tail and head to avoid cacheline overlap */ #define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4 /* "report_event" on TX packet descriptors may only be reported on the last * descriptor of a TX packet, and they must be spaced apart with at least this * value. */ #define GVE_TX_MIN_RE_INTERVAL 32 struct gve_tx_context_cmd_dtype { u8 dtype: 5; u8 tso: 1; u8 reserved1: 2; u8 reserved2; }; static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2); /* TX Native TSO Context DTYPE (0x05) * * "flex" fields allow the driver to send additional packet context to HW. */ struct gve_tx_tso_context_desc_dqo { /* The L4 payload bytes that should be segmented. */ u32 tso_total_len: 24; u32 flex10: 8; /* Max segment size in TSO excluding headers. */ u16 mss: 14; u16 reserved: 2; u8 header_len; /* Header length to use for TSO offload */ u8 flex11; struct gve_tx_context_cmd_dtype cmd_dtype; u8 flex0; u8 flex5; u8 flex6; u8 flex7; u8 flex8; u8 flex9; } __packed; static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16); #define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5 /* General context descriptor for sending metadata. */ struct gve_tx_general_context_desc_dqo { u8 flex4; u8 flex5; u8 flex6; u8 flex7; u8 flex8; u8 flex9; u8 flex10; u8 flex11; struct gve_tx_context_cmd_dtype cmd_dtype; u16 reserved; u8 flex0; u8 flex1; u8 flex2; u8 flex3; } __packed; static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16); #define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4 /* Logical structure of metadata which is packed into context descriptor flex * fields. */ struct gve_tx_metadata_dqo { union { struct { u8 version; /* If `skb->l4_hash` is set, this value should be * derived from `skb->hash`. * * A zero value means no l4_hash was associated with the * skb. */ u16 path_hash: 15; /* Should be set to 1 if the flow associated with the * skb had a rehash from the TCP stack. */ u16 rehash_event: 1; } __packed; u8 bytes[12]; }; } __packed; static_assert(sizeof(struct gve_tx_metadata_dqo) == 12); #define GVE_TX_METADATA_VERSION_DQO 0 /* TX completion descriptor */ struct gve_tx_compl_desc { /* For types 0-4 this is the TX queue ID associated with this * completion. */ u16 id: 11; /* See: GVE_COMPL_TYPE_DQO* */ u16 type: 3; u16 reserved0: 1; /* Flipped by HW to notify the descriptor is populated. 
*/ u16 generation: 1; union { /* For descriptor completions, this is the last index fetched * by HW + 1. */ __le16 tx_head; /* For packet completions, this is the completion tag set on the * TX packet descriptors. */ __le16 completion_tag; }; __le32 reserved1; } __packed; static_assert(sizeof(struct gve_tx_compl_desc) == 8); #define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */ #define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */ #define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */ #define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */ /* The most significant bit in the completion tag can change the completion * type from packet completion to miss path completion. */ #define GVE_ALT_MISS_COMPL_BIT BIT(15) /* Descriptor to post buffers to HW on buffer queue. */ struct gve_rx_desc_dqo { __le16 buf_id; /* ID returned in Rx completion descriptor */ __le16 reserved0; __le32 reserved1; __le64 buf_addr; /* DMA address of the buffer */ __le64 header_buf_addr; __le64 reserved2; } __packed; static_assert(sizeof(struct gve_rx_desc_dqo) == 32); /* Descriptor for HW to notify SW of new packets received on RX queue. */ struct gve_rx_compl_desc_dqo { /* Must be 1 */ u8 rxdid: 4; u8 reserved0: 4; /* Packet originated from this system rather than the network. */ u8 loopback: 1; /* Set when IPv6 packet contains a destination options header or routing * header. */ u8 ipv6_ex_add: 1; /* Invalid packet was received. */ u8 rx_error: 1; u8 reserved1: 5; u16 packet_type: 10; u16 ip_hdr_err: 1; u16 udp_len_err: 1; u16 raw_cs_invalid: 1; u16 reserved2: 3; u16 packet_len: 14; /* Flipped by HW to notify the descriptor is populated. */ u16 generation: 1; /* Should be zero. */ u16 buffer_queue_id: 1; u16 header_len: 10; u16 rsc: 1; u16 split_header: 1; u16 reserved3: 4; u8 descriptor_done: 1; u8 end_of_packet: 1; u8 header_buffer_overflow: 1; u8 l3_l4_processed: 1; u8 csum_ip_err: 1; u8 csum_l4_err: 1; u8 csum_external_ip_err: 1; u8 csum_external_udp_err: 1; u8 status_error1; __le16 reserved5; __le16 buf_id; /* Buffer ID which was sent on the buffer queue. */ union { /* Packet checksum. */ __le16 raw_cs; /* Segment length for RSC packets. */ __le16 rsc_seg_len; }; __le32 hash; __le32 reserved6; __le64 reserved7; } __packed; static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32); /* Ringing the doorbell too often can hurt performance. * * HW requires this value to be at least 8. */ #define GVE_RX_BUF_THRESH_DQO 32 #endif /* _GVE_DESC_DQO_H_ */
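/*
 * Example sketch (illustrative only): how a single-buffer TX packet could
 * be described with the DQO packet descriptor layout above.  The DMA
 * address, length and completion tag are made up; a real driver takes them
 * from its TX ring bookkeeping, and the byte-order helpers are assumed to
 * be available.
 */
static inline void example_fill_tx_desc(struct gve_tx_pkt_desc_dqo *desc,
					dma_addr_t addr, u16 len, u16 tag)
{
	*desc = (struct gve_tx_pkt_desc_dqo) {
		.buf_addr = cpu_to_le64(addr),
		.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
		.end_of_packet = 1,		/* only buffer -> last descriptor */
		.checksum_offload_enable = 1,
		.compl_tag = cpu_to_le16(tag),	/* echoed back in the completion */
		.buf_size = len,		/* must be <= GVE_TX_MAX_BUF_SIZE_DQO */
	};
}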
// SPDX-License-Identifier: GPL-2.0-only /* * nicstar.c * * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. * * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME. * It was taken from the frle-0.22 device driver. * As the file doesn't have a copyright notice, in the file * nicstarmac.copyright I put the copyright notice from the * frle-0.22 device driver. * Some code is based on the nicstar driver by M. Welsh. * * Author: Rui Prior ([email protected]) * PowerPC support by Jay Talbott ([email protected]) April 1999 * * * (C) INESC 1999 */ /* * IMPORTANT INFORMATION * * There are currently three types of spinlocks: * * 1 - Per card interrupt spinlock (to protect structures and such) * 2 - Per SCQ scq spinlock * 3 - Per card resource spinlock (to access registers, etc.) * * These must NEVER be grabbed in reverse order. * */ /* Header files */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/atmdev.h> #include <linux/atm.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/idr.h> #include <asm/io.h> #include <linux/uaccess.h> #include <linux/atomic.h> #include <linux/etherdevice.h> #include "nicstar.h" #ifdef CONFIG_ATM_NICSTAR_USE_SUNI #include "suni.h" #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 #include "idt77105.h" #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ /* Additional code */ #include "nicstarmac.c" /* Configurable parameters */ #undef PHY_LOOPBACK #undef TX_DEBUG #undef RX_DEBUG #undef GENERAL_DEBUG #undef EXTRA_DEBUG /* Do not touch these */ #ifdef TX_DEBUG #define TXPRINTK(args...) printk(args) #else #define TXPRINTK(args...) #endif /* TX_DEBUG */ #ifdef RX_DEBUG #define RXPRINTK(args...) printk(args) #else #define RXPRINTK(args...) #endif /* RX_DEBUG */ #ifdef GENERAL_DEBUG #define PRINTK(args...) printk(args) #else #define PRINTK(args...) do {} while (0) #endif /* GENERAL_DEBUG */ #ifdef EXTRA_DEBUG #define XPRINTK(args...) printk(args) #else #define XPRINTK(args...) 
#endif /* EXTRA_DEBUG */ /* Macros */ #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) #define NS_DELAY mdelay(1) #define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b))) #ifndef ATM_SKB #define ATM_SKB(s) (&(s)->atm) #endif #define scq_virt_to_bus(scq, p) \ (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org)) /* Function declarations */ static u32 ns_read_sram(ns_dev * card, u32 sram_address); static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, int count); static int ns_init_card(int i, struct pci_dev *pcidev); static void ns_init_card_error(ns_dev * card, int error); static scq_info *get_scq(ns_dev *card, int size, u32 scd); static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc); static void push_rxbufs(ns_dev *, struct sk_buff *); static irqreturn_t ns_irq_handler(int irq, void *dev_id); static int ns_open(struct atm_vcc *vcc); static void ns_close(struct atm_vcc *vcc); static void fill_tst(ns_dev * card, int n, vc_map * vc); static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); static int ns_send_bh(struct atm_vcc *vcc, struct sk_buff *skb); static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, struct sk_buff *skb, bool may_sleep); static void process_tsq(ns_dev * card); static void drain_scq(ns_dev * card, scq_info * scq, int pos); static void process_rsq(ns_dev * card); static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count); static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb); static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb); static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb); static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page); static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg); #ifdef EXTRA_DEBUG static void which_list(ns_dev * card, struct sk_buff *skb); #endif static void ns_poll(struct timer_list *unused); static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr); static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); /* Global variables */ static struct ns_dev *cards[NS_MAX_CARDS]; static unsigned num_cards; static const struct atmdev_ops atm_ops = { .open = ns_open, .close = ns_close, .ioctl = ns_ioctl, .send = ns_send, .send_bh = ns_send_bh, .phy_put = ns_phy_put, .phy_get = ns_phy_get, .proc_read = ns_proc_read, .owner = THIS_MODULE, }; static struct timer_list ns_timer; static char *mac[NS_MAX_CARDS]; module_param_array(mac, charp, NULL, 0); MODULE_DESCRIPTION("ATM NIC driver for IDT 77201/77211 \"NICStAR\" and Fore ForeRunnerLE."); MODULE_LICENSE("GPL"); /* Functions */ static int nicstar_init_one(struct pci_dev *pcidev, const struct pci_device_id *ent) { static int index = -1; unsigned int error; index++; cards[index] = NULL; error = ns_init_card(index, pcidev); if (error) { cards[index--] = NULL; /* don't increment index */ goto err_out; } return 0; err_out: return -ENODEV; } static void nicstar_remove_one(struct pci_dev *pcidev) { int i, j; ns_dev *card = pci_get_drvdata(pcidev); struct sk_buff *hb; struct sk_buff *iovb; struct sk_buff *lb; struct sk_buff *sb; i = card->index; if (cards[i] == NULL) return; if (card->atmdev->phy && card->atmdev->phy->stop) card->atmdev->phy->stop(card->atmdev); /* Stop everything */ writel(0x00000000, card->membase + CFG); /* De-register device */ 
atm_dev_deregister(card->atmdev); /* Disable PCI device */ pci_disable_device(pcidev); /* Free up resources */ j = 0; PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { dev_kfree_skb_any(hb); j++; } PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); j = 0; PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count); while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { dev_kfree_skb_any(iovb); j++; } PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) dev_kfree_skb_any(lb); while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) dev_kfree_skb_any(sb); free_scq(card, card->scq0, NULL); for (j = 0; j < NS_FRSCD_NUM; j++) { if (card->scd2vc[j] != NULL) free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); } idr_destroy(&card->idr); dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, card->rsq.org, card->rsq.dma); dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, card->tsq.org, card->tsq.dma); free_irq(card->pcidev->irq, card); iounmap(card->membase); kfree(card); } static const struct pci_device_id nicstar_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 }, {0,} /* terminate list */ }; MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); static struct pci_driver nicstar_driver = { .name = "nicstar", .id_table = nicstar_pci_tbl, .probe = nicstar_init_one, .remove = nicstar_remove_one, }; static int __init nicstar_init(void) { unsigned error = 0; /* Initialized to remove compile warning */ XPRINTK("nicstar: nicstar_init() called.\n"); error = pci_register_driver(&nicstar_driver); TXPRINTK("nicstar: TX debug enabled.\n"); RXPRINTK("nicstar: RX debug enabled.\n"); PRINTK("nicstar: General debug enabled.\n"); #ifdef PHY_LOOPBACK printk("nicstar: using PHY loopback.\n"); #endif /* PHY_LOOPBACK */ XPRINTK("nicstar: nicstar_init() returned.\n"); if (!error) { timer_setup(&ns_timer, ns_poll, 0); ns_timer.expires = jiffies + NS_POLL_PERIOD; add_timer(&ns_timer); } return error; } static void __exit nicstar_cleanup(void) { XPRINTK("nicstar: nicstar_cleanup() called.\n"); del_timer_sync(&ns_timer); pci_unregister_driver(&nicstar_driver); XPRINTK("nicstar: nicstar_cleanup() returned.\n"); } static u32 ns_read_sram(ns_dev * card, u32 sram_address) { unsigned long flags; u32 data; sram_address <<= 2; sram_address &= 0x0007FFFC; /* address must be dword aligned */ sram_address |= 0x50000000; /* SRAM read command */ spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(sram_address, card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0); spin_unlock_irqrestore(&card->res_lock, flags); return data; } static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, int count) { unsigned long flags; int i, c; count--; /* count range now is 0..3 instead of 1..4 */ c = count; c <<= 2; /* to use increments of 4 */ spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; for (i = 0; i <= c; i += 4) writel(*(value++), card->membase + i); /* Note: DR# registers are the first 4 dwords in nicstar's memspace, so card->membase + DR0 == card->membase */ sram_address <<= 2; sram_address &= 0x0007FFFC; sram_address |= (0x40000000 | count); writel(sram_address, card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); } static int ns_init_card(int i, struct pci_dev *pcidev) { int j; struct ns_dev *card = NULL; unsigned char pci_latency; unsigned 
error; u32 data; u32 u32d[4]; u32 ns_cfg_rctsize; int bcount; unsigned long membase; error = 0; if (pci_enable_device(pcidev)) { printk("nicstar%d: can't enable PCI device\n", i); error = 2; ns_init_card_error(card, error); return error; } if (dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)) != 0) { printk(KERN_WARNING "nicstar%d: No suitable DMA available.\n", i); error = 2; ns_init_card_error(card, error); return error; } card = kmalloc(sizeof(*card), GFP_KERNEL); if (!card) { printk ("nicstar%d: can't allocate memory for device structure.\n", i); error = 2; ns_init_card_error(card, error); return error; } cards[i] = card; spin_lock_init(&card->int_lock); spin_lock_init(&card->res_lock); pci_set_drvdata(pcidev, card); card->index = i; card->atmdev = NULL; card->pcidev = pcidev; membase = pci_resource_start(pcidev, 1); card->membase = ioremap(membase, NS_IOREMAP_SIZE); if (!card->membase) { printk("nicstar%d: can't ioremap() membase.\n", i); error = 3; ns_init_card_error(card, error); return error; } PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase); pci_set_master(pcidev); if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) { printk("nicstar%d: can't read PCI latency timer.\n", i); error = 6; ns_init_card_error(card, error); return error; } #ifdef NS_PCI_LATENCY if (pci_latency < NS_PCI_LATENCY) { PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY); for (j = 1; j < 4; j++) { if (pci_write_config_byte (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) break; } if (j == 4) { printk ("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY); error = 7; ns_init_card_error(card, error); return error; } } #endif /* NS_PCI_LATENCY */ /* Clear timer overflow */ data = readl(card->membase + STAT); if (data & NS_STAT_TMROF) writel(NS_STAT_TMROF, card->membase + STAT); /* Software reset */ writel(NS_CFG_SWRST, card->membase + CFG); NS_DELAY; writel(0x00000000, card->membase + CFG); /* PHY reset */ writel(0x00000008, card->membase + GP); NS_DELAY; writel(0x00000001, card->membase + GP); NS_DELAY; while (CMD_BUSY(card)) ; writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ NS_DELAY; /* Detect PHY type */ while (CMD_BUSY(card)) ; writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0); switch (data) { case 0x00000009: printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); card->max_pcr = ATM_25_PCR; while (CMD_BUSY(card)) ; writel(0x00000008, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); /* Clear an eventual pending interrupt */ writel(NS_STAT_SFBQF, card->membase + STAT); #ifdef PHY_LOOPBACK while (CMD_BUSY(card)) ; writel(0x00000022, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); #endif /* PHY_LOOPBACK */ break; case 0x00000030: case 0x00000031: printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); card->max_pcr = ATM_OC3_PCR; #ifdef PHY_LOOPBACK while (CMD_BUSY(card)) ; writel(0x00000002, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); #endif /* PHY_LOOPBACK */ break; default: printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); error = 8; ns_init_card_error(card, error); return error; } writel(0x00000000, card->membase + GP); /* Determine SRAM size */ data = 0x76543210; ns_write_sram(card, 0x1C003, &data, 1); data = 0x89ABCDEF; ns_write_sram(card, 0x14003, &data, 1); if (ns_read_sram(card, 0x14003) == 
0x89ABCDEF && ns_read_sram(card, 0x1C003) == 0x76543210) card->sram_size = 128; else card->sram_size = 32; PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); card->rct_size = NS_MAX_RCTSIZE; #if (NS_MAX_RCTSIZE == 4096) if (card->sram_size == 128) printk ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i); #elif (NS_MAX_RCTSIZE == 16384) if (card->sram_size == 32) { printk ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i); card->rct_size = 4096; } #else #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c #endif card->vpibits = NS_VPIBITS; if (card->rct_size == 4096) card->vcibits = 12 - NS_VPIBITS; else /* card->rct_size == 16384 */ card->vcibits = 14 - NS_VPIBITS; /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ if (mac[i] == NULL) nicstar_init_eprom(card->membase); /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ writel(0x00000000, card->membase + VPM); card->intcnt = 0; if (request_irq (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); error = 9; ns_init_card_error(card, error); return error; } /* Initialize TSQ */ card->tsq.org = dma_alloc_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, &card->tsq.dma, GFP_KERNEL); if (card->tsq.org == NULL) { printk("nicstar%d: can't allocate TSQ.\n", i); error = 10; ns_init_card_error(card, error); return error; } card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT); card->tsq.next = card->tsq.base; card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) ns_tsi_init(card->tsq.base + j); writel(0x00000000, card->membase + TSQH); writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB); PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base); /* Initialize RSQ */ card->rsq.org = dma_alloc_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, &card->rsq.dma, GFP_KERNEL); if (card->rsq.org == NULL) { printk("nicstar%d: can't allocate RSQ.\n", i); error = 11; ns_init_card_error(card, error); return error; } card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT); card->rsq.next = card->rsq.base; card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) ns_rsqe_init(card->rsq.base + j); writel(0x00000000, card->membase + RSQH); writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB); PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base); /* Initialize SCQ0, the only VBR SCQ used */ card->scq1 = NULL; card->scq2 = NULL; card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0); if (card->scq0 == NULL) { printk("nicstar%d: can't get SCQ0.\n", i); error = 12; ns_init_card_error(card, error); return error; } u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base); u32d[1] = (u32) 0x00000000; u32d[2] = (u32) 0xffffffff; u32d[3] = (u32) 0x00000000; ns_write_sram(card, NS_VRSCD0, u32d, 4); ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... 
*/ card->scq0->scd = NS_VRSCD0; PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base); /* Initialize TSTs */ card->tst_addr = NS_TST0; card->tst_free_entries = NS_TST_NUM_ENTRIES; data = NS_TST_OPCODE_VARIABLE; for (j = 0; j < NS_TST_NUM_ENTRIES; j++) ns_write_sram(card, NS_TST0 + j, &data, 1); data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); for (j = 0; j < NS_TST_NUM_ENTRIES; j++) ns_write_sram(card, NS_TST1 + j, &data, 1); data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); for (j = 0; j < NS_TST_NUM_ENTRIES; j++) card->tste2vc[j] = NULL; writel(NS_TST0 << 2, card->membase + TSTB); /* Initialize RCT. AAL type is set on opening the VC. */ #ifdef RCQ_SUPPORT u32d[0] = NS_RCTE_RAWCELLINTEN; #else u32d[0] = 0x00000000; #endif /* RCQ_SUPPORT */ u32d[1] = 0x00000000; u32d[2] = 0x00000000; u32d[3] = 0xFFFFFFFF; for (j = 0; j < card->rct_size; j++) ns_write_sram(card, j * 4, u32d, 4); memset(card->vcmap, 0, sizeof(card->vcmap)); for (j = 0; j < NS_FRSCD_NUM; j++) card->scd2vc[j] = NULL; /* Initialize buffer levels */ card->sbnr.min = MIN_SB; card->sbnr.init = NUM_SB; card->sbnr.max = MAX_SB; card->lbnr.min = MIN_LB; card->lbnr.init = NUM_LB; card->lbnr.max = MAX_LB; card->iovnr.min = MIN_IOVB; card->iovnr.init = NUM_IOVB; card->iovnr.max = MAX_IOVB; card->hbnr.min = MIN_HB; card->hbnr.init = NUM_HB; card->hbnr.max = MAX_HB; card->sm_handle = NULL; card->sm_addr = 0x00000000; card->lg_handle = NULL; card->lg_addr = 0x00000000; card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ idr_init(&card->idr); /* Pre-allocate some huge buffers */ skb_queue_head_init(&card->hbpool.queue); card->hbpool.count = 0; for (j = 0; j < NUM_HB; j++) { struct sk_buff *hb; hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) { printk ("nicstar%d: can't allocate %dth of %d huge buffers.\n", i, j, NUM_HB); error = 13; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(hb) = BUF_NONE; skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } /* Allocate large buffers */ skb_queue_head_init(&card->lbpool.queue); card->lbpool.count = 0; /* Not used */ for (j = 0; j < NUM_LB; j++) { struct sk_buff *lb; lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); if (lb == NULL) { printk ("nicstar%d: can't allocate %dth of %d large buffers.\n", i, j, NUM_LB); error = 14; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); /* Due to the implementation of push_rxbufs() this is 1, not 0 */ if (j == 1) { card->rcbuf = lb; card->rawcell = (struct ns_rcqe *) lb->data; card->rawch = NS_PRV_DMA(lb); } } /* Test for strange behaviour which leads to crashes */ if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { printk ("nicstar%d: Strange... 
Just allocated %d large buffers and lfbqc = %d.\n", i, j, bcount); error = 14; ns_init_card_error(card, error); return error; } /* Allocate small buffers */ skb_queue_head_init(&card->sbpool.queue); card->sbpool.count = 0; /* Not used */ for (j = 0; j < NUM_SB; j++) { struct sk_buff *sb; sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) { printk ("nicstar%d: can't allocate %dth of %d small buffers.\n", i, j, NUM_SB); error = 15; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } /* Test for strange behaviour which leads to crashes */ if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { printk ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", i, j, bcount); error = 15; ns_init_card_error(card, error); return error; } /* Allocate iovec buffers */ skb_queue_head_init(&card->iovpool.queue); card->iovpool.count = 0; for (j = 0; j < NUM_IOVB; j++) { struct sk_buff *iovb; iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); if (iovb == NULL) { printk ("nicstar%d: can't allocate %dth of %d iovec buffers.\n", i, j, NUM_IOVB); error = 16; ns_init_card_error(card, error); return error; } NS_PRV_BUFTYPE(iovb) = BUF_NONE; skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; } /* Configure NICStAR */ if (card->rct_size == 4096) ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; else /* (card->rct_size == 16384) */ ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; card->efbie = 1; /* Register device */ card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, -1, NULL); if (card->atmdev == NULL) { printk("nicstar%d: can't register device.\n", i); error = 17; ns_init_card_error(card, error); return error; } if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, card->atmdev->esi, 6); if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, card->atmdev->esi, 6); } } printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); card->atmdev->dev_data = card; card->atmdev->ci_range.vpi_bits = card->vpibits; card->atmdev->ci_range.vci_bits = card->vcibits; card->atmdev->link_rate = card->max_pcr; card->atmdev->phy = NULL; #ifdef CONFIG_ATM_NICSTAR_USE_SUNI if (card->max_pcr == ATM_OC3_PCR) suni_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 if (card->max_pcr == ATM_25_PCR) idt77105_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ if (card->atmdev->phy && card->atmdev->phy->start) card->atmdev->phy->start(card->atmdev); writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ NS_CFG_PHYIE, card->membase + CFG); num_cards++; return error; } static void ns_init_card_error(ns_dev *card, int error) { if (error >= 17) { writel(0x00000000, card->membase + CFG); } if (error >= 16) { struct sk_buff *iovb; while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) dev_kfree_skb_any(iovb); } if (error >= 15) { struct sk_buff *sb; while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) dev_kfree_skb_any(sb); free_scq(card, card->scq0, NULL); } if (error >= 
14) { struct sk_buff *lb; while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) dev_kfree_skb_any(lb); } if (error >= 13) { struct sk_buff *hb; while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) dev_kfree_skb_any(hb); } if (error >= 12) { dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, card->rsq.org, card->rsq.dma); } if (error >= 11) { dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, card->tsq.org, card->tsq.dma); } if (error >= 10) { free_irq(card->pcidev->irq, card); } if (error >= 4) { iounmap(card->membase); } if (error >= 3) { pci_disable_device(card->pcidev); kfree(card); } } static scq_info *get_scq(ns_dev *card, int size, u32 scd) { scq_info *scq; if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) return NULL; scq = kmalloc(sizeof(*scq), GFP_KERNEL); if (!scq) return NULL; scq->org = dma_alloc_coherent(&card->pcidev->dev, 2 * size, &scq->dma, GFP_KERNEL); if (!scq->org) { kfree(scq); return NULL; } scq->skb = kcalloc(size / NS_SCQE_SIZE, sizeof(*scq->skb), GFP_KERNEL); if (!scq->skb) { dma_free_coherent(&card->pcidev->dev, 2 * size, scq->org, scq->dma); kfree(scq); return NULL; } scq->num_entries = size / NS_SCQE_SIZE; scq->base = PTR_ALIGN(scq->org, size); scq->next = scq->base; scq->last = scq->base + (scq->num_entries - 1); scq->tail = scq->last; scq->scd = scd; scq->tbd_count = 0; init_waitqueue_head(&scq->scqfull_waitq); scq->full = 0; spin_lock_init(&scq->lock); return scq; } /* For variable rate SCQ vcc must be NULL */ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc) { int i; if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] != NULL) { vcc = ATM_SKB(scq->skb[i])->vcc; if (vcc->pop != NULL) vcc->pop(vcc, scq->skb[i]); else dev_kfree_skb_any(scq->skb[i]); } } else { /* vcc must be != NULL */ if (vcc == NULL) { printk ("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); for (i = 0; i < scq->num_entries; i++) dev_kfree_skb_any(scq->skb[i]); } else for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] != NULL) { if (vcc->pop != NULL) vcc->pop(vcc, scq->skb[i]); else dev_kfree_skb_any(scq->skb[i]); } } } kfree(scq->skb); dma_free_coherent(&card->pcidev->dev, 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ? VBR_SCQSIZE : CBR_SCQSIZE), scq->org, scq->dma); kfree(scq); } /* The handles passed must be pointers to the sk_buff containing the small or large buffer(s) cast to u32. */ static void push_rxbufs(ns_dev * card, struct sk_buff *skb) { struct sk_buff *handle1, *handle2; int id1, id2; u32 addr1, addr2; u32 stat; unsigned long flags; /* *BARF* */ handle2 = NULL; addr2 = 0; handle1 = skb; addr1 = dma_map_single(&card->pcidev->dev, skb->data, (NS_PRV_BUFTYPE(skb) == BUF_SM ? 
NS_SMSKBSIZE : NS_LGSKBSIZE), DMA_TO_DEVICE); NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */ #ifdef GENERAL_DEBUG if (!addr1) printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index); #endif /* GENERAL_DEBUG */ stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); if (NS_PRV_BUFTYPE(skb) == BUF_SM) { if (!addr2) { if (card->sm_addr) { addr2 = card->sm_addr; handle2 = card->sm_handle; card->sm_addr = 0x00000000; card->sm_handle = NULL; } else { /* (!sm_addr) */ card->sm_addr = addr1; card->sm_handle = handle1; } } } else { /* buf_type == BUF_LG */ if (!addr2) { if (card->lg_addr) { addr2 = card->lg_addr; handle2 = card->lg_handle; card->lg_addr = 0x00000000; card->lg_handle = NULL; } else { /* (!lg_addr) */ card->lg_addr = addr1; card->lg_handle = handle1; } } } if (addr2) { if (NS_PRV_BUFTYPE(skb) == BUF_SM) { if (card->sbfqc >= card->sbnr.max) { skb_unlink(handle1, &card->sbpool.queue); dev_kfree_skb_any(handle1); skb_unlink(handle2, &card->sbpool.queue); dev_kfree_skb_any(handle2); return; } else card->sbfqc += 2; } else { /* (buf_type == BUF_LG) */ if (card->lbfqc >= card->lbnr.max) { skb_unlink(handle1, &card->lbpool.queue); dev_kfree_skb_any(handle1); skb_unlink(handle2, &card->lbpool.queue); dev_kfree_skb_any(handle2); return; } else card->lbfqc += 2; } id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC); if (id1 < 0) goto out; id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC); if (id2 < 0) goto out; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(addr2, card->membase + DR3); writel(id2, card->membase + DR2); writel(addr1, card->membase + DR1); writel(id1, card->membase + DR0); writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb), card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"), addr1, addr2); } if (!card->efbie && card->sbfqc >= card->sbnr.min && card->lbfqc >= card->lbnr.min) { card->efbie = 1; writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG); } out: return; } static irqreturn_t ns_irq_handler(int irq, void *dev_id) { u32 stat_r; ns_dev *card; struct atm_dev *dev; unsigned long flags; card = (ns_dev *) dev_id; dev = card->atmdev; card->intcnt++; PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); spin_lock_irqsave(&card->int_lock, flags); stat_r = readl(card->membase + STAT); /* Transmit Status Indicator has been written to T. S. 
Queue */ if (stat_r & NS_STAT_TSIF) { TXPRINTK("nicstar%d: TSI interrupt\n", card->index); process_tsq(card); writel(NS_STAT_TSIF, card->membase + STAT); } /* Incomplete CS-PDU has been transmitted */ if (stat_r & NS_STAT_TXICP) { writel(NS_STAT_TXICP, card->membase + STAT); TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", card->index); } /* Transmit Status Queue 7/8 full */ if (stat_r & NS_STAT_TSQF) { writel(NS_STAT_TSQF, card->membase + STAT); PRINTK("nicstar%d: TSQ full.\n", card->index); process_tsq(card); } /* Timer overflow */ if (stat_r & NS_STAT_TMROF) { writel(NS_STAT_TMROF, card->membase + STAT); PRINTK("nicstar%d: Timer overflow.\n", card->index); } /* PHY device interrupt signal active */ if (stat_r & NS_STAT_PHYI) { writel(NS_STAT_PHYI, card->membase + STAT); PRINTK("nicstar%d: PHY interrupt.\n", card->index); if (dev->phy && dev->phy->interrupt) { dev->phy->interrupt(dev); } } /* Small Buffer Queue is full */ if (stat_r & NS_STAT_SFBQF) { writel(NS_STAT_SFBQF, card->membase + STAT); printk("nicstar%d: Small free buffer queue is full.\n", card->index); } /* Large Buffer Queue is full */ if (stat_r & NS_STAT_LFBQF) { writel(NS_STAT_LFBQF, card->membase + STAT); printk("nicstar%d: Large free buffer queue is full.\n", card->index); } /* Receive Status Queue is full */ if (stat_r & NS_STAT_RSQF) { writel(NS_STAT_RSQF, card->membase + STAT); printk("nicstar%d: RSQ full.\n", card->index); process_rsq(card); } /* Complete CS-PDU received */ if (stat_r & NS_STAT_EOPDU) { RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); process_rsq(card); writel(NS_STAT_EOPDU, card->membase + STAT); } /* Raw cell received */ if (stat_r & NS_STAT_RAWCF) { writel(NS_STAT_RAWCF, card->membase + STAT); #ifndef RCQ_SUPPORT printk("nicstar%d: Raw cell received and no support yet...\n", card->index); #endif /* RCQ_SUPPORT */ /* NOTE: the following procedure may keep a raw cell pending until the next interrupt. As this preliminary support is only meant to avoid buffer leakage, this is not an issue. 
*/ while (readl(card->membase + RAWCT) != card->rawch) { if (ns_rcqe_islast(card->rawcell)) { struct sk_buff *oldbuf; oldbuf = card->rcbuf; card->rcbuf = idr_find(&card->idr, ns_rcqe_nextbufhandle(card->rawcell)); card->rawch = NS_PRV_DMA(card->rcbuf); card->rawcell = (struct ns_rcqe *) card->rcbuf->data; recycle_rx_buf(card, oldbuf); } else { card->rawch += NS_RCQE_SIZE; card->rawcell++; } } } /* Small buffer queue is empty */ if (stat_r & NS_STAT_SFBQE) { int i; struct sk_buff *sb; writel(NS_STAT_SFBQE, card->membase + STAT); printk("nicstar%d: Small free buffer queue empty.\n", card->index); for (i = 0; i < card->sbnr.min; i++) { sb = dev_alloc_skb(NS_SMSKBSIZE); if (sb == NULL) { writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); card->efbie = 0; break; } NS_PRV_BUFTYPE(sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } card->sbfqc = i; process_rsq(card); } /* Large buffer queue empty */ if (stat_r & NS_STAT_LFBQE) { int i; struct sk_buff *lb; writel(NS_STAT_LFBQE, card->membase + STAT); printk("nicstar%d: Large free buffer queue empty.\n", card->index); for (i = 0; i < card->lbnr.min; i++) { lb = dev_alloc_skb(NS_LGSKBSIZE); if (lb == NULL) { writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); card->efbie = 0; break; } NS_PRV_BUFTYPE(lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); } card->lbfqc = i; process_rsq(card); } /* Receive Status Queue is 7/8 full */ if (stat_r & NS_STAT_RSQAF) { writel(NS_STAT_RSQAF, card->membase + STAT); RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); process_rsq(card); } spin_unlock_irqrestore(&card->int_lock, flags); PRINTK("nicstar%d: end of interrupt service\n", card->index); return IRQ_HANDLED; } static int ns_open(struct atm_vcc *vcc) { ns_dev *card; vc_map *vc; unsigned long tmpl, modl; int tcr, tcra; /* target cell rate, and absolute value */ int n = 0; /* Number of entries in the TST. Initialized to remove the compiler warning. */ u32 u32d[4]; int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler warning. How I wish compilers were clever enough to tell which variables can truly be used uninitialized... */ int inuse; /* tx or rx vc already in use by another vcc */ short vpi = vcc->vpi; int vci = vcc->vci; card = (ns_dev *) vcc->dev->dev_data; PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi, vci); if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { PRINTK("nicstar%d: unsupported AAL.\n", card->index); return -EINVAL; } vc = &(card->vcmap[vpi << card->vcibits | vci]); vcc->dev_data = vc; inuse = 0; if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) inuse = 1; if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) inuse += 2; if (inuse) { printk("nicstar%d: %s vci already in use.\n", card->index, inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); return -EINVAL; } set_bit(ATM_VF_ADDR, &vcc->flags); /* NOTE: You are not allowed to modify an open connection's QOS. To change that, remove the ATM_VF_PARTIAL flag checking. There may be other changes needed to do that. 
*/ if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) { scq_info *scq; set_bit(ATM_VF_PARTIAL, &vcc->flags); if (vcc->qos.txtp.traffic_class == ATM_CBR) { /* Check requested cell rate and availability of SCD */ if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 && vcc->qos.txtp.min_pcr == 0) { PRINTK ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", card->index); clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); return -EINVAL; } tcr = atm_pcr_goal(&(vcc->qos.txtp)); tcra = tcr >= 0 ? tcr : -tcr; PRINTK("nicstar%d: target cell rate = %d.\n", card->index, vcc->qos.txtp.max_pcr); tmpl = (unsigned long)tcra *(unsigned long) NS_TST_NUM_ENTRIES; modl = tmpl % card->max_pcr; n = (int)(tmpl / card->max_pcr); if (tcr > 0) { if (modl > 0) n++; } else if (tcr == 0) { if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0) { PRINTK ("nicstar%d: no CBR bandwidth free.\n", card->index); clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); return -EINVAL; } } if (n == 0) { printk ("nicstar%d: selected bandwidth < granularity.\n", card->index); clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); return -EINVAL; } if (n > (card->tst_free_entries - NS_TST_RESERVED)) { PRINTK ("nicstar%d: not enough free CBR bandwidth.\n", card->index); clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); return -EINVAL; } else card->tst_free_entries -= n; XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n); for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) { if (card->scd2vc[frscdi] == NULL) { card->scd2vc[frscdi] = vc; break; } } if (frscdi == NS_FRSCD_NUM) { PRINTK ("nicstar%d: no SCD available for CBR channel.\n", card->index); card->tst_free_entries += n; clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); return -EBUSY; } vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd); if (scq == NULL) { PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index); card->scd2vc[frscdi] = NULL; card->tst_free_entries += n; clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); return -ENOMEM; } vc->scq = scq; u32d[0] = scq_virt_to_bus(scq, scq->base); u32d[1] = (u32) 0x00000000; u32d[2] = (u32) 0xffffffff; u32d[3] = (u32) 0x00000000; ns_write_sram(card, vc->cbr_scd, u32d, 4); fill_tst(card, n, vc); } else if (vcc->qos.txtp.traffic_class == ATM_UBR) { vc->cbr_scd = 0x00000000; vc->scq = card->scq0; } if (vcc->qos.txtp.traffic_class != ATM_NONE) { vc->tx = 1; vc->tx_vcc = vcc; vc->tbd_count = 0; } if (vcc->qos.rxtp.traffic_class != ATM_NONE) { u32 status; vc->rx = 1; vc->rx_vcc = vcc; vc->rx_iov = NULL; /* Open the connection in hardware */ if (vcc->qos.aal == ATM_AAL5) status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; else /* vcc->qos.aal == ATM_AAL0 */ status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; #ifdef RCQ_SUPPORT status |= NS_RCTE_RAWCELLINTEN; #endif /* RCQ_SUPPORT */ ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) * NS_RCT_ENTRY_SIZE, &status, 1); } } set_bit(ATM_VF_READY, &vcc->flags); return 0; } static void ns_close(struct atm_vcc *vcc) { vc_map *vc; ns_dev *card; u32 data; int i; vc = vcc->dev_data; card = vcc->dev->dev_data; PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, (int)vcc->vpi, vcc->vci); clear_bit(ATM_VF_READY, &vcc->flags); if (vcc->qos.rxtp.traffic_class != ATM_NONE) { u32 addr; unsigned long flags; addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; 
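/* addr is this VC's receive connection table (RCT) entry in SRAM, indexed by
 * vpi << vcibits | vci exactly as in ns_open(); tell the SAR to close the
 * connection before recycling any partially reassembled PDU below. */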
spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); vc->rx = 0; if (vc->rx_iov != NULL) { struct sk_buff *iovb; u32 stat; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); PRINTK ("nicstar%d: closing a VC with pending rx buffers.\n", card->index); iovb = vc->rx_iov; recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); NS_PRV_IOVCNT(iovb) = 0; spin_lock_irqsave(&card->int_lock, flags); recycle_iov_buf(card, iovb); spin_unlock_irqrestore(&card->int_lock, flags); vc->rx_iov = NULL; } } if (vcc->qos.txtp.traffic_class != ATM_NONE) { vc->tx = 0; } if (vcc->qos.txtp.traffic_class == ATM_CBR) { unsigned long flags; ns_scqe *scqep; scq_info *scq; scq = vc->scq; for (;;) { spin_lock_irqsave(&scq->lock, flags); scqep = scq->next; if (scqep == scq->base) scqep = scq->last; else scqep--; if (scqep == scq->tail) { spin_unlock_irqrestore(&scq->lock, flags); break; } /* If the last entry is not a TSR, place one in the SCQ in order to be able to completely drain it and then close. */ if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) { ns_scqe tsr; u32 scdi, scqi; u32 data; int index; tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; scqi = scq->next - scq->base; tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); tsr.word_3 = 0x00000000; tsr.word_4 = 0x00000000; *scq->next = tsr; index = (int)scqi; scq->skb[index] = NULL; if (scq->next == scq->last) scq->next = scq->base; else scq->next++; data = scq_virt_to_bus(scq, scq->next); ns_write_sram(card, scq->scd, &data, 1); } spin_unlock_irqrestore(&scq->lock, flags); schedule(); } /* Free all TST entries */ data = NS_TST_OPCODE_VARIABLE; for (i = 0; i < NS_TST_NUM_ENTRIES; i++) { if (card->tste2vc[i] == vc) { ns_write_sram(card, card->tst_addr + i, &data, 1); card->tste2vc[i] = NULL; card->tst_free_entries++; } } card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; free_scq(card, vc->scq, vcc); } /* remove all references to vcc before deleting it */ if (vcc->qos.txtp.traffic_class != ATM_NONE) { unsigned long flags; scq_info *scq = card->scq0; spin_lock_irqsave(&scq->lock, flags); for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { ATM_SKB(scq->skb[i])->vcc = NULL; atm_return(vcc, scq->skb[i]->truesize); PRINTK ("nicstar: deleted pending vcc mapping\n"); } } spin_unlock_irqrestore(&scq->lock, flags); } vcc->dev_data = NULL; clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); #ifdef RX_DEBUG { u32 stat, cfg; stat = readl(card->membase + STAT); cfg = readl(card->membase + CFG); printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); printk ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n", card->tsq.base, card->tsq.next, card->tsq.last, readl(card->membase + TSQT)); printk ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n", card->rsq.base, card->rsq.next, card->rsq.last, readl(card->membase + RSQT)); printk("Empty free buffer queue interrupt %s \n", card->efbie ? 
"enabled" : "disabled"); printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", ns_stat_sfbqc_get(stat), card->sbpool.count, ns_stat_lfbqc_get(stat), card->lbpool.count); printk("hbpool.count = %d iovpool.count = %d \n", card->hbpool.count, card->iovpool.count); } #endif /* RX_DEBUG */ } static void fill_tst(ns_dev * card, int n, vc_map * vc) { u32 new_tst; unsigned long cl; int e, r; u32 data; /* It would be very complicated to keep the two TSTs synchronized while assuring that writes are only made to the inactive TST. So, for now I will use only one TST. If problems occur, I will change this again */ new_tst = card->tst_addr; /* Fill procedure */ for (e = 0; e < NS_TST_NUM_ENTRIES; e++) { if (card->tste2vc[e] == NULL) break; } if (e == NS_TST_NUM_ENTRIES) { printk("nicstar%d: No free TST entries found. \n", card->index); return; } r = n; cl = NS_TST_NUM_ENTRIES; data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); while (r > 0) { if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { card->tste2vc[e] = vc; ns_write_sram(card, new_tst + e, &data, 1); cl -= NS_TST_NUM_ENTRIES; r--; } if (++e == NS_TST_NUM_ENTRIES) { e = 0; } cl += n; } /* End of fill procedure */ data = ns_tste_make(NS_TST_OPCODE_END, new_tst); ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); card->tst_addr = new_tst; } static int _ns_send(struct atm_vcc *vcc, struct sk_buff *skb, bool may_sleep) { ns_dev *card; vc_map *vc; scq_info *scq; unsigned long buflen; ns_scqe scqe; u32 flags; /* TBD flags, not CPU flags */ card = vcc->dev->dev_data; TXPRINTK("nicstar%d: ns_send() called.\n", card->index); if ((vc = (vc_map *) vcc->dev_data) == NULL) { printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (!vc->tx) { printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("nicstar%d: No scatter-gather yet.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } ATM_SKB(skb)->vcc = vcc; NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (vcc->qos.aal == ATM_AAL5) { buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ flags = NS_TBD_AAL5; scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb)); scqe.word_3 = cpu_to_le32(skb->len); scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, ATM_SKB(skb)-> atm_options & ATM_ATMOPT_CLP ? 
1 : 0); flags |= NS_TBD_EOPDU; } else { /* (vcc->qos.aal == ATM_AAL0) */ buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ flags = NS_TBD_AAL0; scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER); scqe.word_3 = cpu_to_le32(0x00000000); if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ flags |= NS_TBD_EOPDU; scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); /* Force the VPI/VCI to be the same as in VCC struct */ scqe.word_4 |= cpu_to_le32((((u32) vcc-> vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc-> vci) << NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK); } if (vcc->qos.txtp.traffic_class == ATM_CBR) { scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); scq = ((vc_map *) vcc->dev_data)->scq; } else { scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); scq = card->scq0; } if (push_scqe(card, vc, scq, &scqe, skb, may_sleep) != 0) { atomic_inc(&vcc->stats->tx_err); dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); return -EIO; } atomic_inc(&vcc->stats->tx); return 0; } static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) { return _ns_send(vcc, skb, true); } static int ns_send_bh(struct atm_vcc *vcc, struct sk_buff *skb) { return _ns_send(vcc, skb, false); } static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, struct sk_buff *skb, bool may_sleep) { unsigned long flags; ns_scqe tsr; u32 scdi, scqi; int scq_is_vbr; u32 data; int index; spin_lock_irqsave(&scq->lock, flags); while (scq->tail == scq->next) { if (!may_sleep) { spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Error pushing TBD.\n", card->index); return 1; } scq->full = 1; wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq, scq->tail != scq->next, scq->lock, SCQFULL_TIMEOUT); if (scq->full) { spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Timeout pushing TBD.\n", card->index); return 1; } } *scq->next = *tbd; index = (int)(scq->next - scq->base); scq->skb[index] = skb; XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n", card->index, skb, index); XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), scq->next); if (scq->next == scq->last) scq->next = scq->base; else scq->next++; vc->tbd_count++; if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { scq->tbd_count++; scq_is_vbr = 1; } else scq_is_vbr = 0; if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ) { int has_run = 0; while (scq->tail == scq->next) { if (!may_sleep) { data = scq_virt_to_bus(scq, scq->next); ns_write_sram(card, scq->scd, &data, 1); spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Error pushing TSR.\n", card->index); return 0; } scq->full = 1; if (has_run++) break; wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq, scq->tail != scq->next, scq->lock, SCQFULL_TIMEOUT); } if (!scq->full) { tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); if (scq_is_vbr) scdi = NS_TSR_SCDISVBR; else scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; scqi = scq->next - scq->base; tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); tsr.word_3 = 0x00000000; tsr.word_4 = 0x00000000; *scq->next = tsr; index = (int)scqi; scq->skb[index] = NULL; XPRINTK ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4), scq->next); if (scq->next == scq->last) 
scq->next = scq->base; else scq->next++; vc->tbd_count = 0; scq->tbd_count = 0; } else PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index); } data = scq_virt_to_bus(scq, scq->next); ns_write_sram(card, scq->scd, &data, 1); spin_unlock_irqrestore(&scq->lock, flags); return 0; } static void process_tsq(ns_dev * card) { u32 scdi; scq_info *scq; ns_tsi *previous = NULL, *one_ahead, *two_ahead; int serviced_entries; /* flag indicating at least on entry was serviced */ serviced_entries = 0; if (card->tsq.next == card->tsq.last) one_ahead = card->tsq.base; else one_ahead = card->tsq.next + 1; if (one_ahead == card->tsq.last) two_ahead = card->tsq.base; else two_ahead = one_ahead + 1; while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || !ns_tsi_isempty(two_ahead)) /* At most two empty, as stated in the 77201 errata */ { serviced_entries = 1; /* Skip the one or two possible empty entries */ while (ns_tsi_isempty(card->tsq.next)) { if (card->tsq.next == card->tsq.last) card->tsq.next = card->tsq.base; else card->tsq.next++; } if (!ns_tsi_tmrof(card->tsq.next)) { scdi = ns_tsi_getscdindex(card->tsq.next); if (scdi == NS_TSI_SCDISVBR) scq = card->scq0; else { if (card->scd2vc[scdi] == NULL) { printk ("nicstar%d: could not find VC from SCD index.\n", card->index); ns_tsi_init(card->tsq.next); return; } scq = card->scd2vc[scdi]->scq; } drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); scq->full = 0; wake_up_interruptible(&(scq->scqfull_waitq)); } ns_tsi_init(card->tsq.next); previous = card->tsq.next; if (card->tsq.next == card->tsq.last) card->tsq.next = card->tsq.base; else card->tsq.next++; if (card->tsq.next == card->tsq.last) one_ahead = card->tsq.base; else one_ahead = card->tsq.next + 1; if (one_ahead == card->tsq.last) two_ahead = card->tsq.base; else two_ahead = one_ahead + 1; } if (serviced_entries) writel(PTR_DIFF(previous, card->tsq.base), card->membase + TSQH); } static void drain_scq(ns_dev * card, scq_info * scq, int pos) { struct atm_vcc *vcc; struct sk_buff *skb; int i; unsigned long flags; XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n", card->index, scq, pos); if (pos >= scq->num_entries) { printk("nicstar%d: Bad index on drain_scq().\n", card->index); return; } spin_lock_irqsave(&scq->lock, flags); i = (int)(scq->tail - scq->base); if (++i == scq->num_entries) i = 0; while (i != pos) { skb = scq->skb[i]; XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n", card->index, skb, i); if (skb != NULL) { dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len, DMA_TO_DEVICE); vcc = ATM_SKB(skb)->vcc; if (vcc && vcc->pop != NULL) { vcc->pop(vcc, skb); } else { dev_kfree_skb_irq(skb); } scq->skb[i] = NULL; } if (++i == scq->num_entries) i = 0; } scq->tail = scq->base + pos; spin_unlock_irqrestore(&scq->lock, flags); } static void process_rsq(ns_dev * card) { ns_rsqe *previous; if (!ns_rsqe_valid(card->rsq.next)) return; do { dequeue_rx(card, card->rsq.next); ns_rsqe_init(card->rsq.next); previous = card->rsq.next; if (card->rsq.next == card->rsq.last) card->rsq.next = card->rsq.base; else card->rsq.next++; } while (ns_rsqe_valid(card->rsq.next)); writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH); } static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) { u32 vpi, vci; vc_map *vc; struct sk_buff *iovb; struct iovec *iov; struct atm_vcc *vcc; struct sk_buff *skb; unsigned short aal5_len; int len; u32 stat; u32 id; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); 
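/* push_rxbufs() registered every free buffer in card->idr; the RSQ entry
 * hands that id back, so look the skb up again and unmap it before touching
 * the received cells. */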
id = le32_to_cpu(rsqe->buffer_handle); skb = idr_remove(&card->idr, id); if (!skb) { RXPRINTK(KERN_ERR "nicstar%d: skb not found!\n", card->index); return; } dma_sync_single_for_cpu(&card->pcidev->dev, NS_PRV_DMA(skb), (NS_PRV_BUFTYPE(skb) == BUF_SM ? NS_SMSKBSIZE : NS_LGSKBSIZE), DMA_FROM_DEVICE); dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), (NS_PRV_BUFTYPE(skb) == BUF_SM ? NS_SMSKBSIZE : NS_LGSKBSIZE), DMA_FROM_DEVICE); vpi = ns_rsqe_vpi(rsqe); vci = ns_rsqe_vci(rsqe); if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", card->index, vpi, vci); recycle_rx_buf(card, skb); return; } vc = &(card->vcmap[vpi << card->vcibits | vci]); if (!vc->rx) { RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", card->index, vpi, vci); recycle_rx_buf(card, skb); return; } vcc = vc->rx_vcc; if (vcc->qos.aal == ATM_AAL0) { struct sk_buff *sb; unsigned char *cell; int i; cell = skb->data; for (i = ns_rsqe_cellcount(rsqe); i; i--) { sb = dev_alloc_skb(NS_SMSKBSIZE); if (!sb) { printk ("nicstar%d: Can't allocate buffers for aal0.\n", card->index); atomic_add(i, &vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK ("nicstar%d: atm_charge() dropped aal0 packets.\n", card->index); atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ dev_kfree_skb_any(sb); break; } /* Rebuild the header */ *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000); if (i == 1 && ns_rsqe_eopdu(rsqe)) *((u32 *) sb->data) |= 0x00000002; skb_put(sb, NS_AAL0_HEADER); memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); skb_put(sb, ATM_CELL_PAYLOAD); ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); atomic_inc(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } recycle_rx_buf(card, skb); return; } /* To reach this point, the AAL layer can only be AAL5 */ if ((iovb = vc->rx_iov) == NULL) { iovb = skb_dequeue(&(card->iovpool.queue)); if (iovb == NULL) { /* No buffers in the queue */ iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); if (iovb == NULL) { printk("nicstar%d: Out of iovec buffers.\n", card->index); atomic_inc(&vcc->stats->rx_drop); recycle_rx_buf(card, skb); return; } NS_PRV_BUFTYPE(iovb) = BUF_NONE; } else if (--card->iovpool.count < card->iovnr.min) { struct sk_buff *new_iovb; if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) { NS_PRV_BUFTYPE(iovb) = BUF_NONE; skb_queue_tail(&card->iovpool.queue, new_iovb); card->iovpool.count++; } } vc->rx_iov = iovb; NS_PRV_IOVCNT(iovb) = 0; iovb->len = 0; iovb->data = iovb->head; skb_reset_tail_pointer(iovb); /* IMPORTANT: a pointer to the sk_buff containing the small or large buffer is stored as iovec base, NOT a pointer to the small or large buffer itself. 
*/ } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { printk("nicstar%d: received too big AAL5 SDU.\n", card->index); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_MAX_IOVECS); NS_PRV_IOVCNT(iovb) = 0; iovb->len = 0; iovb->data = iovb->head; skb_reset_tail_pointer(iovb); } iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++]; iov->iov_base = (void *)skb; iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; iovb->len += iov->iov_len; #ifdef EXTRA_DEBUG if (NS_PRV_IOVCNT(iovb) == 1) { if (NS_PRV_BUFTYPE(skb) != BUF_SM) { printk ("nicstar%d: Expected a small buffer, and this is not one.\n", card->index); which_list(card, skb); atomic_inc(&vcc->stats->rx_err); recycle_rx_buf(card, skb); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } } else { /* NS_PRV_IOVCNT(iovb) >= 2 */ if (NS_PRV_BUFTYPE(skb) != BUF_LG) { printk ("nicstar%d: Expected a large buffer, and this is not one.\n", card->index); which_list(card, skb); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } } #endif /* EXTRA_DEBUG */ if (ns_rsqe_eopdu(rsqe)) { /* This works correctly regardless of the endianness of the host */ unsigned char *L1L2 = (unsigned char *) (skb->data + iov->iov_len - 6); aal5_len = L1L2[0] << 8 | L1L2[1]; len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; if (ns_rsqe_crcerr(rsqe) || len + 8 > iovb->len || len + (47 + 8) < iovb->len) { printk("nicstar%d: AAL5 CRC error", card->index); if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) printk(" - PDU size mismatch.\n"); else printk(".\n"); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } /* By this point we (hopefully) have a complete SDU without errors. 
*/ if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */ /* skb points to a small buffer */ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); atomic_inc(&vcc->stats->rx_drop); } else { skb_put(skb, len); dequeue_sm_buf(card, skb); ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); } } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ struct sk_buff *sb; sb = (struct sk_buff *)(iov - 1)->iov_base; /* skb points to a large buffer */ if (len <= NS_SMBUFSIZE) { if (!atm_charge(vcc, sb->truesize)) { push_rxbufs(card, sb); atomic_inc(&vcc->stats->rx_drop); } else { skb_put(sb, len); dequeue_sm_buf(card, sb); ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); atomic_inc(&vcc->stats->rx); } push_rxbufs(card, skb); } else { /* len > NS_SMBUFSIZE, the usual case */ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); atomic_inc(&vcc->stats->rx_drop); } else { dequeue_lg_buf(card, skb); skb_push(skb, NS_SMBUFSIZE); skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE); skb_put(skb, len - NS_SMBUFSIZE); ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); } push_rxbufs(card, sb); } } else { /* Must push a huge buffer */ struct sk_buff *hb, *sb, *lb; int remaining, tocopy; int j; hb = skb_dequeue(&(card->hbpool.queue)); if (hb == NULL) { /* No buffers in the queue */ hb = dev_alloc_skb(NS_HBUFSIZE); if (hb == NULL) { printk ("nicstar%d: Out of huge buffers.\n", card->index); atomic_inc(&vcc->stats->rx_drop); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } else if (card->hbpool.count < card->hbnr.min) { struct sk_buff *new_hb; if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { skb_queue_tail(&card->hbpool. queue, new_hb); card->hbpool.count++; } } NS_PRV_BUFTYPE(hb) = BUF_NONE; } else if (--card->hbpool.count < card->hbnr.min) { struct sk_buff *new_hb; if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { NS_PRV_BUFTYPE(new_hb) = BUF_NONE; skb_queue_tail(&card->hbpool.queue, new_hb); card->hbpool.count++; } if (card->hbpool.count < card->hbnr.min) { if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { NS_PRV_BUFTYPE(new_hb) = BUF_NONE; skb_queue_tail(&card->hbpool. 
queue, new_hb); card->hbpool.count++; } } } iov = (struct iovec *)iovb->data; if (!atm_charge(vcc, hb->truesize)) { recycle_iovec_rx_bufs(card, iov, NS_PRV_IOVCNT(iovb)); if (card->hbpool.count < card->hbnr.max) { skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } else dev_kfree_skb_any(hb); atomic_inc(&vcc->stats->rx_drop); } else { /* Copy the small buffer to the huge buffer */ sb = (struct sk_buff *)iov->iov_base; skb_copy_from_linear_data(sb, hb->data, iov->iov_len); skb_put(hb, iov->iov_len); remaining = len - iov->iov_len; iov++; /* Free the small buffer */ push_rxbufs(card, sb); /* Copy all large buffers to the huge buffer and free them */ for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) { lb = (struct sk_buff *)iov->iov_base; tocopy = min_t(int, remaining, iov->iov_len); skb_copy_from_linear_data(lb, skb_tail_pointer (hb), tocopy); skb_put(hb, tocopy); iov++; remaining -= tocopy; push_rxbufs(card, lb); } #ifdef EXTRA_DEBUG if (remaining != 0 || hb->len != len) printk ("nicstar%d: Huge buffer len mismatch.\n", card->index); #endif /* EXTRA_DEBUG */ ATM_SKB(hb)->vcc = vcc; __net_timestamp(hb); vcc->push(vcc, hb); atomic_inc(&vcc->stats->rx); } } vc->rx_iov = NULL; recycle_iov_buf(card, iovb); } } static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb) { if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) { printk("nicstar%d: What kind of rx buffer is this?\n", card->index); dev_kfree_skb_any(skb); } else push_rxbufs(card, skb); } static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count) { while (count-- > 0) recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base); } static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb) { if (card->iovpool.count < card->iovnr.max) { skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; } else dev_kfree_skb_any(iovb); } static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) { skb_unlink(sb, &card->sbpool.queue); if (card->sbfqc < card->sbnr.init) { struct sk_buff *new_sb; if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, new_sb); skb_reserve(new_sb, NS_AAL0_HEADER); push_rxbufs(card, new_sb); } } if (card->sbfqc < card->sbnr.init) { struct sk_buff *new_sb; if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, new_sb); skb_reserve(new_sb, NS_AAL0_HEADER); push_rxbufs(card, new_sb); } } } static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) { skb_unlink(lb, &card->lbpool.queue); if (card->lbfqc < card->lbnr.init) { struct sk_buff *new_lb; if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, new_lb); skb_reserve(new_lb, NS_SMBUFSIZE); push_rxbufs(card, new_lb); } } if (card->lbfqc < card->lbnr.init) { struct sk_buff *new_lb; if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, new_lb); skb_reserve(new_lb, NS_SMBUFSIZE); push_rxbufs(card, new_lb); } } } static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page) { u32 stat; ns_dev *card; int left; left = (int)*pos; card = (ns_dev *) dev->dev_data; stat = readl(card->membase + STAT); if (!left--) return sprintf(page, "Pool count min init max \n"); if (!left--) return sprintf(page, "Small %5d %5d %5d %5d \n", ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init, card->sbnr.max); if (!left--) return sprintf(page, "Large %5d %5d %5d %5d \n", 
			      ns_stat_lfbqc_get(stat), card->lbnr.min,
			      card->lbnr.init, card->lbnr.max);
	if (!left--)
		return sprintf(page, "Huge  %5d %5d %5d %5d \n",
			       card->hbpool.count, card->hbnr.min,
			       card->hbnr.init, card->hbnr.max);
	if (!left--)
		return sprintf(page, "Iovec %5d %5d %5d %5d \n",
			       card->iovpool.count, card->iovnr.min,
			       card->iovnr.init, card->iovnr.max);
	if (!left--) {
		int retval;

		retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt);
		card->intcnt = 0;
		return retval;
	}
#if 0
	/* Dump 25.6 Mbps PHY registers */
	/* Now that there's a 25.6 Mbps PHY driver, this code isn't needed.
	   I left it here just in case it's needed for debugging. */
	if (card->max_pcr == ATM_25_PCR && !left--) {
		u32 phy_regs[4];
		u32 i;

		for (i = 0; i < 4; i++) {
			while (CMD_BUSY(card)) ;
			writel(NS_CMD_READ_UTILITY | 0x00000200 | i,
			       card->membase + CMD);
			while (CMD_BUSY(card)) ;
			phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
		}

		return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
			       phy_regs[0], phy_regs[1], phy_regs[2],
			       phy_regs[3]);
	}
#endif /* 0 - Dump 25.6 Mbps PHY registers */
#if 0
	/* Dump TST */
	if (left-- < NS_TST_NUM_ENTRIES) {
		if (card->tste2vc[left + 1] == NULL)
			return sprintf(page, "%5d - VBR/UBR \n", left + 1);
		else
			return sprintf(page, "%5d - %d %d \n", left + 1,
				       card->tste2vc[left + 1]->tx_vcc->vpi,
				       card->tste2vc[left + 1]->tx_vcc->vci);
	}
#endif /* 0 */
	return 0;
}

static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
{
	ns_dev *card;
	pool_levels pl;
	long btype;
	unsigned long flags;

	card = dev->dev_data;
	switch (cmd) {
	case NS_GETPSTAT:
		if (get_user(pl.buftype,
			     &((pool_levels __user *) arg)->buftype))
			return -EFAULT;
		switch (pl.buftype) {
		case NS_BUFTYPE_SMALL:
			pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT));
			pl.level.min = card->sbnr.min;
			pl.level.init = card->sbnr.init;
			pl.level.max = card->sbnr.max;
			break;

		case NS_BUFTYPE_LARGE:
			pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT));
			pl.level.min = card->lbnr.min;
			pl.level.init = card->lbnr.init;
			pl.level.max = card->lbnr.max;
			break;

		case NS_BUFTYPE_HUGE:
			pl.count = card->hbpool.count;
			pl.level.min = card->hbnr.min;
			pl.level.init = card->hbnr.init;
			pl.level.max = card->hbnr.max;
			break;

		case NS_BUFTYPE_IOVEC:
			pl.count = card->iovpool.count;
			pl.level.min = card->iovnr.min;
			pl.level.init = card->iovnr.init;
			pl.level.max = card->iovnr.max;
			break;

		default:
			return -ENOIOCTLCMD;

		}
		if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
			return (sizeof(pl));
		else
			return -EFAULT;

	case NS_SETBUFLEV:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
			return -EFAULT;
		if (pl.level.min >= pl.level.init
		    || pl.level.init >= pl.level.max)
			return -EINVAL;
		if (pl.level.min == 0)
			return -EINVAL;
		switch (pl.buftype) {
		case NS_BUFTYPE_SMALL:
			if (pl.level.max > TOP_SB)
				return -EINVAL;
			card->sbnr.min = pl.level.min;
			card->sbnr.init = pl.level.init;
			card->sbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_LARGE:
			if (pl.level.max > TOP_LB)
				return -EINVAL;
			card->lbnr.min = pl.level.min;
			card->lbnr.init = pl.level.init;
			card->lbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_HUGE:
			if (pl.level.max > TOP_HB)
				return -EINVAL;
			card->hbnr.min = pl.level.min;
			card->hbnr.init = pl.level.init;
			card->hbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_IOVEC:
			if (pl.level.max > TOP_IOVB)
				return -EINVAL;
			card->iovnr.min = pl.level.min;
			card->iovnr.init = pl.level.init;
			card->iovnr.max = pl.level.max;
			break;

		default:
			return -EINVAL;

		}
		return 0;

	case NS_ADJBUFLEV:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		btype = (long)arg;	/* a long is the same size as a pointer or bigger */
		switch (btype) {
		case NS_BUFTYPE_SMALL:
			while (card->sbfqc < card->sbnr.init) {
				struct sk_buff *sb;

				sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
				if (sb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(sb) = BUF_SM;
				skb_queue_tail(&card->sbpool.queue, sb);
				skb_reserve(sb, NS_AAL0_HEADER);
				push_rxbufs(card, sb);
			}
			break;

		case NS_BUFTYPE_LARGE:
			while (card->lbfqc < card->lbnr.init) {
				struct sk_buff *lb;

				lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
				if (lb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(lb) = BUF_LG;
				skb_queue_tail(&card->lbpool.queue, lb);
				skb_reserve(lb, NS_SMBUFSIZE);
				push_rxbufs(card, lb);
			}
			break;

		case NS_BUFTYPE_HUGE:
			while (card->hbpool.count > card->hbnr.init) {
				struct sk_buff *hb;

				spin_lock_irqsave(&card->int_lock, flags);
				hb = skb_dequeue(&card->hbpool.queue);
				card->hbpool.count--;
				spin_unlock_irqrestore(&card->int_lock, flags);
				if (hb == NULL)
					printk("nicstar%d: huge buffer count inconsistent.\n",
					       card->index);
				else
					dev_kfree_skb_any(hb);

			}
			while (card->hbpool.count < card->hbnr.init) {
				struct sk_buff *hb;

				hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
				if (hb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(hb) = BUF_NONE;
				spin_lock_irqsave(&card->int_lock, flags);
				skb_queue_tail(&card->hbpool.queue, hb);
				card->hbpool.count++;
				spin_unlock_irqrestore(&card->int_lock, flags);
			}
			break;

		case NS_BUFTYPE_IOVEC:
			while (card->iovpool.count > card->iovnr.init) {
				struct sk_buff *iovb;

				spin_lock_irqsave(&card->int_lock, flags);
				iovb = skb_dequeue(&card->iovpool.queue);
				card->iovpool.count--;
				spin_unlock_irqrestore(&card->int_lock, flags);
				if (iovb == NULL)
					printk("nicstar%d: iovec buffer count inconsistent.\n",
					       card->index);
				else
					dev_kfree_skb_any(iovb);

			}
			while (card->iovpool.count < card->iovnr.init) {
				struct sk_buff *iovb;

				iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
				if (iovb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(iovb) = BUF_NONE;
				spin_lock_irqsave(&card->int_lock, flags);
				skb_queue_tail(&card->iovpool.queue, iovb);
				card->iovpool.count++;
				spin_unlock_irqrestore(&card->int_lock, flags);
			}
			break;

		default:
			return -EINVAL;

		}
		return 0;

	default:
		if (dev->phy && dev->phy->ioctl) {
			return dev->phy->ioctl(dev, cmd, arg);
		} else {
			printk("nicstar%d: %s == NULL \n", card->index,
			       dev->phy ? "dev->phy->ioctl" : "dev->phy");
			return -ENOIOCTLCMD;
		}
	}
}

#ifdef EXTRA_DEBUG
static void which_list(ns_dev *card, struct sk_buff *skb)
{
	printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
}
#endif /* EXTRA_DEBUG */

static void ns_poll(struct timer_list *unused)
{
	int i;
	ns_dev *card;
	unsigned long flags;
	u32 stat_r, stat_w;

	PRINTK("nicstar: Entering ns_poll().\n");
	for (i = 0; i < num_cards; i++) {
		card = cards[i];
		if (!spin_trylock_irqsave(&card->int_lock, flags)) {
			/* Probably it isn't worth spinning */
			continue;
		}

		stat_w = 0;
		stat_r = readl(card->membase + STAT);
		if (stat_r & NS_STAT_TSIF)
			stat_w |= NS_STAT_TSIF;
		if (stat_r & NS_STAT_EOPDU)
			stat_w |= NS_STAT_EOPDU;

		process_tsq(card);
		process_rsq(card);

		writel(stat_w, card->membase + STAT);
		spin_unlock_irqrestore(&card->int_lock, flags);
	}
	mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
	PRINTK("nicstar: Leaving ns_poll().\n");
}

static void ns_phy_put(struct atm_dev *dev, unsigned char value,
		       unsigned long addr)
{
	ns_dev *card;
	unsigned long flags;

	card = dev->dev_data;
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card)) ;
	writel((u32) value, card->membase + DR0);
	writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
	       card->membase + CMD);
	spin_unlock_irqrestore(&card->res_lock, flags);
}

static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
{
	ns_dev *card;
	unsigned long flags;
	u32 data;

	card = dev->dev_data;
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card)) ;
	writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
	       card->membase + CMD);
	while (CMD_BUSY(card)) ;
	data = readl(card->membase + DR0) & 0x000000FF;
	spin_unlock_irqrestore(&card->res_lock, flags);
	return (unsigned char)data;
}

module_init(nicstar_init);
module_exit(nicstar_cleanup);
// SPDX-License-Identifier: GPL-2.0-only

/*
 * A simple wrapper around refcount. An allocated sched_core_cookie's
 * address is used to compute the cookie of the task.
 */
struct sched_core_cookie {
	refcount_t refcnt;
};

static unsigned long sched_core_alloc_cookie(void)
{
	struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);
	if (!ck)
		return 0;

	refcount_set(&ck->refcnt, 1);
	sched_core_get();

	return (unsigned long)ck;
}

static void sched_core_put_cookie(unsigned long cookie)
{
	struct sched_core_cookie *ptr = (void *)cookie;

	if (ptr && refcount_dec_and_test(&ptr->refcnt)) {
		kfree(ptr);
		sched_core_put();
	}
}

static unsigned long sched_core_get_cookie(unsigned long cookie)
{
	struct sched_core_cookie *ptr = (void *)cookie;

	if (ptr)
		refcount_inc(&ptr->refcnt);

	return cookie;
}

/*
 * sched_core_update_cookie - replace the cookie on a task
 * @p: the task to update
 * @cookie: the new cookie
 *
 * Effectively exchange the task cookie; caller is responsible for lifetimes on
 * both ends.
 *
 * Returns: the old cookie
 */
static unsigned long sched_core_update_cookie(struct task_struct *p,
					      unsigned long cookie)
{
	unsigned long old_cookie;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * Since creating a cookie implies sched_core_get(), and we cannot set
	 * a cookie until after we've created it, similarly, we cannot destroy
	 * a cookie until after we've removed it, we must have core scheduling
	 * enabled here.
	 */
	SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));

	if (sched_core_enqueued(p))
		sched_core_dequeue(rq, p, DEQUEUE_SAVE);

	old_cookie = p->core_cookie;
	p->core_cookie = cookie;

	/*
	 * Consider the cases: !prev_cookie and !cookie.
	 */
	if (cookie && task_on_rq_queued(p))
		sched_core_enqueue(rq, p);

	/*
	 * If task is currently running, it may not be compatible anymore after
	 * the cookie change, so enter the scheduler on its CPU to schedule it
	 * away.
	 *
	 * Note that it is possible that as a result of this cookie change, the
	 * core has now entered/left forced idle state. Defer accounting to the
	 * next scheduling edge, rather than always forcing a reschedule here.
	 */
	if (task_on_cpu(rq, p))
		resched_curr(rq);

	task_rq_unlock(rq, p, &rf);

	return old_cookie;
}

static unsigned long sched_core_clone_cookie(struct task_struct *p)
{
	unsigned long cookie, flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cookie = sched_core_get_cookie(p->core_cookie);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return cookie;
}

void sched_core_fork(struct task_struct *p)
{
	RB_CLEAR_NODE(&p->core_node);
	p->core_cookie = sched_core_clone_cookie(current);
}

void sched_core_free(struct task_struct *p)
{
	sched_core_put_cookie(p->core_cookie);
}

static void __sched_core_set(struct task_struct *p, unsigned long cookie)
{
	cookie = sched_core_get_cookie(cookie);
	cookie = sched_core_update_cookie(p, cookie);
	sched_core_put_cookie(cookie);
}

/* Called from prctl interface: PR_SCHED_CORE */
int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
			 unsigned long uaddr)
{
	unsigned long cookie = 0, id = 0;
	struct task_struct *task, *p;
	struct pid *grp;
	int err = 0;

	if (!static_branch_likely(&sched_smt_present))
		return -ENODEV;

	BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD != PIDTYPE_PID);
	BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD_GROUP != PIDTYPE_TGID);
	BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_PROCESS_GROUP != PIDTYPE_PGID);

	if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 ||
	    (cmd != PR_SCHED_CORE_GET && uaddr))
		return -EINVAL;

	rcu_read_lock();
	if (pid == 0) {
		task = current;
	} else {
		task = find_task_by_vpid(pid);
		if (!task) {
			rcu_read_unlock();
			return -ESRCH;
		}
	}
	get_task_struct(task);
	rcu_read_unlock();

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		err = -EPERM;
		goto out;
	}

	switch (cmd) {
	case PR_SCHED_CORE_GET:
		if (type != PIDTYPE_PID || uaddr & 7) {
			err = -EINVAL;
			goto out;
		}
		cookie = sched_core_clone_cookie(task);
		if (cookie) {
			/* XXX improve ? */
			ptr_to_hashval((void *)cookie, &id);
		}
		err = put_user(id, (u64 __user *)uaddr);
		goto out;

	case PR_SCHED_CORE_CREATE:
		cookie = sched_core_alloc_cookie();
		if (!cookie) {
			err = -ENOMEM;
			goto out;
		}
		break;

	case PR_SCHED_CORE_SHARE_TO:
		cookie = sched_core_clone_cookie(current);
		break;

	case PR_SCHED_CORE_SHARE_FROM:
		if (type != PIDTYPE_PID) {
			err = -EINVAL;
			goto out;
		}
		cookie = sched_core_clone_cookie(task);
		__sched_core_set(current, cookie);
		goto out;

	default:
		err = -EINVAL;
		goto out;
	}

	if (type == PIDTYPE_PID) {
		__sched_core_set(task, cookie);
		goto out;
	}

	read_lock(&tasklist_lock);
	grp = task_pid_type(task, type);

	do_each_pid_thread(grp, type, p) {
		if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) {
			err = -EPERM;
			goto out_tasklist;
		}
	} while_each_pid_thread(grp, type, p);

	do_each_pid_thread(grp, type, p) {
		__sched_core_set(p, cookie);
	} while_each_pid_thread(grp, type, p);

out_tasklist:
	read_unlock(&tasklist_lock);

out:
	sched_core_put_cookie(cookie);
	put_task_struct(task);
	return err;
}

#ifdef CONFIG_SCHEDSTATS

/* REQUIRES: rq->core's clock recently updated. */
void __sched_core_account_forceidle(struct rq *rq)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
	u64 delta, now = rq_clock(rq->core);
	struct rq *rq_i;
	struct task_struct *p;
	int i;

	lockdep_assert_rq_held(rq);

	WARN_ON_ONCE(!rq->core->core_forceidle_count);

	if (rq->core->core_forceidle_start == 0)
		return;

	delta = now - rq->core->core_forceidle_start;
	if (unlikely((s64)delta <= 0))
		return;

	rq->core->core_forceidle_start = now;

	if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) {
		/* can't be forced idle without a running task */
	} else if (rq->core->core_forceidle_count > 1 ||
		   rq->core->core_forceidle_occupation > 1) {
		/*
		 * For larger SMT configurations, we need to scale the charged
		 * forced idle amount since there can be more than one forced
		 * idle sibling and more than one running cookied task.
		 */
		delta *= rq->core->core_forceidle_count;
		delta = div_u64(delta, rq->core->core_forceidle_occupation);
	}

	for_each_cpu(i, smt_mask) {
		rq_i = cpu_rq(i);
		p = rq_i->core_pick ?: rq_i->curr;

		if (p == rq_i->idle)
			continue;

		/*
		 * Note: this will account forceidle to the current CPU, even
		 * if it comes from our SMT sibling.
		 */
		__account_forceidle_time(p, delta);
	}
}

void __sched_core_tick(struct rq *rq)
{
	if (!rq->core->core_forceidle_count)
		return;

	if (rq != rq->core)
		update_rq_clock(rq->core);

	__sched_core_account_forceidle(rq);
}

#endif /* CONFIG_SCHEDSTATS */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * LM4857 AMP driver
 *
 * Copyright 2007 Wolfson Microelectronics PLC.
 * Author: Graeme Gregory
 *         [email protected]
 *
 * Copyright 2011 Lars-Peter Clausen <[email protected]>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/soc.h>
#include <sound/tlv.h>

static const struct reg_default lm4857_default_regs[] = {
	{ 0x0, 0x00 },
	{ 0x1, 0x00 },
	{ 0x2, 0x00 },
	{ 0x3, 0x00 },
};

/* The register offsets in the cache array */
#define LM4857_MVOL 0
#define LM4857_LVOL 1
#define LM4857_RVOL 2
#define LM4857_CTRL 3

/* the shifts required to set these bits */
#define LM4857_3D 5
#define LM4857_WAKEUP 5
#define LM4857_EPGAIN 4

static const unsigned int lm4857_mode_values[] = {
	0, 6, 7, 8, 9,
};

static const char * const lm4857_mode_texts[] = {
	"Off",
	"Earpiece",
	"Loudspeaker",
	"Loudspeaker + Headphone",
	"Headphone",
};

static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(lm4857_mode_enum,
	LM4857_CTRL, 0, 0xf, lm4857_mode_texts, lm4857_mode_values);

static const struct snd_kcontrol_new lm4857_mode_ctrl =
	SOC_DAPM_ENUM("Mode", lm4857_mode_enum);

static const struct snd_soc_dapm_widget lm4857_dapm_widgets[] = {
	SND_SOC_DAPM_INPUT("IN"),

	SND_SOC_DAPM_DEMUX("Mode", SND_SOC_NOPM, 0, 0, &lm4857_mode_ctrl),

	SND_SOC_DAPM_OUTPUT("LS"),
	SND_SOC_DAPM_OUTPUT("HP"),
	SND_SOC_DAPM_OUTPUT("EP"),
};

static const DECLARE_TLV_DB_SCALE(stereo_tlv, -4050, 150, 0);
static const DECLARE_TLV_DB_SCALE(mono_tlv, -3450, 150, 0);

static const struct snd_kcontrol_new lm4857_controls[] = {
	SOC_SINGLE_TLV("Left Playback Volume", LM4857_LVOL, 0, 31, 0,
		stereo_tlv),
	SOC_SINGLE_TLV("Right Playback Volume", LM4857_RVOL, 0, 31, 0,
		stereo_tlv),
	SOC_SINGLE_TLV("Mono Playback Volume", LM4857_MVOL, 0, 31, 0,
		mono_tlv),
	SOC_SINGLE("Spk 3D Playback Switch", LM4857_LVOL, LM4857_3D, 1, 0),
	SOC_SINGLE("HP 3D Playback Switch", LM4857_RVOL, LM4857_3D, 1, 0),
	SOC_SINGLE("Fast Wakeup Playback Switch", LM4857_CTRL,
		LM4857_WAKEUP, 1, 0),
	SOC_SINGLE("Earpiece 6dB Playback Switch", LM4857_CTRL,
		LM4857_EPGAIN, 1, 0),
};

static const struct snd_soc_dapm_route lm4857_routes[] = {
	{ "Mode", NULL, "IN" },
	{ "LS", "Loudspeaker", "Mode" },
	{ "LS", "Loudspeaker + Headphone", "Mode" },
	{ "HP", "Headphone", "Mode" },
	{ "HP", "Loudspeaker + Headphone", "Mode" },
	{ "EP", "Earpiece", "Mode" },
};

static const struct snd_soc_component_driver lm4857_component_driver = {
	.controls = lm4857_controls,
	.num_controls = ARRAY_SIZE(lm4857_controls),
	.dapm_widgets = lm4857_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(lm4857_dapm_widgets),
	.dapm_routes = lm4857_routes,
	.num_dapm_routes = ARRAY_SIZE(lm4857_routes),
};

static const struct regmap_config lm4857_regmap_config = {
	.val_bits = 6,
	.reg_bits = 2,

	.max_register = LM4857_CTRL,

	.cache_type = REGCACHE_FLAT,
	.reg_defaults = lm4857_default_regs,
	.num_reg_defaults = ARRAY_SIZE(lm4857_default_regs),
};

static int lm4857_i2c_probe(struct i2c_client *i2c)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_i2c(i2c, &lm4857_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return devm_snd_soc_register_component(&i2c->dev,
		&lm4857_component_driver, NULL, 0);
}

static const struct i2c_device_id lm4857_i2c_id[] = {
	{ "lm4857" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, lm4857_i2c_id);

static struct i2c_driver lm4857_i2c_driver = {
	.driver = {
		.name = "lm4857",
	},
	.probe = lm4857_i2c_probe,
	.id_table = lm4857_i2c_id,
};
module_i2c_driver(lm4857_i2c_driver);

MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("LM4857 amplifier driver");
MODULE_LICENSE("GPL");